asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge branch 'for-linus/i2c-33' of git://git.fluff.org/bjdooks/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 18 Jan 2012 21:46:13 +0000 (13:46 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 18 Jan 2012 21:46:13 +0000 (13:46 -0800)
* 'for-linus/i2c-33' of git://git.fluff.org/bjdooks/linux:
  i2c-eg20t: Change-company-name-OKI-SEMICONDUCTOR to LAPIS Semiconductor
  i2c-eg20t: Support new device LAPIS Semiconductor ML7831 IOH
  i2c-eg20t: modified the setting of transfer rate.
  i2c-eg20t: use i2c_add_numbered_adapter to get a fixed bus number
  i2c: OMAP: Add DT support for i2c controller
  I2C: OMAP: NACK without STP
  I2C: OMAP: correct SYSC register offset for OMAP4

404 files changed:
Documentation/DocBook/media/dvb/dvbproperty.xml
Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
Documentation/DocBook/media/v4l/vidioc-g-input.xml
Documentation/DocBook/media/v4l/vidioc-g-output.xml
Documentation/devices.txt
Documentation/devicetree/bindings/dma/atmel-dma.txt [new file with mode: 0644]
Documentation/dmaengine.txt
Documentation/feature-removal-schedule.txt
Documentation/ioctl/ioctl-number.txt
Documentation/scsi/ChangeLog.megaraid_sas
Documentation/scsi/LICENSE.qla4xxx
Documentation/video4linux/v4l2-controls.txt
MAINTAINERS
arch/arm/include/asm/kprobes.h
arch/arm/include/asm/ptrace.h
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/unified.h
arch/arm/kernel/entry-common.S
arch/arm/kernel/ptrace.c
arch/arm/mach-ep93xx/include/mach/dma.h
arch/arm/mach-exynos/headsmp.S
arch/arm/mach-exynos/mach-origen.c
arch/arm/mach-exynos/platsmp.c
arch/arm/mach-highbank/highbank.c
arch/arm/mach-imx/src.c
arch/arm/mach-msm/headsmp.S
arch/arm/mach-msm/vreg.c
arch/arm/mach-picoxcell/time.c
arch/arm/mach-realview/platsmp.c
arch/arm/mach-s3c64xx/include/mach/crag6410.h
arch/arm/mach-s3c64xx/mach-crag6410.c
arch/arm/mach-s3c64xx/pm.c
arch/arm/mach-shmobile/setup-sh7372.c
arch/arm/mach-ux500/headsmp.S
arch/arm/mach-vexpress/platsmp.c
arch/arm/plat-mxc/include/mach/mx3fb.h
arch/arm/plat-nomadik/include/plat/ste_dma40.h
arch/arm/plat-samsung/dma-ops.c
arch/arm/plat-samsung/include/plat/dma-ops.h
arch/arm/plat-samsung/include/plat/dma.h
arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
arch/arm/plat-versatile/headsmp.S
arch/ia64/include/asm/ptrace.h
arch/ia64/kernel/ptrace.c
arch/microblaze/include/asm/ptrace.h
arch/microblaze/kernel/ptrace.c
arch/microblaze/kernel/setup.c
arch/mips/include/asm/ptrace.h
arch/mips/kernel/ptrace.c
arch/powerpc/include/asm/ptrace.h
arch/powerpc/kernel/ptrace.c
arch/s390/include/asm/ptrace.h
arch/s390/kernel/ptrace.c
arch/sh/include/asm/ptrace_32.h
arch/sh/include/asm/ptrace_64.h
arch/sh/kernel/ptrace_32.c
arch/sh/kernel/ptrace_64.c
arch/sparc/include/asm/ptrace.h
arch/sparc/kernel/ptrace_64.c
arch/um/kernel/ptrace.c
arch/x86/ia32/ia32entry.S
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/ptrace.c
arch/x86/kernel/vm86_32.c
arch/x86/um/shared/sysdep/ptrace.h
arch/xtensa/kernel/ptrace.c
block/cfq-iosched.c
drivers/ata/ata_piix.c
drivers/ata/libata-core.c
drivers/ata/libata-transport.c
drivers/ata/pata_bf54x.c
drivers/ata/sata_fsl.c
drivers/bcma/bcma_private.h
drivers/bcma/host_pci.c
drivers/bcma/main.c
drivers/block/Kconfig
drivers/block/Makefile
drivers/block/nvme.c [new file with mode: 0644]
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/amba-pl08x.c
drivers/dma/at_hdmac.c
drivers/dma/at_hdmac_regs.h
drivers/dma/coh901318.c
drivers/dma/coh901318_lli.c
drivers/dma/coh901318_lli.h
drivers/dma/dmaengine.c
drivers/dma/dw_dmac.c
drivers/dma/dw_dmac_regs.h
drivers/dma/ep93xx_dma.c
drivers/dma/fsldma.c
drivers/dma/imx-dma.c
drivers/dma/imx-sdma.c
drivers/dma/intel_mid_dma.c
drivers/dma/intel_mid_dma_regs.h
drivers/dma/iop-adma.c
drivers/dma/ipu/ipu_idmac.c
drivers/dma/mpc512x_dma.c
drivers/dma/mxs-dma.c
drivers/dma/pch_dma.c
drivers/dma/pl330.c
drivers/dma/shdma.c
drivers/dma/sirf-dma.c [new file with mode: 0644]
drivers/dma/ste_dma40.c
drivers/dma/ste_dma40_ll.h
drivers/dma/timb_dma.c
drivers/dma/txx9dmac.c
drivers/media/common/tuners/tuner-xc2028.c
drivers/media/common/tuners/xc4000.c
drivers/media/dvb/dvb-core/dvb_frontend.c
drivers/media/dvb/dvb-usb/anysee.c
drivers/media/dvb/dvb-usb/dib0700.h
drivers/media/dvb/dvb-usb/dib0700_core.c
drivers/media/dvb/dvb-usb/dib0700_devices.c
drivers/media/dvb/frontends/cxd2820r_core.c
drivers/media/dvb/frontends/ds3000.c
drivers/media/dvb/frontends/mb86a20s.c
drivers/media/dvb/frontends/tda18271c2dd.c
drivers/media/video/as3645a.c
drivers/media/video/cx18/cx18-fileops.c
drivers/media/video/cx231xx/cx231xx-cards.c
drivers/media/video/cx23885/cx23885-cards.c
drivers/media/video/cx23885/cx23885-dvb.c
drivers/media/video/cx23885/cx23885-video.c
drivers/media/video/cx88/cx88-cards.c
drivers/media/video/ivtv/ivtv-driver.c
drivers/media/video/ivtv/ivtv-driver.h
drivers/media/video/ivtv/ivtv-fileops.c
drivers/media/video/ivtv/ivtv-ioctl.c
drivers/media/video/ivtv/ivtv-irq.c
drivers/media/video/ivtv/ivtv-streams.c
drivers/media/video/ivtv/ivtv-yuv.c
drivers/media/video/mx3_camera.c
drivers/media/video/omap/omap_vout.c
drivers/media/video/pwc/pwc-ctrl.c
drivers/media/video/pwc/pwc-dec1.c
drivers/media/video/pwc/pwc-dec1.h
drivers/media/video/pwc/pwc-dec23.c
drivers/media/video/pwc/pwc-dec23.h
drivers/media/video/pwc/pwc-if.c
drivers/media/video/pwc/pwc-misc.c
drivers/media/video/pwc/pwc-v4l.c
drivers/media/video/pwc/pwc.h
drivers/media/video/s5p-fimc/fimc-capture.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/s5p-fimc/fimc-mdevice.c
drivers/media/video/s5p-g2d/g2d.c
drivers/media/video/s5p-jpeg/jpeg-core.c
drivers/media/video/s5p-mfc/s5p_mfc.c
drivers/media/video/s5p-mfc/s5p_mfc_dec.c
drivers/media/video/saa7164/saa7164-cards.c
drivers/media/video/timblogiw.c
drivers/media/video/tlg2300/pd-main.c
drivers/media/video/v4l2-ctrls.c
drivers/media/video/v4l2-ioctl.c
drivers/media/video/zoran/zoran_driver.c
drivers/misc/carma/carma-fpga-program.c
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/sh_mmcif.c
drivers/mmc/host/tmio_mmc_dma.c
drivers/mtd/nand/gpmi-nand/gpmi-lib.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
drivers/net/ethernet/micrel/ks8842.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlwifi/iwl-scan.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/scsi/Kconfig
drivers/scsi/bfa/bfa_defs_svc.h
drivers/scsi/bfa/bfa_fc.h
drivers/scsi/bfa/bfa_fcpim.c
drivers/scsi/bfa/bfa_fcpim.h
drivers/scsi/bfa/bfa_svc.h
drivers/scsi/bfa/bfad.c
drivers/scsi/bfa/bfad_attr.c
drivers/scsi/bfa/bfad_bsg.c
drivers/scsi/bfa/bfad_drv.h
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bfa/bfad_im.h
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe.h
drivers/scsi/hpsa.c
drivers/scsi/isci/firmware/Makefile [deleted file]
drivers/scsi/isci/firmware/README [deleted file]
drivers/scsi/isci/firmware/create_fw.c [deleted file]
drivers/scsi/isci/firmware/create_fw.h [deleted file]
drivers/scsi/isci/host.c
drivers/scsi/isci/host.h
drivers/scsi/isci/init.c
drivers/scsi/isci/isci.h
drivers/scsi/isci/phy.c
drivers/scsi/isci/port.c
drivers/scsi/isci/port.h
drivers/scsi/isci/port_config.c
drivers/scsi/isci/probe_roms.c
drivers/scsi/isci/probe_roms.h
drivers/scsi/isci/remote_device.c
drivers/scsi/isci/task.c
drivers/scsi/isci/task.h
drivers/scsi/libfc/fc_disc.c
drivers/scsi/libfc/fc_elsct.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/megaraid.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fp.c
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_nx.c
drivers/scsi/qla4xxx/ql4_nx.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/sg.c
drivers/scsi/sym53c8xx_2/sym_glue.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-ep93xx.c
drivers/spi/spi-pl022.c
drivers/spi/spi-topcliff-pch.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/pch_uart.c
drivers/tty/serial/sh-sci.c
drivers/usb/host/ehci-xilinx-of.c
drivers/usb/musb/ux500_dma.c
drivers/usb/renesas_usbhs/fifo.c
drivers/vhost/net.c
drivers/video/mx3fb.c
drivers/xen/biomerge.c
drivers/xen/xen-balloon.c
firmware/Makefile
firmware/isci/isci_firmware.bin.ihex [deleted file]
fs/btrfs/Kconfig
fs/btrfs/Makefile
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/btrfs_inode.h
fs/btrfs/check-integrity.c [new file with mode: 0644]
fs/btrfs/check-integrity.h [new file with mode: 0644]
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/export.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ioctl.h
fs/btrfs/locking.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/ulist.c [new file with mode: 0644]
fs/btrfs/ulist.h [new file with mode: 0644]
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/btrfs/xattr.c
fs/namei.c
fs/proc/base.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_attr.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_dfrag.c
fs/xfs/xfs_file.c
fs/xfs/xfs_fs_subr.c
fs/xfs/xfs_iget.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_super.c
fs/xfs/xfs_sync.c
fs/xfs/xfs_trace.h
fs/xfs/xfs_vnodeops.c
include/linux/amba/pl08x.h
include/linux/audit.h
include/linux/bcma/bcma.h
include/linux/digsig.h
include/linux/dmaengine.h
include/linux/dw_dmac.h
include/linux/key.h
include/linux/kref.h
include/linux/miscdevice.h
include/linux/mtd/gpmi-nand.h [new file with mode: 0644]
include/linux/netfilter/nf_conntrack_common.h
include/linux/netfilter/xt_CT.h
include/linux/nvme.h [new file with mode: 0644]
include/linux/ptrace.h
include/linux/sh_dma.h
include/linux/sirfsoc_dma.h [new file with mode: 0644]
include/linux/tty_driver.h
include/media/tuner.h
include/net/flow.h
include/scsi/libfc.h
include/trace/events/btrfs.h
init/Kconfig
kernel/audit.c
kernel/audit.h
kernel/auditfilter.c
kernel/auditsc.c
kernel/capability.c
kernel/exit.c
kernel/fork.c
kernel/seccomp.c
lib/Kconfig
lib/Makefile
mm/memcontrol.c
net/bridge/br_fdb.c
net/caif/caif_dev.c
net/caif/caif_usb.c
net/core/dev.c
net/core/net-sysfs.c
net/core/secure_seq.c
net/ipv4/inetpeer.c
net/ipv4/ipconfig.c
net/ipv4/ping.c
net/ipv4/udp_diag.c
net/ipv6/datagram.c
net/ipv6/proc.c
net/ipv6/route.c
net/mac80211/cfg.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/tx.c
net/mac80211/wpa.c
net/mac80211/wpa.h
net/netfilter/ipset/ip_set_core.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/xt_CT.c
net/netfilter/xt_hashlimit.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/flow.c
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport.c
security/integrity/Kconfig
security/integrity/Makefile
security/integrity/ima/ima_audit.c
security/integrity/integrity.h
security/keys/encrypted-keys/encrypted.c
security/keys/encrypted-keys/masterkey_trusted.c
security/keys/gc.c
security/keys/keyring.c
security/keys/trusted.c
security/lsm_audit.c
security/tomoyo/util.c
sound/atmel/abdac.c
sound/atmel/ac97c.c
sound/core/Kconfig
sound/pci/au88x0/au88x0.c
sound/pci/au88x0/au88x0.h
sound/pci/au88x0/au88x0_pcm.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_sigmatel.c
sound/pci/oxygen/xonar_wm87x6.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/wm8993.c
sound/soc/ep93xx/ep93xx-pcm.c
sound/soc/imx/imx-pcm-dma-mx2.c
sound/soc/mxs/mxs-pcm.c
sound/soc/samsung/dma.c
sound/soc/sh/siu_pcm.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/txx9/txx9aclc.c

index ffee1fbbc001ae316f40be9def3b2456e20c9118..c7a4ca51785980264b37b19b3b63987a3871fe33 100644 (file)
@@ -163,14 +163,16 @@ get/set up to 64 properties. The actual meaning of each property is described on
        <section id="DTV-FREQUENCY">
                <title><constant>DTV_FREQUENCY</constant></title>
 
-               <para>Central frequency of the channel, in HZ.</para>
+               <para>Central frequency of the channel.</para>
 
                <para>Notes:</para>
-               <para>1)For ISDB-T, the channels are usually transmitted with an offset of 143kHz.
+               <para>1)For satellital delivery systems, it is measured in kHz.
+                       For the other ones, it is measured in Hz.</para>
+               <para>2)For ISDB-T, the channels are usually transmitted with an offset of 143kHz.
                        E.g. a valid frequncy could be 474143 kHz. The stepping is bound to the bandwidth of
                        the channel which is 6MHz.</para>
 
-               <para>2)As in ISDB-Tsb the channel consists of only one or three segments the
+               <para>3)As in ISDB-Tsb the channel consists of only one or three segments the
                        frequency step is 429kHz, 3*429 respectively. As for ISDB-T the
                        central frequency of the channel is expected.</para>
        </section>
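To illustrate the unit convention documented above, here is a minimal sketch of tuning through the DVBv5 property API (FE_SET_PROPERTY, DTV_FREQUENCY and DTV_TUNE from linux/dvb/frontend.h); the file descriptor and frequency value are hypothetical, and per the note above the value is in kHz for satellite delivery systems and in Hz for the others:

    #include <sys/ioctl.h>
    #include <linux/dvb/frontend.h>

    /* Tune to 'freq': Hz for terrestrial/cable, kHz for satellite. */
    static int tune(int fe_fd, __u32 freq)
    {
            struct dtv_property props[] = {
                    { .cmd = DTV_FREQUENCY, .u.data = freq },
                    { .cmd = DTV_TUNE },
            };
            struct dtv_properties cmd = {
                    .num   = 2,
                    .props = props,
            };

            /* e.g. 474143000 Hz for the ISDB-T example above */
            return ioctl(fe_fd, FE_SET_PROPERTY, &cmd);
    }
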
@@ -735,14 +737,10 @@ typedef enum fe_hierarchy {
                        <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
-                       <listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
-                       <listitem><para><link linkend="DTV-CODE-RATE-HP"><constant>DTV_CODE_RATE_HP</constant></link></para></listitem>
-                       <listitem><para><link linkend="DTV-CODE-RATE-LP"><constant>DTV_CODE_RATE_LP</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-GUARD-INTERVAL"><constant>DTV_GUARD_INTERVAL</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-TRANSMISSION-MODE"><constant>DTV_TRANSMISSION_MODE</constant></link></para></listitem>
-                       <listitem><para><link linkend="DTV-HIERARCHY"><constant>DTV_HIERARCHY</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-ISDBT-LAYER-ENABLED"><constant>DTV_ISDBT_LAYER_ENABLED</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-ISDBT-PARTIAL-RECEPTION"><constant>DTV_ISDBT_PARTIAL_RECEPTION</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-ISDBT-SOUND-BROADCASTING"><constant>DTV_ISDBT_SOUND_BROADCASTING</constant></link></para></listitem>
index 6f1f9a629dc34d1508bd550342d810f05f2b5028..b17a7aac6997482379e1b43c8b0d7e5cf914e1f0 100644 (file)
@@ -183,7 +183,12 @@ applications must set the array to zero.</entry>
            <entry>__u32</entry>
            <entry><structfield>ctrl_class</structfield></entry>
            <entry>The control class to which all controls belong, see
-<xref linkend="ctrl-class" />.</entry>
+<xref linkend="ctrl-class" />. Drivers that use a kernel framework for handling
+controls will also accept a value of 0 here, meaning that the controls can
+belong to any control class. Whether drivers support this can be tested by setting
+<structfield>ctrl_class</structfield> to 0 and calling <constant>VIDIOC_TRY_EXT_CTRLS</constant>
+with a <structfield>count</structfield> of 0. If that succeeds, then the driver
+supports this feature.</entry>
          </row>
          <row>
            <entry>__u32</entry>
@@ -194,10 +199,13 @@ also be zero.</entry>
          <row>
            <entry>__u32</entry>
            <entry><structfield>error_idx</structfield></entry>
-           <entry>Set by the driver in case of an error. It is the
-index of the control causing the error or equal to 'count' when the
-error is not associated with a particular control. Undefined when the
-ioctl returns 0 (success).</entry>
+           <entry>Set by the driver in case of an error. If it is equal
+to <structfield>count</structfield>, then no actual changes were made to
+controls. In other words, the error was not associated with setting a particular
+control. If it is another value, then only the controls up to <structfield>error_idx-1</structfield>
+were modified and control <structfield>error_idx</structfield> is the one that
+caused the error. The <structfield>error_idx</structfield> value is undefined
+if the ioctl returned 0 (success).</entry>
          </row>
          <row>
            <entry>__u32</entry>
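A minimal sketch of both behaviours documented above, assuming an already-open V4L2 device node fd; the control IDs and values are placeholders:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static void set_two_controls(int fd)
    {
            struct v4l2_ext_control ctrl[2];
            struct v4l2_ext_controls ctrls;

            /* Probe for ctrl_class == 0 support (any control class). */
            memset(&ctrls, 0, sizeof(ctrls));
            ctrls.ctrl_class = 0;
            ctrls.count = 0;
            if (ioctl(fd, VIDIOC_TRY_EXT_CTRLS, &ctrls) < 0)
                    ctrls.ctrl_class = V4L2_CTRL_CLASS_USER; /* fall back */

            memset(ctrl, 0, sizeof(ctrl));
            ctrl[0].id = V4L2_CID_BRIGHTNESS;
            ctrl[0].value = 128;
            ctrl[1].id = V4L2_CID_CONTRAST;
            ctrl[1].value = 64;
            ctrls.count = 2;
            ctrls.controls = ctrl;

            if (ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0) {
                    if (ctrls.error_idx == ctrls.count)
                            ;  /* validation failed, no control was changed */
                    else
                            ;  /* controls [0, error_idx) set; error_idx failed */
            }
    }
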
index 93817f33703305beae49acafa52b1bceb0613dd2..7c63815e7afd0eb964a1848513dea5be14eb677e 100644 (file)
@@ -364,15 +364,20 @@ capability and it is cleared otherwise.</entry>
          <row>
            <entry><constant>V4L2_FBUF_FLAG_OVERLAY</constant></entry>
            <entry>0x0002</entry>
-           <entry>The frame buffer is an overlay surface the same
-size as the capture. [?]</entry>
-         </row>
-         <row>
-           <entry spanname="hspan">The purpose of
-<constant>V4L2_FBUF_FLAG_OVERLAY</constant> was never quite clear.
-Most drivers seem to ignore this flag. For compatibility with the
-<wordasword>bttv</wordasword> driver applications should set the
-<constant>V4L2_FBUF_FLAG_OVERLAY</constant> flag.</entry>
+           <entry>If this flag is set for a video capture device, then the
+driver will set the initial overlay size to cover the full framebuffer size,
+otherwise the existing overlay size (as set by &VIDIOC-S-FMT;) will be used.
+
+Only one video capture driver (bttv) supports this flag. The use of this flag
+for capture devices is deprecated. There is no way to detect which drivers
+support this flag, so the only reliable method of setting the overlay size is
+through &VIDIOC-S-FMT;.
+
+If this flag is set for a video output device, then the video output overlay
+window is relative to the top-left corner of the framebuffer and restricted
+to the size of the framebuffer. If it is cleared, then the video output
+overlay window is relative to the video output display.
+            </entry>
          </row>
          <row>
            <entry><constant>V4L2_FBUF_FLAG_CHROMAKEY</constant></entry>
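For the video output case described above, a read-modify-write sketch of toggling the flag (VIDIOC_S_FBUF normally requires privileges; error handling is minimal):

    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int overlay_relative_to_fb(int fd, int enable)
    {
            struct v4l2_framebuffer fb;

            if (ioctl(fd, VIDIOC_G_FBUF, &fb) < 0)
                    return -1;
            if (enable)     /* window relative to framebuffer top-left */
                    fb.flags |= V4L2_FBUF_FLAG_OVERLAY;
            else            /* window relative to the video output display */
                    fb.flags &= ~V4L2_FBUF_FLAG_OVERLAY;
            return ioctl(fd, VIDIOC_S_FBUF, &fb);
    }
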
index 16431813bebd253f87965029088ee733328c36e5..66e9a5257861ab1c5f65b197e3bc7e52304659e6 100644 (file)
@@ -98,8 +98,11 @@ the &v4l2-output; <structfield>modulator</structfield> field and the
            <entry>&v4l2-tuner-type;</entry>
            <entry><structfield>type</structfield></entry>
            <entry>The tuner type. This is the same value as in the
-&v4l2-tuner; <structfield>type</structfield> field. The field is not
-applicable to modulators, &ie; ignored by drivers.</entry>
+&v4l2-tuner; <structfield>type</structfield> field. The type must be set
+to <constant>V4L2_TUNER_RADIO</constant> for <filename>/dev/radioX</filename>
+device nodes, and to <constant>V4L2_TUNER_ANALOG_TV</constant>
+for all others. The field is not applicable to modulators, &ie; ignored
+by drivers.</entry>
          </row>
          <row>
            <entry>__u32</entry>
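A sketch of the rule being added: on a /dev/radioX node the type field must be V4L2_TUNER_RADIO (the tuner index and frequency value are hypothetical; for radio tuners the unit is typically 62.5 Hz):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_radio_frequency(int radio_fd, __u32 freq)
    {
            struct v4l2_frequency f;

            memset(&f, 0, sizeof(f));
            f.tuner = 0;
            f.type = V4L2_TUNER_RADIO;      /* mandatory on /dev/radioX */
            f.frequency = freq;
            return ioctl(radio_fd, VIDIOC_S_FREQUENCY, &f);
    }
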
index 08ae82f131f2b952df2a0f5488b49d0747776153..1d43065090dd1087be1c26a8f54804ac8a4ee2c2 100644 (file)
@@ -61,8 +61,8 @@ desired input in an integer and call the
 <constant>VIDIOC_S_INPUT</constant> ioctl with a pointer to this
 integer. Side effects are possible. For example inputs may support
 different video standards, so the driver may implicitly switch the
-current standard. It is good practice to select an input before
-querying or negotiating any other parameters.</para>
+current standard. Because of these possible side effects applications
+must select an input before querying or negotiating any other parameters.</para>
 
     <para>Information about video inputs is available using the
 &VIDIOC-ENUMINPUT; ioctl.</para>
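A sketch of the ordering this now mandates (the input index and format negotiation are placeholders):

    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int select_input_then_negotiate(int fd, struct v4l2_format *fmt)
    {
            int input = 0;  /* desired input index */

            /* May implicitly switch the video standard, so do this first. */
            if (ioctl(fd, VIDIOC_S_INPUT, &input) < 0)
                    return -1;
            return ioctl(fd, VIDIOC_S_FMT, fmt);
    }

The same ordering applies to outputs with VIDIOC_S_OUTPUT, as the next hunk documents.
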
index fd45f1c13ccf445b89e582036b40c3a7a94ab86c..4533068ecb8ad5dd221512d5f6dc8a7266b66ecb 100644 (file)
@@ -61,8 +61,9 @@ desired output in an integer and call the
 <constant>VIDIOC_S_OUTPUT</constant> ioctl with a pointer to this integer.
 Side effects are possible. For example outputs may support different
 video standards, so the driver may implicitly switch the current
-standard. It is good practice to select an output before querying or
-negotiating any other parameters.</para>
+standard.
+standard. Because of these possible side effects applications
+must select an output before querying or negotiating any other parameters.</para>
 
     <para>Information about video outputs is available using the
 &VIDIOC-ENUMOUTPUT; ioctl.</para>
index cec8864ce4e8cbb4089b68279255c51645938554..00383186d8fb3e2c0fc4d5a12852844cb5b2721f 100644 (file)
@@ -447,6 +447,9 @@ Your cooperation is appreciated.
                234 = /dev/btrfs-control        Btrfs control device
                235 = /dev/autofs       Autofs control device
                236 = /dev/mapper/control       Device-Mapper control device
+               237 = /dev/loop-control Loopback control device
+               238 = /dev/vhost-net    Host kernel accelerator for virtio net
+
                240-254                 Reserved for local use
                255                     Reserved for MISC_DYNAMIC_MINOR
 
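The new /dev/loop-control node is driven by the loop-control ioctls in linux/loop.h; a minimal userspace sketch of asking for a free loop device (error handling elided):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/loop.h>

    int main(void)
    {
            int ctl = open("/dev/loop-control", O_RDWR);
            int nr = ioctl(ctl, LOOP_CTL_GET_FREE); /* index of a free loop device */

            if (nr >= 0)
                    printf("/dev/loop%d is free\n", nr);
            return 0;
    }
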
diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt
new file mode 100644 (file)
index 0000000..3c046ee
--- /dev/null
@@ -0,0 +1,14 @@
+* Atmel Direct Memory Access Controller (DMA)
+
+Required properties:
+- compatible: Should be "atmel,<chip>-dma"
+- reg: Should contain DMA registers location and length
+- interrupts: Should contain DMA interrupt
+
+Examples:
+
+dma@ffffec00 {
+       compatible = "atmel,at91sam9g45-dma";
+       reg = <0xffffec00 0x200>;
+       interrupts = <21>;
+};
index 94b7e0f96b38fa8086ea14f7cd88718b45e84d70..bbe6cb3d1856b8943dd04d3449af9d39d5c1e7dc 100644 (file)
@@ -75,6 +75,10 @@ The slave DMA usage consists of following steps:
    slave_sg    - DMA a list of scatter gather buffers from/to a peripheral
    dma_cyclic  - Perform a cyclic DMA operation from/to a peripheral till the
                  operation is explicitly stopped.
+   interleaved_dma - This is common to Slave as well as M2M clients. For slave
+                address of devices' fifo could be already known to the driver.
+                Various types of operations could be expressed by setting
+                appropriate values to the 'dma_interleaved_template' members.
 
    A non-NULL return of this transfer API represents a "descriptor" for
    the given transaction.
@@ -89,6 +93,10 @@ The slave DMA usage consists of following steps:
                struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
                size_t period_len, enum dma_data_direction direction);
 
+       struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+               struct dma_chan *chan, struct dma_interleaved_template *xt,
+               unsigned long flags);
+
    The peripheral driver is expected to have mapped the scatterlist for
    the DMA operation prior to calling device_prep_slave_sg, and must
    keep the scatterlist mapped until the DMA operation has completed.
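A condensed sketch of the slave flow these steps describe, for a memory-to-device transfer; the filter function, its parameter and the device FIFO address are hypothetical, and the inline dmaengine_* wrappers are recent additions (older clients call the chan->device->device_* hooks directly):

    #include <linux/errno.h>
    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>

    static int start_mem_to_dev(struct scatterlist *sgl, unsigned int sg_len,
                                dma_addr_t fifo_phys, dma_filter_fn filter_fn,
                                void *filter_param, dma_async_tx_callback done)
    {
            dma_cap_mask_t mask;
            struct dma_chan *chan;
            struct dma_slave_config cfg = {
                    .direction      = DMA_MEM_TO_DEV,
                    .dst_addr       = fifo_phys,
                    .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .dst_maxburst   = 1,
            };
            struct dma_async_tx_descriptor *desc;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);
            chan = dma_request_channel(mask, filter_fn, filter_param); /* step 1 */
            if (!chan)
                    return -ENODEV;

            dmaengine_slave_config(chan, &cfg);                        /* step 2 */
            desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,          /* step 3 */
                                           DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
            if (!desc)
                    return -ENOMEM;
            desc->callback = done;
            dmaengine_submit(desc);                                    /* step 4 */
            dma_async_issue_pending(chan);                             /* step 5 */
            return 0;
    }
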
index d725c0dfe032f0692ab9de9a4358dfdcdd711f39..1bea46a54b1ca252b5390c422f291dafe41a7c32 100644 (file)
@@ -439,17 +439,6 @@ Who:       Jean Delvare <khali@linux-fr.org>
 
 ----------------------------
 
-What:  For VIDIOC_S_FREQUENCY the type field must match the device node's type.
-       If not, return -EINVAL.
-When:  3.2
-Why:   It makes no sense to switch the tuner to radio mode by calling
-       VIDIOC_S_FREQUENCY on a video node, or to switch the tuner to tv mode by
-       calling VIDIOC_S_FREQUENCY on a radio node. This is the first step of a
-       move to more consistent handling of tv and radio tuners.
-Who:   Hans Verkuil <hans.verkuil@cisco.com>
-
-----------------------------
-
 What:  Opening a radio device node will no longer automatically switch the
        tuner mode from tv to radio.
 When:  3.3
index 54078ed96b3751724acaf0bd3fdab95df962480f..4840334ea97b30705df13a2c642e976a83621800 100644 (file)
@@ -149,6 +149,7 @@ Code  Seq#(hex)     Include File            Comments
 'M'    01-03   drivers/scsi/megaraid/megaraid_sas.h
 'M'    00-0F   drivers/video/fsl-diu-fb.h      conflict!
 'N'    00-1F   drivers/usb/scanner.h
+'N'    40-7F   drivers/block/nvme.c
 'O'     00-06   mtd/ubi-user.h         UBI
 'P'    all     linux/soundcard.h       conflict!
 'P'    60-6F   sound/sscape_ioctl.h    conflict!
index 64adb98b181c717f763bff24109bc8bf9aff104a..57566bacb4c56c8182f0e9b16bafe74c9b1233ab 100644 (file)
@@ -1,3 +1,13 @@
+Release Date    : Fri. Jan 6, 2012 17:00:00 PST 2010 -
+                       (emaild-id:megaraidlinux@lsi.com)
+                       Adam Radford
+Current Version : 00.00.06.14-rc1
+Old Version     : 00.00.06.12-rc1
+    1. Fix reglockFlags for degraded raid5/6 for MR 9360/9380.
+    2. Mask off flags in ioctl path to prevent memory scribble with older
+       MegaCLI versions.
+    3. Remove poll_mode_io module paramater, sysfs node, and associated code.
+-------------------------------------------------------------------------------
 Release Date    : Wed. Oct 5, 2011 17:00:00 PST 2010 -
                        (emaild-id:megaraidlinux@lsi.com)
                        Adam Radford
index 494980e404912a861a238423e9189708549eaa66..ab899591ecb7a93dfe6c8b936042c3b8f400bd42 100644 (file)
@@ -1,32 +1,11 @@
 Copyright (c) 2003-2011 QLogic Corporation
-QLogic Linux iSCSI HBA Driver
+QLogic Linux iSCSI Driver
 
 This program includes a device driver for Linux 3.x.
 You may modify and redistribute the device driver code under the
 GNU General Public License (a copy of which is attached hereto as
 Exhibit A) published by the Free Software Foundation (version 2).
 
-REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
-THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
-CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
-OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
-TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
-ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
-COMBINATION WITH THIS PROGRAM.
-
 
 EXHIBIT A
 
index 26aa0573933e8f5f970f7f18b3f53f0afd37eb6d..e2492a9d1027b95be62b9fde675b043643a7f01c 100644 (file)
@@ -666,27 +666,6 @@ a control of this type whenever the first control belonging to a new control
 class is added.
 
 
-Differences from the Spec
-=========================
-
-There are a few places where the framework acts slightly differently from the
-V4L2 Specification. Those differences are described in this section. We will
-have to see whether we need to adjust the spec or not.
-
-1) It is no longer required to have all controls contained in a
-v4l2_ext_control array be from the same control class. The framework will be
-able to handle any type of control in the array. You need to set ctrl_class
-to 0 in order to enable this. If ctrl_class is non-zero, then it will still
-check that all controls belong to that control class.
-
-If you set ctrl_class to 0 and count to 0, then it will only return an error
-if there are no controls at all.
-
-2) Clarified the way error_idx works. For get and set it will be equal to
-count if nothing was done yet. If it is less than count then only the controls
-up to error_idx-1 were successfully applied.
-
-
 Proposals for Extensions
 ========================
 
index 2a90101309d1b396a9f8417a52dd55256b333731..89b70df91f4f66d01e728ef4346f7cf6d598f121 100644 (file)
@@ -745,6 +745,7 @@ M:  Barry Song <baohua.song@csr.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-prima2/
+F:     drivers/dma/sirf-dma*
 
 ARM/EBSA110 MACHINE SUPPORT
 M:     Russell King <linux@arm.linux.org.uk>
@@ -1411,6 +1412,7 @@ F:        net/ax25/
 B43 WIRELESS DRIVER
 M:     Stefano Brivio <stefano.brivio@polimi.it>
 L:     linux-wireless@vger.kernel.org
+L:     b43-dev@lists.infradead.org (moderated for non-subscribers)
 W:     http://linuxwireless.org/en/users/Drivers/b43
 S:     Maintained
 F:     drivers/net/wireless/b43/
@@ -1587,6 +1589,13 @@ L:       linux-scsi@vger.kernel.org
 S:     Supported
 F:     drivers/scsi/bnx2fc/
 
+BROADCOM SPECIFIC AMBA DRIVER (BCMA)
+M:     Rafał Miłecki <zajec5@gmail.com>
+L:     linux-wireless@vger.kernel.org
+S:     Maintained
+F:     drivers/bcma/
+F:     include/linux/bcma/
+
 BROCADE BFA FC SCSI DRIVER
 M:     Jing Huang <huangj@brocade.com>
 L:     linux-scsi@vger.kernel.org
@@ -5846,7 +5855,7 @@ F:        drivers/mmc/host/sdhci-spear.c
 SECURITY SUBSYSTEM
 M:     James Morris <jmorris@namei.org>
 L:     linux-security-module@vger.kernel.org (suggested Cc:)
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
 W:     http://security.wiki.kernel.org/
 S:     Supported
 F:     security/
@@ -6116,13 +6125,6 @@ S:       Maintained
 F:     drivers/ssb/
 F:     include/linux/ssb/
 
-BROADCOM SPECIFIC AMBA DRIVER (BCMA)
-M:     Rafał Miłecki <zajec5@gmail.com>
-L:     linux-wireless@vger.kernel.org
-S:     Maintained
-F:     drivers/bcma/
-F:     include/linux/bcma/
-
 SONY VAIO CONTROL DEVICE DRIVER
 M:     Mattia Dongili <malattia@linux.it>
 L:     platform-driver-x86@vger.kernel.org
@@ -7198,7 +7200,7 @@ S:        Maintained
 F:     drivers/net/vmxnet3/
 
 VMware PVSCSI driver
-M:     Alok Kataria <akataria@vmware.com>
+M:     Arvind Kumar <arvindkumar@vmware.com>
 M:     VMware PV-Drivers <pv-drivers@vmware.com>
 L:     linux-scsi@vger.kernel.org
 S:     Maintained
index feec86768f9c967c946a74ff23ed898d6207543c..f82ec22eeb1174a3ba0b431da119868cd8a1d36d 100644 (file)
@@ -24,7 +24,6 @@
 #define MAX_INSN_SIZE                  2
 #define MAX_STACK_SIZE                 64      /* 32 would probably be OK */
 
-#define regs_return_value(regs)                ((regs)->ARM_r0)
 #define flush_insn_slot(p)             do { } while (0)
 #define kretprobe_blacklist_size       0
 
index 96187ff58c247cf61953ff8d06922412f0acaca2..451808ba1211f2a547af60b35545b220d69886a9 100644 (file)
@@ -189,6 +189,11 @@ static inline int valid_user_regs(struct pt_regs *regs)
        return 0;
 }
 
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->ARM_r0;
+}
+
 #define instruction_pointer(regs)      (regs)->ARM_pc
 
 #ifdef CONFIG_SMP
index 0f30c3a78fc10815668b56c2ae5789ea38b4fdd9..d4c24d412a8ddbdaba9f11a6217cd6a8b592c22c 100644 (file)
@@ -129,6 +129,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
 /*
  * thread information flags:
  *  TIF_SYSCALL_TRACE  - syscall trace active
+ *  TIF_SYSCAL_AUDIT   - syscall auditing active
  *  TIF_SIGPENDING     - signal pending
  *  TIF_NEED_RESCHED   - rescheduling necessary
  *  TIF_NOTIFY_RESUME  - callback before returning to user
@@ -139,6 +140,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_NEED_RESCHED       1
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
 #define TIF_SYSCALL_TRACE      8
+#define TIF_SYSCALL_AUDIT      9
 #define TIF_POLLING_NRFLAG     16
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
@@ -149,11 +151,15 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT      (1 << TIF_USING_IWMMXT)
 #define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
 
+/* Checks for any syscall work in entry-common.S */
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
+
 /*
  * Change these and you break ASM code in entry-common.S
  */
index bc631161e9c6d29eb0f9e73d408da4ae4c13da0e..f5989f46b4d2d450f18b24faa946de750394ec13 100644 (file)
@@ -37,8 +37,8 @@
 #define THUMB(x...)    x
 #ifdef __ASSEMBLY__
 #define W(instr)       instr.w
-#endif
 #define BSYM(sym)      sym + 1
+#endif
 
 #else  /* !CONFIG_THUMB2_KERNEL */
 
@@ -49,8 +49,8 @@
 #define THUMB(x...)
 #ifdef __ASSEMBLY__
 #define W(instr)       instr
-#endif
 #define BSYM(sym)      sym
+#endif
 
 #endif /* CONFIG_THUMB2_KERNEL */
 
index b2a27b6b0046ee6c1e0f0928c3269ab5f6c63cd2..520889cf1b5bfd6d1ff3d4eac603e9ba9de3d7e4 100644 (file)
@@ -87,7 +87,7 @@ ENTRY(ret_from_fork)
        get_thread_info tsk
        ldr     r1, [tsk, #TI_FLAGS]            @ check for syscall tracing
        mov     why, #1
-       tst     r1, #_TIF_SYSCALL_TRACE         @ are we tracing syscalls?
+       tst     r1, #_TIF_SYSCALL_WORK          @ are we tracing syscalls?
        beq     ret_slow_syscall
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
@@ -443,7 +443,7 @@ ENTRY(vector_swi)
 1:
 #endif
 
-       tst     r10, #_TIF_SYSCALL_TRACE                @ are we tracing syscalls?
+       tst     r10, #_TIF_SYSCALL_WORK         @ are we tracing syscalls?
        bne     __sys_trace
 
        cmp     scno, #NR_syscalls              @ check upper syscall limit
index 483727ad68923a3e27af6d693c9132db0148af7f..e1d5e1929fbd6a36e9266a2c4d254cba5060e438 100644 (file)
@@ -906,11 +906,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 {
        unsigned long ip;
 
-       if (!test_thread_flag(TIF_SYSCALL_TRACE))
-               return scno;
-       if (!(current->ptrace & PT_PTRACED))
-               return scno;
-
        /*
         * Save IP.  IP is used to denote syscall entry/exit:
         *  IP = 0 -> entry, = 1 -> exit
@@ -918,6 +913,17 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
        ip = regs->ARM_ip;
        regs->ARM_ip = why;
 
+       if (!ip)
+               audit_syscall_exit(regs);
+       else
+               audit_syscall_entry(AUDIT_ARCH_ARMEB, scno, regs->ARM_r0,
+                                   regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+
+       if (!test_thread_flag(TIF_SYSCALL_TRACE))
+               return scno;
+       if (!(current->ptrace & PT_PTRACED))
+               return scno;
+
        current_thread_info()->syscall = scno;
 
        /* the 0x80 provides a way for the tracing parent to distinguish
index 46d4d876e6fb853e9cf051ed3f88edc852c8a57d..e82c642fa53cd49ac88afc412b4194319fc3303f 100644 (file)
@@ -37,7 +37,7 @@
  */
 struct ep93xx_dma_data {
        int                             port;
-       enum dma_data_direction         direction;
+       enum dma_transfer_direction     direction;
        const char                      *name;
 };
 
@@ -80,14 +80,14 @@ static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
  * channel supports given DMA direction. Only M2P channels have such
  * limitation, for M2M channels the direction is configurable.
  */
-static inline enum dma_data_direction
+static inline enum dma_transfer_direction
 ep93xx_dma_chan_direction(struct dma_chan *chan)
 {
        if (!ep93xx_dma_chan_is_m2p(chan))
                return DMA_NONE;
 
        /* even channels are for TX, odd for RX */
-       return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+       return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
 }
 
 #endif /* __ASM_ARCH_DMA_H */
index 3cdeb3647542592a4a62f98e0c6b647da358b0d4..5364d4bfa8bc79efe6d51b27ff361811d98b8227 100644 (file)
@@ -36,6 +36,8 @@ pen:  ldr     r7, [r6]
         * should now contain the SVC stack for this core
         */
        b       secondary_startup
+ENDPROC(exynos4_secondary_startup)
 
+       .align 2
 1:     .long   .
        .long   pen_release
index 2b11e046d3919dc393a2875b528abf34e3664d85..0679b8ad2d1e1a4263521f805d7afd246334a037 100644 (file)
@@ -597,7 +597,8 @@ static struct s3c_fb_pd_win origen_fb_win0 = {
 static struct s3c_fb_platdata origen_lcd_pdata __initdata = {
        .win[0]         = &origen_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
-       .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+       .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
+                               VIDCON1_INV_VCLK,
        .setup_gpio     = exynos4_fimd0_gpio_setup_24bpp,
 };
 
index 60bc45e3e7099045560f71bf4aaf29664341e488..683aec786b78975bd8a791923134b1e0958de215 100644 (file)
@@ -24,7 +24,6 @@
 #include <asm/cacheflush.h>
 #include <asm/hardware/gic.h>
 #include <asm/smp_scu.h>
-#include <asm/unified.h>
 
 #include <mach/hardware.h>
 #include <mach/regs-clock.h>
@@ -137,7 +136,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
        while (time_before(jiffies, timeout)) {
                smp_rmb();
 
-               __raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)),
+               __raw_writel(virt_to_phys(exynos4_secondary_startup),
                        CPU1_BOOT_REG);
                gic_raise_softirq(cpumask_of(cpu), 1);
 
@@ -192,6 +191,6 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
         * until it receives a soft interrupt, and then the
         * secondary CPU branches to this address.
         */
-       __raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)),
+       __raw_writel(virt_to_phys(exynos4_secondary_startup),
                        CPU1_BOOT_REG);
 }
index 804c4a55f8038c75cbf0731168ee6d0d36de004a..7afbe1e55bebe710f926b01dbb84846de4ca0e9d 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/smp.h>
 
 #include <asm/cacheflush.h>
-#include <asm/unified.h>
 #include <asm/smp_scu.h>
 #include <asm/hardware/arm_timer.h>
 #include <asm/hardware/timer-sp.h>
@@ -76,7 +75,7 @@ void highbank_set_cpu_jump(int cpu, void *jump_addr)
 #ifdef CONFIG_SMP
        cpu = cpu_logical_map(cpu);
 #endif
-       writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu));
+       writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
        __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
        outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
                          HB_JUMP_TABLE_PHYS(cpu) + 15);
index 4bde04f99e38ceda85fd4cb43f89bf2eb080ead7..29bd1243781ede5156ca622de5fccbba82160947 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/smp.h>
-#include <asm/unified.h>
 
 #define SRC_SCR                                0x000
 #define SRC_GPR1                       0x020
@@ -43,7 +42,7 @@ void imx_enable_cpu(int cpu, bool enable)
 void imx_set_cpu_jump(int cpu, void *jump_addr)
 {
        cpu = cpu_logical_map(cpu);
-       writel_relaxed(BSYM(virt_to_phys(jump_addr)),
+       writel_relaxed(virt_to_phys(jump_addr),
                       src_base + SRC_GPR1 + cpu * 8);
 }
 
index 0c631a9f8647f5b8ec35dd5998da65bc937860d0..bcd5af223deabaf48451bf3a4d67cea23c82566e 100644 (file)
@@ -34,6 +34,7 @@ pen:  ldr     r7, [r6]
         * should now contain the SVC stack for this core
         */
        b       secondary_startup
+ENDPROC(msm_secondary_startup)
 
        .align
 1:     .long   .
index a9103bc6615f01ac526d8b085939fb4f20433ffb..bd66ed04d6dc0edfdc489a56a7f1e9fb84dbafc1 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/debugfs.h>
+#include <linux/module.h>
 #include <linux/string.h>
 #include <mach/vreg.h>
 
index 6c89cf8ab22eece43dd765896bf0e53824f8753e..2ecba6743b8e66b050f7bb79665878a34878484b 100644 (file)
@@ -67,7 +67,7 @@ static void picoxcell_add_clocksource(struct device_node *source_timer)
 
 static void __iomem *sched_io_base;
 
-unsigned u32 notrace picoxcell_read_sched_clock(void)
+static u32 picoxcell_read_sched_clock(void)
 {
        return __raw_readl(sched_io_base);
 }
index e83c654a58d0f4540445840e1b7f1af377c12bff..17c878ddbc70d1da5a63b31a6d1bba663841c704 100644 (file)
@@ -17,7 +17,6 @@
 #include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 #include <asm/smp_scu.h>
-#include <asm/unified.h>
 
 #include <mach/board-eb.h>
 #include <mach/board-pb11mp.h>
@@ -75,6 +74,6 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
         * until it receives a soft interrupt, and then the
         * secondary CPU branches to this address.
         */
-       __raw_writel(BSYM(virt_to_phys(versatile_secondary_startup)),
+       __raw_writel(virt_to_phys(versatile_secondary_startup),
                     __io_address(REALVIEW_SYS_FLAGSSET));
 }
index 5d55ab018b6b3f74b1946a67d0efd84a534e3caf..4cb2f951f1e9155d9b4434fac10ff4ea330e9039 100644 (file)
@@ -21,5 +21,6 @@
 #define CODEC_GPIO_BASE                        (GPIO_BOARD_START + 8)
 #define GLENFARCLAS_PMIC_GPIO_BASE     (GPIO_BOARD_START + 32)
 #define BANFF_PMIC_GPIO_BASE           (GPIO_BOARD_START + 64)
+#define MMGPIO_GPIO_BASE               (GPIO_BOARD_START + 96)
 
 #endif
index 1cc91d794c973e538e52a9678bff72fa4e1b8d0e..8077f650eb0e6c291ac607aff70a02b35dfa62b8 100644 (file)
@@ -260,6 +260,7 @@ static struct platform_device crag6410_dm9k_device = {
 
 static struct resource crag6410_mmgpio_resource[] = {
        [0] = {
+               .name   = "dat",
                .start  = S3C64XX_PA_XM0CSN4 + 1,
                .end    = S3C64XX_PA_XM0CSN4 + 1,
                .flags  = IORESOURCE_MEM,
@@ -272,7 +273,7 @@ static struct platform_device crag6410_mmgpio = {
        .resource       = crag6410_mmgpio_resource,
        .num_resources  = ARRAY_SIZE(crag6410_mmgpio_resource),
        .dev.platform_data = &(struct bgpio_pdata) {
-               .base   = -1,
+               .base   = MMGPIO_GPIO_BASE,
        },
 };
 
@@ -328,7 +329,6 @@ static struct platform_device wallvdd_device = {
 
 static struct platform_device *crag6410_devices[] __initdata = {
        &s3c_device_hsmmc0,
-       &s3c_device_hsmmc1,
        &s3c_device_hsmmc2,
        &s3c_device_i2c0,
        &s3c_device_i2c1,
@@ -355,7 +355,7 @@ static struct platform_device *crag6410_devices[] __initdata = {
 
 static struct pca953x_platform_data crag6410_pca_data = {
        .gpio_base      = PCA935X_GPIO_BASE,
-       .irq_base       = 0,
+       .irq_base       = -1,
 };
 
 /* VDDARM is controlled by DVS1 connected to GPK(0) */
@@ -683,12 +683,6 @@ static struct s3c_sdhci_platdata crag6410_hsmmc2_pdata = {
        .cd_type                = S3C_SDHCI_CD_PERMANENT,
 };
 
-static struct s3c_sdhci_platdata crag6410_hsmmc1_pdata = {
-       .max_width              = 4,
-       .cd_type                = S3C_SDHCI_CD_GPIO,
-       .ext_cd_gpio            = S3C64XX_GPF(11),
-};
-
 static void crag6410_cfg_sdhci0(struct platform_device *dev, int width)
 {
        /* Set all the necessary GPG pins to special-function 2 */
@@ -723,7 +717,6 @@ static void __init crag6410_machine_init(void)
        gpio_direction_output(S3C64XX_GPF(10), 1);
 
        s3c_sdhci0_set_platdata(&crag6410_hsmmc0_pdata);
-       s3c_sdhci1_set_platdata(&crag6410_hsmmc1_pdata);
        s3c_sdhci2_set_platdata(&crag6410_hsmmc2_pdata);
 
        s3c_i2c0_set_platdata(&i2c0_pdata);
index 055dac90e0e247c95258ecd412e47bb2297de1cc..7d3e81b9dd06229034603aa260064384d8d44a5d 100644 (file)
@@ -346,23 +346,10 @@ int __init s3c64xx_pm_init(void)
 
 static __init int s3c64xx_pm_initcall(void)
 {
-       u32 val;
-
        pm_cpu_prep = s3c64xx_pm_prepare;
        pm_cpu_sleep = s3c64xx_cpu_suspend;
        pm_uart_udivslot = 1;
 
-       /*
-        * Unconditionally disable power domains that contain only
-        * blocks which have no mainline driver support.
-        */
-       val = __raw_readl(S3C64XX_NORMAL_CFG);
-       val &= ~(S3C64XX_NORMALCFG_DOMAIN_G_ON |
-                S3C64XX_NORMALCFG_DOMAIN_V_ON |
-                S3C64XX_NORMALCFG_DOMAIN_I_ON |
-                S3C64XX_NORMALCFG_DOMAIN_P_ON);
-       __raw_writel(val, S3C64XX_NORMAL_CFG);
-
 #ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
        gpio_request(S3C64XX_GPN(12), "DEBUG_LED0");
        gpio_request(S3C64XX_GPN(13), "DEBUG_LED1");
index 1ea89be63e29e1d13e7783d0d7f2ff31f64057a9..6fcf304d3cdf53869f4d7b64d701e4d5e92345ce 100644 (file)
@@ -445,31 +445,39 @@ static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
        },
 };
 
+#define SH7372_CHCLR 0x220
+
 static const struct sh_dmae_channel sh7372_dmae_channels[] = {
        {
                .offset = 0,
                .dmars = 0,
                .dmars_bit = 0,
+               .chclr_offset = SH7372_CHCLR + 0,
        }, {
                .offset = 0x10,
                .dmars = 0,
                .dmars_bit = 8,
+               .chclr_offset = SH7372_CHCLR + 0x10,
        }, {
                .offset = 0x20,
                .dmars = 4,
                .dmars_bit = 0,
+               .chclr_offset = SH7372_CHCLR + 0x20,
        }, {
                .offset = 0x30,
                .dmars = 4,
                .dmars_bit = 8,
+               .chclr_offset = SH7372_CHCLR + 0x30,
        }, {
                .offset = 0x50,
                .dmars = 8,
                .dmars_bit = 0,
+               .chclr_offset = SH7372_CHCLR + 0x50,
        }, {
                .offset = 0x60,
                .dmars = 8,
                .dmars_bit = 8,
+               .chclr_offset = SH7372_CHCLR + 0x60,
        }
 };
 
@@ -487,6 +495,7 @@ static struct sh_dmae_pdata dma_platform_data = {
        .ts_shift       = ts_shift,
        .ts_shift_num   = ARRAY_SIZE(ts_shift),
        .dmaor_init     = DMAOR_DME,
+       .chclr_present  = 1,
 };
 
 /* Resource order important! */
@@ -494,7 +503,7 @@ static struct resource sh7372_dmae0_resources[] = {
        {
                /* Channel registers and DMAOR */
                .start  = 0xfe008020,
-               .end    = 0xfe00808f,
+               .end    = 0xfe00828f,
                .flags  = IORESOURCE_MEM,
        },
        {
@@ -522,7 +531,7 @@ static struct resource sh7372_dmae1_resources[] = {
        {
                /* Channel registers and DMAOR */
                .start  = 0xfe018020,
-               .end    = 0xfe01808f,
+               .end    = 0xfe01828f,
                .flags  = IORESOURCE_MEM,
        },
        {
@@ -550,7 +559,7 @@ static struct resource sh7372_dmae2_resources[] = {
        {
                /* Channel registers and DMAOR */
                .start  = 0xfe028020,
-               .end    = 0xfe02808f,
+               .end    = 0xfe02828f,
                .flags  = IORESOURCE_MEM,
        },
        {
index 64fa451edcfd486bdbaa0163f55e93399597d8ed..08da5589bcd8a60179cc458dba05f735ea6f919f 100644 (file)
@@ -32,6 +32,8 @@ pen:  ldr     r7, [r6]
         * should now contain the SVC stack for this core
         */
        b       secondary_startup
+ENDPROC(u8500_secondary_startup)
 
+       .align 2
 1:     .long   .
        .long   pen_release
index 2b5f7ac001a3326a160c346b9a3a0706d9e3c858..124ffb16909382f1383673783812fd5a1354f70c 100644 (file)
@@ -13,8 +13,6 @@
 #include <linux/smp.h>
 #include <linux/io.h>
 
-#include <asm/unified.h>
-
 #include <mach/motherboard.h>
 #define V2M_PA_CS7 0x10000000
 
@@ -46,6 +44,6 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
         * secondary CPU branches to this address.
         */
        writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
-       writel(BSYM(virt_to_phys(versatile_secondary_startup)),
+       writel(virt_to_phys(versatile_secondary_startup),
                MMIO_P2V(V2M_SYS_FLAGSSET));
 }
index ac24c5c4bc83c48cfe005fb2cea0e9bd7d2fc89e..fdbe60001542615595b5f178f6856d8d5c0455bb 100644 (file)
 #define FB_SYNC_SWAP_RGB       0x04000000
 #define FB_SYNC_CLK_SEL_EN     0x02000000
 
+/*
+ * Specify the way your display is connected. The IPU can arbitrarily
+ * map the internal colors to the external data lines. We only support
+ * the following mappings at the moment.
+ */
+enum disp_data_mapping {
+       /* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */
+       IPU_DISP_DATA_MAPPING_RGB666,
+       /* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */
+       IPU_DISP_DATA_MAPPING_RGB565,
+       /* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */
+       IPU_DISP_DATA_MAPPING_RGB888,
+};
+
 /**
  * struct mx3fb_platform_data - mx3fb platform data
  *
@@ -33,6 +47,7 @@ struct mx3fb_platform_data {
        const char                      *name;
        const struct fb_videomode       *mode;
        int                             num_modes;
+       enum disp_data_mapping          disp_data_fmt;
 };
 
 #endif
index 685c78716d95234d1cb955b69d23a0bd929d107b..fd0ee84c45d1bc66b51e0a5527f75d9981040eeb 100644 (file)
@@ -113,7 +113,8 @@ struct stedma40_half_channel_info {
  * @dst_dev_type: Dst device type
  * @src_info: Parameters for dst half channel
  * @dst_info: Parameters for dst half channel
- *
+ * @use_fixed_channel: if true, use physical channel specified by phy_channel
+ * @phy_channel: physical channel to use, only if use_fixed_channel is true
  *
  * This structure has to be filled by the client drivers.
  * It is recommended to do all dma configurations for clients in the machine.
@@ -129,6 +130,9 @@ struct stedma40_chan_cfg {
        int                                      dst_dev_type;
        struct stedma40_half_channel_info        src_info;
        struct stedma40_half_channel_info        dst_info;
+
+       bool                                     use_fixed_channel;
+       int                                      phy_channel;
 };
 
 /**
@@ -153,6 +157,7 @@ struct stedma40_platform_data {
        struct stedma40_chan_cfg        *memcpy_conf_phy;
        struct stedma40_chan_cfg        *memcpy_conf_log;
        int                              disabled_channels[STEDMA40_MAX_PHYS];
+       bool                             use_esram_lcla;
 };
 
 #ifdef CONFIG_STE_DMA40
@@ -187,7 +192,7 @@ static inline struct
 dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
                                            dma_addr_t addr,
                                            unsigned int size,
-                                           enum dma_data_direction direction,
+                                           enum dma_transfer_direction direction,
                                            unsigned long flags)
 {
        struct scatterlist sg;
@@ -209,7 +214,7 @@ static inline struct
 dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
                                            dma_addr_t addr,
                                            unsigned int size,
-                                           enum dma_data_direction direction,
+                                           enum dma_transfer_direction direction,
                                            unsigned long flags)
 {
        return NULL;
index 2cded872f22b3debd634282a408d67169ceff981..0747c77a2fd53d0d2a66a1f1f2377b3d7e7722f6 100644 (file)
@@ -37,14 +37,14 @@ static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
                                (void *)dma_ch;
        chan = dma_request_channel(mask, pl330_filter, filter_param);
 
-       if (info->direction == DMA_FROM_DEVICE) {
+       if (info->direction == DMA_DEV_TO_MEM) {
                memset(&slave_config, 0, sizeof(struct dma_slave_config));
                slave_config.direction = info->direction;
                slave_config.src_addr = info->fifo;
                slave_config.src_addr_width = info->width;
                slave_config.src_maxburst = 1;
                dmaengine_slave_config(chan, &slave_config);
-       } else if (info->direction == DMA_TO_DEVICE) {
+       } else if (info->direction == DMA_MEM_TO_DEV) {
                memset(&slave_config, 0, sizeof(struct dma_slave_config));
                slave_config.direction = info->direction;
                slave_config.dst_addr = info->fifo;
index 22eafc310bd7858a8fb10b39087fc6064e78e04c..71a6827c7706b21e10200bd834b5dbaad1fa9e45 100644 (file)
 #define __SAMSUNG_DMA_OPS_H_ __FILE__
 
 #include <linux/dmaengine.h>
+#include <mach/dma.h>
 
 struct samsung_dma_prep_info {
        enum dma_transaction_type cap;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        dma_addr_t buf;
        unsigned long period;
        unsigned long len;
@@ -27,7 +28,7 @@ struct samsung_dma_prep_info {
 
 struct samsung_dma_info {
        enum dma_transaction_type cap;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        enum dma_slave_buswidth width;
        dma_addr_t fifo;
        struct s3c2410_dma_client *client;
index b9061128abdef3bcf76a2e9cdd4bab76732b4fd5..7b02143ccd9a2236b1ba0f32cc47dfb54d8b3d54 100644 (file)
@@ -10,6 +10,9 @@
  * published by the Free Software Foundation.
 */
 
+#ifndef __PLAT_DMA_H
+#define __PLAT_DMA_H
+
 #include <linux/dma-mapping.h>
 
 enum s3c2410_dma_buffresult {
@@ -122,5 +125,6 @@ extern int s3c2410_dma_getposition(enum dma_ch channel,
 extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
 extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
 
-
 #include <plat/dma-ops.h>
+
+#endif
index aea68b60ef98af7bac8c5841536c8150d803888f..fa95e9a009729f41ddd1c39174e4f6908f515226 100644 (file)
@@ -11,6 +11,8 @@
 #ifndef __S3C64XX_PLAT_SPI_H
 #define __S3C64XX_PLAT_SPI_H
 
+struct platform_device;
+
 /**
  * struct s3c64xx_spi_csinfo - ChipSelect description
  * @fb_delay: Slave specific feedback delay.
index d397a1fb2f5414ac7337d9aca3e8d328bf39f4c8..dd703ef09b8d9d623d83e4a131c12e87e329e838 100644 (file)
@@ -38,3 +38,4 @@ pen:  ldr     r7, [r6]
        .align
 1:     .long   .
        .long   pen_release
+ENDPROC(versatile_secondary_startup)
index f5cb27614e35df7c4027094e7ac1d6626091021c..68c98f5b3ca625aaca9f342addd31c68c48c27af 100644 (file)
@@ -246,7 +246,18 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
        return regs->ar_bspstore;
 }
 
-#define regs_return_value(regs) ((regs)->r8)
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+       return regs->r10 != -1;
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       if (is_syscall_success(regs))
+               return regs->r8;
+       else
+               return -regs->r8;
+}
 
 /* Conserve space in histogram by encoding slot bits in address
  * bits 2 and 3 rather than bits 0 and 1.
index 8848f43d819e55ba91bf07fc6ae8756f88e7ad36..dad91661ddf96e8b54aa4fbeafb9190879a65ccf 100644 (file)
@@ -1246,15 +1246,8 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
        if (test_thread_flag(TIF_RESTORE_RSE))
                ia64_sync_krbs();
 
-       if (unlikely(current->audit_context)) {
-               long syscall;
-               int arch;
 
-               syscall = regs.r15;
-               arch = AUDIT_ARCH_IA64;
-
-               audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
-       }
+       audit_syscall_entry(AUDIT_ARCH_IA64, regs.r15, arg0, arg1, arg2, arg3);
 
        return 0;
 }
@@ -1268,14 +1261,7 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
 {
        int step;
 
-       if (unlikely(current->audit_context)) {
-               int success = AUDITSC_RESULT(regs.r10);
-               long result = regs.r8;
-
-               if (success != AUDITSC_SUCCESS)
-                       result = -result;
-               audit_syscall_exit(success, result);
-       }
+       audit_syscall_exit(&regs);
 
        step = test_thread_flag(TIF_SINGLESTEP);
        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
index 816bee64b1961d202f35a1aa9c4d2be0fc1d640e..94e92c8058592f786cfd6fac942c90b308664e85 100644 (file)
@@ -61,6 +61,11 @@ struct pt_regs {
 #define instruction_pointer(regs)      ((regs)->pc)
 #define profile_pc(regs)               instruction_pointer(regs)
 
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->r3;
+}
+
 #else /* __KERNEL__ */
 
 /* pt_regs offsets used by gdbserver etc in ptrace syscalls */
index 043cb58f9c443e72843fa44a9ebd399442a965b5..6eb2aa927d8966b842f388b219ae19b8dadd7309 100644 (file)
@@ -147,10 +147,8 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
                 */
                ret = -1L;
 
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(EM_MICROBLAZE, regs->r12,
-                                   regs->r5, regs->r6,
-                                   regs->r7, regs->r8);
+       audit_syscall_entry(EM_MICROBLAZE, regs->r12, regs->r5, regs->r6,
+                           regs->r7, regs->r8);
 
        return ret ?: regs->r12;
 }
@@ -159,8 +157,7 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
 {
        int step;
 
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(regs->r3), regs->r3);
+       audit_syscall_exit(regs);
 
        step = test_thread_flag(TIF_SINGLESTEP);
        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
index 604cd9dd133362712e5220510be83be134d96ec1..d4fc1a9717796610623cefa50c0ffd706225087e 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/cache.h>
 #include <linux/of_platform.h>
 #include <linux/dma-mapping.h>
+#include <linux/cpu.h>
 #include <asm/cacheflush.h>
 #include <asm/entry.h>
 #include <asm/cpuinfo.h>
@@ -226,5 +227,23 @@ static int __init setup_bus_notifier(void)
 
        return 0;
 }
-
 arch_initcall(setup_bus_notifier);
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+       int i, ret;
+
+       for_each_present_cpu(i) {
+               struct cpu *c = &per_cpu(cpu_devices, i);
+
+               ret = register_cpu(c, i);
+               if (ret)
+                       printk(KERN_WARNING "topology_init: register_cpu %d "
+                                               "failed (%d)\n", i, ret);
+       }
+
+       return 0;
+}
+subsys_initcall(topology_init);
index 7b99c670e478ed9ab79998762c839489cefb66a1..4b7f5252d2fd31ba156723c23db11e57e983442b 100644 (file)
@@ -137,7 +137,19 @@ extern int ptrace_set_watch_regs(struct task_struct *child,
  */
 #define user_mode(regs) (((regs)->cp0_status & KU_MASK) == KU_USER)
 
-#define regs_return_value(_regs) ((_regs)->regs[2])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+       return !regs->regs[7];
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       if (is_syscall_success(regs))
+               return regs->regs[2];
+       else
+               return -regs->regs[2];
+}
+
 #define instruction_pointer(regs) ((regs)->cp0_epc)
 #define profile_pc(regs) instruction_pointer(regs)
 
index 4e6ea1ffad46617b3e1c0880c4061371f7aa5885..7786b608d9322289ce23a83eb5205b50b57f3cb6 100644 (file)
@@ -560,10 +560,9 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
        }
 
 out:
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(audit_arch(), regs->regs[2],
-                                   regs->regs[4], regs->regs[5],
-                                   regs->regs[6], regs->regs[7]);
+       audit_syscall_entry(audit_arch(), regs->regs[2],
+                           regs->regs[4], regs->regs[5],
+                           regs->regs[6], regs->regs[7]);
 }
 
 /*
@@ -572,9 +571,7 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
  */
 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 {
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]),
-                                  -regs->regs[2]);
+       audit_syscall_exit(regs);
 
        if (!(current->ptrace & PT_PTRACED))
                return;
index 48223f9b8728d5e1433c6d13ab9c1c583780d2cb..78a205162fd7d48ad0b1a3de78bd42453dae5c0a 100644 (file)
@@ -86,7 +86,18 @@ struct pt_regs {
 #define instruction_pointer(regs) ((regs)->nip)
 #define user_stack_pointer(regs) ((regs)->gpr[1])
 #define kernel_stack_pointer(regs) ((regs)->gpr[1])
-#define regs_return_value(regs) ((regs)->gpr[3])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+       return !(regs->ccr & 0x10000000);
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       if (is_syscall_success(regs))
+               return regs->gpr[3];
+       else
+               return -regs->gpr[3];
+}
 
 #ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
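
On powerpc the failure indicator is CR0[SO], the 0x10000000 bit of regs->ccr, which the syscall exit path sets when the call failed; gpr[3] then carries the positive error number. A spelled-out equivalent of is_syscall_success() above (the macro name is an assumption, for illustration):

#define EXAMPLE_CR0_SO	0x10000000UL	/* assumed name for the CR0[SO] bit */

static inline int example_ppc_syscall_failed(struct pt_regs *regs)
{
	return (regs->ccr & EXAMPLE_CR0_SO) != 0;
}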
index 5de73dbd15c7e404a16257ae5239e9b1af293f65..5b43325402bcc8e5435c1e2804685e81cb24df72 100644 (file)
@@ -1724,22 +1724,20 @@ long do_syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->gpr[0]);
 
-       if (unlikely(current->audit_context)) {
 #ifdef CONFIG_PPC64
-               if (!is_32bit_task())
-                       audit_syscall_entry(AUDIT_ARCH_PPC64,
-                                           regs->gpr[0],
-                                           regs->gpr[3], regs->gpr[4],
-                                           regs->gpr[5], regs->gpr[6]);
-               else
+       if (!is_32bit_task())
+               audit_syscall_entry(AUDIT_ARCH_PPC64,
+                                   regs->gpr[0],
+                                   regs->gpr[3], regs->gpr[4],
+                                   regs->gpr[5], regs->gpr[6]);
+       else
 #endif
-                       audit_syscall_entry(AUDIT_ARCH_PPC,
-                                           regs->gpr[0],
-                                           regs->gpr[3] & 0xffffffff,
-                                           regs->gpr[4] & 0xffffffff,
-                                           regs->gpr[5] & 0xffffffff,
-                                           regs->gpr[6] & 0xffffffff);
-       }
+               audit_syscall_entry(AUDIT_ARCH_PPC,
+                                   regs->gpr[0],
+                                   regs->gpr[3] & 0xffffffff,
+                                   regs->gpr[4] & 0xffffffff,
+                                   regs->gpr[5] & 0xffffffff,
+                                   regs->gpr[6] & 0xffffffff);
 
        return ret ?: regs->gpr[0];
 }
@@ -1748,9 +1746,7 @@ void do_syscall_trace_leave(struct pt_regs *regs)
 {
        int step;
 
-       if (unlikely(current->audit_context))
-               audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
-                                  regs->result);
+       audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->result);
index 56da355678f4aa7feed766a016b84282381451ba..aeb77f01798504cd250aebbd8748516b8c64e113 100644 (file)
@@ -541,9 +541,13 @@ struct user_regs_struct
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
 #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
 #define user_stack_pointer(regs)((regs)->gprs[15])
-#define regs_return_value(regs)((regs)->gprs[2])
 #define profile_pc(regs) instruction_pointer(regs)
 
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->gprs[2];
+}
+
 int regs_query_register_offset(const char *name);
 const char *regs_query_register_name(unsigned int offset);
 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
index 573bc29551ef471fee58b89d02df0ea0ff956503..9d82ed4bcb273a91bd6c4875d204218da2d420a2 100644 (file)
@@ -740,20 +740,17 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->gprs[2]);
 
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(is_compat_task() ?
-                                       AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
-                                   regs->gprs[2], regs->orig_gpr2,
-                                   regs->gprs[3], regs->gprs[4],
-                                   regs->gprs[5]);
+       audit_syscall_entry(is_compat_task() ?
+                               AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
+                           regs->gprs[2], regs->orig_gpr2,
+                           regs->gprs[3], regs->gprs[4],
+                           regs->gprs[5]);
        return ret ?: regs->gprs[2];
 }
 
 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
 {
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
-                                  regs->gprs[2]);
+       audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->gprs[2]);
index 6c2239cca1a2d86204a52ecef53ab5514f8a6073..2d3e906aa72252d3b827a12405546465127d5f78 100644 (file)
@@ -76,7 +76,10 @@ struct pt_dspregs {
 #ifdef __KERNEL__
 
 #define MAX_REG_OFFSET         offsetof(struct pt_regs, tra)
-#define regs_return_value(_regs)       ((_regs)->regs[0])
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->regs[0];
+}
 
 #endif /* __KERNEL__ */
 
index bf9be7764d69f250978300cfb5c3e556e0d71e76..eb3fcceaf64b7bb5942a081637e78c5d479bad86 100644 (file)
@@ -13,7 +13,10 @@ struct pt_regs {
 #ifdef __KERNEL__
 
 #define MAX_REG_OFFSET         offsetof(struct pt_regs, tregs[7])
-#define regs_return_value(_regs)       ((_regs)->regs[3])
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->regs[3];
+}
 
 #endif /* __KERNEL__ */
 
index 92b3c276339a3a50d95a027c1d2a2e902a507ee3..a3e651563763aaabf76818e68729a53bc98aeaf7 100644 (file)
@@ -518,10 +518,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->regs[0]);
 
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(audit_arch(), regs->regs[3],
-                                   regs->regs[4], regs->regs[5],
-                                   regs->regs[6], regs->regs[7]);
+       audit_syscall_entry(audit_arch(), regs->regs[3],
+                           regs->regs[4], regs->regs[5],
+                           regs->regs[6], regs->regs[7]);
 
        return ret ?: regs->regs[0];
 }
@@ -530,9 +529,7 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
 {
        int step;
 
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
-                                  regs->regs[0]);
+       audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->regs[0]);
index c8f97649f354b5f4b80366b79a95c9a9a0f4d4bb..3d0080b5c976bb9b19fbbc8ac5116239c7d841e3 100644 (file)
@@ -536,10 +536,9 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->regs[9]);
 
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(audit_arch(), regs->regs[1],
-                                   regs->regs[2], regs->regs[3],
-                                   regs->regs[4], regs->regs[5]);
+       audit_syscall_entry(audit_arch(), regs->regs[1],
+                           regs->regs[2], regs->regs[3],
+                           regs->regs[4], regs->regs[5]);
 
        return ret ?: regs->regs[9];
 }
@@ -548,9 +547,7 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
 {
        int step;
 
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
-                                  regs->regs[9]);
+       audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->regs[9]);
index a0e1bcf843a1ed5f9931a89c6cc63d4cd636d4ae..c00c3b5c2806edfac2b2b7f44f79045ae1bfec01 100644 (file)
@@ -207,7 +207,15 @@ do {       current_thread_info()->syscall_noerror = 1; \
 #define instruction_pointer(regs) ((regs)->tpc)
 #define instruction_pointer_set(regs, val) ((regs)->tpc = (val))
 #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
-#define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+       return !(regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY));
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->u_regs[UREG_I0];
+}
 #ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *);
 #else
index 96ee50a806613b782ce11e8627f1911ede3fde3f..9388844cd88c5afa47f3a24027019a96d11895ba 100644 (file)
@@ -1071,32 +1071,22 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->u_regs[UREG_G1]);
 
-       if (unlikely(current->audit_context) && !ret)
-               audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
-                                    AUDIT_ARCH_SPARC :
-                                    AUDIT_ARCH_SPARC64),
-                                   regs->u_regs[UREG_G1],
-                                   regs->u_regs[UREG_I0],
-                                   regs->u_regs[UREG_I1],
-                                   regs->u_regs[UREG_I2],
-                                   regs->u_regs[UREG_I3]);
+       audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
+                            AUDIT_ARCH_SPARC :
+                            AUDIT_ARCH_SPARC64),
+                           regs->u_regs[UREG_G1],
+                           regs->u_regs[UREG_I0],
+                           regs->u_regs[UREG_I1],
+                           regs->u_regs[UREG_I2],
+                           regs->u_regs[UREG_I3]);
 
        return ret;
 }
 
 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 {
-#ifdef CONFIG_AUDITSYSCALL
-       if (unlikely(current->audit_context)) {
-               unsigned long tstate = regs->tstate;
-               int result = AUDITSC_SUCCESS;
+       audit_syscall_exit(regs);
 
-               if (unlikely(tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
-                       result = AUDITSC_FAILURE;
-
-               audit_syscall_exit(result, regs->u_regs[UREG_I0]);
-       }
-#endif
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->u_regs[UREG_G1]);
 
index c9da32b0c707015c19db39b49785cd8b0610a35a..06b190390505f848a095648d2c555bd9d996604d 100644 (file)
@@ -167,17 +167,15 @@ void syscall_trace(struct uml_pt_regs *regs, int entryexit)
        int is_singlestep = (current->ptrace & PT_DTRACE) && entryexit;
        int tracesysgood;
 
-       if (unlikely(current->audit_context)) {
-               if (!entryexit)
-                       audit_syscall_entry(HOST_AUDIT_ARCH,
-                                           UPT_SYSCALL_NR(regs),
-                                           UPT_SYSCALL_ARG1(regs),
-                                           UPT_SYSCALL_ARG2(regs),
-                                           UPT_SYSCALL_ARG3(regs),
-                                           UPT_SYSCALL_ARG4(regs));
-               else audit_syscall_exit(AUDITSC_RESULT(UPT_SYSCALL_RET(regs)),
-                                       UPT_SYSCALL_RET(regs));
-       }
+       if (!entryexit)
+               audit_syscall_entry(HOST_AUDIT_ARCH,
+                                   UPT_SYSCALL_NR(regs),
+                                   UPT_SYSCALL_ARG1(regs),
+                                   UPT_SYSCALL_ARG2(regs),
+                                   UPT_SYSCALL_ARG3(regs),
+                                   UPT_SYSCALL_ARG4(regs));
+       else
+               audit_syscall_exit(regs);
 
        /* Fake a debug trap */
        if (is_singlestep)
index 1106261856c8022610bface1d874c38f24b9bf63..e3e734005e19c1849dfb6f6be7a5b26f2fc9b374 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/segment.h>
 #include <asm/irqflags.h>
 #include <linux/linkage.h>
+#include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -189,7 +190,7 @@ sysexit_from_sys_call:
        movl %ebx,%edx                  /* 3rd arg: 1st syscall arg */
        movl %eax,%esi                  /* 2nd arg: syscall number */
        movl $AUDIT_ARCH_I386,%edi      /* 1st arg: audit arch */
-       call audit_syscall_entry
+       call __audit_syscall_entry
        movl RAX-ARGOFFSET(%rsp),%eax   /* reload syscall number */
        cmpq $(IA32_NR_syscalls-1),%rax
        ja ia32_badsys
@@ -206,12 +207,13 @@ sysexit_from_sys_call:
        TRACE_IRQS_ON
        sti
        movl %eax,%esi          /* second arg, syscall return value */
-       cmpl $0,%eax            /* is it < 0? */
-       setl %al                /* 1 if so, 0 if not */
+       cmpl $-MAX_ERRNO,%eax   /* is it an error? */
+       jbe 1f
+       movslq %eax, %rsi       /* if error sign extend to 64 bits */
+1:     setbe %al               /* 1 if error, 0 if not */
        movzbl %al,%edi         /* zero-extend that into %edi */
-       inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
-       call audit_syscall_exit
-       movl RAX-ARGOFFSET(%rsp),%eax   /* reload syscall return value */
+       call __audit_syscall_exit
+       movq RAX-ARGOFFSET(%rsp),%rax   /* reload syscall return value */
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
        cli
        TRACE_IRQS_OFF
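
The audit exit stubs no longer treat every negative %eax as a failure: only values in the errno window [-MAX_ERRNO, -1] count, which is the same range IS_ERR_VALUE() recognizes. A C sketch of the predicate the new cmpl $-MAX_ERRNO / setbe pair implements (helper name hypothetical):

#include <linux/err.h>	/* MAX_ERRNO */

/* One unsigned compare catches exactly the last MAX_ERRNO values. */
static inline int example_is_syscall_error(long ret)
{
	return (unsigned long)ret >= (unsigned long)-MAX_ERRNO;
}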
index 4af9fd2450a5e7f319f6077c75cb8a3bfca89fae..79d97e68f04238ac13993e89c016e67a7b647af6 100644 (file)
@@ -42,6 +42,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/err.h>
 #include <asm/thread_info.h>
 #include <asm/irqflags.h>
 #include <asm/errno.h>
@@ -453,7 +454,7 @@ sysenter_audit:
        movl %ebx,%ecx                  /* 3rd arg: 1st syscall arg */
        movl %eax,%edx                  /* 2nd arg: syscall number */
        movl $AUDIT_ARCH_I386,%eax      /* 1st arg: audit arch */
-       call audit_syscall_entry
+       call __audit_syscall_entry
        pushl_cfi %ebx
        movl PT_EAX(%esp),%eax          /* reload syscall number */
        jmp sysenter_do_call
@@ -464,11 +465,10 @@ sysexit_audit:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)
        movl %eax,%edx          /* second arg, syscall return value */
-       cmpl $0,%eax            /* is it < 0? */
-       setl %al                /* 1 if so, 0 if not */
+       cmpl $-MAX_ERRNO,%eax   /* is it an error? */
+       setbe %al               /* 1 if so, 0 if not */
        movzbl %al,%eax         /* zero-extend that */
-       inc %eax /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
-       call audit_syscall_exit
+       call __audit_syscall_exit
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
index 940ba711fc286510ddc20c441fd882fe7a632b62..3fe8239fd8fbd8ef692f57517d0d1dbd37f284ae 100644 (file)
@@ -55,6 +55,7 @@
 #include <asm/paravirt.h>
 #include <asm/ftrace.h>
 #include <asm/percpu.h>
+#include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -548,7 +549,7 @@ badsys:
 #ifdef CONFIG_AUDITSYSCALL
        /*
         * Fast path for syscall audit without full syscall trace.
-        * We just call audit_syscall_entry() directly, and then
+        * We just call __audit_syscall_entry() directly, and then
         * jump back to the normal fast path.
         */
 auditsys:
@@ -558,22 +559,21 @@ auditsys:
        movq %rdi,%rdx                  /* 3rd arg: 1st syscall arg */
        movq %rax,%rsi                  /* 2nd arg: syscall number */
        movl $AUDIT_ARCH_X86_64,%edi    /* 1st arg: audit arch */
-       call audit_syscall_entry
+       call __audit_syscall_entry
        LOAD_ARGS 0             /* reload call-clobbered registers */
        jmp system_call_fastpath
 
        /*
-        * Return fast path for syscall audit.  Call audit_syscall_exit()
+        * Return fast path for syscall audit.  Call __audit_syscall_exit()
         * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
         * masked off.
         */
 sysret_audit:
        movq RAX-ARGOFFSET(%rsp),%rsi   /* second arg, syscall return value */
-       cmpq $0,%rsi            /* is it < 0? */
-       setl %al                /* 1 if so, 0 if not */
+       cmpq $-MAX_ERRNO,%rsi   /* is it < -MAX_ERRNO? */
+       setbe %al               /* 1 if so, 0 if not */
        movzbl %al,%edi         /* zero-extend that into %edi */
-       inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
-       call audit_syscall_exit
+       call __audit_syscall_exit
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
        jmp sysret_check
 #endif /* CONFIG_AUDITSYSCALL */
index 89a04c7b5bb6f600ff764bf3c91900880e47db4e..50267386b7668d7761f14b2e98a6a032815a792f 100644 (file)
@@ -1392,20 +1392,18 @@ long syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->orig_ax);
 
-       if (unlikely(current->audit_context)) {
-               if (IS_IA32)
-                       audit_syscall_entry(AUDIT_ARCH_I386,
-                                           regs->orig_ax,
-                                           regs->bx, regs->cx,
-                                           regs->dx, regs->si);
+       if (IS_IA32)
+               audit_syscall_entry(AUDIT_ARCH_I386,
+                                   regs->orig_ax,
+                                   regs->bx, regs->cx,
+                                   regs->dx, regs->si);
 #ifdef CONFIG_X86_64
-               else
-                       audit_syscall_entry(AUDIT_ARCH_X86_64,
-                                           regs->orig_ax,
-                                           regs->di, regs->si,
-                                           regs->dx, regs->r10);
+       else
+               audit_syscall_entry(AUDIT_ARCH_X86_64,
+                                   regs->orig_ax,
+                                   regs->di, regs->si,
+                                   regs->dx, regs->r10);
 #endif
-       }
 
        return ret ?: regs->orig_ax;
 }
@@ -1414,8 +1412,7 @@ void syscall_trace_leave(struct pt_regs *regs)
 {
        bool step;
 
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
+       audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->ax);
index 863f8753ab0ae696f8981ef30d9ee031dd0e310b..b466cab5ba15d171cb4b1fd61a1590bd0cca9956 100644 (file)
@@ -335,9 +335,11 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
        if (info->flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(tsk->mm);
 
-       /*call audit_syscall_exit since we do not exit via the normal paths */
+       /*call __audit_syscall_exit since we do not exit via the normal paths */
+#ifdef CONFIG_AUDITSYSCALL
        if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(0), 0);
+               __audit_syscall_exit(1, 0);
+#endif
 
        __asm__ __volatile__(
                "movl %0,%%esp\n\t"
index 711b1621747f2e45e8497a13c7c81b9bc260916f..5ef9344a8b241d9a69448602b48240d5165e9c1b 100644 (file)
@@ -3,3 +3,8 @@
 #else
 #include "ptrace_64.h"
 #endif
+
+static inline long regs_return_value(struct uml_pt_regs *regs)
+{
+       return UPT_SYSCALL_RET(regs);
+}
index a0d042aa296755e441fe4266a7103b315a581a34..2dff698ab02e5ce1642248afa0f582b8aa0f112f 100644 (file)
@@ -334,8 +334,7 @@ void do_syscall_trace_enter(struct pt_regs *regs)
                do_syscall_trace();
 
 #if 0
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(current, AUDIT_ARCH_XTENSA..);
+       audit_syscall_entry(current, AUDIT_ARCH_XTENSA..);
 #endif
 }
 
index 163263ddd3814e3f087eb1a7e1461242fabbd702..ee55019066a19500c6df54addf90d91d2ace60fa 100644 (file)
@@ -3117,18 +3117,17 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-       struct cfq_queue *old_cfqq = cfqd->active_queue;
-
        cfq_log_cfqq(cfqd, cfqq, "preempt");
-       cfq_slice_expired(cfqd, 1);
 
        /*
         * workload type is changed, don't save slice, otherwise preempt
         * doesn't happen
         */
-       if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
+       if (cfqq_type(cfqd->active_queue) != cfqq_type(cfqq))
                cfqq->cfqg->saved_workload_slice = 0;
 
+       cfq_slice_expired(cfqd, 1);
+
        /*
         * Put the new queue at the front of the current list,
         * so we know that it will be selected next.
index 69ac373c72abfc41e53304c0a8e2c7bb4fbf4e5c..fdf27b9fce43a882b706bf5148cd51a6a67f4f83 100644 (file)
@@ -1116,6 +1116,13 @@ static int piix_broken_suspend(void)
                                DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"),
                        },
                },
+               {
+                       .ident = "Satellite Pro A120",
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
+                       },
+               },
                {
                        .ident = "Portege M500",
                        .matches = {
index 11c9aea4f4f7ea93a50fdc4b3547323d3728738d..c06e0ec11556d7696b620fa0b32e5af508863052 100644 (file)
@@ -4125,6 +4125,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         * device and controller are SATA.
         */
        { "PIONEER DVD-RW  DVRTD08",    NULL,   ATA_HORKAGE_NOSETXFER },
+       { "PIONEER DVD-RW  DVRTD08A",   NULL,   ATA_HORKAGE_NOSETXFER },
+       { "PIONEER DVD-RW  DVR-215",    NULL,   ATA_HORKAGE_NOSETXFER },
        { "PIONEER DVD-RW  DVR-212D",   NULL,   ATA_HORKAGE_NOSETXFER },
        { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
 
index 9a7f0ea565df6c6066d7e64465c8385b81014a01..74aaee30e264ce1959c3807cb6a9e4c8bad27c95 100644 (file)
@@ -291,6 +291,7 @@ int ata_tport_add(struct device *parent,
                goto tport_err;
        }
 
+       device_enable_async_suspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
 
index d6a4677fdf711801e4d09e74d6cc122db9a50904..1e65842e2ca719a0916bc371c676e68f27c096e2 100644 (file)
@@ -251,6 +251,8 @@ static const u32 udma_tenvmin = 20;
 static const u32 udma_tackmin = 20;
 static const u32 udma_tssmin = 50;
 
+#define BFIN_MAX_SG_SEGMENTS 4
+
 /**
  *
  *     Function:       num_clocks_min
@@ -829,79 +831,61 @@ static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
 
 static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
 {
-       unsigned short config = WDSIZE_16;
+       struct ata_port *ap = qc->ap;
+       struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd;
+       void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+       unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN;
        struct scatterlist *sg;
        unsigned int si;
+       unsigned int channel;
+       unsigned int dir;
+       unsigned int size = 0;
 
        dev_dbg(qc->ap->dev, "in atapi dma setup\n");
        /* Program the ATA_CTRL register with dir */
        if (qc->tf.flags & ATA_TFLAG_WRITE) {
-               /* fill the ATAPI DMA controller */
-               set_dma_config(CH_ATAPI_TX, config);
-               set_dma_x_modify(CH_ATAPI_TX, 2);
-               for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                       set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
-                       set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
-               }
+               channel = CH_ATAPI_TX;
+               dir = DMA_TO_DEVICE;
        } else {
+               channel = CH_ATAPI_RX;
+               dir = DMA_FROM_DEVICE;
                config |= WNR;
-               /* fill the ATAPI DMA controller */
-               set_dma_config(CH_ATAPI_RX, config);
-               set_dma_x_modify(CH_ATAPI_RX, 2);
-               for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                       set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
-                       set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
-               }
        }
-}
 
-/**
- *     bfin_bmdma_start - Start an IDE DMA transaction
- *     @qc: Info associated with this ATA transaction.
- *
- *     Note: Original code is ata_bmdma_start().
- */
+       dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir);
 
-static void bfin_bmdma_start(struct ata_queued_cmd *qc)
-{
-       struct ata_port *ap = qc->ap;
-       void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
-       struct scatterlist *sg;
-       unsigned int si;
+       /* fill the ATAPI DMA controller */
+       for_each_sg(qc->sg, sg, qc->n_elem, si) {
+               dma_desc_cpu[si].start_addr = sg_dma_address(sg);
+               dma_desc_cpu[si].cfg = config;
+               dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1;
+               dma_desc_cpu[si].x_modify = 2;
+               size += sg_dma_len(sg);
+       }
 
-       dev_dbg(qc->ap->dev, "in atapi dma start\n");
-       if (!(ap->udma_mask || ap->mwdma_mask))
-               return;
+       /* Set the last descriptor to stop mode */
+       dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE);
 
-       /* start ATAPI DMA controller*/
-       if (qc->tf.flags & ATA_TFLAG_WRITE) {
-               /*
-                * On blackfin arch, uncacheable memory is not
-                * allocated with flag GFP_DMA. DMA buffer from
-                * common kenel code should be flushed if WB
-                * data cache is enabled. Otherwise, this loop
-                * is an empty loop and optimized out.
-                */
-               for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                       flush_dcache_range(sg_dma_address(sg),
-                               sg_dma_address(sg) + sg_dma_len(sg));
-               }
-               enable_dma(CH_ATAPI_TX);
-               dev_dbg(qc->ap->dev, "enable udma write\n");
+       flush_dcache_range((unsigned int)dma_desc_cpu,
+               (unsigned int)dma_desc_cpu +
+                       qc->n_elem * sizeof(struct dma_desc_array));
 
-               /* Send ATA DMA write command */
-               bfin_exec_command(ap, &qc->tf);
+       /* Enable ATA DMA operation */
+       set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma);
+       set_dma_x_count(channel, 0);
+       set_dma_x_modify(channel, 0);
+       set_dma_config(channel, config);
+
+       SSYNC();
+
+       /* Send ATA DMA command */
+       bfin_exec_command(ap, &qc->tf);
 
+       if (qc->tf.flags & ATA_TFLAG_WRITE) {
                /* set ATA DMA write direction */
                ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
                        | XFER_DIR));
        } else {
-               enable_dma(CH_ATAPI_RX);
-               dev_dbg(qc->ap->dev, "enable udma read\n");
-
-               /* Send ATA DMA read command */
-               bfin_exec_command(ap, &qc->tf);
-
                /* set ATA DMA read direction */
                ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
                        & ~XFER_DIR));
@@ -913,12 +897,28 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
        /* Set ATAPI state machine control in terminate sequence */
        ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
 
-       /* Set transfer length to buffer len */
-       for_each_sg(qc->sg, sg, qc->n_elem, si) {
-               ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
-       }
+       /* Set transfer length to the total size of sg buffers */
+       ATAPI_SET_XFER_LEN(base, size >> 1);
+}
 
-       /* Enable ATA DMA operation*/
+/**
+ *     bfin_bmdma_start - Start an IDE DMA transaction
+ *     @qc: Info associated with this ATA transaction.
+ *
+ *     Note: Original code is ata_bmdma_start().
+ */
+
+static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+       dev_dbg(qc->ap->dev, "in atapi dma start\n");
+
+       if (!(ap->udma_mask || ap->mwdma_mask))
+               return;
+
+       /* start ATAPI transfer */
        if (ap->udma_mask)
                ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
                        | ULTRA_START);
@@ -935,34 +935,23 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
-       struct scatterlist *sg;
-       unsigned int si;
+       unsigned int dir;
 
        dev_dbg(qc->ap->dev, "in atapi dma stop\n");
+
        if (!(ap->udma_mask || ap->mwdma_mask))
                return;
 
        /* stop ATAPI DMA controller*/
-       if (qc->tf.flags & ATA_TFLAG_WRITE)
+       if (qc->tf.flags & ATA_TFLAG_WRITE) {
+               dir = DMA_TO_DEVICE;
                disable_dma(CH_ATAPI_TX);
-       else {
+       } else {
+               dir = DMA_FROM_DEVICE;
                disable_dma(CH_ATAPI_RX);
-               if (ap->hsm_task_state & HSM_ST_LAST) {
-                       /*
-                        * On blackfin arch, uncacheable memory is not
-                        * allocated with flag GFP_DMA. DMA buffer from
-                        * common kenel code should be invalidated if
-                        * data cache is enabled. Otherwise, this loop
-                        * is an empty loop and optimized out.
-                        */
-                       for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                               invalidate_dcache_range(
-                                       sg_dma_address(sg),
-                                       sg_dma_address(sg)
-                                       + sg_dma_len(sg));
-                       }
-               }
        }
+
+       dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir);
 }
 
 /**
@@ -1260,6 +1249,11 @@ static void bfin_port_stop(struct ata_port *ap)
 {
        dev_dbg(ap->dev, "in atapi port stop\n");
        if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
+               dma_free_coherent(ap->dev,
+                       BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+                       ap->bmdma_prd,
+                       ap->bmdma_prd_dma);
+
                free_dma(CH_ATAPI_RX);
                free_dma(CH_ATAPI_TX);
        }
@@ -1271,14 +1265,29 @@ static int bfin_port_start(struct ata_port *ap)
        if (!(ap->udma_mask || ap->mwdma_mask))
                return 0;
 
+       ap->bmdma_prd = dma_alloc_coherent(ap->dev,
+                               BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+                               &ap->bmdma_prd_dma,
+                               GFP_KERNEL);
+
+       if (ap->bmdma_prd == NULL) {
+               dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n");
+               goto out;
+       }
+
        if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
                if (request_dma(CH_ATAPI_TX,
                        "BFIN ATAPI TX DMA") >= 0)
                        return 0;
 
                free_dma(CH_ATAPI_RX);
+               dma_free_coherent(ap->dev,
+                       BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+                       ap->bmdma_prd,
+                       ap->bmdma_prd_dma);
        }
 
+out:
        ap->udma_mask = 0;
        ap->mwdma_mask = 0;
        dev_err(ap->dev, "Unable to request ATAPI DMA!"
@@ -1400,7 +1409,7 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
 
 static struct scsi_host_template bfin_sht = {
        ATA_BASE_SHT(DRV_NAME),
-       .sg_tablesize           = SG_NONE,
+       .sg_tablesize           = BFIN_MAX_SG_SEGMENTS,
        .dma_boundary           = ATA_DMA_BOUNDARY,
 };
 
index 5a2c95ba050a28caacbb4e3a799a0cdd176fcf47..0120b0d1e9a5aa838cee9a7347992377012d6c8c 100644 (file)
@@ -140,6 +140,7 @@ enum {
         */
        HCONTROL_ONLINE_PHY_RST = (1 << 31),
        HCONTROL_FORCE_OFFLINE = (1 << 30),
+       HCONTROL_LEGACY = (1 << 28),
        HCONTROL_PARITY_PROT_MOD = (1 << 14),
        HCONTROL_DPATH_PARITY = (1 << 12),
        HCONTROL_SNOOP_ENABLE = (1 << 10),
@@ -1223,6 +1224,10 @@ static int sata_fsl_init_controller(struct ata_host *host)
         * part of the port_start() callback
         */
 
+       /* set the SATA controller to operate in enterprise (non-legacy) mode */
+       temp = ioread32(hcr_base + HCONTROL);
+       iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL);
+
        /* ack. any pending IRQs for this controller/port */
        temp = ioread32(hcr_base + HSTATUS);
        if (temp & 0x3F)
@@ -1421,6 +1426,12 @@ static int sata_fsl_resume(struct platform_device *op)
        /* Recover the CHBA register in the host controller cmd register set */
        iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
 
+       iowrite32((ioread32(hcr_base + HCONTROL)
+                               | HCONTROL_ONLINE_PHY_RST
+                               | HCONTROL_SNOOP_ENABLE
+                               | HCONTROL_PMP_ATTACHED),
+                       hcr_base + HCONTROL);
+
        ata_host_resume(host);
        return 0;
 }
index fda56bde36b836cc7fa4a8bee2c1bc92b0e5a43d..0def898a1d159c12d2543df6ffd6264932a7affc 100644 (file)
@@ -19,6 +19,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
                                   struct bcma_device *core_cc,
                                   struct bcma_device *core_mips);
 #ifdef CONFIG_PM
+int bcma_bus_suspend(struct bcma_bus *bus);
 int bcma_bus_resume(struct bcma_bus *bus);
 #endif
 
index 443b83a2fd7aa012c1c6835137e2642a79499b41..f59244e3397137ca9b6a6d311612dd53db721646 100644 (file)
@@ -235,38 +235,32 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
 }
 
 #ifdef CONFIG_PM
-static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
+static int bcma_host_pci_suspend(struct device *dev)
 {
-       /* Host specific */
-       pci_save_state(dev);
-       pci_disable_device(dev);
-       pci_set_power_state(dev, pci_choose_state(dev, state));
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct bcma_bus *bus = pci_get_drvdata(pdev);
 
-       return 0;
+       bus->mapped_core = NULL;
+
+       return bcma_bus_suspend(bus);
 }
 
-static int bcma_host_pci_resume(struct pci_dev *dev)
+static int bcma_host_pci_resume(struct device *dev)
 {
-       struct bcma_bus *bus = pci_get_drvdata(dev);
-       int err;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct bcma_bus *bus = pci_get_drvdata(pdev);
 
-       /* Host specific */
-       pci_set_power_state(dev, 0);
-       err = pci_enable_device(dev);
-       if (err)
-               return err;
-       pci_restore_state(dev);
+       return bcma_bus_resume(bus);
+}
 
-       /* Bus specific */
-       err = bcma_bus_resume(bus);
-       if (err)
-               return err;
+static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
+                        bcma_host_pci_resume);
+#define BCMA_PM_OPS    (&bcma_pm_ops)
 
-       return 0;
-}
 #else /* CONFIG_PM */
-# define bcma_host_pci_suspend NULL
-# define bcma_host_pci_resume  NULL
+
+#define BCMA_PM_OPS     NULL
+
 #endif /* CONFIG_PM */
 
 static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
@@ -284,8 +278,7 @@ static struct pci_driver bcma_pci_bridge_driver = {
        .id_table = bcma_pci_bridge_tbl,
        .probe = bcma_host_pci_probe,
        .remove = bcma_host_pci_remove,
-       .suspend = bcma_host_pci_suspend,
-       .resume = bcma_host_pci_resume,
+       .driver.pm = BCMA_PM_OPS,
 };
 
 int __init bcma_host_pci_init(void)
index 10f92b371e582bf6d371f425b17dd1dfe5629afd..febbc0a1222ae1444acea8312bab18b7163fbcdb 100644 (file)
@@ -241,6 +241,21 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
 }
 
 #ifdef CONFIG_PM
+int bcma_bus_suspend(struct bcma_bus *bus)
+{
+       struct bcma_device *core;
+
+       list_for_each_entry(core, &bus->cores, list) {
+               struct device_driver *drv = core->dev.driver;
+               if (drv) {
+                       struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+                       if (adrv->suspend)
+                               adrv->suspend(core);
+               }
+       }
+       return 0;
+}
+
 int bcma_bus_resume(struct bcma_bus *bus)
 {
        struct bcma_device *core;
@@ -252,6 +267,15 @@ int bcma_bus_resume(struct bcma_bus *bus)
                bcma_core_chipcommon_init(&bus->drv_cc);
        }
 
+       list_for_each_entry(core, &bus->cores, list) {
+               struct device_driver *drv = core->dev.driver;
+               if (drv) {
+                       struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+                       if (adrv->resume)
+                               adrv->resume(core);
+               }
+       }
+
        return 0;
 }
 #endif
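
bcma_bus_suspend() and bcma_bus_resume() above walk bus->cores, recover each bound bcma_driver from the generic device_driver with container_of(), and invoke its hook. A hedged sketch of a core driver that would receive those callbacks; everything here is hypothetical except the .drv embedding and the suspend/resume hooks the loops above dereference:

static int example_core_suspend(struct bcma_device *core)
{
	/* quiesce the core before the host bridge powers down */
	return 0;
}

static int example_core_resume(struct bcma_device *core)
{
	/* reprogram the core; chipcommon was already reinitialized */
	return 0;
}

static struct bcma_driver example_bcma_driver = {
	.suspend = example_core_suspend,
	.resume  = example_core_resume,
	/* .probe, .remove, .id_table omitted from this sketch */
};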
index a30aa103f95b33a4567b519250331725dc5f9539..4e4c8a4a5fd3fb4412a19fdbd3f62647b285fd6a 100644 (file)
@@ -317,6 +317,17 @@ config BLK_DEV_NBD
 
          If unsure, say N.
 
+config BLK_DEV_NVME
+       tristate "NVM Express block device"
+       depends on PCI
+       ---help---
+         The NVM Express driver is for solid state drives directly
+         connected to the PCI or PCI Express bus.  If you know you
+         don't have one of these, it is safe to answer N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called nvme.
+
 config BLK_DEV_OSD
        tristate "OSD object-as-blkdev support"
        depends on SCSI_OSD_ULD
index ad7b74a44ef3dd3e1e1c39f7e44855e45e0bfd56..5b795059f8fb76107b5f2950dd88c5c9696ad4ee 100644 (file)
@@ -23,6 +23,7 @@ obj-$(CONFIG_XILINX_SYSACE)   += xsysace.o
 obj-$(CONFIG_CDROM_PKTCDVD)    += pktcdvd.o
 obj-$(CONFIG_MG_DISK)          += mg_disk.o
 obj-$(CONFIG_SUNVDC)           += sunvdc.o
+obj-$(CONFIG_BLK_DEV_NVME)     += nvme.o
 obj-$(CONFIG_BLK_DEV_OSD)      += osdblk.o
 
 obj-$(CONFIG_BLK_DEV_UMEM)     += umem.o
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
new file mode 100644 (file)
index 0000000..f4996b0
--- /dev/null
@@ -0,0 +1,1745 @@
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/nvme.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kdev_t.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#define NVME_Q_DEPTH 1024
+#define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
+#define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
+#define NVME_MINORS 64
+#define NVME_IO_TIMEOUT        (5 * HZ)
+#define ADMIN_TIMEOUT  (60 * HZ)
+
+static int nvme_major;
+module_param(nvme_major, int, 0);
+
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0);
+
+static DEFINE_SPINLOCK(dev_list_lock);
+static LIST_HEAD(dev_list);
+static struct task_struct *nvme_thread;
+
+/*
+ * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+       struct list_head node;
+       struct nvme_queue **queues;
+       u32 __iomem *dbs;
+       struct pci_dev *pci_dev;
+       struct dma_pool *prp_page_pool;
+       struct dma_pool *prp_small_pool;
+       int instance;
+       int queue_count;
+       int db_stride;
+       u32 ctrl_config;
+       struct msix_entry *entry;
+       struct nvme_bar __iomem *bar;
+       struct list_head namespaces;
+       char serial[20];
+       char model[40];
+       char firmware_rev[8];
+};
+
+/*
+ * An NVM Express namespace is equivalent to a SCSI LUN
+ */
+struct nvme_ns {
+       struct list_head list;
+
+       struct nvme_dev *dev;
+       struct request_queue *queue;
+       struct gendisk *disk;
+
+       int ns_id;
+       int lba_shift;
+};
+
+/*
+ * An NVM Express queue.  Each device has at least two (one for admin
+ * commands and one for I/O commands).
+ */
+struct nvme_queue {
+       struct device *q_dmadev;
+       struct nvme_dev *dev;
+       spinlock_t q_lock;
+       struct nvme_command *sq_cmds;
+       volatile struct nvme_completion *cqes;
+       dma_addr_t sq_dma_addr;
+       dma_addr_t cq_dma_addr;
+       wait_queue_head_t sq_full;
+       wait_queue_t sq_cong_wait;
+       struct bio_list sq_cong;
+       u32 __iomem *q_db;
+       u16 q_depth;
+       u16 cq_vector;
+       u16 sq_head;
+       u16 sq_tail;
+       u16 cq_head;
+       u16 cq_phase;
+       unsigned long cmdid_data[];
+};
+
+/*
+ * Check we didn't inadvertently grow the command struct
+ */
+static inline void _nvme_check_size(void)
+{
+       BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
+       BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
+       BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+}
+
+typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+                                               struct nvme_completion *);
+
+struct nvme_cmd_info {
+       nvme_completion_fn fn;
+       void *ctx;
+       unsigned long timeout;
+};
+
+static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
+{
+       return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
+}
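
nvme_cmd_info() above depends on the queue's trailing allocation: cmdid_data[] begins with the command-ID bitmap, and the per-command info array sits immediately after the bitmap words. A layout sketch for a queue depth of D (comment only, not driver code):

/* Tail of struct nvme_queue for q_depth == D (a sketch):
 *
 *   cmdid_data: unsigned long bitmap[BITS_TO_LONGS(D)]
 *   then:       struct nvme_cmd_info info[D]
 *
 * nvme_cmd_info() steps past the BITS_TO_LONGS(D) bitmap words to
 * reach info[0]; the queue allocation must cover both parts. */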
+
+/**
+ * alloc_cmdid() - Allocate a Command ID
+ * @nvmeq: The queue that will be used for this command
+ * @ctx: A pointer that will be passed to the handler
+ * @handler: The function to call on completion
+ *
+ * Allocate a Command ID for a queue.  The data passed in will
+ * be passed to the completion handler.  This is implemented by storing
+ * the handler and ctx in the queue's per-command info array, indexed
+ * by the returned Command ID.
+ *
+ * May be called with local interrupts disabled and the q_lock held,
+ * or with interrupts enabled and no locks held.
+ */
+static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
+                               nvme_completion_fn handler, unsigned timeout)
+{
+       int depth = nvmeq->q_depth - 1;
+       struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+       int cmdid;
+
+       do {
+               cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
+               if (cmdid >= depth)
+                       return -EBUSY;
+       } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
+
+       info[cmdid].fn = handler;
+       info[cmdid].ctx = ctx;
+       info[cmdid].timeout = jiffies + timeout;
+       return cmdid;
+}
+
+static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
+                               nvme_completion_fn handler, unsigned timeout)
+{
+       int cmdid;
+       wait_event_killable(nvmeq->sq_full,
+               (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
+       return (cmdid < 0) ? -EINTR : cmdid;
+}
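
A sketch of the cmdid life cycle the kernel-doc above describes: allocate an id with a handler and context, carry the id in the submitted command, and let the completion path recover the pair with free_cmdid(). The function names here are hypothetical and the flow illustrative, not a real call site:

static void example_done(struct nvme_dev *dev, void *ctx,
			 struct nvme_completion *cqe)
{
	/* consume ctx for the completed command here */
}

static void example_cmdid_lifecycle(struct nvme_queue *nvmeq, void *ctx)
{
	nvme_completion_fn fn;
	int cmdid;

	cmdid = alloc_cmdid(nvmeq, ctx, example_done, NVME_IO_TIMEOUT);
	if (cmdid < 0)
		return;			/* -EBUSY: every id is in use */

	/* ... place cmdid in the command and nvme_submit_cmd() it ... */

	/* completion path, q_lock held: recovers ctx and the handler */
	ctx = free_cmdid(nvmeq, cmdid, &fn);
}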
+
+/* Special values must be less than 0x1000 */
+#define CMD_CTX_BASE           ((void *)POISON_POINTER_DELTA)
+#define CMD_CTX_CANCELLED      (0x30C + CMD_CTX_BASE)
+#define CMD_CTX_COMPLETED      (0x310 + CMD_CTX_BASE)
+#define CMD_CTX_INVALID                (0x314 + CMD_CTX_BASE)
+#define CMD_CTX_FLUSH          (0x318 + CMD_CTX_BASE)
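
These sentinels sit in the poison range so they can never alias a real ctx pointer; keeping them below 0x1000 confines them to the unmapped window just above POISON_POINTER_DELTA. A sketch of how such a value could be discriminated (hypothetical helper):

/* Hypothetical test: special ctx values occupy
 * [POISON_POINTER_DELTA, POISON_POINTER_DELTA + 0x1000). */
static inline int example_is_special_ctx(const void *ctx)
{
	unsigned long v = (unsigned long)ctx - POISON_POINTER_DELTA;

	return v < 0x1000;
}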
+
+static void special_completion(struct nvme_dev *dev, void *ctx,
+                                               struct nvme_completion *cqe)
+{
+       if (ctx == CMD_CTX_CANCELLED)
+               return;
+       if (ctx == CMD_CTX_FLUSH)
+               return;
+       if (ctx == CMD_CTX_COMPLETED) {
+               dev_warn(&dev->pci_dev->dev,
+                               "completed id %d twice on queue %d\n",
+                               cqe->command_id, le16_to_cpup(&cqe->sq_id));
+               return;
+       }
+       if (ctx == CMD_CTX_INVALID) {
+               dev_warn(&dev->pci_dev->dev,
+                               "invalid id %d completed on queue %d\n",
+                               cqe->command_id, le16_to_cpup(&cqe->sq_id));
+               return;
+       }
+
+       dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held.  May not sleep.
+ */
+static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
+                                               nvme_completion_fn *fn)
+{
+       void *ctx;
+       struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+       if (cmdid >= nvmeq->q_depth) {
+               *fn = special_completion;
+               return CMD_CTX_INVALID;
+       }
+       *fn = info[cmdid].fn;
+       ctx = info[cmdid].ctx;
+       info[cmdid].fn = special_completion;
+       info[cmdid].ctx = CMD_CTX_COMPLETED;
+       clear_bit(cmdid, nvmeq->cmdid_data);
+       wake_up(&nvmeq->sq_full);
+       return ctx;
+}
+
+static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
+                                               nvme_completion_fn *fn)
+{
+       void *ctx;
+       struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+       if (fn)
+               *fn = info[cmdid].fn;
+       ctx = info[cmdid].ctx;
+       info[cmdid].fn = special_completion;
+       info[cmdid].ctx = CMD_CTX_CANCELLED;
+       return ctx;
+}
+
+static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+{
+       return dev->queues[get_cpu() + 1];
+}
+
+static void put_nvmeq(struct nvme_queue *nvmeq)
+{
+       put_cpu();
+}
+
+/**
+ * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * @nvmeq: The queue to use
+ * @cmd: The command to send
+ *
+ * Safe to use from interrupt context
+ */
+static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+{
+       unsigned long flags;
+       u16 tail;
+       spin_lock_irqsave(&nvmeq->q_lock, flags);
+       tail = nvmeq->sq_tail;
+       memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+       if (++tail == nvmeq->q_depth)
+               tail = 0;
+       writel(tail, nvmeq->q_db);
+       nvmeq->sq_tail = tail;
+       spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+
+       return 0;
+}
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries.  You can't see it in this data structure because C doesn't let
+ * me express that.  Use nvme_alloc_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+       void *private;          /* For the use of the submitter of the I/O */
+       int npages;             /* In the PRP list. 0 means small pool in use */
+       int offset;             /* Of PRP list */
+       int nents;              /* Used in scatterlist */
+       int length;             /* Of data, in bytes */
+       dma_addr_t first_dma;
+       struct scatterlist sg[0];
+};
+
+static __le64 **iod_list(struct nvme_iod *iod)
+{
+       return ((void *)iod) + iod->offset;
+}
+
+/*
+ * Will slightly overestimate the number of pages needed.  This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static int nvme_npages(unsigned size)
+{
+       unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
+       return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+}
+
+static struct nvme_iod *
+nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
+{
+       struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
+                               sizeof(__le64 *) * nvme_npages(nbytes) +
+                               sizeof(struct scatterlist) * nseg, gfp);
+
+       if (iod) {
+               iod->offset = offsetof(struct nvme_iod, sg[nseg]);
+               iod->npages = -1;
+               iod->length = nbytes;
+       }
+
+       return iod;
+}
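
nvme_alloc_iod() sizes one kmalloc for three parts: the fixed fields, the sg[] flexible array, and the array of PRP-list page pointers that iod_list() finds through iod->offset. A worked sizing example as a comment, assuming 4 KiB pages and a 64-bit kernel:

/* Worked example (a sketch): nseg = 4, nbytes = 16384.
 *   nvme_npages(16384): nprps = DIV_ROUND_UP(16384 + 4096, 4096) = 5,
 *                       pages = DIV_ROUND_UP(8 * 5, 4096 - 8) = 1.
 * Allocation = sizeof(struct nvme_iod)
 *            + 1 * sizeof(__le64 *)             (PRP-list pointers)
 *            + 4 * sizeof(struct scatterlist);  (the sg[] entries)
 * iod->offset = offsetof(struct nvme_iod, sg[4]), so iod_list() lands
 * just past the scatterlist entries. */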
+
+static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+{
+       const int last_prp = PAGE_SIZE / 8 - 1;
+       int i;
+       __le64 **list = iod_list(iod);
+       dma_addr_t prp_dma = iod->first_dma;
+
+       if (iod->npages == 0)
+               dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
+       for (i = 0; i < iod->npages; i++) {
+               __le64 *prp_list = list[i];
+               dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
+               dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
+               prp_dma = next_prp_dma;
+       }
+       kfree(iod);
+}
+
+static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
+{
+       struct nvme_queue *nvmeq = get_nvmeq(dev);
+       if (bio_list_empty(&nvmeq->sq_cong))
+               add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+       bio_list_add(&nvmeq->sq_cong, bio);
+       put_nvmeq(nvmeq);
+       wake_up_process(nvme_thread);
+}
+
+static void bio_completion(struct nvme_dev *dev, void *ctx,
+                                               struct nvme_completion *cqe)
+{
+       struct nvme_iod *iod = ctx;
+       struct bio *bio = iod->private;
+       u16 status = le16_to_cpup(&cqe->status) >> 1;
+
+       dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+                       bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+       nvme_free_iod(dev, iod);
+       if (status) {
+               bio_endio(bio, -EIO);
+       } else if (bio->bi_vcnt > bio->bi_idx) {
+               requeue_bio(dev, bio);
+       } else {
+               bio_endio(bio, 0);
+       }
+}
+
+/* length is in bytes.  gfp flags indicates whether we may sleep. */
+static int nvme_setup_prps(struct nvme_dev *dev,
+                       struct nvme_common_command *cmd, struct nvme_iod *iod,
+                       int total_len, gfp_t gfp)
+{
+       struct dma_pool *pool;
+       int length = total_len;
+       struct scatterlist *sg = iod->sg;
+       int dma_len = sg_dma_len(sg);
+       u64 dma_addr = sg_dma_address(sg);
+       int offset = offset_in_page(dma_addr);
+       __le64 *prp_list;
+       __le64 **list = iod_list(iod);
+       dma_addr_t prp_dma;
+       int nprps, i;
+
+       cmd->prp1 = cpu_to_le64(dma_addr);
+       length -= (PAGE_SIZE - offset);
+       if (length <= 0)
+               return total_len;
+
+       dma_len -= (PAGE_SIZE - offset);
+       if (dma_len) {
+               dma_addr += (PAGE_SIZE - offset);
+       } else {
+               sg = sg_next(sg);
+               dma_addr = sg_dma_address(sg);
+               dma_len = sg_dma_len(sg);
+       }
+
+       if (length <= PAGE_SIZE) {
+               cmd->prp2 = cpu_to_le64(dma_addr);
+               return total_len;
+       }
+
+       nprps = DIV_ROUND_UP(length, PAGE_SIZE);
+       if (nprps <= (256 / 8)) {
+               pool = dev->prp_small_pool;
+               iod->npages = 0;
+       } else {
+               pool = dev->prp_page_pool;
+               iod->npages = 1;
+       }
+
+       prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+       if (!prp_list) {
+               cmd->prp2 = cpu_to_le64(dma_addr);
+               iod->npages = -1;
+               return (total_len - length) + PAGE_SIZE;
+       }
+       list[0] = prp_list;
+       iod->first_dma = prp_dma;
+       cmd->prp2 = cpu_to_le64(prp_dma);
+       i = 0;
+       for (;;) {
+               if (i == PAGE_SIZE / 8) {
+                       __le64 *old_prp_list = prp_list;
+                       prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+                       if (!prp_list)
+                               return total_len - length;
+                       list[iod->npages++] = prp_list;
+                       prp_list[0] = old_prp_list[i - 1];
+                       old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+                       i = 1;
+               }
+               prp_list[i++] = cpu_to_le64(dma_addr);
+               dma_len -= PAGE_SIZE;
+               dma_addr += PAGE_SIZE;
+               length -= PAGE_SIZE;
+               if (length <= 0)
+                       break;
+               if (dma_len > 0)
+                       continue;
+               BUG_ON(dma_len < 0);
+               sg = sg_next(sg);
+               dma_addr = sg_dma_address(sg);
+               dma_len = sg_dma_len(sg);
+       }
+
+       return total_len;
+}
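+
+/*
+ * Worked example: with 4k pages, an 8k transfer starting 512 bytes into
+ * a page touches three pages, so prp1 takes the first address, prp2
+ * points at a PRP list, and the list carries the remaining two entries.
+ */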
+
+/* NVMe scatterlists require no holes in the virtual address space */
+#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)  ((vec2)->bv_offset || \
+                       (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
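+/*
+ * i.e. the earlier element must end exactly on a page boundary and the
+ * later one must start at offset zero; anything else would leave a hole
+ * that a PRP entry cannot describe.
+ */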
+
+static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
+               struct bio *bio, enum dma_data_direction dma_dir, int psegs)
+{
+       struct bio_vec *bvec, *bvprv = NULL;
+       struct scatterlist *sg = NULL;
+       int i, old_idx, length = 0, nsegs = 0;
+
+       sg_init_table(iod->sg, psegs);
+       old_idx = bio->bi_idx;
+       bio_for_each_segment(bvec, bio, i) {
+               if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
+                       sg->length += bvec->bv_len;
+               } else {
+                       if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
+                               break;
+                       sg = sg ? sg + 1 : iod->sg;
+                       sg_set_page(sg, bvec->bv_page, bvec->bv_len,
+                                                       bvec->bv_offset);
+                       nsegs++;
+               }
+               length += bvec->bv_len;
+               bvprv = bvec;
+       }
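+       /*
+        * If we stopped at an unmergeable hole, bi_idx records how far we
+        * got; bio_completion() will requeue the remainder of the bio.
+        */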
+       bio->bi_idx = i;
+       iod->nents = nsegs;
+       sg_mark_end(sg);
+       if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
+               bio->bi_idx = old_idx;
+               return -ENOMEM;
+       }
+       return length;
+}
+
+static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+                                                               int cmdid)
+{
+       struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+       memset(cmnd, 0, sizeof(*cmnd));
+       cmnd->common.opcode = nvme_cmd_flush;
+       cmnd->common.command_id = cmdid;
+       cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+
+       if (++nvmeq->sq_tail == nvmeq->q_depth)
+               nvmeq->sq_tail = 0;
+       writel(nvmeq->sq_tail, nvmeq->q_db);
+
+       return 0;
+}
+
+static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+{
+       int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
+                                       special_completion, NVME_IO_TIMEOUT);
+       if (unlikely(cmdid < 0))
+               return cmdid;
+
+       return nvme_submit_flush(nvmeq, ns, cmdid);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held.  May not sleep.
+ */
+static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+                                                               struct bio *bio)
+{
+       struct nvme_command *cmnd;
+       struct nvme_iod *iod;
+       enum dma_data_direction dma_dir;
+       int cmdid, length, result = -ENOMEM;
+       u16 control;
+       u32 dsmgmt;
+       int psegs = bio_phys_segments(ns->queue, bio);
+
+       if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+               result = nvme_submit_flush_data(nvmeq, ns);
+               if (result)
+                       return result;
+       }
+
+       iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+       if (!iod)
+               goto nomem;
+       iod->private = bio;
+
+       result = -EBUSY;
+       cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
+       if (unlikely(cmdid < 0))
+               goto free_iod;
+
+       if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+               return nvme_submit_flush(nvmeq, ns, cmdid);
+
+       control = 0;
+       if (bio->bi_rw & REQ_FUA)
+               control |= NVME_RW_FUA;
+       if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+               control |= NVME_RW_LR;
+
+       dsmgmt = 0;
+       if (bio->bi_rw & REQ_RAHEAD)
+               dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+       cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+       memset(cmnd, 0, sizeof(*cmnd));
+       if (bio_data_dir(bio)) {
+               cmnd->rw.opcode = nvme_cmd_write;
+               dma_dir = DMA_TO_DEVICE;
+       } else {
+               cmnd->rw.opcode = nvme_cmd_read;
+               dma_dir = DMA_FROM_DEVICE;
+       }
+
+       result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
+       if (result < 0)
+               goto free_iod;
+       length = result;
+
+       cmnd->rw.command_id = cmdid;
+       cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+       length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
+                                                               GFP_ATOMIC);
+       cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+       cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+       cmnd->rw.control = cpu_to_le16(control);
+       cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+
+       bio->bi_sector += length >> 9;
+
+       if (++nvmeq->sq_tail == nvmeq->q_depth)
+               nvmeq->sq_tail = 0;
+       writel(nvmeq->sq_tail, nvmeq->q_db);
+
+       return 0;
+
+ free_iod:
+       nvme_free_iod(nvmeq->dev, iod);
+ nomem:
+       return result;
+}
+
+/*
+ * NB: a non-zero return value would mean that we were a stacking driver;
+ * make_request must always succeed.
+ */
+static int nvme_make_request(struct request_queue *q, struct bio *bio)
+{
+       struct nvme_ns *ns = q->queuedata;
+       struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
+       int result = -EBUSY;
+
+       spin_lock_irq(&nvmeq->q_lock);
+       if (bio_list_empty(&nvmeq->sq_cong))
+               result = nvme_submit_bio_queue(nvmeq, ns, bio);
+       if (unlikely(result)) {
+               if (bio_list_empty(&nvmeq->sq_cong))
+                       add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+               bio_list_add(&nvmeq->sq_cong, bio);
+       }
+
+       spin_unlock_irq(&nvmeq->q_lock);
+       put_nvmeq(nvmeq);
+
+       return 0;
+}
+
+static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
+{
+       u16 head, phase;
+
+       head = nvmeq->cq_head;
+       phase = nvmeq->cq_phase;
+
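+       /*
+        * Each completion entry carries a phase tag that the controller
+        * inverts on every pass through the queue; an entry whose tag
+        * still matches the previous pass has not been written yet.
+        */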
+       for (;;) {
+               void *ctx;
+               nvme_completion_fn fn;
+               struct nvme_completion cqe = nvmeq->cqes[head];
+               if ((le16_to_cpu(cqe.status) & 1) != phase)
+                       break;
+               nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
+               if (++head == nvmeq->q_depth) {
+                       head = 0;
+                       phase = !phase;
+               }
+
+               ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
+               fn(nvmeq->dev, ctx, &cqe);
+       }
+
+       /*
+        * If the controller ignores the cq head doorbell and continuously
+        * writes to the queue, it is theoretically possible to wrap around
+        * the queue twice and mistakenly return IRQ_NONE.  Linux only
+        * requires that 0.1% of your interrupts are handled, so this isn't
+        * a big problem.
+        */
+       if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
+               return IRQ_NONE;
+
+       writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+       nvmeq->cq_head = head;
+       nvmeq->cq_phase = phase;
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t nvme_irq(int irq, void *data)
+{
+       irqreturn_t result;
+       struct nvme_queue *nvmeq = data;
+       spin_lock(&nvmeq->q_lock);
+       result = nvme_process_cq(nvmeq);
+       spin_unlock(&nvmeq->q_lock);
+       return result;
+}
+
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+       struct nvme_queue *nvmeq = data;
+       struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
+       if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
+               return IRQ_NONE;
+       return IRQ_WAKE_THREAD;
+}
+
+static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
+{
+       spin_lock_irq(&nvmeq->q_lock);
+       cancel_cmdid(nvmeq, cmdid, NULL);
+       spin_unlock_irq(&nvmeq->q_lock);
+}
+
+struct sync_cmd_info {
+       struct task_struct *task;
+       u32 result;
+       int status;
+};
+
+static void sync_completion(struct nvme_dev *dev, void *ctx,
+                                               struct nvme_completion *cqe)
+{
+       struct sync_cmd_info *cmdinfo = ctx;
+       cmdinfo->result = le32_to_cpup(&cqe->result);
+       cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+       wake_up_process(cmdinfo->task);
+}
+
+/*
+ * Returns 0 on success.  If the result is negative, it's a Linux error code;
+ * if the result is positive, it's an NVM Express status code
+ */
+static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
+                       struct nvme_command *cmd, u32 *result, unsigned timeout)
+{
+       int cmdid;
+       struct sync_cmd_info cmdinfo;
+
+       cmdinfo.task = current;
+       cmdinfo.status = -EINTR;
+
+       cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
+                                                               timeout);
+       if (cmdid < 0)
+               return cmdid;
+       cmd->common.command_id = cmdid;
+
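+       /*
+        * Mark ourselves killable before submitting so that a completion
+        * arriving before schedule() leaves us runnable instead of being
+        * lost.
+        */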
+       set_current_state(TASK_KILLABLE);
+       nvme_submit_cmd(nvmeq, cmd);
+       schedule();
+
+       if (cmdinfo.status == -EINTR) {
+               nvme_abort_command(nvmeq, cmdid);
+               return -EINTR;
+       }
+
+       if (result)
+               *result = cmdinfo.result;
+
+       return cmdinfo.status;
+}
+
+static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+                                                               u32 *result)
+{
+       return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+}
+
+static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
+{
+       int status;
+       struct nvme_command c;
+
+       memset(&c, 0, sizeof(c));
+       c.delete_queue.opcode = opcode;
+       c.delete_queue.qid = cpu_to_le16(id);
+
+       status = nvme_submit_admin_cmd(dev, &c, NULL);
+       if (status)
+               return -EIO;
+       return 0;
+}
+
+static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+                                               struct nvme_queue *nvmeq)
+{
+       int status;
+       struct nvme_command c;
+       int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
+
+       memset(&c, 0, sizeof(c));
+       c.create_cq.opcode = nvme_admin_create_cq;
+       c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
+       c.create_cq.cqid = cpu_to_le16(qid);
+       c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+       c.create_cq.cq_flags = cpu_to_le16(flags);
+       c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
+
+       status = nvme_submit_admin_cmd(dev, &c, NULL);
+       if (status)
+               return -EIO;
+       return 0;
+}
+
+static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
+                                               struct nvme_queue *nvmeq)
+{
+       int status;
+       struct nvme_command c;
+       int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
+
+       memset(&c, 0, sizeof(c));
+       c.create_sq.opcode = nvme_admin_create_sq;
+       c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
+       c.create_sq.sqid = cpu_to_le16(qid);
+       c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+       c.create_sq.sq_flags = cpu_to_le16(flags);
+       c.create_sq.cqid = cpu_to_le16(qid);
+
+       status = nvme_submit_admin_cmd(dev, &c, NULL);
+       if (status)
+               return -EIO;
+       return 0;
+}
+
+static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
+{
+       return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
+}
+
+static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
+{
+       return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
+}
+
+static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+                                                       dma_addr_t dma_addr)
+{
+       struct nvme_command c;
+
+       memset(&c, 0, sizeof(c));
+       c.identify.opcode = nvme_admin_identify;
+       c.identify.nsid = cpu_to_le32(nsid);
+       c.identify.prp1 = cpu_to_le64(dma_addr);
+       c.identify.cns = cpu_to_le32(cns);
+
+       return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
+                               unsigned dword11, dma_addr_t dma_addr)
+{
+       struct nvme_command c;
+
+       memset(&c, 0, sizeof(c));
+       c.features.opcode = nvme_admin_get_features;
+       c.features.prp1 = cpu_to_le64(dma_addr);
+       c.features.fid = cpu_to_le32(fid);
+       c.features.dword11 = cpu_to_le32(dword11);
+
+       return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
+                       unsigned dword11, dma_addr_t dma_addr, u32 *result)
+{
+       struct nvme_command c;
+
+       memset(&c, 0, sizeof(c));
+       c.features.opcode = nvme_admin_set_features;
+       c.features.prp1 = cpu_to_le64(dma_addr);
+       c.features.fid = cpu_to_le32(fid);
+       c.features.dword11 = cpu_to_le32(dword11);
+
+       return nvme_submit_admin_cmd(dev, &c, result);
+}
+
+static void nvme_free_queue(struct nvme_dev *dev, int qid)
+{
+       struct nvme_queue *nvmeq = dev->queues[qid];
+       int vector = dev->entry[nvmeq->cq_vector].vector;
+
+       irq_set_affinity_hint(vector, NULL);
+       free_irq(vector, nvmeq);
+
+       /* Don't tell the adapter to delete the admin queue */
+       if (qid) {
+               adapter_delete_sq(dev, qid);
+               adapter_delete_cq(dev, qid);
+       }
+
+       dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+                               (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+       dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+                                       nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+       kfree(nvmeq);
+}
+
+static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
+                                                       int depth, int vector)
+{
+       struct device *dmadev = &dev->pci_dev->dev;
+       unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+       struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
+       if (!nvmeq)
+               return NULL;
+
+       nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
+                                       &nvmeq->cq_dma_addr, GFP_KERNEL);
+       if (!nvmeq->cqes)
+               goto free_nvmeq;
+       memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
+
+       nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
+                                       &nvmeq->sq_dma_addr, GFP_KERNEL);
+       if (!nvmeq->sq_cmds)
+               goto free_cqdma;
+
+       nvmeq->q_dmadev = dmadev;
+       nvmeq->dev = dev;
+       spin_lock_init(&nvmeq->q_lock);
+       nvmeq->cq_head = 0;
+       nvmeq->cq_phase = 1;
+       init_waitqueue_head(&nvmeq->sq_full);
+       init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
+       bio_list_init(&nvmeq->sq_cong);
+       nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+       nvmeq->q_depth = depth;
+       nvmeq->cq_vector = vector;
+
+       return nvmeq;
+
+ free_cqdma:
+       dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
+                                                       nvmeq->cq_dma_addr);
+ free_nvmeq:
+       kfree(nvmeq);
+       return NULL;
+}
+
+static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+                                                       const char *name)
+{
+       if (use_threaded_interrupts)
+               return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
+                                       nvme_irq_check, nvme_irq,
+                                       IRQF_DISABLED | IRQF_SHARED,
+                                       name, nvmeq);
+       return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
+                               IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+}
+
+static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
+                                       int qid, int cq_size, int vector)
+{
+       int result;
+       struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
+
+       if (!nvmeq)
+               return ERR_PTR(-ENOMEM);
+
+       result = adapter_alloc_cq(dev, qid, nvmeq);
+       if (result < 0)
+               goto free_nvmeq;
+
+       result = adapter_alloc_sq(dev, qid, nvmeq);
+       if (result < 0)
+               goto release_cq;
+
+       result = queue_request_irq(dev, nvmeq, "nvme");
+       if (result < 0)
+               goto release_sq;
+
+       return nvmeq;
+
+ release_sq:
+       adapter_delete_sq(dev, qid);
+ release_cq:
+       adapter_delete_cq(dev, qid);
+ free_nvmeq:
+       dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+                               (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+       dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+                                       nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+       kfree(nvmeq);
+       return ERR_PTR(result);
+}
+
+static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
+{
+       int result;
+       u32 aqa;
+       u64 cap;
+       unsigned long timeout;
+       struct nvme_queue *nvmeq;
+
+       dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+       nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+       if (!nvmeq)
+               return -ENOMEM;
+
+       aqa = nvmeq->q_depth - 1;
+       aqa |= aqa << 16;
+
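+       /*
+        * MPS encodes the memory page size as 2^(12 + MPS); IOSQES/IOCQES
+        * select the standard 64-byte submission and 16-byte completion
+        * queue entry sizes.
+        */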
+       dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
+       dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
+       dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
+       dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
+
+       writel(0, &dev->bar->cc);
+       writel(aqa, &dev->bar->aqa);
+       writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
+       writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+       writel(dev->ctrl_config, &dev->bar->cc);
+
+       cap = readq(&dev->bar->cap);
+       timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+       dev->db_stride = NVME_CAP_STRIDE(cap);
+
+       while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
+               msleep(100);
+               if (fatal_signal_pending(current))
+                       return -EINTR;
+               if (time_after(jiffies, timeout)) {
+                       dev_err(&dev->pci_dev->dev,
+                               "Device not ready; aborting initialisation\n");
+                       return -ENODEV;
+               }
+       }
+
+       result = queue_request_irq(dev, nvmeq, "nvme admin");
+       dev->queues[0] = nvmeq;
+       return result;
+}
+
+static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+                               unsigned long addr, unsigned length)
+{
+       int i, err, count, nents, offset;
+       struct scatterlist *sg;
+       struct page **pages;
+       struct nvme_iod *iod;
+
+       if (addr & 3)
+               return ERR_PTR(-EINVAL);
+       if (!length)
+               return ERR_PTR(-EINVAL);
+
+       offset = offset_in_page(addr);
+       count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
+       pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+       if (!pages)
+               return ERR_PTR(-ENOMEM);
+
+       err = get_user_pages_fast(addr, count, 1, pages);
+       if (err < count) {
+               count = err;
+               err = -EFAULT;
+               goto put_pages;
+       }
+
+       iod = nvme_alloc_iod(count, length, GFP_KERNEL);
+       if (!iod) {
+               err = -ENOMEM;
+               goto put_pages;
+       }
+       sg = iod->sg;
+       sg_init_table(sg, count);
+       for (i = 0; i < count; i++) {
+               sg_set_page(&sg[i], pages[i],
+                               min_t(int, length, PAGE_SIZE - offset), offset);
+               length -= (PAGE_SIZE - offset);
+               offset = 0;
+       }
+       sg_mark_end(&sg[i - 1]);
+       iod->nents = count;
+
+       err = -ENOMEM;
+       nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+                               write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+       if (!nents)
+               goto free_iod;
+
+       kfree(pages);
+       return iod;
+
+ free_iod:
+       kfree(iod);
+ put_pages:
+       for (i = 0; i < count; i++)
+               put_page(pages[i]);
+       kfree(pages);
+       return ERR_PTR(err);
+}
+
+static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+                       struct nvme_iod *iod)
+{
+       int i;
+
+       dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+                               write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+       for (i = 0; i < iod->nents; i++)
+               put_page(sg_page(&iod->sg[i]));
+}
+
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+       struct nvme_dev *dev = ns->dev;
+       struct nvme_queue *nvmeq;
+       struct nvme_user_io io;
+       struct nvme_command c;
+       unsigned length;
+       int status;
+       struct nvme_iod *iod;
+
+       if (copy_from_user(&io, uio, sizeof(io)))
+               return -EFAULT;
+       length = (io.nblocks + 1) << ns->lba_shift;
+
+       switch (io.opcode) {
+       case nvme_cmd_write:
+       case nvme_cmd_read:
+       case nvme_cmd_compare:
+               iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (IS_ERR(iod))
+               return PTR_ERR(iod);
+
+       memset(&c, 0, sizeof(c));
+       c.rw.opcode = io.opcode;
+       c.rw.flags = io.flags;
+       c.rw.nsid = cpu_to_le32(ns->ns_id);
+       c.rw.slba = cpu_to_le64(io.slba);
+       c.rw.length = cpu_to_le16(io.nblocks);
+       c.rw.control = cpu_to_le16(io.control);
+       c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
+       c.rw.reftag = io.reftag;
+       c.rw.apptag = io.apptag;
+       c.rw.appmask = io.appmask;
+       /* XXX: metadata */
+       length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+
+       nvmeq = get_nvmeq(dev);
+       /*
+        * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+        * disabled.  We may be preempted at any point, and be rescheduled
+        * to a different CPU.  That will cause cacheline bouncing, but no
+        * additional races since q_lock already protects against other CPUs.
+        */
+       put_nvmeq(nvmeq);
+       if (length != (io.nblocks + 1) << ns->lba_shift)
+               status = -ENOMEM;
+       else
+               status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+       nvme_unmap_user_pages(dev, io.opcode & 1, iod);
+       nvme_free_iod(dev, iod);
+       return status;
+}
+
+static int nvme_user_admin_cmd(struct nvme_ns *ns,
+                                       struct nvme_admin_cmd __user *ucmd)
+{
+       struct nvme_dev *dev = ns->dev;
+       struct nvme_admin_cmd cmd;
+       struct nvme_command c;
+       int status, length;
+       struct nvme_iod *iod;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+               return -EFAULT;
+
+       memset(&c, 0, sizeof(c));
+       c.common.opcode = cmd.opcode;
+       c.common.flags = cmd.flags;
+       c.common.nsid = cpu_to_le32(cmd.nsid);
+       c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+       c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+       c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
+       c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
+       c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
+       c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
+       c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
+       c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
+
+       length = cmd.data_len;
+       if (cmd.data_len) {
+               iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
+                                                               length);
+               if (IS_ERR(iod))
+                       return PTR_ERR(iod);
+               length = nvme_setup_prps(dev, &c.common, iod, length,
+                                                               GFP_KERNEL);
+       }
+
+       if (length != cmd.data_len)
+               status = -ENOMEM;
+       else
+               status = nvme_submit_admin_cmd(dev, &c, NULL);
+
+       if (cmd.data_len) {
+               nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
+               nvme_free_iod(dev, iod);
+       }
+       return status;
+}
+
+static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+                                                       unsigned long arg)
+{
+       struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+       switch (cmd) {
+       case NVME_IOCTL_ID:
+               return ns->ns_id;
+       case NVME_IOCTL_ADMIN_CMD:
+               return nvme_user_admin_cmd(ns, (void __user *)arg);
+       case NVME_IOCTL_SUBMIT_IO:
+               return nvme_submit_io(ns, (void __user *)arg);
+       default:
+               return -ENOTTY;
+       }
+}
+
+static const struct block_device_operations nvme_fops = {
+       .owner          = THIS_MODULE,
+       .ioctl          = nvme_ioctl,
+       .compat_ioctl   = nvme_ioctl,
+};
+
+static void nvme_timeout_ios(struct nvme_queue *nvmeq)
+{
+       int depth = nvmeq->q_depth - 1;
+       struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+       unsigned long now = jiffies;
+       int cmdid;
+
+       for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+               void *ctx;
+               nvme_completion_fn fn;
+               static struct nvme_completion cqe = {
+                       .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
+               };
+
+               if (!time_after(now, info[cmdid].timeout))
+                       continue;
+               dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
+               ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+               fn(nvmeq->dev, ctx, &cqe);
+       }
+}
+
+static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
+{
+       while (bio_list_peek(&nvmeq->sq_cong)) {
+               struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+               struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+               if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+                       bio_list_add_head(&nvmeq->sq_cong, bio);
+                       break;
+               }
+               if (bio_list_empty(&nvmeq->sq_cong))
+                       remove_wait_queue(&nvmeq->sq_full,
+                                                       &nvmeq->sq_cong_wait);
+       }
+}
+
+static int nvme_kthread(void *data)
+{
+       struct nvme_dev *dev;
+
+       while (!kthread_should_stop()) {
+               __set_current_state(TASK_RUNNING);
+               spin_lock(&dev_list_lock);
+               list_for_each_entry(dev, &dev_list, node) {
+                       int i;
+                       for (i = 0; i < dev->queue_count; i++) {
+                               struct nvme_queue *nvmeq = dev->queues[i];
+                               if (!nvmeq)
+                                       continue;
+                               spin_lock_irq(&nvmeq->q_lock);
+                               if (nvme_process_cq(nvmeq))
+                                       printk(KERN_DEBUG "process_cq did something\n");
+                               nvme_timeout_ios(nvmeq);
+                               nvme_resubmit_bios(nvmeq);
+                               spin_unlock_irq(&nvmeq->q_lock);
+                       }
+               }
+               spin_unlock(&dev_list_lock);
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout(HZ);
+       }
+       return 0;
+}
+
+static DEFINE_IDA(nvme_index_ida);
+
+static int nvme_get_ns_idx(void)
+{
+       int index, error;
+
+       do {
+               if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
+                       return -1;
+
+               spin_lock(&dev_list_lock);
+               error = ida_get_new(&nvme_index_ida, &index);
+               spin_unlock(&dev_list_lock);
+       } while (error == -EAGAIN);
+
+       if (error)
+               index = -1;
+       return index;
+}
+
+static void nvme_put_ns_idx(int index)
+{
+       spin_lock(&dev_list_lock);
+       ida_remove(&nvme_index_ida, index);
+       spin_unlock(&dev_list_lock);
+}
+
+static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
+                       struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+{
+       struct nvme_ns *ns;
+       struct gendisk *disk;
+       int lbaf;
+
+       if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
+               return NULL;
+
+       ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+       if (!ns)
+               return NULL;
+       ns->queue = blk_alloc_queue(GFP_KERNEL);
+       if (!ns->queue)
+               goto out_free_ns;
+       ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
+       queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
+       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+/*     queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
+       blk_queue_make_request(ns->queue, nvme_make_request);
+       ns->dev = dev;
+       ns->queue->queuedata = ns;
+
+       disk = alloc_disk(NVME_MINORS);
+       if (!disk)
+               goto out_free_queue;
+       ns->ns_id = nsid;
+       ns->disk = disk;
+       lbaf = id->flbas & 0xf;
+       ns->lba_shift = id->lbaf[lbaf].ds;
+
+       disk->major = nvme_major;
+       disk->minors = NVME_MINORS;
+       disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+       disk->fops = &nvme_fops;
+       disk->private_data = ns;
+       disk->queue = ns->queue;
+       disk->driverfs_dev = &dev->pci_dev->dev;
+       sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
+       set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+       return ns;
+
+ out_free_queue:
+       blk_cleanup_queue(ns->queue);
+ out_free_ns:
+       kfree(ns);
+       return NULL;
+}
+
+static void nvme_ns_free(struct nvme_ns *ns)
+{
+       int index = ns->disk->first_minor / NVME_MINORS;
+       put_disk(ns->disk);
+       nvme_put_ns_idx(index);
+       blk_cleanup_queue(ns->queue);
+       kfree(ns);
+}
+
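+/*
+ * The Number of Queues feature takes zero-based counts: the low 16 bits
+ * request submission queues, the high 16 bits completion queues, and the
+ * controller answers with how many of each it actually allocated.
+ */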
+static int set_queue_count(struct nvme_dev *dev, int count)
+{
+       int status;
+       u32 result;
+       u32 q_count = (count - 1) | ((count - 1) << 16);
+
+       status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
+                                                               &result);
+       if (status)
+               return -EIO;
+       return min(result & 0xffff, result >> 16) + 1;
+}
+
+static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
+{
+       int result, cpu, i, nr_io_queues, db_bar_size;
+
+       nr_io_queues = num_online_cpus();
+       result = set_queue_count(dev, nr_io_queues);
+       if (result < 0)
+               return result;
+       if (result < nr_io_queues)
+               nr_io_queues = result;
+
+       /* Deregister the admin queue's interrupt */
+       free_irq(dev->entry[0].vector, dev->queues[0]);
+
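+       /*
+        * Each queue pair needs a submission-tail and a completion-head
+        * doorbell; with the stride from CAP that is 2^(3 + db_stride)
+        * bytes per pair, starting 4k into the BAR.
+        */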
+       db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+       if (db_bar_size > 8192) {
+               iounmap(dev->bar);
+               dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
+                                                               db_bar_size);
+               dev->dbs = ((void __iomem *)dev->bar) + 4096;
+               dev->queues[0]->q_db = dev->dbs;
+       }
+
+       for (i = 0; i < nr_io_queues; i++)
+               dev->entry[i].entry = i;
+       for (;;) {
+               result = pci_enable_msix(dev->pci_dev, dev->entry,
+                                                               nr_io_queues);
+               if (result == 0) {
+                       break;
+               } else if (result > 0) {
+                       nr_io_queues = result;
+                       continue;
+               } else {
+                       nr_io_queues = 1;
+                       break;
+               }
+       }
+
+       result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+       /* XXX: handle failure here */
+
+       cpu = cpumask_first(cpu_online_mask);
+       for (i = 0; i < nr_io_queues; i++) {
+               irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
+               cpu = cpumask_next(cpu, cpu_online_mask);
+       }
+
+       for (i = 0; i < nr_io_queues; i++) {
+               dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
+                                                       NVME_Q_DEPTH, i);
+               if (IS_ERR(dev->queues[i + 1]))
+                       return PTR_ERR(dev->queues[i + 1]);
+               dev->queue_count++;
+       }
+
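+       /*
+        * Map any remaining possible CPUs onto the queues we did create,
+        * round-robin over a power-of-two subset of them.
+        */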
+       for (; i < num_possible_cpus(); i++) {
+               int target = i % rounddown_pow_of_two(dev->queue_count - 1);
+               dev->queues[i + 1] = dev->queues[target + 1];
+       }
+
+       return 0;
+}
+
+static void nvme_free_queues(struct nvme_dev *dev)
+{
+       int i;
+
+       for (i = dev->queue_count - 1; i >= 0; i--)
+               nvme_free_queue(dev, i);
+}
+
+static int __devinit nvme_dev_add(struct nvme_dev *dev)
+{
+       int res, nn, i;
+       struct nvme_ns *ns, *next;
+       struct nvme_id_ctrl *ctrl;
+       struct nvme_id_ns *id_ns;
+       void *mem;
+       dma_addr_t dma_addr;
+
+       res = nvme_setup_io_queues(dev);
+       if (res)
+               return res;
+
+       mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
+                                                               GFP_KERNEL);
+       if (!mem)
+               return -ENOMEM;
+
+       res = nvme_identify(dev, 0, 1, dma_addr);
+       if (res) {
+               res = -EIO;
+               goto out_free;
+       }
+
+       ctrl = mem;
+       nn = le32_to_cpup(&ctrl->nn);
+       memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
+       memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
+       memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
+
+       id_ns = mem;
+       for (i = 1; i <= nn; i++) {
+               res = nvme_identify(dev, i, 0, dma_addr);
+               if (res)
+                       continue;
+
+               if (id_ns->ncap == 0)
+                       continue;
+
+               res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
+                                                       dma_addr + 4096);
+               if (res)
+                       continue;
+
+               ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
+               if (ns)
+                       list_add_tail(&ns->list, &dev->namespaces);
+       }
+       list_for_each_entry(ns, &dev->namespaces, list)
+               add_disk(ns->disk);
+
+       goto out;
+
+ out_free:
+       list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+               list_del(&ns->list);
+               nvme_ns_free(ns);
+       }
+
+ out:
+       dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
+       return res;
+}
+
+static int nvme_dev_remove(struct nvme_dev *dev)
+{
+       struct nvme_ns *ns, *next;
+
+       spin_lock(&dev_list_lock);
+       list_del(&dev->node);
+       spin_unlock(&dev_list_lock);
+
+       /* TODO: wait all I/O finished or cancel them */
+
+       list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+               list_del(&ns->list);
+               del_gendisk(ns->disk);
+               nvme_ns_free(ns);
+       }
+
+       nvme_free_queues(dev);
+
+       return 0;
+}
+
+static int nvme_setup_prp_pools(struct nvme_dev *dev)
+{
+       struct device *dmadev = &dev->pci_dev->dev;
+       dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+                                               PAGE_SIZE, PAGE_SIZE, 0);
+       if (!dev->prp_page_pool)
+               return -ENOMEM;
+
+       /*
+        * Optimisation for I/Os between 4k and 128k: a 256-byte list holds
+        * 32 PRP entries, enough to map 128k of 4k pages.
+        */
+       dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
+                                               256, 256, 0);
+       if (!dev->prp_small_pool) {
+               dma_pool_destroy(dev->prp_page_pool);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static void nvme_release_prp_pools(struct nvme_dev *dev)
+{
+       dma_pool_destroy(dev->prp_page_pool);
+       dma_pool_destroy(dev->prp_small_pool);
+}
+
+/* XXX: Use an ida or something to let remove / add work correctly */
+static void nvme_set_instance(struct nvme_dev *dev)
+{
+       static int instance;
+       dev->instance = instance++;
+}
+
+static void nvme_release_instance(struct nvme_dev *dev)
+{
+}
+
+static int __devinit nvme_probe(struct pci_dev *pdev,
+                                               const struct pci_device_id *id)
+{
+       int bars, result = -ENOMEM;
+       struct nvme_dev *dev;
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+       dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
+                                                               GFP_KERNEL);
+       if (!dev->entry)
+               goto free;
+       dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
+                                                               GFP_KERNEL);
+       if (!dev->queues)
+               goto free;
+
+       if (pci_enable_device_mem(pdev))
+               goto free;
+       pci_set_master(pdev);
+       bars = pci_select_bars(pdev, IORESOURCE_MEM);
+       if (pci_request_selected_regions(pdev, bars, "nvme"))
+               goto disable;
+
+       INIT_LIST_HEAD(&dev->namespaces);
+       dev->pci_dev = pdev;
+       pci_set_drvdata(pdev, dev);
+       dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+       nvme_set_instance(dev);
+       dev->entry[0].vector = pdev->irq;
+
+       result = nvme_setup_prp_pools(dev);
+       if (result)
+               goto disable_msix;
+
+       dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+       if (!dev->bar) {
+               result = -ENOMEM;
+               goto disable_msix;
+       }
+
+       result = nvme_configure_admin_queue(dev);
+       if (result)
+               goto unmap;
+       dev->queue_count++;
+
+       spin_lock(&dev_list_lock);
+       list_add(&dev->node, &dev_list);
+       spin_unlock(&dev_list_lock);
+
+       result = nvme_dev_add(dev);
+       if (result)
+               goto delete;
+
+       return 0;
+
+ delete:
+       spin_lock(&dev_list_lock);
+       list_del(&dev->node);
+       spin_unlock(&dev_list_lock);
+
+       nvme_free_queues(dev);
+ unmap:
+       iounmap(dev->bar);
+ disable_msix:
+       pci_disable_msix(pdev);
+       nvme_release_instance(dev);
+       nvme_release_prp_pools(dev);
+ disable:
+       pci_disable_device(pdev);
+       pci_release_regions(pdev);
+ free:
+       kfree(dev->queues);
+       kfree(dev->entry);
+       kfree(dev);
+       return result;
+}
+
+static void __devexit nvme_remove(struct pci_dev *pdev)
+{
+       struct nvme_dev *dev = pci_get_drvdata(pdev);
+       nvme_dev_remove(dev);
+       pci_disable_msix(pdev);
+       iounmap(dev->bar);
+       nvme_release_instance(dev);
+       nvme_release_prp_pools(dev);
+       pci_disable_device(pdev);
+       pci_release_regions(pdev);
+       kfree(dev->queues);
+       kfree(dev->entry);
+       kfree(dev);
+}
+
+/* These functions are yet to be implemented */
+#define nvme_error_detected NULL
+#define nvme_dump_registers NULL
+#define nvme_link_reset NULL
+#define nvme_slot_reset NULL
+#define nvme_error_resume NULL
+#define nvme_suspend NULL
+#define nvme_resume NULL
+
+static struct pci_error_handlers nvme_err_handler = {
+       .error_detected = nvme_error_detected,
+       .mmio_enabled   = nvme_dump_registers,
+       .link_reset     = nvme_link_reset,
+       .slot_reset     = nvme_slot_reset,
+       .resume         = nvme_error_resume,
+};
+
+/* Move to pci_ids.h later */
+#define PCI_CLASS_STORAGE_EXPRESS      0x010802
+
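+/* Match on the NVM Express class code so any compliant controller binds. */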
+static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+       { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, nvme_id_table);
+
+static struct pci_driver nvme_driver = {
+       .name           = "nvme",
+       .id_table       = nvme_id_table,
+       .probe          = nvme_probe,
+       .remove         = __devexit_p(nvme_remove),
+       .suspend        = nvme_suspend,
+       .resume         = nvme_resume,
+       .err_handler    = &nvme_err_handler,
+};
+
+static int __init nvme_init(void)
+{
+       int result = -EBUSY;
+
+       nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+       if (IS_ERR(nvme_thread))
+               return PTR_ERR(nvme_thread);
+
+       nvme_major = register_blkdev(nvme_major, "nvme");
+       if (nvme_major <= 0)
+               goto kill_kthread;
+
+       result = pci_register_driver(&nvme_driver);
+       if (result)
+               goto unregister_blkdev;
+       return 0;
+
+ unregister_blkdev:
+       unregister_blkdev(nvme_major, "nvme");
+ kill_kthread:
+       kthread_stop(nvme_thread);
+       return result;
+}
+
+static void __exit nvme_exit(void)
+{
+       pci_unregister_driver(&nvme_driver);
+       unregister_blkdev(nvme_major, "nvme");
+       kthread_stop(nvme_thread);
+}
+
+MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.8");
+module_init(nvme_init);
+module_exit(nvme_exit);
index 5a99bb3f255ae7c34fedc540949ab0163a15d5a5..f1a274994bb1fbdc1c8e0e54b26aebe58b161c2b 100644 (file)
@@ -124,7 +124,7 @@ config MV_XOR
 
 config MX3_IPU
        bool "MX3x Image Processing Unit support"
-       depends on SOC_IMX31 || SOC_IMX35
+       depends on ARCH_MXC
        select DMA_ENGINE
        default y
        help
@@ -187,6 +187,13 @@ config TIMB_DMA
        help
          Enable support for the Timberdale FPGA DMA engine.
 
+config SIRF_DMA
+       tristate "CSR SiRFprimaII DMA support"
+       depends on ARCH_PRIMA2
+       select DMA_ENGINE
+       help
+         Enable support for the CSR SiRFprimaII DMA engine.
+
 config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
        bool
 
@@ -201,26 +208,26 @@ config PL330_DMA
          platform_data for a dma-pl330 device.
 
 config PCH_DMA
-       tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
+       tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
        depends on PCI && X86
        select DMA_ENGINE
        help
          Enable support for Intel EG20T PCH DMA engine.
 
-         This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-         Output Hub), ML7213 and ML7223.
-         ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
-         for MP(Media Phone) use.
-         ML7213/ML7223 is companion chip for Intel Atom E6xx series.
-         ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+         This driver can also be used for the LAPIS Semiconductor IOH
+         (Input/Output Hub) devices ML7213, ML7223 and ML7831.
+         The ML7213 IOH is for IVI (In-Vehicle Infotainment) use, the
+         ML7223 IOH is for MP (Media Phone) use, and the ML7831 IOH is
+         for general-purpose use.  They are companion chips for the Intel
+         Atom E6xx series and are fully compatible with the Intel EG20T PCH.
 
 config IMX_SDMA
        tristate "i.MX SDMA support"
-       depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
+       depends on ARCH_MXC
        select DMA_ENGINE
        help
          Support the i.MX SDMA engine. This engine is integrated into
-         Freescale i.MX25/31/35/51 chips.
+         Freescale i.MX25/31/35/51/53 chips.
 
 config IMX_DMA
        tristate "i.MX DMA support"
index 30cf3b1f0c5ca4ff633d9e50474feef0a49fc565..009a222e8283cba3929fd8763ae9abe9b4d6d694 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
 obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
index 0698695e8bf9508e0c0dac5cdad44f7f8f508c07..8a281584458b582bbb872137323ff82cdaac2eb1 100644 (file)
@@ -854,8 +854,10 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
        int ret;
 
        /* Check if we already have a channel */
-       if (plchan->phychan)
-               return 0;
+       if (plchan->phychan) {
+               ch = plchan->phychan;
+               goto got_channel;
+       }
 
        ch = pl08x_get_phy_channel(pl08x, plchan);
        if (!ch) {
@@ -880,21 +882,22 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
                        return -EBUSY;
                }
                ch->signal = ret;
-
-               /* Assign the flow control signal to this channel */
-               if (txd->direction == DMA_TO_DEVICE)
-                       txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
-               else if (txd->direction == DMA_FROM_DEVICE)
-                       txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
        }
 
+       plchan->phychan = ch;
        dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
                 ch->id,
                 ch->signal,
                 plchan->name);
 
+got_channel:
+       /* Assign the flow control signal to this channel */
+       if (txd->direction == DMA_MEM_TO_DEV)
+               txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+       else if (txd->direction == DMA_DEV_TO_MEM)
+               txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
        plchan->phychan_hold++;
-       plchan->phychan = ch;
 
        return 0;
 }
@@ -1102,10 +1105,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 
        /* Transfer direction */
        plchan->runtime_direction = config->direction;
-       if (config->direction == DMA_TO_DEVICE) {
+       if (config->direction == DMA_MEM_TO_DEV) {
                addr_width = config->dst_addr_width;
                maxburst = config->dst_maxburst;
-       } else if (config->direction == DMA_FROM_DEVICE) {
+       } else if (config->direction == DMA_DEV_TO_MEM) {
                addr_width = config->src_addr_width;
                maxburst = config->src_maxburst;
        } else {
@@ -1136,7 +1139,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
        cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
        cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
 
-       if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+       if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
                plchan->src_addr = config->src_addr;
                plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
                        pl08x_select_bus(plchan->cd->periph_buses,
@@ -1152,7 +1155,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
                "configured channel %s (%s) for %s, data width %d, "
                "maxburst %d words, LE, CCTL=0x%08x\n",
                dma_chan_name(chan), plchan->name,
-               (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+               (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
                addr_width,
                maxburst,
                cctl);
@@ -1322,7 +1325,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 
 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags)
 {
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -1354,10 +1357,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
         */
        txd->direction = direction;
 
-       if (direction == DMA_TO_DEVICE) {
+       if (direction == DMA_MEM_TO_DEV) {
                txd->cctl = plchan->dst_cctl;
                slave_addr = plchan->dst_addr;
-       } else if (direction == DMA_FROM_DEVICE) {
+       } else if (direction == DMA_DEV_TO_MEM) {
                txd->cctl = plchan->src_cctl;
                slave_addr = plchan->src_addr;
        } else {
@@ -1368,10 +1371,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
        }
 
        if (plchan->cd->device_fc)
-               tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+               tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
                        PL080_FLOW_PER2MEM_PER;
        else
-               tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+               tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
                        PL080_FLOW_PER2MEM;
 
        txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1387,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
                list_add_tail(&dsg->node, &txd->dsg_list);
 
                dsg->len = sg_dma_len(sg);
-               if (direction == DMA_TO_DEVICE) {
+               if (direction == DMA_MEM_TO_DEV) {
                        dsg->src_addr = sg_phys(sg);
                        dsg->dst_addr = slave_addr;
                } else {
index fcfa0a8b5c59956516060cd9ff5535f6524a819f..97f87b29b9f3db5ea0068e076e145a0a81d5ad08 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include "at_hdmac_regs.h"
 
@@ -660,7 +662,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  */
 static struct dma_async_tx_descriptor *
 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
@@ -678,7 +680,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
        dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
                        sg_len,
-                       direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+                       direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
                        flags);
 
        if (unlikely(!atslave || !sg_len)) {
@@ -692,7 +694,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        ctrlb = ATC_IEN;
 
        switch (direction) {
-       case DMA_TO_DEVICE:
+       case DMA_MEM_TO_DEV:
                ctrla |=  ATC_DST_WIDTH(reg_width);
                ctrlb |=  ATC_DST_ADDR_MODE_FIXED
                        | ATC_SRC_ADDR_MODE_INCR
@@ -725,7 +727,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        total_len += len;
                }
                break;
-       case DMA_FROM_DEVICE:
+       case DMA_DEV_TO_MEM:
                ctrla |=  ATC_SRC_WIDTH(reg_width);
                ctrlb |=  ATC_DST_ADDR_MODE_INCR
                        | ATC_SRC_ADDR_MODE_FIXED
@@ -787,7 +789,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
  */
 static int
 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        if (period_len > (ATC_BTSIZE_MAX << reg_width))
                goto err_out;
@@ -795,7 +797,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
                goto err_out;
        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
                goto err_out;
-       if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+       if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
                goto err_out;
 
        return 0;
@@ -810,7 +812,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
 static int
 atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
                unsigned int period_index, dma_addr_t buf_addr,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        u32             ctrla;
        unsigned int    reg_width = atslave->reg_width;
@@ -822,7 +824,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
                | period_len >> reg_width;
 
        switch (direction) {
-       case DMA_TO_DEVICE:
+       case DMA_MEM_TO_DEV:
                desc->lli.saddr = buf_addr + (period_len * period_index);
                desc->lli.daddr = atslave->tx_reg;
                desc->lli.ctrla = ctrla;
@@ -833,7 +835,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
                                | ATC_DIF(AT_DMA_PER_IF);
                break;
 
-       case DMA_FROM_DEVICE:
+       case DMA_DEV_TO_MEM:
                desc->lli.saddr = atslave->rx_reg;
                desc->lli.daddr = buf_addr + (period_len * period_index);
                desc->lli.ctrla = ctrla;
@@ -861,7 +863,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
  */
 static struct dma_async_tx_descriptor *
 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma_slave     *atslave = chan->private;
@@ -872,7 +874,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        unsigned int            i;
 
        dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
-                       direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+                       direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
                        buf_addr,
                        periods, buf_len, period_len);
 
@@ -1175,6 +1177,56 @@ static void atc_free_chan_resources(struct dma_chan *chan)
 
 /*--  Module Management  -----------------------------------------------*/
 
+/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
+static struct at_dma_platform_data at91sam9rl_config = {
+       .nr_channels = 2,
+};
+static struct at_dma_platform_data at91sam9g45_config = {
+       .nr_channels = 8,
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_dma_dt_ids[] = {
+       {
+               .compatible = "atmel,at91sam9rl-dma",
+               .data = &at91sam9rl_config,
+       }, {
+               .compatible = "atmel,at91sam9g45-dma",
+               .data = &at91sam9g45_config,
+       }, {
+               /* sentinel */
+       }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
+#endif
+
+static const struct platform_device_id atdma_devtypes[] = {
+       {
+               .name = "at91sam9rl_dma",
+               .driver_data = (unsigned long) &at91sam9rl_config,
+       }, {
+               .name = "at91sam9g45_dma",
+               .driver_data = (unsigned long) &at91sam9g45_config,
+       }, {
+               /* sentinel */
+       }
+};
+
+static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
+                                               struct platform_device *pdev)
+{
+       if (pdev->dev.of_node) {
+               const struct of_device_id *match;
+               match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
+               if (match == NULL)
+                       return NULL;
+               return match->data;
+       }
+       return (struct at_dma_platform_data *)
+                       platform_get_device_id(pdev)->driver_data;
+}
+
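The two tables above let a single driver bind either through a device-tree node whose compatible string appears in atmel_dma_dt_ids, or through a legacy platform device whose name appears in atdma_devtypes; at_dma_get_driver_data() then returns the per-SoC config from whichever table matched. A hedged sketch of the legacy board-file side (the device and function names here are hypothetical; resources are omitted):

    /* Hypothetical board code: .name must match an atdma_devtypes entry. */
    static struct platform_device board_dma_device = {
            .name   = "at91sam9g45_dma",
            .id     = -1,
    };

    static int __init board_add_dma(void)
    {
            return platform_device_register(&board_dma_device);
    }

On the DT side, a node with compatible = "atmel,at91sam9rl-dma" would select the two-channel config through of_match_node() instead.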
 /**
  * at_dma_off - disable DMA controller
  * @atdma: the Atmel HDMAC device
@@ -1193,18 +1245,23 @@ static void at_dma_off(struct at_dma *atdma)
 
 static int __init at_dma_probe(struct platform_device *pdev)
 {
-       struct at_dma_platform_data *pdata;
        struct resource         *io;
        struct at_dma           *atdma;
        size_t                  size;
        int                     irq;
        int                     err;
        int                     i;
+       struct at_dma_platform_data *plat_dat;
 
-       /* get DMA Controller parameters from platform */
-       pdata = pdev->dev.platform_data;
-       if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
-               return -EINVAL;
+       /* setup platform data for each SoC */
+       dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
+       dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+       dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
+
+       /* get DMA parameters from controller type */
+       plat_dat = at_dma_get_driver_data(pdev);
+       if (!plat_dat)
+               return -ENODEV;
 
        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!io)
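As the comment in the previous hunk notes, cap_mask is a dma_cap_mask_t — a multi-word bitmap — so it cannot be filled by a static integer initializer; the probe path sets the bits at run time with dma_cap_set() instead. A small sketch using the stock dmaengine helpers:

    dma_cap_mask_t mask;

    dma_cap_zero(mask);             /* clear every word of the bitmap */
    dma_cap_set(DMA_MEMCPY, mask);
    dma_cap_set(DMA_SLAVE, mask);
    if (dma_has_cap(DMA_SLAVE, mask))
            pr_info("slave transfers supported\n");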
@@ -1215,14 +1272,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
                return irq;
 
        size = sizeof(struct at_dma);
-       size += pdata->nr_channels * sizeof(struct at_dma_chan);
+       size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
        atdma = kzalloc(size, GFP_KERNEL);
        if (!atdma)
                return -ENOMEM;
 
-       /* discover transaction capabilites from the platform data */
-       atdma->dma_common.cap_mask = pdata->cap_mask;
-       atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
+       /* discover transaction capabilities */
+       atdma->dma_common.cap_mask = plat_dat->cap_mask;
+       atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
 
        size = resource_size(io);
        if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
@@ -1268,7 +1325,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
        /* initialize channels related values */
        INIT_LIST_HEAD(&atdma->dma_common.channels);
-       for (i = 0; i < pdata->nr_channels; i++) {
+       for (i = 0; i < plat_dat->nr_channels; i++) {
                struct at_dma_chan      *atchan = &atdma->chan[i];
 
                atchan->chan_common.device = &atdma->dma_common;
@@ -1313,7 +1370,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
        dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
          dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
          dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
-         pdata->nr_channels);
+         plat_dat->nr_channels);
 
        dma_async_device_register(&atdma->dma_common);
 
@@ -1495,9 +1552,11 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = {
 static struct platform_driver at_dma_driver = {
        .remove         = __exit_p(at_dma_remove),
        .shutdown       = at_dma_shutdown,
+       .id_table       = atdma_devtypes,
        .driver = {
                .name   = "at_hdmac",
                .pm     = &at_dma_dev_pm_ops,
+               .of_match_table = of_match_ptr(atmel_dma_dt_ids),
        },
 };
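of_match_ptr() keeps the .of_match_table assignment buildable when CONFIG_OF is off; per <linux/of.h> of this era it is roughly:

    #ifdef CONFIG_OF
    #define of_match_ptr(_ptr)      (_ptr)
    #else
    #define of_match_ptr(_ptr)      NULL
    #endif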
 
index aa4c9aebab7cedec68a42aececeb218aaea0208b..dcaedfc181cf489d97bc0bd01444f7d875211fe8 100644 (file)
@@ -251,6 +251,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
 /**
  * struct at_dma - internal representation of an Atmel HDMA Controller
  * @chan_common: common dmaengine dma_device object members
+ * @atdma_devtype: identifier of DMA controller compatibility
  * @ch_regs: memory mapped register base
  * @clk: dma controller clock
  * @save_imr: interrupt mask register that is saved on suspend/resume cycle
index 4234f416ef115055cb425822a2c411a73dd41590..d65a718c0f9b1ae5819aef9af4c1c58200627a22 100644 (file)
@@ -39,7 +39,7 @@ struct coh901318_desc {
        struct scatterlist *sg;
        unsigned int sg_len;
        struct coh901318_lli *lli;
-       enum dma_data_direction dir;
+       enum dma_transfer_direction dir;
        unsigned long flags;
        u32 head_config;
        u32 head_ctrl;
@@ -1034,7 +1034,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 static struct dma_async_tx_descriptor *
 coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-                       unsigned int sg_len, enum dma_data_direction direction,
+                       unsigned int sg_len, enum dma_transfer_direction direction,
                        unsigned long flags)
 {
        struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -1077,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        ctrl_last |= cohc->runtime_ctrl;
        ctrl |= cohc->runtime_ctrl;
 
-       if (direction == DMA_TO_DEVICE) {
+       if (direction == DMA_MEM_TO_DEV) {
                u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
                        COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
 
@@ -1085,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                ctrl_chained |= tx_flags;
                ctrl_last |= tx_flags;
                ctrl |= tx_flags;
-       } else if (direction == DMA_FROM_DEVICE) {
+       } else if (direction == DMA_DEV_TO_MEM) {
                u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
                        COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
 
@@ -1274,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
        int i = 0;
 
        /* We only support mem to per or per to mem transfers */
-       if (config->direction == DMA_FROM_DEVICE) {
+       if (config->direction == DMA_DEV_TO_MEM) {
                addr = config->src_addr;
                addr_width = config->src_addr_width;
                maxburst = config->src_maxburst;
-       } else if (config->direction == DMA_TO_DEVICE) {
+       } else if (config->direction == DMA_MEM_TO_DEV) {
                addr = config->dst_addr;
                addr_width = config->dst_addr_width;
                maxburst = config->dst_maxburst;
index 9f7e0e6a7eea12e8487cef8fd1395c38cfbdc7a0..6c0e2d4c66827c7a179a55c8bc52375b932dfaa1 100644 (file)
@@ -7,11 +7,10 @@
  * Author: Per Friden <per.friden@stericsson.com>
  */
 
-#include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
-#include <linux/dmapool.h>
 #include <linux/memory.h>
 #include <linux/gfp.h>
+#include <linux/dmapool.h>
 #include <mach/coh901318.h>
 
 #include "coh901318_lli.h"
@@ -177,18 +176,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
                          struct coh901318_lli *lli,
                          dma_addr_t buf, unsigned int size,
                          dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
-                         enum dma_data_direction dir)
+                         enum dma_transfer_direction dir)
 {
        int s = size;
        dma_addr_t src;
        dma_addr_t dst;
 
 
-       if (dir == DMA_TO_DEVICE) {
+       if (dir == DMA_MEM_TO_DEV) {
                src = buf;
                dst = dev_addr;
 
-       } else if (dir == DMA_FROM_DEVICE) {
+       } else if (dir == DMA_DEV_TO_MEM) {
 
                src = dev_addr;
                dst = buf;
@@ -215,9 +214,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
 
                lli = coh901318_lli_next(lli);
 
-               if (dir == DMA_TO_DEVICE)
+               if (dir == DMA_MEM_TO_DEV)
                        src += block_size;
-               else if (dir == DMA_FROM_DEVICE)
+               else if (dir == DMA_DEV_TO_MEM)
                        dst += block_size;
        }
 
@@ -234,7 +233,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
                      struct scatterlist *sgl, unsigned int nents,
                      dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
                      u32 ctrl_last,
-                     enum dma_data_direction dir, u32 ctrl_irq_mask)
+                     enum dma_transfer_direction dir, u32 ctrl_irq_mask)
 {
        int i;
        struct scatterlist *sg;
@@ -249,9 +248,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 
        spin_lock(&pool->lock);
 
-       if (dir == DMA_TO_DEVICE)
+       if (dir == DMA_MEM_TO_DEV)
                dst = dev_addr;
-       else if (dir == DMA_FROM_DEVICE)
+       else if (dir == DMA_DEV_TO_MEM)
                src = dev_addr;
        else
                goto err;
@@ -269,7 +268,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
                        ctrl_sg = ctrl ? ctrl : ctrl_last;
 
 
-               if (dir == DMA_TO_DEVICE)
+               if (dir == DMA_MEM_TO_DEV)
                        /* increment source address */
                        src = sg_phys(sg);
                else
@@ -293,7 +292,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
                        lli->src_addr = src;
                        lli->dst_addr = dst;
 
-                       if (dir == DMA_FROM_DEVICE)
+                       if (dir == DMA_DEV_TO_MEM)
                                dst += elem_size;
                        else
                                src += elem_size;
index 7a5c80990e9ef795def59abeb4e8b310e1245759..abff3714fdda73ed508d588c3b1b3fc923d08e89 100644 (file)
@@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
                          struct coh901318_lli *lli,
                          dma_addr_t buf, unsigned int size,
                          dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
-                         enum dma_data_direction dir);
+                         enum dma_transfer_direction dir);
 
 /**
  * coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer
@@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
                      struct scatterlist *sg, unsigned int nents,
                      dma_addr_t dev_addr, u32 ctrl_chained,
                      u32 ctrl, u32 ctrl_last,
-                     enum dma_data_direction dir, u32 ctrl_irq_mask);
+                     enum dma_transfer_direction dir, u32 ctrl_irq_mask);
 
 #endif /* COH901318_LLI_H */
index b48967b499da0bad30529501492def4695dc1f25..a6c6051ec85811041277c64d675810f14d9da851 100644 (file)
@@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
                !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
                !device->device_prep_dma_sg);
-       BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-               !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
                !device->device_prep_dma_cyclic);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_control);
+       BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
+               !device->device_prep_interleaved_dma);
 
        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
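dma_async_device_register() enforces the capability contract at registration time: every cap bit a driver sets must come with the matching prep callback, now including the new DMA_INTERLEAVE / device_prep_interleaved_dma pair. A sketch of the driver side of that contract — the demo_* callbacks and priv pointer are placeholders, not from this file:

    struct dma_device *dd = &priv->dma_common;      /* hypothetical driver */

    dma_cap_set(DMA_SLAVE, dd->cap_mask);
    dd->device_prep_slave_sg        = demo_prep_slave_sg;   /* required by cap */
    dd->device_control              = demo_control;         /* required by cap */
    dd->device_alloc_chan_resources = demo_alloc;
    dd->device_free_chan_resources  = demo_free;
    dd->device_tx_status            = demo_tx_status;
    dd->device_issue_pending        = demo_issue_pending;

    err = dma_async_device_register(dd);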
index 9bfd6d3607180930692140b43f31db630c25bee6..9b592b02b5f49a3023cdc883322af7c451448a0a 100644 (file)
@@ -166,6 +166,38 @@ dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
        return cookie;
 }
 
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+       struct dw_dma_slave *dws = dwc->chan.private;
+       u32 cfghi = DWC_CFGH_FIFO_MODE;
+       u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+       if (dwc->initialized == true)
+               return;
+
+       if (dws) {
+               /*
+                * We need controller-specific data to set up slave
+                * transfers.
+                */
+               BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+
+               cfghi = dws->cfg_hi;
+               cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+       }
+
+       channel_writel(dwc, CFG_LO, cfglo);
+       channel_writel(dwc, CFG_HI, cfghi);
+
+       /* Enable interrupts */
+       channel_set_bit(dw, MASK.XFER, dwc->mask);
+       channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+       channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+       dwc->initialized = true;
+}
+
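dwc_initialize() pulls the per-channel CFG programming and interrupt unmasking out of alloc_chan_resources (removed further down) and defers them to the first dwc_dostart(), guarded by the new initialized flag; the flag is cleared again on channel free and in dw_dma_off(), so the hardware is reprogrammed after a suspend/resume cycle. The idiom, sketched generically:

    struct demo_chan {
            bool initialized;
            /* ... register handles ... */
    };

    static void demo_start(struct demo_chan *c)
    {
            if (!c->initialized) {
                    /* one-time setup: config registers, irq unmask */
                    c->initialized = true;
            }
            /* kick off the transfer proper */
    }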
 /*----------------------------------------------------------------------*/
 
 /* Called with dwc->lock held and bh disabled */
@@ -189,6 +221,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
                return;
        }
 
+       dwc_initialize(dwc);
+
        channel_writel(dwc, LLP, first->txd.phys);
        channel_writel(dwc, CTL_LO,
                        DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
@@ -696,7 +730,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 static struct dma_async_tx_descriptor *
 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags)
 {
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
@@ -720,7 +754,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        prev = first = NULL;
 
        switch (direction) {
-       case DMA_TO_DEVICE:
+       case DMA_MEM_TO_DEV:
                ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                                | DWC_CTLL_DST_WIDTH(reg_width)
                                | DWC_CTLL_DST_FIX
@@ -777,7 +811,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                                goto slave_sg_todev_fill_desc;
                }
                break;
-       case DMA_FROM_DEVICE:
+       case DMA_DEV_TO_MEM:
                ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                                | DWC_CTLL_SRC_WIDTH(reg_width)
                                | DWC_CTLL_DST_INC
@@ -959,10 +993,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(chan->device);
        struct dw_desc          *desc;
-       struct dw_dma_slave     *dws;
        int                     i;
-       u32                     cfghi;
-       u32                     cfglo;
        unsigned long           flags;
 
        dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -975,26 +1006,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 
        dwc->completed = chan->cookie = 1;
 
-       cfghi = DWC_CFGH_FIFO_MODE;
-       cfglo = 0;
-
-       dws = chan->private;
-       if (dws) {
-               /*
-                * We need controller-specific data to set up slave
-                * transfers.
-                */
-               BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-               cfghi = dws->cfg_hi;
-               cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
-       }
-
-       cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
-
-       channel_writel(dwc, CFG_LO, cfglo);
-       channel_writel(dwc, CFG_HI, cfghi);
-
        /*
         * NOTE: some controllers may have additional features that we
         * need to initialize here, like "scatter-gather" (which
@@ -1026,11 +1037,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
                i = ++dwc->descs_allocated;
        }
 
-       /* Enable interrupts */
-       channel_set_bit(dw, MASK.XFER, dwc->mask);
-       channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-       channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
        spin_unlock_irqrestore(&dwc->lock, flags);
 
        dev_dbg(chan2dev(chan),
@@ -1058,6 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
        spin_lock_irqsave(&dwc->lock, flags);
        list_splice_init(&dwc->free_list, &list);
        dwc->descs_allocated = 0;
+       dwc->initialized = false;
 
        /* Disable interrupts */
        channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1165,7 +1172,7 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop);
  */
 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-               enum dma_data_direction direction)
+               enum dma_transfer_direction direction)
 {
        struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
        struct dw_cyclic_desc           *cdesc;
@@ -1206,7 +1213,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                goto out_err;
        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
                goto out_err;
-       if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+       if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
                goto out_err;
 
        retval = ERR_PTR(-ENOMEM);
@@ -1228,7 +1235,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                        goto out_err_desc_get;
 
                switch (direction) {
-               case DMA_TO_DEVICE:
+               case DMA_MEM_TO_DEV:
                        desc->lli.dar = dws->tx_reg;
                        desc->lli.sar = buf_addr + (period_len * i);
                        desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1239,7 +1246,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                                        | DWC_CTLL_FC(dws->fc)
                                        | DWC_CTLL_INT_EN);
                        break;
-               case DMA_FROM_DEVICE:
+               case DMA_DEV_TO_MEM:
                        desc->lli.dar = buf_addr + (period_len * i);
                        desc->lli.sar = dws->rx_reg;
                        desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1335,6 +1342,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
 
 static void dw_dma_off(struct dw_dma *dw)
 {
+       int i;
+
        dma_writel(dw, CFG, 0);
 
        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -1345,6 +1354,9 @@ static void dw_dma_off(struct dw_dma *dw)
 
        while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
                cpu_relax();
+
+       for (i = 0; i < dw->dma.chancnt; i++)
+               dw->chan[i].initialized = false;
 }
 
 static int __init dw_probe(struct platform_device *pdev)
@@ -1533,6 +1545,7 @@ static int dw_suspend_noirq(struct device *dev)
 
        dw_dma_off(platform_get_drvdata(pdev));
        clk_disable(dw->clk);
+
        return 0;
 }
 
index c3419518d701dbe3a0671cbc906a88c7ee3823a3..5eef6946a36713bd7413ecd29c1751291c4cc41c 100644 (file)
@@ -140,6 +140,7 @@ struct dw_dma_chan {
        u8                      mask;
        u8                      priority;
        bool                    paused;
+       bool                    initialized;
 
        spinlock_t              lock;
 
index b47e2b803fafdaedaf7c8a765858790ce869f2db..59e7a965772bfdff900aa1d82063d6f2510dd8c4 100644 (file)
@@ -246,6 +246,9 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
 static struct ep93xx_dma_desc *
 ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
 {
+       if (list_empty(&edmac->active))
+               return NULL;
+
        return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
 }
 
@@ -263,16 +266,22 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
  */
 static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
 {
+       struct ep93xx_dma_desc *desc;
+
        list_rotate_left(&edmac->active);
 
        if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                return true;
 
+       desc = ep93xx_dma_get_active(edmac);
+       if (!desc)
+               return false;
+
        /*
         * If txd.cookie is set it means that we are back in the first
         * descriptor in the chain and hence done with it.
         */
-       return !ep93xx_dma_get_active(edmac)->txd.cookie;
+       return !desc->txd.cookie;
 }
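ep93xx_dma_get_active() now returns NULL for an empty active list instead of letting list_first_entry() fabricate a pointer from the bare list head, and every caller below grows a NULL check. Later kernels added list_first_entry_or_null() for exactly this shape; a sketch assuming that helper is available:

    static struct ep93xx_dma_desc *
    ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
    {
            /* list_first_entry_or_null() postdates this patch */
            return list_first_entry_or_null(&edmac->active,
                                            struct ep93xx_dma_desc, node);
    }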
 
 /*
@@ -327,10 +336,16 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
 
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
 {
-       struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+       struct ep93xx_dma_desc *desc;
        u32 bus_addr;
 
-       if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
+       desc = ep93xx_dma_get_active(edmac);
+       if (!desc) {
+               dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
+               return;
+       }
+
+       if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
                bus_addr = desc->src_addr;
        else
                bus_addr = desc->dst_addr;
@@ -443,7 +458,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
                control = (5 << M2M_CONTROL_PWSC_SHIFT);
                control |= M2M_CONTROL_NO_HDSK;
 
-               if (data->direction == DMA_TO_DEVICE) {
+               if (data->direction == DMA_MEM_TO_DEV) {
                        control |= M2M_CONTROL_DAH;
                        control |= M2M_CONTROL_TM_TX;
                        control |= M2M_CONTROL_RSS_SSPTX;
@@ -459,11 +474,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
                 * This IDE part is totally untested. Values below are taken
                 * from the EP93xx User's Guide and might not be correct.
                 */
-               control |= M2M_CONTROL_NO_HDSK;
-               control |= M2M_CONTROL_RSS_IDE;
-               control |= M2M_CONTROL_PW_16;
-
-               if (data->direction == DMA_TO_DEVICE) {
+               if (data->direction == DMA_MEM_TO_DEV) {
                        /* Worst case from the UG */
                        control = (3 << M2M_CONTROL_PWSC_SHIFT);
                        control |= M2M_CONTROL_DAH;
@@ -473,6 +484,10 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
                        control |= M2M_CONTROL_SAH;
                        control |= M2M_CONTROL_TM_RX;
                }
+
+               control |= M2M_CONTROL_NO_HDSK;
+               control |= M2M_CONTROL_RSS_IDE;
+               control |= M2M_CONTROL_PW_16;
                break;
 
        default:
@@ -491,7 +506,13 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
 
 static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
 {
-       struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+       struct ep93xx_dma_desc *desc;
+
+       desc = ep93xx_dma_get_active(edmac);
+       if (!desc) {
+               dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
+               return;
+       }
 
        if (edmac->buffer == 0) {
                writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
@@ -669,24 +690,30 @@ static void ep93xx_dma_tasklet(unsigned long data)
 {
        struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
        struct ep93xx_dma_desc *desc, *d;
-       dma_async_tx_callback callback;
-       void *callback_param;
+       dma_async_tx_callback callback = NULL;
+       void *callback_param = NULL;
        LIST_HEAD(list);
 
        spin_lock_irq(&edmac->lock);
+       /*
+        * If dma_terminate_all() was called before we get to run, the active
+        * list has become empty. If that happens we aren't supposed to do
+        * anything more than call ep93xx_dma_advance_work().
+        */
        desc = ep93xx_dma_get_active(edmac);
-       if (desc->complete) {
-               edmac->last_completed = desc->txd.cookie;
-               list_splice_init(&edmac->active, &list);
+       if (desc) {
+               if (desc->complete) {
+                       edmac->last_completed = desc->txd.cookie;
+                       list_splice_init(&edmac->active, &list);
+               }
+               callback = desc->txd.callback;
+               callback_param = desc->txd.callback_param;
        }
        spin_unlock_irq(&edmac->lock);
 
        /* Pick up the next descriptor from the queue */
        ep93xx_dma_advance_work(edmac);
 
-       callback = desc->txd.callback;
-       callback_param = desc->txd.callback_param;
-
        /* Now we can release all the chained descriptors */
        list_for_each_entry_safe(desc, d, &list, node) {
                /*
@@ -706,13 +733,22 @@ static void ep93xx_dma_tasklet(unsigned long data)
 static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
 {
        struct ep93xx_dma_chan *edmac = dev_id;
+       struct ep93xx_dma_desc *desc;
        irqreturn_t ret = IRQ_HANDLED;
 
        spin_lock(&edmac->lock);
 
+       desc = ep93xx_dma_get_active(edmac);
+       if (!desc) {
+               dev_warn(chan2dev(edmac),
+                        "got interrupt while active list is empty\n");
+               spin_unlock(&edmac->lock);
+               return IRQ_NONE;
+       }
+
        switch (edmac->edma->hw_interrupt(edmac)) {
        case INTERRUPT_DONE:
-               ep93xx_dma_get_active(edmac)->complete = true;
+               desc->complete = true;
                tasklet_schedule(&edmac->tasklet);
                break;
 
@@ -803,8 +839,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
                        switch (data->port) {
                        case EP93XX_DMA_SSP:
                        case EP93XX_DMA_IDE:
-                               if (data->direction != DMA_TO_DEVICE &&
-                                   data->direction != DMA_FROM_DEVICE)
+                               if (data->direction != DMA_MEM_TO_DEV &&
+                                   data->direction != DMA_DEV_TO_MEM)
                                        return -EINVAL;
                                break;
                        default:
@@ -952,7 +988,7 @@ ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
  */
 static struct dma_async_tx_descriptor *
 ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-                        unsigned int sg_len, enum dma_data_direction dir,
+                        unsigned int sg_len, enum dma_transfer_direction dir,
                         unsigned long flags)
 {
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
@@ -988,7 +1024,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        goto fail;
                }
 
-               if (dir == DMA_TO_DEVICE) {
+               if (dir == DMA_MEM_TO_DEV) {
                        desc->src_addr = sg_dma_address(sg);
                        desc->dst_addr = edmac->runtime_addr;
                } else {
@@ -1032,7 +1068,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 static struct dma_async_tx_descriptor *
 ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
                           size_t buf_len, size_t period_len,
-                          enum dma_data_direction dir)
+                          enum dma_transfer_direction dir)
 {
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *first;
@@ -1065,7 +1101,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
                        goto fail;
                }
 
-               if (dir == DMA_TO_DEVICE) {
+               if (dir == DMA_MEM_TO_DEV) {
                        desc->src_addr = dma_addr + offset;
                        desc->dst_addr = edmac->runtime_addr;
                } else {
@@ -1133,12 +1169,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
                return -EINVAL;
 
        switch (config->direction) {
-       case DMA_FROM_DEVICE:
+       case DMA_DEV_TO_MEM:
                width = config->src_addr_width;
                addr = config->src_addr;
                break;
 
-       case DMA_TO_DEVICE:
+       case DMA_MEM_TO_DEV:
                width = config->dst_addr_width;
                addr = config->dst_addr;
                break;
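As in the other converted drivers, slave config consumes the src_* fields for DMA_DEV_TO_MEM and the dst_* fields for DMA_MEM_TO_DEV. A client-side sketch for a transmit channel — the FIFO address is made up:

    struct dma_slave_config cfg = {
            .direction      = DMA_MEM_TO_DEV,
            .dst_addr       = 0x80020008,   /* hypothetical TX FIFO */
            .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .dst_maxburst   = 4,
    };

    ret = dmaengine_slave_config(chan, &cfg);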
index 8a781540590cdf1e76e79c74d137a647c02642f6..b98070c33ca9d3b8aa42d1fb5085da9772957733 100644 (file)
@@ -772,7 +772,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
  */
 static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
        struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
-       enum dma_data_direction direction, unsigned long flags)
+       enum dma_transfer_direction direction, unsigned long flags)
 {
        /*
         * This operation is not supported on the Freescale DMA controller
@@ -819,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
                        return -ENXIO;
 
                /* we set the controller burst size depending on direction */
-               if (config->direction == DMA_TO_DEVICE)
+               if (config->direction == DMA_MEM_TO_DEV)
                        size = config->dst_addr_width * config->dst_maxburst;
                else
                        size = config->src_addr_width * config->src_maxburst;
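The burst size here is simply the bus width times the maxburst count, e.g.:

    /* a 4-byte bus width and a maxburst of 8 give 32-byte bursts */
    size = DMA_SLAVE_BUSWIDTH_4_BYTES * 8;  /* = 32 */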
index 4be55f9bb6c19c6b6165a6de82eea9b2d977c18d..e4383ee2c9acd015b72675557ad03a06357f4eda 100644 (file)
@@ -107,7 +107,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                imx_dma_disable(imxdmac->imxdma_channel);
                return 0;
        case DMA_SLAVE_CONFIG:
-               if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+               if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
                        imxdmac->per_address = dmaengine_cfg->src_addr;
                        imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
                        imxdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -224,7 +224,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags)
 {
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
@@ -241,7 +241,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
                dma_length += sg->length;
        }
 
-       if (direction == DMA_FROM_DEVICE)
+       if (direction == DMA_DEV_TO_MEM)
                dmamode = DMA_MODE_READ;
        else
                dmamode = DMA_MODE_WRITE;
@@ -271,7 +271,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
 
 static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
@@ -317,7 +317,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
        imxdmac->sg_list[periods].page_link =
                ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
 
-       if (direction == DMA_FROM_DEVICE)
+       if (direction == DMA_DEV_TO_MEM)
                dmamode = DMA_MODE_READ;
        else
                dmamode = DMA_MODE_WRITE;
index f993955a640c376342498d3336668348ecd78884..a8af379680c16776e19ae3018a9991243a3a8255 100644 (file)
@@ -247,7 +247,7 @@ struct sdma_engine;
 struct sdma_channel {
        struct sdma_engine              *sdma;
        unsigned int                    channel;
-       enum dma_data_direction         direction;
+       enum dma_transfer_direction             direction;
        enum sdma_peripheral_type       peripheral_type;
        unsigned int                    event_id0;
        unsigned int                    event_id1;
@@ -268,6 +268,8 @@ struct sdma_channel {
        struct dma_async_tx_descriptor  desc;
        dma_cookie_t                    last_completed;
        enum dma_status                 status;
+       unsigned int                    chn_count;
+       unsigned int                    chn_real_count;
 };
 
 #define IMX_DMA_SG_LOOP                (1 << 0)
@@ -503,6 +505,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
        struct sdma_buffer_descriptor *bd;
        int i, error = 0;
 
+       sdmac->chn_real_count = 0;
        /*
         * non loop mode. Iterate over all descriptors, collect
         * errors and call callback function
@@ -512,6 +515,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 
                 if (bd->mode.status & (BD_DONE | BD_RROR))
                        error = -EIO;
+                sdmac->chn_real_count += bd->mode.count;
        }
 
        if (error)
@@ -519,9 +523,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
        else
                sdmac->status = DMA_SUCCESS;
 
+       sdmac->last_completed = sdmac->desc.cookie;
        if (sdmac->desc.callback)
                sdmac->desc.callback(sdmac->desc.callback_param);
-       sdmac->last_completed = sdmac->desc.cookie;
 }
 
 static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
@@ -650,7 +654,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
        struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
        int ret;
 
-       if (sdmac->direction == DMA_FROM_DEVICE) {
+       if (sdmac->direction == DMA_DEV_TO_MEM) {
                load_address = sdmac->pc_from_device;
        } else {
                load_address = sdmac->pc_to_device;
@@ -832,17 +836,18 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 
 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
+       unsigned long flags;
        struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
        struct sdma_engine *sdma = sdmac->sdma;
        dma_cookie_t cookie;
 
-       spin_lock_irq(&sdmac->lock);
+       spin_lock_irqsave(&sdmac->lock, flags);
 
        cookie = sdma_assign_cookie(sdmac);
 
        sdma_enable_channel(sdma, sdmac->channel);
 
-       spin_unlock_irq(&sdmac->lock);
+       spin_unlock_irqrestore(&sdmac->lock, flags);
 
        return cookie;
 }
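Switching sdma_tx_submit() from spin_lock_irq() to spin_lock_irqsave() makes it safe to call with interrupts already disabled: the saved flags restore the caller's IRQ state rather than unconditionally re-enabling interrupts. The pattern:

    unsigned long flags;

    spin_lock_irqsave(&sdmac->lock, flags);         /* legal in any context */
    /* ... critical section ... */
    spin_unlock_irqrestore(&sdmac->lock, flags);    /* restores prior state */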
@@ -911,7 +916,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -941,6 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
                goto err_out;
        }
 
+       sdmac->chn_count = 0;
        for_each_sg(sgl, sg, sg_len, i) {
                struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
                int param;
@@ -957,6 +963,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
                }
 
                bd->mode.count = count;
+               sdmac->chn_count += count;
 
                if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
                        ret =  -EINVAL;
@@ -1008,7 +1015,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
@@ -1093,7 +1100,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                sdma_disable_channel(sdmac);
                return 0;
        case DMA_SLAVE_CONFIG:
-               if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+               if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
                        sdmac->per_address = dmaengine_cfg->src_addr;
                        sdmac->watermark_level = dmaengine_cfg->src_maxburst;
                        sdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -1102,6 +1109,7 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                        sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
                        sdmac->word_size = dmaengine_cfg->dst_addr_width;
                }
+               sdmac->direction = dmaengine_cfg->direction;
                return sdma_config_channel(sdmac);
        default:
                return -ENOSYS;
@@ -1119,7 +1127,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 
        last_used = chan->cookie;
 
-       dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+       dma_set_tx_state(txstate, sdmac->last_completed, last_used,
+                       sdmac->chn_count - sdmac->chn_real_count);
 
        return sdmac->status;
 }
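chn_count accumulates the bytes queued in sdma_prep_slave_sg() and chn_real_count the bytes the buffer descriptors report done, so sdma_tx_status() can hand clients a real residue instead of the former hard-coded 0:

    /* bytes still outstanding on this channel */
    u32 residue = sdmac->chn_count - sdmac->chn_real_count;

    dma_set_tx_state(txstate, sdmac->last_completed, last_used, residue);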
index 19a0c64d45d3643a99e7b8972c8c3f3a16fc6e5f..74f70aadf9e47313cb23e3bee95781c8f24a2e02 100644 (file)
@@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
  * callbacks but must be called with the lock held.
  */
 static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
-              struct intel_mid_dma_desc *desc)
+               struct intel_mid_dma_desc *desc)
+               __releases(&midc->lock) __acquires(&midc->lock)
 {
        struct dma_async_tx_descriptor  *txd = &desc->txd;
        dma_async_tx_callback callback_txd = NULL;
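The __releases/__acquires annotations tell sparse that this function temporarily drops midc->lock (to run client callbacks) and re-takes it before returning. A generic sketch of the annotation, assuming a struct with a spinlock:

    static void demo_complete(struct demo_dev *d)
            __releases(&d->lock) __acquires(&d->lock)
    {
            spin_unlock(&d->lock);
            /* invoke the client callback without the lock held */
            spin_lock(&d->lock);
    }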
@@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
                        pci_pool_free(desc->lli_pool, desc->lli,
                                                desc->lli_phys);
                        pci_pool_destroy(desc->lli_pool);
+                       desc->lli = NULL;
                }
                list_move(&desc->desc_node, &midc->free_list);
                midc->busy = false;
@@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
                                                        midc->dma->block_size);
                /*Populate SAR and DAR values*/
                sg_phy_addr = sg_phys(sg);
-               if (desc->dirn ==  DMA_TO_DEVICE) {
+               if (desc->dirn ==  DMA_MEM_TO_DEV) {
                        lli_bloc_desc->sar  = sg_phy_addr;
                        lli_bloc_desc->dar  = mids->dma_slave.dst_addr;
-               } else if (desc->dirn ==  DMA_FROM_DEVICE) {
+               } else if (desc->dirn ==  DMA_DEV_TO_MEM) {
                        lli_bloc_desc->sar  = mids->dma_slave.src_addr;
                        lli_bloc_desc->dar  = sg_phy_addr;
                }
@@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 
        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret != DMA_SUCCESS) {
+               spin_lock_bh(&midc->lock);
                midc_scan_descriptors(to_middma_device(chan->device), midc);
+               spin_unlock_bh(&midc->lock);
 
                last_complete = midc->completed;
                last_used = chan->cookie;
@@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
                        pci_pool_free(desc->lli_pool, desc->lli,
                                                desc->lli_phys);
                        pci_pool_destroy(desc->lli_pool);
+                       desc->lli = NULL;
                }
                list_move(&desc->desc_node, &midc->free_list);
        }
@@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
                if (midc->dma->pimr_mask) {
                        cfg_hi.cfgx.protctl = 0x0; /*default value*/
                        cfg_hi.cfgx.fifo_mode = 1;
-                       if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+                       if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
                                cfg_hi.cfgx.src_per = 0;
                                if (mids->device_instance == 0)
                                        cfg_hi.cfgx.dst_per = 3;
                                if (mids->device_instance == 1)
                                        cfg_hi.cfgx.dst_per = 1;
-                       } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+                       } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
                                if (mids->device_instance == 0)
                                        cfg_hi.cfgx.src_per = 2;
                                if (mids->device_instance == 1)
@@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
                ctl_lo.ctlx.sinc = 0;
                ctl_lo.ctlx.dinc = 0;
        } else {
-               if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+               if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
                        ctl_lo.ctlx.sinc = 0;
                        ctl_lo.ctlx.dinc = 2;
                        ctl_lo.ctlx.tt_fc = 1;
-               } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+               } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
                        ctl_lo.ctlx.sinc = 2;
                        ctl_lo.ctlx.dinc = 0;
                        ctl_lo.ctlx.tt_fc = 2;
@@ -732,7 +737,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
  */
 static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
                        struct dma_chan *chan, struct scatterlist *sgl,
-                       unsigned int sg_len, enum dma_data_direction direction,
+                       unsigned int sg_len, enum dma_transfer_direction direction,
                        unsigned long flags)
 {
        struct intel_mid_dma_chan *midc = NULL;
@@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
        pm_runtime_get_sync(&mid->pdev->dev);
 
        if (mid->state == SUSPENDED) {
-               if (dma_resume(mid->pdev)) {
+               if (dma_resume(&mid->pdev->dev)) {
                        pr_err("ERR_MDMA: resume failed");
                        return -EFAULT;
                }
@@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
                                        LNW_PERIPHRAL_MASK_SIZE);
                if (dma->mask_reg == NULL) {
                        pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
-                       return -ENOMEM;
+                       err = -ENOMEM;
+                       goto err_ioremap;
                }
        } else
                dma->mask_reg = NULL;
@@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
 err_engine:
        free_irq(pdev->irq, dma);
 err_irq:
+       if (dma->mask_reg)
+               iounmap(dma->mask_reg);
+err_ioremap:
        pci_pool_destroy(dma->dma_pool);
 err_dma_pool:
        pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
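The added err_ioremap label completes the goto-unwind chain, so a failed ioremap of the peripheral mask registers no longer leaks the PCI pool, and each failure point undoes the earlier steps in reverse order. The idiom, generically:

    a = alloc_a();
    if (!a)
            return -ENOMEM;
    b = alloc_b();
    if (!b) {
            err = -ENOMEM;
            goto err_free_a;        /* unwind in reverse order */
    }
    return 0;

    err_free_a:
            free_a(a);
            return err;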
@@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
 *
 * This function is called by OS when a power event occurs
 */
-int dma_suspend(struct pci_dev *pci, pm_message_t state)
+static int dma_suspend(struct device *dev)
 {
+       struct pci_dev *pci = to_pci_dev(dev);
        int i;
        struct middma_device *device = pci_get_drvdata(pci);
        pr_debug("MDMA: dma_suspend called\n");
@@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
 *
 * This function is called by OS when a power event occurs
 */
-int dma_resume(struct pci_dev *pci)
+int dma_resume(struct device *dev)
 {
+       struct pci_dev *pci = to_pci_dev(dev);
        int ret;
        struct middma_device *device = pci_get_drvdata(pci);
 
@@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = {
        .runtime_suspend = dma_runtime_suspend,
        .runtime_resume = dma_runtime_resume,
        .runtime_idle = dma_runtime_idle,
+       .suspend = dma_suspend,
+       .resume = dma_resume,
 };
 
 static struct pci_driver intel_mid_dma_pci_driver = {
@@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = {
        .probe          =       intel_mid_dma_probe,
        .remove         =       __devexit_p(intel_mid_dma_remove),
 #ifdef CONFIG_PM
-       .suspend = dma_suspend,
-       .resume = dma_resume,
        .driver = {
                .pm = &intel_mid_dma_pm,
        },
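The legacy pci_driver .suspend/.resume hooks, which took a pci_dev and a pm_message_t, are replaced by dev_pm_ops callbacks taking a struct device, so one ops table now covers both system sleep and the existing runtime-PM entries. The conversion pattern, sketched with hypothetical names:

    static int demo_suspend(struct device *dev)
    {
            struct pci_dev *pdev = to_pci_dev(dev);

            pci_save_state(pdev);   /* quiesce and save hardware state */
            return 0;
    }

    static int demo_resume(struct device *dev)
    {
            pci_restore_state(to_pci_dev(dev));
            return 0;
    }

    static const struct dev_pm_ops demo_pm_ops = {
            .suspend = demo_suspend,
            .resume  = demo_resume,
    };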
index aea5ee88ce035a1d432eb836ef6b4b938f163ab4..c83d35b97bd8e38a91330854c3d8b10acbe4cedf 100644 (file)
@@ -262,7 +262,7 @@ struct intel_mid_dma_desc {
        unsigned int                    lli_length;
        unsigned int                    current_lli;
        dma_addr_t                      next;
-       enum dma_data_direction         dirn;
+       enum dma_transfer_direction             dirn;
        enum dma_status                 status;
        enum dma_slave_buswidth         width; /*width of DMA txn*/
        enum intel_mid_dma_mode         cfg_mode; /*mode configuration*/
@@ -296,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
 }
 
 
-int dma_resume(struct pci_dev *pci);
+int dma_resume(struct device *dev);
 
 #endif /*__INTEL_MID_DMAC_REGS_H__*/
index e03f811a83dd980e5aad9e2b25b0acdc291769f5..04be90b645b839e929512032a234089cfe2d082c 100644 (file)
@@ -1735,8 +1735,6 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
        spin_unlock_bh(&iop_chan->lock);
 }
 
-MODULE_ALIAS("platform:iop-adma");
-
 static struct platform_driver iop_adma_driver = {
        .probe          = iop_adma_probe,
        .remove         = __devexit_p(iop_adma_remove),
@@ -1746,19 +1744,9 @@ static struct platform_driver iop_adma_driver = {
        },
 };
 
-static int __init iop_adma_init (void)
-{
-       return platform_driver_register(&iop_adma_driver);
-}
-
-static void __exit iop_adma_exit (void)
-{
-       platform_driver_unregister(&iop_adma_driver);
-       return;
-}
-module_exit(iop_adma_exit);
-module_init(iop_adma_init);
+module_platform_driver(iop_adma_driver);
 
 MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION("IOP ADMA Engine Driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iop-adma");
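module_platform_driver() replaces the hand-rolled init/exit pair; in this era's <linux/platform_device.h> it expands to roughly:

    #define module_platform_driver(__platform_driver) \
    static int __init __platform_driver##_init(void) \
    { \
            return platform_driver_register(&(__platform_driver)); \
    } \
    module_init(__platform_driver##_init); \
    static void __exit __platform_driver##_exit(void) \
    { \
            platform_driver_unregister(&(__platform_driver)); \
    } \
    module_exit(__platform_driver##_exit);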
index 0e5ef33f90a17ad75bebaa889a7ae7a46d75248c..6212b16e8cf21ea32ae05732ae1f3b1147b134b8 100644 (file)
@@ -312,7 +312,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
        case IPU_PIX_FMT_RGB565:
                params->ip.bpp  = 2;
                params->ip.pfs  = 4;
-               params->ip.npb  = 7;
+               params->ip.npb  = 15;
                params->ip.sat  = 2;            /* SAT = 32-bit access */
                params->ip.ofs0 = 0;            /* Red bit offset */
                params->ip.ofs1 = 5;            /* Green bit offset */
@@ -422,12 +422,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
        params->pp.nsb = 1;
 }
 
-static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
-                                       uint16_t burst_pixels)
-{
-       params->pp.npb = burst_pixels - 1;
-}
-
 static void ipu_ch_param_set_buffer(union chan_param_mem *params,
                                    dma_addr_t buf0, dma_addr_t buf1)
 {
@@ -690,23 +684,6 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
        ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
        ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
        ipu_ch_param_set_rotation(&params, rot_mode);
-       /* Some channels (rotation) have restriction on burst length */
-       switch (channel) {
-       case IDMAC_IC_7:        /* Hangs with burst 8, 16, other values
-                                  invalid - Table 44-30 */
-/*
-               ipu_ch_param_set_burst_size(&params, 8);
- */
-               break;
-       case IDMAC_SDC_0:
-       case IDMAC_SDC_1:
-               /* In original code only IPU_PIX_FMT_RGB565 was setting burst */
-               ipu_ch_param_set_burst_size(&params, 16);
-               break;
-       case IDMAC_IC_0:
-       default:
-               break;
-       }
 
        spin_lock_irqsave(&ipu->lock, flags);
 
@@ -1364,7 +1341,7 @@ static void ipu_gc_tasklet(unsigned long arg)
 /* Allocate and initialise a transfer descriptor. */
 static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
                struct scatterlist *sgl, unsigned int sg_len,
-               enum dma_data_direction direction, unsigned long tx_flags)
+               enum dma_transfer_direction direction, unsigned long tx_flags)
 {
        struct idmac_channel *ichan = to_idmac_chan(chan);
        struct idmac_tx_desc *desc = NULL;
@@ -1376,7 +1353,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
            chan->chan_id != IDMAC_IC_7)
                return NULL;
 
-       if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
+       if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
                dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
                return NULL;
        }
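Per the deleted ipu_ch_param_set_burst_size() helper, npb stores pixels-per-burst minus one, so raising ip.npb from 7 to 15 doubles the RGB565 burst from 8 to 16 pixels and makes the removed per-channel burst fix-ups redundant (this assumes ip.npb and pp.npb name the same field through the parameter union):

    /* npb = pixels per burst - 1 (encoding from the removed helper) */
    params->ip.npb = 16 - 1;        /* 16-pixel bursts for RGB565 */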
index 8ba4edc6185e904ea65b98537a1c09a2b38549bb..4d6d4cf669496ff0afc65d5dee450452b143a2c6 100644 (file)
@@ -835,17 +835,7 @@ static struct platform_driver mpc_dma_driver = {
        },
 };
 
-static int __init mpc_dma_init(void)
-{
-       return platform_driver_register(&mpc_dma_driver);
-}
-module_init(mpc_dma_init);
-
-static void __exit mpc_dma_exit(void)
-{
-       platform_driver_unregister(&mpc_dma_driver);
-}
-module_exit(mpc_dma_exit);
+module_platform_driver(mpc_dma_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
index fc903c0ed234eae754d06911b94f73f463a3b7eb..b06cd4ca626fb4fd93d5ab3b7fedddbfd6ea6c25 100644 (file)
@@ -44,7 +44,6 @@
 #define HW_APBHX_CTRL0                         0x000
 #define BM_APBH_CTRL0_APB_BURST8_EN            (1 << 29)
 #define BM_APBH_CTRL0_APB_BURST_EN             (1 << 28)
-#define BP_APBH_CTRL0_CLKGATE_CHANNEL          8
 #define BP_APBH_CTRL0_RESET_CHANNEL            16
 #define HW_APBHX_CTRL1                         0x010
 #define HW_APBHX_CTRL2                         0x020
@@ -111,6 +110,7 @@ struct mxs_dma_chan {
        int                             chan_irq;
        struct mxs_dma_ccw              *ccw;
        dma_addr_t                      ccw_phys;
+       int                             desc_count;
        dma_cookie_t                    last_completed;
        enum dma_status                 status;
        unsigned int                    flags;
@@ -130,23 +130,6 @@ struct mxs_dma_engine {
        struct mxs_dma_chan             mxs_chans[MXS_DMA_CHANNELS];
 };
 
-static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
-{
-       struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-       int chan_id = mxs_chan->chan.chan_id;
-       int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
-
-       /* enable apbh channel clock */
-       if (dma_is_apbh()) {
-               if (apbh_is_old())
-                       writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
-                               mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
-               else
-                       writel(1 << chan_id,
-                               mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
-       }
-}
-
 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 {
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -165,9 +148,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
        int chan_id = mxs_chan->chan.chan_id;
 
-       /* clkgate needs to be enabled before writing other registers */
-       mxs_dma_clkgate(mxs_chan, 1);
-
        /* set cmd_addr up */
        writel(mxs_chan->ccw_phys,
                mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
@@ -178,9 +158,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 
 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
 {
-       /* disable apbh channel clock */
-       mxs_dma_clkgate(mxs_chan, 0);
-
        mxs_chan->status = DMA_SUCCESS;
 }
 
@@ -268,7 +245,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
        /*
         * When both completion and error of termination bits set at the
         * same time, we do not take it as an error.  IOW, it only becomes
-        * an error we need to handler here in case of ether it's (1) an bus
+        * an error we need to handle here in case of either it's (1) a bus
         * error or (2) a termination error with no completion.
         */
        stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
@@ -338,10 +315,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
        if (ret)
                goto err_clk;
 
-       /* clkgate needs to be enabled for reset to finish */
-       mxs_dma_clkgate(mxs_chan, 1);
        mxs_dma_reset_chan(mxs_chan);
-       mxs_dma_clkgate(mxs_chan, 0);
 
        dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
        mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -377,7 +351,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long append)
 {
        struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
@@ -386,7 +360,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
        struct scatterlist *sg;
        int i, j;
        u32 *pio;
-       static int idx;
+       int idx = append ? mxs_chan->desc_count : 0;
 
        if (mxs_chan->status == DMA_IN_PROGRESS && !append)
                return NULL;
@@ -417,7 +391,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                idx = 0;
        }
 
-       if (direction == DMA_NONE) {
+       if (direction == DMA_TRANS_NONE) {
                ccw = &mxs_chan->ccw[idx++];
                pio = (u32 *) sgl;
 
@@ -450,7 +424,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                        ccw->bits |= CCW_CHAIN;
                        ccw->bits |= CCW_HALT_ON_TERM;
                        ccw->bits |= CCW_TERM_FLUSH;
-                       ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+                       ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
                                        MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
                                        COMMAND);
 
@@ -462,6 +436,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                        }
                }
        }
+       mxs_chan->desc_count = idx;
 
        return &mxs_chan->desc;
 
@@ -472,7 +447,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 
 static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -515,7 +490,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
                ccw->bits |= CCW_IRQ;
                ccw->bits |= CCW_HALT_ON_TERM;
                ccw->bits |= CCW_TERM_FLUSH;
-               ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+               ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
                                MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
 
                dma_addr += period_len;
@@ -523,6 +498,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
 
                i++;
        }
+       mxs_chan->desc_count = i;
 
        return &mxs_chan->desc;
 
@@ -539,8 +515,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
        switch (cmd) {
        case DMA_TERMINATE_ALL:
-               mxs_dma_disable_chan(mxs_chan);
                mxs_dma_reset_chan(mxs_chan);
+               mxs_dma_disable_chan(mxs_chan);
                break;
        case DMA_PAUSE:
                mxs_dma_pause_chan(mxs_chan);
@@ -580,7 +556,7 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
 
        ret = clk_prepare_enable(mxs_dma->clk);
        if (ret)
-               goto err_out;
+               return ret;
 
        ret = mxs_reset_block(mxs_dma->base);
        if (ret)
@@ -604,11 +580,8 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
        writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
                mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
 
-       clk_disable_unprepare(mxs_dma->clk);
-
-       return 0;
-
 err_out:
+       clk_disable_unprepare(mxs_dma->clk);
        return ret;
 }
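
The mxs_dma_init() hunk above folds the success path into the error label, so
the clock is held only for the duration of hardware init. A minimal sketch of
the resulting flow, with hypothetical helper names:

static int hw_init(struct clk *clk, void __iomem *base)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = do_reset(base);		/* hypothetical helper */
	if (ret)
		goto err_out;		/* clock still held: fall through */

	setup_registers(base);		/* hypothetical helper */
err_out:
	clk_disable_unprepare(clk);	/* runs on success and on error */
	return ret;			/* 0 when init succeeded */
}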
 
index a6d0e3dbed0748f1d666f5972b4109dfb043a3e2..823f58179f9d46a8a169dbbaf7551b1d91ec5f28 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Topcliff PCH DMA controller driver
  * Copyright (c) 2010 Intel Corporation
- * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -99,7 +99,7 @@ struct pch_dma_desc {
 struct pch_dma_chan {
        struct dma_chan         chan;
        void __iomem *membase;
-       enum dma_data_direction dir;
+       enum dma_transfer_direction dir;
        struct tasklet_struct   tasklet;
        unsigned long           err_status;
 
@@ -224,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan)
                mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
                                       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
                val &= mask_mode;
-               if (pd_chan->dir == DMA_TO_DEVICE)
+               if (pd_chan->dir == DMA_MEM_TO_DEV)
                        val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
                                       DMA_CTL0_DIR_SHIFT_BITS);
                else
@@ -242,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan)
                mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
                                                 (DMA_CTL0_BITS_PER_CH * ch));
                val &= mask_mode;
-               if (pd_chan->dir == DMA_TO_DEVICE)
+               if (pd_chan->dir == DMA_MEM_TO_DEV)
                        val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
                                       DMA_CTL0_DIR_SHIFT_BITS);
                else
@@ -607,7 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
                        struct scatterlist *sgl, unsigned int sg_len,
-                       enum dma_data_direction direction, unsigned long flags)
+                       enum dma_transfer_direction direction, unsigned long flags)
 {
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma_slave *pd_slave = chan->private;
@@ -623,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
                return NULL;
        }
 
-       if (direction == DMA_FROM_DEVICE)
+       if (direction == DMA_DEV_TO_MEM)
                reg = pd_slave->rx_reg;
-       else if (direction == DMA_TO_DEVICE)
+       else if (direction == DMA_MEM_TO_DEV)
                reg = pd_slave->tx_reg;
        else
                return NULL;
@@ -1018,6 +1018,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
 #define PCI_DEVICE_ID_ML7223_DMA2_4CH  0x800E
 #define PCI_DEVICE_ID_ML7223_DMA3_4CH  0x8017
 #define PCI_DEVICE_ID_ML7223_DMA4_4CH  0x803B
+#define PCI_DEVICE_ID_ML7831_DMA1_8CH  0x8810
+#define PCI_DEVICE_ID_ML7831_DMA2_4CH  0x8815
 
 DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
@@ -1030,6 +1032,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
+       { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
+       { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
        { 0, },
 };
 
@@ -1057,7 +1061,7 @@ static void __exit pch_dma_exit(void)
 module_init(pch_dma_init);
 module_exit(pch_dma_exit);
 
-MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
+MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
                   "DMA controller driver");
 MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
 MODULE_LICENSE("GPL v2");
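
The new ML7831 entries follow the existing table convention: the trailing
number in each entry initializes pci_device_id.driver_data with the channel
count. A hedged sketch of how a probe routine could read it back (simplified
and hypothetical, not the actual pch_dma probe):

static int probe_sketch(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned int nr_channels = id->driver_data;	/* e.g. 8 for ML7831 DMA1 */

	dev_info(&pdev->dev, "DMA controller with %u channels\n", nr_channels);
	return 0;
}
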
index 09adcfcd953e6841e840fb9caa663212c20c03f7..b8ec03ee8e22e495e633ff95bf9faf0d02756dac 100644 (file)
@@ -350,14 +350,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
        case DMA_SLAVE_CONFIG:
                slave_config = (struct dma_slave_config *)arg;
 
-               if (slave_config->direction == DMA_TO_DEVICE) {
+               if (slave_config->direction == DMA_MEM_TO_DEV) {
                        if (slave_config->dst_addr)
                                pch->fifo_addr = slave_config->dst_addr;
                        if (slave_config->dst_addr_width)
                                pch->burst_sz = __ffs(slave_config->dst_addr_width);
                        if (slave_config->dst_maxburst)
                                pch->burst_len = slave_config->dst_maxburst;
-               } else if (slave_config->direction == DMA_FROM_DEVICE) {
+               } else if (slave_config->direction == DMA_DEV_TO_MEM) {
                        if (slave_config->src_addr)
                                pch->fifo_addr = slave_config->src_addr;
                        if (slave_config->src_addr_width)
@@ -621,7 +621,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
 
 static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch = to_pchan(chan);
@@ -636,14 +636,14 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
        }
 
        switch (direction) {
-       case DMA_TO_DEVICE:
+       case DMA_MEM_TO_DEV:
                desc->rqcfg.src_inc = 1;
                desc->rqcfg.dst_inc = 0;
                desc->req.rqtype = MEMTODEV;
                src = dma_addr;
                dst = pch->fifo_addr;
                break;
-       case DMA_FROM_DEVICE:
+       case DMA_DEV_TO_MEM:
                desc->rqcfg.src_inc = 0;
                desc->rqcfg.dst_inc = 1;
                desc->req.rqtype = DEVTOMEM;
@@ -710,7 +710,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 
 static struct dma_async_tx_descriptor *
 pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flg)
 {
        struct dma_pl330_desc *first, *desc = NULL;
@@ -759,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                else
                        list_add_tail(&desc->node, &first->node);
 
-               if (direction == DMA_TO_DEVICE) {
+               if (direction == DMA_MEM_TO_DEV) {
                        desc->rqcfg.src_inc = 1;
                        desc->rqcfg.dst_inc = 0;
                        desc->req.rqtype = MEMTODEV;
@@ -834,17 +834,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
        amba_set_drvdata(adev, pdmac);
 
-#ifdef CONFIG_PM_RUNTIME
-       /* to use the runtime PM helper functions */
-       pm_runtime_enable(&adev->dev);
-
-       /* enable the power domain */
-       if (pm_runtime_get_sync(&adev->dev)) {
-               dev_err(&adev->dev, "failed to get runtime pm\n");
-               ret = -ENODEV;
-               goto probe_err1;
-       }
-#else
+#ifndef CONFIG_PM_RUNTIME
        /* enable dma clk */
        clk_enable(pdmac->clk);
 #endif
@@ -977,10 +967,7 @@ static int __devexit pl330_remove(struct amba_device *adev)
        res = &adev->res;
        release_mem_region(res->start, resource_size(res));
 
-#ifdef CONFIG_PM_RUNTIME
-       pm_runtime_put(&adev->dev);
-       pm_runtime_disable(&adev->dev);
-#else
+#ifndef CONFIG_PM_RUNTIME
        clk_disable(pdmac->clk);
 #endif
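
With the two hunks above, pl330 touches the clock directly only when runtime
PM is compiled out; otherwise clock gating is left to the runtime-PM path. A
hypothetical callback pair illustrating that division of labour (a sketch,
not the pl330 implementation):

static int foo_runtime_suspend(struct device *dev)
{
	struct foo_priv *p = dev_get_drvdata(dev);	/* hypothetical type */

	clk_disable(p->clk);
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	struct foo_priv *p = dev_get_drvdata(dev);

	clk_enable(p->clk);
	return 0;
}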
 
index 81809c2b46abef271cb2ac90408914990d0376a7..54043cd831c8e5b01875e08da6e8870343406134 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
-#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/sh_dma.h>
@@ -57,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices);
 static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
 
 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
+static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+       struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+       __raw_writel(data, shdev->chan_reg +
+                    shdev->pdata->channel[sh_dc->id].chclr_offset);
+}
 
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
@@ -129,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
 
        dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
 
+       if (shdev->pdata->chclr_present) {
+               int i;
+               for (i = 0; i < shdev->pdata->channel_num; i++) {
+                       struct sh_dmae_chan *sh_chan = shdev->chan[i];
+                       if (sh_chan)
+                               chclr_write(sh_chan, 0);
+               }
+       }
+
        dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
 
        dmaor = dmaor_read(shdev);
@@ -139,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
                dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
                return -EIO;
        }
+       if (shdev->pdata->dmaor_init & ~dmaor)
+               dev_warn(shdev->common.dev,
+                        "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+                        dmaor, shdev->pdata->dmaor_init);
        return 0;
 }
 
@@ -259,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
        return 0;
 }
 
-static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
-
 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 {
        struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
@@ -340,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
                                sh_chan_xfer_ld_queue(sh_chan);
                        sh_chan->pm_state = DMAE_PM_ESTABLISHED;
                }
+       } else {
+               sh_chan->pm_state = DMAE_PM_PENDING;
        }
 
        spin_unlock_irq(&sh_chan->desc_lock);
@@ -479,19 +500,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
  * @sh_chan:   DMA channel
  * @flags:     DMA transfer flags
  * @dest:      destination DMA address, incremented when direction equals
- *             DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ *             DMA_DEV_TO_MEM
  * @src:       source DMA address, incremented when direction equals
- *             DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ *             DMA_MEM_TO_DEV
  * @len:       DMA transfer length
  * @first:     if NULL, set to the current descriptor and cookie set to -EBUSY
  * @direction: needed for slave DMA to decide which address to keep constant,
- *             equals DMA_BIDIRECTIONAL for MEMCPY
+ *             equals DMA_MEM_TO_MEM for MEMCPY
  * Returns 0 or an error
  * Locks: called with desc_lock held
  */
 static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
        unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
-       struct sh_desc **first, enum dma_data_direction direction)
+       struct sh_desc **first, enum dma_transfer_direction direction)
 {
        struct sh_desc *new;
        size_t copy_size;
@@ -531,9 +552,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
        new->direction = direction;
 
        *len -= copy_size;
-       if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+       if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
                *src += copy_size;
-       if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+       if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
                *dest += copy_size;
 
        return new;
@@ -546,12 +567,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
  * converted to scatter-gather to guarantee consistent locking and a correct
  * list manipulation. For slave DMA direction carries the usual meaning, and,
  * logically, the SG list is RAM and the addr variable contains slave address,
- * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
  * and the SG list contains only one element and points at the source buffer.
  */
 static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
        struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
-       enum dma_data_direction direction, unsigned long flags)
+       enum dma_transfer_direction direction, unsigned long flags)
 {
        struct scatterlist *sg;
        struct sh_desc *first = NULL, *new = NULL /* compiler... */;
@@ -592,7 +613,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
                        dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
                                i, sg, len, (unsigned long long)sg_addr);
 
-                       if (direction == DMA_FROM_DEVICE)
+                       if (direction == DMA_DEV_TO_MEM)
                                new = sh_dmae_add_desc(sh_chan, flags,
                                                &sg_addr, addr, &len, &first,
                                                direction);
@@ -646,13 +667,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
        sg_dma_address(&sg) = dma_src;
        sg_dma_len(&sg) = len;
 
-       return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+       return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
                               flags);
 }
 
 static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
-       enum dma_data_direction direction, unsigned long flags)
+       enum dma_transfer_direction direction, unsigned long flags)
 {
        struct sh_dmae_slave *param;
        struct sh_dmae_chan *sh_chan;
@@ -996,7 +1017,7 @@ static void dmae_do_tasklet(unsigned long data)
        spin_lock_irq(&sh_chan->desc_lock);
        list_for_each_entry(desc, &sh_chan->ld_queue, node) {
                if (desc->mark == DESC_SUBMITTED &&
-                   ((desc->direction == DMA_FROM_DEVICE &&
+                   ((desc->direction == DMA_DEV_TO_MEM &&
                      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
                     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
                        dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
@@ -1225,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, shdev);
 
+       shdev->common.dev = &pdev->dev;
+
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
 
@@ -1254,7 +1277,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
        shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
        shdev->common.device_control = sh_dmae_control;
 
-       shdev->common.dev = &pdev->dev;
        /* Default transfer size of 32 bytes requires 32-byte alignment */
        shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
 
@@ -1435,22 +1457,17 @@ static int sh_dmae_runtime_resume(struct device *dev)
 #ifdef CONFIG_PM
 static int sh_dmae_suspend(struct device *dev)
 {
-       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
-       int i;
-
-       for (i = 0; i < shdev->pdata->channel_num; i++) {
-               struct sh_dmae_chan *sh_chan = shdev->chan[i];
-               if (sh_chan->descs_allocated)
-                       sh_chan->pm_error = pm_runtime_put_sync(dev);
-       }
-
        return 0;
 }
 
 static int sh_dmae_resume(struct device *dev)
 {
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);
-       int i;
+       int i, ret;
+
+       ret = sh_dmae_rst(shdev);
+       if (ret < 0)
+               dev_err(dev, "Failed to reset!\n");
 
        for (i = 0; i < shdev->pdata->channel_num; i++) {
                struct sh_dmae_chan *sh_chan = shdev->chan[i];
@@ -1459,9 +1476,6 @@ static int sh_dmae_resume(struct device *dev)
                if (!sh_chan->descs_allocated)
                        continue;
 
-               if (!sh_chan->pm_error)
-                       pm_runtime_get_sync(dev);
-
                if (param) {
                        const struct sh_dmae_slave_config *cfg = param->config;
                        dmae_set_dmars(sh_chan, cfg->mid_rid);
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
new file mode 100644 (file)
index 0000000..2333810
--- /dev/null
@@ -0,0 +1,707 @@
+/*
+ * DMA controller driver for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sirfsoc_dma.h>
+
+#define SIRFSOC_DMA_DESCRIPTORS                 16
+#define SIRFSOC_DMA_CHANNELS                    16
+
+#define SIRFSOC_DMA_CH_ADDR                     0x00
+#define SIRFSOC_DMA_CH_XLEN                     0x04
+#define SIRFSOC_DMA_CH_YLEN                     0x08
+#define SIRFSOC_DMA_CH_CTRL                     0x0C
+
+#define SIRFSOC_DMA_WIDTH_0                     0x100
+#define SIRFSOC_DMA_CH_VALID                    0x140
+#define SIRFSOC_DMA_CH_INT                      0x144
+#define SIRFSOC_DMA_INT_EN                      0x148
+#define SIRFSOC_DMA_CH_LOOP_CTRL                0x150
+
+#define SIRFSOC_DMA_MODE_CTRL_BIT               4
+#define SIRFSOC_DMA_DIR_CTRL_BIT                5
+
+/* the xlen and dma_width registers are on a 4-byte boundary */
+#define SIRFSOC_DMA_WORD_LEN                   4
+
+struct sirfsoc_dma_desc {
+       struct dma_async_tx_descriptor  desc;
+       struct list_head                node;
+
+       /* SiRFprimaII 2D-DMA parameters */
+
+       int             xlen;           /* DMA xlen */
+       int             ylen;           /* DMA ylen */
+       int             width;          /* DMA width */
+       int             dir;
+       bool            cyclic;         /* is loop DMA? */
+       u32             addr;           /* DMA buffer address */
+};
+
+struct sirfsoc_dma_chan {
+       struct dma_chan                 chan;
+       struct list_head                free;
+       struct list_head                prepared;
+       struct list_head                queued;
+       struct list_head                active;
+       struct list_head                completed;
+       dma_cookie_t                    completed_cookie;
+       unsigned long                   happened_cyclic;
+       unsigned long                   completed_cyclic;
+
+       /* Lock for this structure */
+       spinlock_t                      lock;
+
+       int                             mode;
+};
+
+struct sirfsoc_dma {
+       struct dma_device               dma;
+       struct tasklet_struct           tasklet;
+       struct sirfsoc_dma_chan         channels[SIRFSOC_DMA_CHANNELS];
+       void __iomem                    *base;
+       int                             irq;
+};
+
+#define DRV_NAME       "sirfsoc_dma"
+
+/* Convert struct dma_chan to struct sirfsoc_dma_chan */
+static inline
+struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct sirfsoc_dma_chan, chan);
+}
+
+/* Convert struct dma_chan to struct sirfsoc_dma */
+static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
+{
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
+       return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
+}
+
+/* Execute all queued DMA descriptors */
+static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
+{
+       struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+       int cid = schan->chan.chan_id;
+       struct sirfsoc_dma_desc *sdesc = NULL;
+
+       /*
+        * The lock is already held by our callers, so we don't take it
+        * again here.
+        */
+
+       sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
+               node);
+       /* Move the first queued descriptor to active list */
+       list_move_tail(&sdesc->node, &schan->active);
+
+       /* Start the DMA transfer */
+       writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
+               cid * 4);
+       writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
+               (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
+               sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
+       writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
+               SIRFSOC_DMA_CH_XLEN);
+       writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
+               SIRFSOC_DMA_CH_YLEN);
+       writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
+               (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+
+       /*
+        * writel has an implicit memory write barrier to make sure data is
+        * flushed into memory before starting DMA
+        */
+       writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
+
+       if (sdesc->cyclic) {
+               writel((1 << cid) | 1 << (cid + 16) |
+                       readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
+                       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+               schan->happened_cyclic = schan->completed_cyclic = 0;
+       }
+}
+
+/* Interrupt handler */
+static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
+{
+       struct sirfsoc_dma *sdma = data;
+       struct sirfsoc_dma_chan *schan;
+       struct sirfsoc_dma_desc *sdesc = NULL;
+       u32 is;
+       int ch;
+
+       is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
+       while ((ch = fls(is) - 1) >= 0) {
+               is &= ~(1 << ch);
+               writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
+               schan = &sdma->channels[ch];
+
+               spin_lock(&schan->lock);
+
+               sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+                       node);
+               if (!sdesc->cyclic) {
+                       /* Execute queued descriptors */
+                       list_splice_tail_init(&schan->active, &schan->completed);
+                       if (!list_empty(&schan->queued))
+                               sirfsoc_dma_execute(schan);
+               } else
+                       schan->happened_cyclic++;
+
+               spin_unlock(&schan->lock);
+       }
+
+       /* Schedule tasklet */
+       tasklet_schedule(&sdma->tasklet);
+
+       return IRQ_HANDLED;
+}
+
+/* process completed descriptors */
+static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
+{
+       dma_cookie_t last_cookie = 0;
+       struct sirfsoc_dma_chan *schan;
+       struct sirfsoc_dma_desc *sdesc;
+       struct dma_async_tx_descriptor *desc;
+       unsigned long flags;
+       unsigned long happened_cyclic;
+       LIST_HEAD(list);
+       int i;
+
+       for (i = 0; i < sdma->dma.chancnt; i++) {
+               schan = &sdma->channels[i];
+
+               /* Get all completed descriptors */
+               spin_lock_irqsave(&schan->lock, flags);
+               if (!list_empty(&schan->completed)) {
+                       list_splice_tail_init(&schan->completed, &list);
+                       spin_unlock_irqrestore(&schan->lock, flags);
+
+                       /* Execute callbacks and run dependencies */
+                       list_for_each_entry(sdesc, &list, node) {
+                               desc = &sdesc->desc;
+
+                               if (desc->callback)
+                                       desc->callback(desc->callback_param);
+
+                               last_cookie = desc->cookie;
+                               dma_run_dependencies(desc);
+                       }
+
+                       /* Free descriptors */
+                       spin_lock_irqsave(&schan->lock, flags);
+                       list_splice_tail_init(&list, &schan->free);
+                       schan->completed_cookie = last_cookie;
+                       spin_unlock_irqrestore(&schan->lock, flags);
+               } else {
+                       /* for cyclic channel, desc is always in active list */
+                       sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+                               node);
+
+                       if (!sdesc || !sdesc->cyclic) {
+                               /* without active cyclic DMA */
+                               spin_unlock_irqrestore(&schan->lock, flags);
+                               continue;
+                       }
+
+                       /* cyclic DMA */
+                       happened_cyclic = schan->happened_cyclic;
+                       spin_unlock_irqrestore(&schan->lock, flags);
+
+                       desc = &sdesc->desc;
+                       while (happened_cyclic != schan->completed_cyclic) {
+                               if (desc->callback)
+                                       desc->callback(desc->callback_param);
+                               schan->completed_cyclic++;
+                       }
+               }
+       }
+}
+
+/* DMA Tasklet */
+static void sirfsoc_dma_tasklet(unsigned long data)
+{
+       struct sirfsoc_dma *sdma = (void *)data;
+
+       sirfsoc_dma_process_completed(sdma);
+}
+
+/* Submit descriptor to hardware */
+static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
+       struct sirfsoc_dma_desc *sdesc;
+       unsigned long flags;
+       dma_cookie_t cookie;
+
+       sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
+
+       spin_lock_irqsave(&schan->lock, flags);
+
+       /* Move descriptor to queue */
+       list_move_tail(&sdesc->node, &schan->queued);
+
+       /* Update cookie */
+       cookie = schan->chan.cookie + 1;
+       if (cookie <= 0)
+               cookie = 1;
+
+       schan->chan.cookie = cookie;
+       sdesc->desc.cookie = cookie;
+
+       spin_unlock_irqrestore(&schan->lock, flags);
+
+       return cookie;
+}
+
+static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
+       struct dma_slave_config *config)
+{
+       unsigned long flags;
+
+       if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+               (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
+               return -EINVAL;
+
+       spin_lock_irqsave(&schan->lock, flags);
+       schan->mode = (config->src_maxburst == 4 ? 1 : 0);
+       spin_unlock_irqrestore(&schan->lock, flags);
+
+       return 0;
+}
+
+static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
+{
+       struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+       int cid = schan->chan.chan_id;
+       unsigned long flags;
+
+       writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
+               ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+       writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
+
+       writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+               & ~((1 << cid) | 1 << (cid + 16)),
+                       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+
+       spin_lock_irqsave(&schan->lock, flags);
+       list_splice_tail_init(&schan->active, &schan->free);
+       list_splice_tail_init(&schan->queued, &schan->free);
+       spin_unlock_irqrestore(&schan->lock, flags);
+
+       return 0;
+}
+
+static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+       unsigned long arg)
+{
+       struct dma_slave_config *config;
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+
+       switch (cmd) {
+       case DMA_TERMINATE_ALL:
+               return sirfsoc_dma_terminate_all(schan);
+       case DMA_SLAVE_CONFIG:
+               config = (struct dma_slave_config *)arg;
+               return sirfsoc_dma_slave_config(schan, config);
+
+       default:
+               break;
+       }
+
+       return -ENOSYS;
+}
+
+/* Alloc channel resources */
+static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       struct sirfsoc_dma_desc *sdesc;
+       unsigned long flags;
+       LIST_HEAD(descs);
+       int i;
+
+       /* Alloc descriptors for this channel */
+       for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
+               sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
+               if (!sdesc) {
+                       dev_notice(sdma->dma.dev, "Memory allocation error. "
+                               "Allocated only %u descriptors\n", i);
+                       break;
+               }
+
+               dma_async_tx_descriptor_init(&sdesc->desc, chan);
+               sdesc->desc.flags = DMA_CTRL_ACK;
+               sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
+
+               list_add_tail(&sdesc->node, &descs);
+       }
+
+       /* Return error only if no descriptors were allocated */
+       if (i == 0)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&schan->lock, flags);
+
+       list_splice_tail_init(&descs, &schan->free);
+       spin_unlock_irqrestore(&schan->lock, flags);
+
+       return i;
+}
+
+/* Free channel resources */
+static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       struct sirfsoc_dma_desc *sdesc, *tmp;
+       unsigned long flags;
+       LIST_HEAD(descs);
+
+       spin_lock_irqsave(&schan->lock, flags);
+
+       /* Channel must be idle */
+       BUG_ON(!list_empty(&schan->prepared));
+       BUG_ON(!list_empty(&schan->queued));
+       BUG_ON(!list_empty(&schan->active));
+       BUG_ON(!list_empty(&schan->completed));
+
+       /* Move data */
+       list_splice_tail_init(&schan->free, &descs);
+
+       spin_unlock_irqrestore(&schan->lock, flags);
+
+       /* Free descriptors */
+       list_for_each_entry_safe(sdesc, tmp, &descs, node)
+               kfree(sdesc);
+}
+
+/* Send pending descriptor to hardware */
+static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
+{
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&schan->lock, flags);
+
+       if (list_empty(&schan->active) && !list_empty(&schan->queued))
+               sirfsoc_dma_execute(schan);
+
+       spin_unlock_irqrestore(&schan->lock, flags);
+}
+
+/* Check request completion status */
+static enum dma_status
+sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+       struct dma_tx_state *txstate)
+{
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       unsigned long flags;
+       dma_cookie_t last_used;
+       dma_cookie_t last_complete;
+
+       spin_lock_irqsave(&schan->lock, flags);
+       last_used = schan->chan.cookie;
+       last_complete = schan->completed_cookie;
+       spin_unlock_irqrestore(&schan->lock, flags);
+
+       dma_set_tx_state(txstate, last_complete, last_used, 0);
+       return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
+       struct dma_chan *chan, struct dma_interleaved_template *xt,
+       unsigned long flags)
+{
+       struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       struct sirfsoc_dma_desc *sdesc = NULL;
+       unsigned long iflags;
+       int ret;
+
+       if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
+               ret = -EINVAL;
+               goto err_dir;
+       }
+
+       /* Get free descriptor */
+       spin_lock_irqsave(&schan->lock, iflags);
+       if (!list_empty(&schan->free)) {
+               sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+                       node);
+               list_del(&sdesc->node);
+       }
+       spin_unlock_irqrestore(&schan->lock, iflags);
+
+       if (!sdesc) {
+               /* try to free completed descriptors */
+               sirfsoc_dma_process_completed(sdma);
+               ret = 0;
+               goto no_desc;
+       }
+
+       /* Place descriptor in prepared list */
+       spin_lock_irqsave(&schan->lock, iflags);
+
+       /*
+        * Number of chunks in a frame can only be 1 for prima2
+        * and ylen (number of frames - 1) must be at least 0
+        */
+       if ((xt->frame_size == 1) && (xt->numf > 0)) {
+               sdesc->cyclic = 0;
+               sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
+               sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
+                               SIRFSOC_DMA_WORD_LEN;
+               sdesc->ylen = xt->numf - 1;
+               if (xt->dir == DMA_MEM_TO_DEV) {
+                       sdesc->addr = xt->src_start;
+                       sdesc->dir = 1;
+               } else {
+                       sdesc->addr = xt->dst_start;
+                       sdesc->dir = 0;
+               }
+
+               list_add_tail(&sdesc->node, &schan->prepared);
+       } else {
+               pr_err("sirfsoc DMA: invalid transfer\n");
+               ret = -EINVAL;
+               goto err_xfer;
+       }
+       spin_unlock_irqrestore(&schan->lock, iflags);
+
+       return &sdesc->desc;
+err_xfer:
+       spin_unlock_irqrestore(&schan->lock, iflags);
+no_desc:
+err_dir:
+       return ERR_PTR(ret);
+}
+
+static struct dma_async_tx_descriptor *
+sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
+       size_t buf_len, size_t period_len,
+       enum dma_transfer_direction direction)
+{
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       struct sirfsoc_dma_desc *sdesc = NULL;
+       unsigned long iflags;
+
+       /*
+        * we only support cyclic transfers with two periods.
+        * If the X-length is set to 0, it would be the loop mode.
+        * The DMA address keeps increasing until reaching the end of a loop
+        * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
+        * the DMA address goes back to the beginning of this area.
+        * In loop mode, the DMA data region is divided into two parts, BUFA
+        * and BUFB. DMA controller generates interrupts twice in each loop:
+        * when the DMA address reaches the end of BUFA or the end of
+        * BUFB.
+        */
+       if (buf_len != 2 * period_len)
+               return ERR_PTR(-EINVAL);
+
+       /* Get free descriptor */
+       spin_lock_irqsave(&schan->lock, iflags);
+       if (!list_empty(&schan->free)) {
+               sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+                       node);
+               list_del(&sdesc->node);
+       }
+       spin_unlock_irqrestore(&schan->lock, iflags);
+
+       if (!sdesc)
+               return NULL;
+
+       /* Place descriptor in prepared list */
+       spin_lock_irqsave(&schan->lock, iflags);
+       sdesc->addr = addr;
+       sdesc->cyclic = 1;
+       sdesc->xlen = 0;
+       sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
+       sdesc->width = 1;
+       list_add_tail(&sdesc->node, &schan->prepared);
+       spin_unlock_irqrestore(&schan->lock, iflags);
+
+       return &sdesc->desc;
+}
+
+/*
+ * The DMA controller consists of 16 independent DMA channels.
+ * Each channel is allocated to a different function
+ */
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
+{
+       unsigned int ch_nr = (unsigned int) chan_id;
+
+       if (ch_nr == chan->chan_id +
+               chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL(sirfsoc_dma_filter_id);
+
+static int __devinit sirfsoc_dma_probe(struct platform_device *op)
+{
+       struct device_node *dn = op->dev.of_node;
+       struct device *dev = &op->dev;
+       struct dma_device *dma;
+       struct sirfsoc_dma *sdma;
+       struct sirfsoc_dma_chan *schan;
+       struct resource res;
+       ulong regs_start, regs_size;
+       u32 id;
+       int ret, i;
+
+       sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
+       if (!sdma) {
+               dev_err(dev, "Memory exhausted!\n");
+               return -ENOMEM;
+       }
+
+       if (of_property_read_u32(dn, "cell-index", &id)) {
+               dev_err(dev, "Failed to get DMAC index\n");
+               ret = -ENODEV;
+               goto free_mem;
+       }
+
+       sdma->irq = irq_of_parse_and_map(dn, 0);
+       if (sdma->irq == NO_IRQ) {
+               dev_err(dev, "Error mapping IRQ!\n");
+               ret = -EINVAL;
+               goto free_mem;
+       }
+
+       ret = of_address_to_resource(dn, 0, &res);
+       if (ret) {
+               dev_err(dev, "Error parsing memory region!\n");
+               goto free_mem;
+       }
+
+       regs_start = res.start;
+       regs_size = resource_size(&res);
+
+       sdma->base = devm_ioremap(dev, regs_start, regs_size);
+       if (!sdma->base) {
+               dev_err(dev, "Error mapping memory region!\n");
+               ret = -ENOMEM;
+               goto irq_dispose;
+       }
+
+       ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
+               sdma);
+       if (ret) {
+               dev_err(dev, "Error requesting IRQ!\n");
+               ret = -EINVAL;
+               goto unmap_mem;
+       }
+
+       dma = &sdma->dma;
+       dma->dev = dev;
+       dma->chancnt = SIRFSOC_DMA_CHANNELS;
+
+       dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
+       dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
+       dma->device_issue_pending = sirfsoc_dma_issue_pending;
+       dma->device_control = sirfsoc_dma_control;
+       dma->device_tx_status = sirfsoc_dma_tx_status;
+       dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
+       dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
+
+       INIT_LIST_HEAD(&dma->channels);
+       dma_cap_set(DMA_SLAVE, dma->cap_mask);
+       dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+       dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
+       dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+
+       for (i = 0; i < dma->chancnt; i++) {
+               schan = &sdma->channels[i];
+
+               schan->chan.device = dma;
+               schan->chan.cookie = 1;
+               schan->completed_cookie = schan->chan.cookie;
+
+               INIT_LIST_HEAD(&schan->free);
+               INIT_LIST_HEAD(&schan->prepared);
+               INIT_LIST_HEAD(&schan->queued);
+               INIT_LIST_HEAD(&schan->active);
+               INIT_LIST_HEAD(&schan->completed);
+
+               spin_lock_init(&schan->lock);
+               list_add_tail(&schan->chan.device_node, &dma->channels);
+       }
+
+       tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
+
+       /* Register DMA engine */
+       dev_set_drvdata(dev, sdma);
+       ret = dma_async_device_register(dma);
+       if (ret)
+               goto free_irq;
+
+       dev_info(dev, "initialized SIRFSOC DMAC driver\n");
+
+       return 0;
+
+free_irq:
+       devm_free_irq(dev, sdma->irq, sdma);
+irq_dispose:
+       irq_dispose_mapping(sdma->irq);
+unmap_mem:
+       iounmap(sdma->base);
+free_mem:
+       devm_kfree(dev, sdma);
+       return ret;
+}
+
+static int __devexit sirfsoc_dma_remove(struct platform_device *op)
+{
+       struct device *dev = &op->dev;
+       struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+       dma_async_device_unregister(&sdma->dma);
+       devm_free_irq(dev, sdma->irq, sdma);
+       irq_dispose_mapping(sdma->irq);
+       iounmap(sdma->base);
+       devm_kfree(dev, sdma);
+       return 0;
+}
+
+static struct of_device_id sirfsoc_dma_match[] = {
+       { .compatible = "sirf,prima2-dmac", },
+       {},
+};
+
+static struct platform_driver sirfsoc_dma_driver = {
+       .probe          = sirfsoc_dma_probe,
+       .remove         = __devexit_p(sirfsoc_dma_remove),
+       .driver = {
+               .name = DRV_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = sirfsoc_dma_match,
+       },
+};
+
+module_platform_driver(sirfsoc_dma_driver);
+
+MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
+       "Barry Song <baohua.song@csr.com>");
+MODULE_DESCRIPTION("SIRFSOC DMA control driver");
+MODULE_LICENSE("GPL v2");
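
sirfsoc_dma_filter_id() is exported so clients can grab a specific channel.
A client-side sketch, assuming channel id 12 on DMAC 0 (the id passed in is
dev_id * SIRFSOC_DMA_CHANNELS + channel number, per the filter above):

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
	if (!chan)
		pr_err("no matching DMA channel\n");

Note that the cyclic path above only accepts buf_len == 2 * period_len,
matching the BUFA/BUFB split described in sirfsoc_dma_prep_cyclic().
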
index 13259cad0ceb61df6a593af758ebf1aec4ec877b..cc5ecbc067a3d8a8d88c97190b0248fb0851593b 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/err.h>
 #include <linux/amba/bus.h>
 
@@ -32,6 +34,9 @@
 /* Maximum iterations taken before giving up suspending a channel */
 #define D40_SUSPEND_MAX_IT 500
 
+/* Milliseconds */
+#define DMA40_AUTOSUSPEND_DELAY        100
+
 /* Hardware requirement on LCLA alignment */
 #define LCLA_ALIGNMENT 0x40000
 
@@ -62,6 +67,55 @@ enum d40_command {
        D40_DMA_SUSPENDED       = 3
 };
 
+/*
+ * These are the registers that have to be saved and later restored
+ * when the DMA hw is powered off.
+ * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
+ */
+static u32 d40_backup_regs[] = {
+       D40_DREG_LCPA,
+       D40_DREG_LCLA,
+       D40_DREG_PRMSE,
+       D40_DREG_PRMSO,
+       D40_DREG_PRMOE,
+       D40_DREG_PRMOO,
+};
+
+#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
+
+/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
+static u32 d40_backup_regs_v3[] = {
+       D40_DREG_PSEG1,
+       D40_DREG_PSEG2,
+       D40_DREG_PSEG3,
+       D40_DREG_PSEG4,
+       D40_DREG_PCEG1,
+       D40_DREG_PCEG2,
+       D40_DREG_PCEG3,
+       D40_DREG_PCEG4,
+       D40_DREG_RSEG1,
+       D40_DREG_RSEG2,
+       D40_DREG_RSEG3,
+       D40_DREG_RSEG4,
+       D40_DREG_RCEG1,
+       D40_DREG_RCEG2,
+       D40_DREG_RCEG3,
+       D40_DREG_RCEG4,
+};
+
+#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+
+static u32 d40_backup_regs_chan[] = {
+       D40_CHAN_REG_SSCFG,
+       D40_CHAN_REG_SSELT,
+       D40_CHAN_REG_SSPTR,
+       D40_CHAN_REG_SSLNK,
+       D40_CHAN_REG_SDCFG,
+       D40_CHAN_REG_SDELT,
+       D40_CHAN_REG_SDPTR,
+       D40_CHAN_REG_SDLNK,
+};
+
 /**
  * struct d40_lli_pool - Structure for keeping LLIs in memory
  *
@@ -96,7 +150,7 @@ struct d40_lli_pool {
  * during a transfer.
  * @node: List entry.
  * @is_in_client_list: true if the client owns this descriptor.
- * the previous one.
+ * @cyclic: true if this is a cyclic job
  *
  * This descriptor is used for both logical and physical transfers.
  */
@@ -143,6 +197,7 @@ struct d40_lcla_pool {
  * channels.
  *
  * @lock: A lock protecting this entity.
+ * @reserved: True if used by secure world or otherwise.
  * @num: The physical channel number of this entity.
  * @allocated_src: Bit mapped to show which src event line's are mapped to
  * this physical channel. Can also be free or physically allocated.
@@ -152,6 +207,7 @@ struct d40_lcla_pool {
  */
 struct d40_phy_res {
        spinlock_t lock;
+       bool       reserved;
        int        num;
        u32        allocated_src;
        u32        allocated_dst;
@@ -185,7 +241,6 @@ struct d40_base;
  * @src_def_cfg: Default cfg register setting for src.
  * @dst_def_cfg: Default cfg register setting for dst.
  * @log_def: Default logical channel settings.
- * @lcla: Space for one dst src pair for logical channel transfers.
  * @lcpa: Pointer to dst and src lcpa settings.
  * @runtime_addr: runtime configured address.
  * @runtime_direction: runtime configured direction.
@@ -217,7 +272,7 @@ struct d40_chan {
        struct d40_log_lli_full         *lcpa;
        /* Runtime reconfiguration */
        dma_addr_t                      runtime_addr;
-       enum dma_data_direction         runtime_direction;
+       enum dma_transfer_direction     runtime_direction;
 };
 
 /**
@@ -241,6 +296,7 @@ struct d40_chan {
  * @dma_both: dma_device channels that can do both memcpy and slave transfers.
  * @dma_slave: dma_device channels that can do only do slave transfers.
  * @dma_memcpy: dma_device channels that can do only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
  * @log_chans: Room for all possible logical channels in system.
  * @lookup_log_chans: Used to map interrupt number to logical channel. Points
  * to log_chans entries.
@@ -248,12 +304,20 @@ struct d40_chan {
  * to phy_chans entries.
  * @plat_data: Pointer to provided platform_data which is the driver
  * configuration.
+ * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
  * @phy_res: Vector containing all physical channels.
  * @lcla_pool: lcla pool settings and data.
  * @lcpa_base: The virtual mapped address of LCPA.
  * @phy_lcpa: The physical address of the LCPA.
  * @lcpa_size: The size of the LCPA area.
  * @desc_slab: cache for descriptors.
+ * @reg_val_backup: Here the values of some hardware registers are stored
+ * before the DMA is powered off. They are restored when the power is back on.
+ * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
+ * later.
+ * @reg_val_backup_chan: Backup data for standard channel parameter registers.
+ * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
+ * @initialized: true if the dma has been initialized
  */
 struct d40_base {
        spinlock_t                       interrupt_lock;
@@ -275,6 +339,7 @@ struct d40_base {
        struct d40_chan                 **lookup_log_chans;
        struct d40_chan                 **lookup_phy_chans;
        struct stedma40_platform_data    *plat_data;
+       struct regulator                 *lcpa_regulator;
        /* Physical half channels */
        struct d40_phy_res               *phy_res;
        struct d40_lcla_pool              lcla_pool;
@@ -282,6 +347,11 @@ struct d40_base {
        dma_addr_t                        phy_lcpa;
        resource_size_t                   lcpa_size;
        struct kmem_cache                *desc_slab;
+       u32                               reg_val_backup[BACKUP_REGS_SZ];
+       u32                               reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+       u32                              *reg_val_backup_chan;
+       u16                               gcc_pwr_off_mask;
+       bool                              initialized;
 };
 
 /**
@@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
                struct d40_desc *d;
                struct d40_desc *_d;
 
-               list_for_each_entry_safe(d, _d, &d40c->client, node)
+               list_for_each_entry_safe(d, _d, &d40c->client, node) {
                        if (async_tx_test_ack(&d->txd)) {
                                d40_desc_remove(d);
                                desc = d;
                                memset(desc, 0, sizeof(*desc));
                                break;
                        }
+               }
        }
 
        if (!desc)
@@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
        bool cyclic = desc->cyclic;
        int curr_lcla = -EINVAL;
        int first_lcla = 0;
+       bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
        bool linkback;
 
        /*
@@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
                                       &lli->src[lli_current],
                                       next_lcla, flags);
 
-               dma_sync_single_range_for_device(chan->base->dev,
-                                       pool->dma_addr, lcla_offset,
-                                       2 * sizeof(struct d40_log_lli),
-                                       DMA_TO_DEVICE);
-
+               /*
+                * Cache maintenance is not needed if lcla is
+                * mapped in esram
+                */
+               if (!use_esram_lcla) {
+                       dma_sync_single_range_for_device(chan->base->dev,
+                                               pool->dma_addr, lcla_offset,
+                                               2 * sizeof(struct d40_log_lli),
+                                               DMA_TO_DEVICE);
+               }
                curr_lcla = next_lcla;
 
                if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
@@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
        return len;
 }
 
-/* Support functions for logical channels */
+
+#ifdef CONFIG_PM
+static void dma40_backup(void __iomem *baseaddr, u32 *backup,
+                        u32 *regaddr, int num, bool save)
+{
+       int i;
+
+       for (i = 0; i < num; i++) {
+               void __iomem *addr = baseaddr + regaddr[i];
+
+               if (save)
+                       backup[i] = readl_relaxed(addr);
+               else
+                       writel_relaxed(backup[i], addr);
+       }
+}
+
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+       int i;
+
+       /* Save/Restore channel specific registers */
+       for (i = 0; i < base->num_phy_chans; i++) {
+               void __iomem *addr;
+               int idx;
+
+               if (base->phy_res[i].reserved)
+                       continue;
+
+               addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
+               idx = i * ARRAY_SIZE(d40_backup_regs_chan);
+
+               dma40_backup(addr, &base->reg_val_backup_chan[idx],
+                            d40_backup_regs_chan,
+                            ARRAY_SIZE(d40_backup_regs_chan),
+                            save);
+       }
+
+       /* Save/Restore global registers */
+       dma40_backup(base->virtbase, base->reg_val_backup,
+                    d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
+                    save);
+
+       /* Save/Restore registers only existing on dma40 v3 and later */
+       if (base->rev >= 3)
+               dma40_backup(base->virtbase, base->reg_val_backup_v3,
+                            d40_backup_regs_v3,
+                            ARRAY_SIZE(d40_backup_regs_v3),
+                            save);
+}
+#else
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+}
+#endif
 
 static int d40_channel_execute_command(struct d40_chan *d40c,
                                       enum d40_command command)
@@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c)
                /* Set LIDX for lcla */
                writel(lidx, chanbase + D40_CHAN_REG_SSELT);
                writel(lidx, chanbase + D40_CHAN_REG_SDELT);
+
+               /* Clear LNK which will be used by d40_chan_has_events() */
+               writel(0, chanbase + D40_CHAN_REG_SSLNK);
+               writel(0, chanbase + D40_CHAN_REG_SDLNK);
        }
 }
 
@@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c)
        if (!d40c->busy)
                return 0;
 
+       pm_runtime_get_sync(d40c->base->dev);
        spin_lock_irqsave(&d40c->lock, flags);
 
        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c)
                                                                  D40_DMA_RUN);
                }
        }
-
+       pm_runtime_mark_last_busy(d40c->base->dev);
+       pm_runtime_put_autosuspend(d40c->base->dev);
        spin_unlock_irqrestore(&d40c->lock, flags);
        return res;
 }
@@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c)
                return 0;
 
        spin_lock_irqsave(&d40c->lock, flags);
-
+       pm_runtime_get_sync(d40c->base->dev);
        if (d40c->base->rev == 0)
                if (chan_is_logical(d40c)) {
                        res = d40_channel_execute_command(d40c,
@@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c)
        }
 
 no_suspend:
+       pm_runtime_mark_last_busy(d40c->base->dev);
+       pm_runtime_put_autosuspend(d40c->base->dev);
        spin_unlock_irqrestore(&d40c->lock, flags);
        return res;
 }
@@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
        d40d = d40_first_queued(d40c);
 
        if (d40d != NULL) {
-               d40c->busy = true;
+               if (!d40c->busy)
+                       d40c->busy = true;
+
+               pm_runtime_get_sync(d40c->base->dev);
 
                /* Remove from queue */
                d40_desc_remove(d40d);
@@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c)
 
                if (d40_queue_start(d40c) == NULL)
                        d40c->busy = false;
+               pm_runtime_mark_last_busy(d40c->base->dev);
+               pm_runtime_put_autosuspend(d40c->base->dev);
        }
 
        d40c->pending_tx++;
@@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c,
        return res;
 }
 
-static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
-                              int log_event_line, bool is_log)
+static bool d40_alloc_mask_set(struct d40_phy_res *phy,
+                              bool is_src, int log_event_line, bool is_log,
+                              bool *first_user)
 {
        unsigned long flags;
        spin_lock_irqsave(&phy->lock, flags);
+
+       *first_user = ((phy->allocated_src | phy->allocated_dst)
+                       == D40_ALLOC_FREE);
+
        if (!is_log) {
                /* Physical interrupts are masked per physical full channel */
                if (phy->allocated_src == D40_ALLOC_FREE &&
@@ -1490,7 +1639,7 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
        return is_free;
 }
 
-static int d40_allocate_channel(struct d40_chan *d40c)
+static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
 {
        int dev_type;
        int event_group;
@@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
                        for (i = 0; i < d40c->base->num_phy_chans; i++) {
 
                                if (d40_alloc_mask_set(&phys[i], is_src,
-                                                      0, is_log))
+                                                      0, is_log,
+                                                      first_phy_user))
                                        goto found_phy;
                        }
                } else
@@ -1536,7 +1686,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
                                        if (d40_alloc_mask_set(&phys[i],
                                                               is_src,
                                                               0,
-                                                              is_log))
+                                                              is_log,
+                                                              first_phy_user))
                                                goto found_phy;
                                }
                        }
@@ -1552,6 +1703,25 @@ static int d40_allocate_channel(struct d40_chan *d40c)
        /* Find logical channel */
        for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
                int phy_num = j + event_group * 2;
+
+               if (d40c->dma_cfg.use_fixed_channel) {
+                       i = d40c->dma_cfg.phy_channel;
+
+                       if ((i != phy_num) && (i != phy_num + 1)) {
+                               dev_err(chan2dev(d40c),
+                                       "invalid fixed phy channel %d\n", i);
+                               return -EINVAL;
+                       }
+
+                       if (d40_alloc_mask_set(&phys[i], is_src, event_line,
+                                              is_log, first_phy_user))
+                               goto found_log;
+
+                       dev_err(chan2dev(d40c),
+                               "could not allocate fixed phy channel %d\n", i);
+                       return -EINVAL;
+               }
+
                /*
                 * Spread logical channels across all available physical rather
                 * than pack every logical channel at the first available phy
@@ -1560,13 +1730,15 @@ static int d40_allocate_channel(struct d40_chan *d40c)
                if (is_src) {
                        for (i = phy_num; i < phy_num + 2; i++) {
                                if (d40_alloc_mask_set(&phys[i], is_src,
-                                                      event_line, is_log))
+                                                      event_line, is_log,
+                                                      first_phy_user))
                                        goto found_log;
                        }
                } else {
                        for (i = phy_num + 1; i >= phy_num; i--) {
                                if (d40_alloc_mask_set(&phys[i], is_src,
-                                                      event_line, is_log))
+                                                      event_line, is_log,
+                                                      first_phy_user))
                                        goto found_log;
                        }
                }
@@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c)
                return -EINVAL;
        }
 
+       pm_runtime_get_sync(d40c->base->dev);
        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
        if (res) {
                chan_err(d40c, "suspend failed\n");
-               return res;
+               goto out;
        }
 
        if (chan_is_logical(d40c)) {
@@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c)
                        if (d40_chan_has_events(d40c)) {
                                res = d40_channel_execute_command(d40c,
                                                                  D40_DMA_RUN);
-                               if (res) {
+                               if (res)
                                        chan_err(d40c,
                                                "Executing RUN command\n");
-                                       return res;
-                               }
                        }
-                       return 0;
+                       goto out;
                }
        } else {
                (void) d40_alloc_mask_free(phy, is_src, 0);
@@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c)
        res = d40_channel_execute_command(d40c, D40_DMA_STOP);
        if (res) {
                chan_err(d40c, "Failed to stop channel\n");
-               return res;
+               goto out;
        }
+
+       if (d40c->busy) {
+               pm_runtime_mark_last_busy(d40c->base->dev);
+               pm_runtime_put_autosuspend(d40c->base->dev);
+       }
+
+       d40c->busy = false;
        d40c->phy_chan = NULL;
        d40c->configured = false;
        d40c->base->lookup_phy_chans[phy->num] = NULL;
+out:
 
-       return 0;
+       pm_runtime_mark_last_busy(d40c->base->dev);
+       pm_runtime_put_autosuspend(d40c->base->dev);
+       return res;
 }
 
 static bool d40_is_paused(struct d40_chan *d40c)
@@ -1855,7 +2036,7 @@ d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
 }
 
 static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
+d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
 {
        struct stedma40_platform_data *plat = chan->base->plat_data;
        struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
@@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
        if (chan->runtime_addr)
                return chan->runtime_addr;
 
-       if (direction == DMA_FROM_DEVICE)
+       if (direction == DMA_DEV_TO_MEM)
                addr = plat->dev_rx[cfg->src_dev_type];
-       else if (direction == DMA_TO_DEVICE)
+       else if (direction == DMA_MEM_TO_DEV)
                addr = plat->dev_tx[cfg->dst_dev_type];
 
        return addr;
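This and the following drivers switch the slave-transfer direction from enum dma_data_direction to the new enum dma_transfer_direction: DMA_TO_DEVICE becomes DMA_MEM_TO_DEV and DMA_FROM_DEVICE becomes DMA_DEV_TO_MEM. The dma-mapping calls (dma_unmap_single() and friends) still take enum dma_data_direction, which is why the timb_dma unmap hunk further down passes DMA_TO_DEVICE explicitly. A sketch of a conversion helper a driver might keep for its unmap paths (xfer_to_map_dir is a hypothetical name):

#include <linux/dmaengine.h>
#include <linux/dma-direction.h>

/* Map a slave transfer direction onto the dma-mapping direction
 * of the memory side of that transfer. */
static enum dma_data_direction
xfer_to_map_dir(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:
		return DMA_TO_DEVICE;
	case DMA_DEV_TO_MEM:
		return DMA_FROM_DEVICE;
	default:
		return DMA_NONE;
	}
}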
@@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
 static struct dma_async_tx_descriptor *
 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
            struct scatterlist *sg_dst, unsigned int sg_len,
-           enum dma_data_direction direction, unsigned long dma_flags)
+           enum dma_transfer_direction direction, unsigned long dma_flags)
 {
        struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
        dma_addr_t src_dev_addr = 0;
@@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
        if (direction != DMA_NONE) {
                dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
 
-               if (direction == DMA_FROM_DEVICE)
+               if (direction == DMA_DEV_TO_MEM)
                        src_dev_addr = dev_addr;
-               else if (direction == DMA_TO_DEVICE)
+               else if (direction == DMA_MEM_TO_DEV)
                        dst_dev_addr = dev_addr;
        }
 
@@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
                        goto fail;
                }
        }
-       is_free_phy = (d40c->phy_chan == NULL);
 
-       err = d40_allocate_channel(d40c);
+       err = d40_allocate_channel(d40c, &is_free_phy);
        if (err) {
                chan_err(d40c, "Failed to allocate channel\n");
+               d40c->configured = false;
                goto fail;
        }
 
+       pm_runtime_get_sync(d40c->base->dev);
        /* Fill in basic CFG register values */
        d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
                    &d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
                          D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
        }
 
+       dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
+                chan_is_logical(d40c) ? "logical" : "physical",
+                d40c->phy_chan->num,
+                d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
+
        /*
         * Only write channel configuration to the DMA if the physical
         * resource is free. In case of multiple logical channels
@@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
        if (is_free_phy)
                d40_config_write(d40c);
 fail:
+       pm_runtime_mark_last_busy(d40c->base->dev);
+       pm_runtime_put_autosuspend(d40c->base->dev);
        spin_unlock_irqrestore(&d40c->lock, flags);
        return err;
 }
@@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
                                                         struct scatterlist *sgl,
                                                         unsigned int sg_len,
-                                                        enum dma_data_direction direction,
+                                                        enum dma_transfer_direction direction,
                                                         unsigned long dma_flags)
 {
-       if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
+       if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
                return NULL;
 
        return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 static struct dma_async_tx_descriptor *
 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
                     size_t buf_len, size_t period_len,
-                    enum dma_data_direction direction)
+                    enum dma_transfer_direction direction)
 {
        unsigned int periods = buf_len / period_len;
        struct dma_async_tx_descriptor *txd;
@@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
        dst_addr_width = config->dst_addr_width;
        dst_maxburst = config->dst_maxburst;
 
-       if (config->direction == DMA_FROM_DEVICE) {
+       if (config->direction == DMA_DEV_TO_MEM) {
                dma_addr_t dev_addr_rx =
                        d40c->base->plat_data->dev_rx[cfg->src_dev_type];
 
@@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
                if (dst_maxburst == 0)
                        dst_maxburst = src_maxburst;
 
-       } else if (config->direction == DMA_TO_DEVICE) {
+       } else if (config->direction == DMA_MEM_TO_DEV) {
                dma_addr_t dev_addr_tx =
                        d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
 
@@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
                "configured channel %s for %s, data width %d/%d, "
                "maxburst %d/%d elements, LE, no flow control\n",
                dma_chan_name(chan),
-               (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+               (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
                src_addr_width, dst_addr_width,
                src_maxburst, dst_maxburst);
 
@@ -2519,6 +2709,72 @@ static int __init d40_dmaengine_init(struct d40_base *base,
        return err;
 }
 
+/* Suspend resume functionality */
+#ifdef CONFIG_PM
+static int dma40_pm_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct d40_base *base = platform_get_drvdata(pdev);
+       int ret = 0;
+       if (!pm_runtime_suspended(dev))
+               return -EBUSY;
+
+       if (base->lcpa_regulator)
+               ret = regulator_disable(base->lcpa_regulator);
+       return ret;
+}
+
+static int dma40_runtime_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct d40_base *base = platform_get_drvdata(pdev);
+
+       d40_save_restore_registers(base, true);
+
+       /* Don't disable/enable clocks for v1 due to HW bugs */
+       if (base->rev != 1)
+               writel_relaxed(base->gcc_pwr_off_mask,
+                              base->virtbase + D40_DREG_GCC);
+
+       return 0;
+}
+
+static int dma40_runtime_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct d40_base *base = platform_get_drvdata(pdev);
+
+       if (base->initialized)
+               d40_save_restore_registers(base, false);
+
+       writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
+                      base->virtbase + D40_DREG_GCC);
+       return 0;
+}
+
+static int dma40_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct d40_base *base = platform_get_drvdata(pdev);
+       int ret = 0;
+
+       if (base->lcpa_regulator)
+               ret = regulator_enable(base->lcpa_regulator);
+
+       return ret;
+}
+
+static const struct dev_pm_ops dma40_pm_ops = {
+       .suspend                = dma40_pm_suspend,
+       .runtime_suspend        = dma40_runtime_suspend,
+       .runtime_resume         = dma40_runtime_resume,
+       .resume                 = dma40_resume,
+};
+#define DMA40_PM_OPS   (&dma40_pm_ops)
+#else
+#define DMA40_PM_OPS   NULL
+#endif
+
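The #ifdef keeps the dev_pm_ops out of !CONFIG_PM builds while the driver structure can still reference DMA40_PM_OPS unconditionally (see the .pm hookup in d40_driver at the end of this file's changes). The same wiring for a hypothetical foo driver, as a minimal sketch:

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};
#define FOO_PM_OPS	(&foo_pm_ops)
#else
#define FOO_PM_OPS	NULL	/* .pm may be NULL when PM is off */
#endif

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= FOO_PM_OPS,
	},
};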
 /* Initialization functions. */
 
 static int __init d40_phy_res_init(struct d40_base *base)
@@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
        int num_phy_chans_avail = 0;
        u32 val[2];
        int odd_even_bit = -2;
+       int gcc = D40_DREG_GCC_ENA;
 
        val[0] = readl(base->virtbase + D40_DREG_PRSME);
        val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base)
                        /* Mark security only channels as occupied */
                        base->phy_res[i].allocated_src = D40_ALLOC_PHY;
                        base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+                       base->phy_res[i].reserved = true;
+                       gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+                                                      D40_DREG_GCC_SRC);
+                       gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+                                                      D40_DREG_GCC_DST);
+
                } else {
                        base->phy_res[i].allocated_src = D40_ALLOC_FREE;
                        base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+                       base->phy_res[i].reserved = false;
                        num_phy_chans_avail++;
                }
                spin_lock_init(&base->phy_res[i].lock);
@@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
 
                base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
                base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+               base->phy_res[chan].reserved = true;
+               gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+                                              D40_DREG_GCC_SRC);
+               gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+                                              D40_DREG_GCC_DST);
                num_phy_chans_avail--;
        }
 
@@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base)
                val[0] = val[0] >> 2;
        }
 
+       /*
+        * To keep things simple, enable all clocks initially.
+        * The clocks will be managed later, after channel allocation.
+        * Clocks for event lines on which reserved channels exist
+        * are not managed here.
+        */
+       writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
+       base->gcc_pwr_off_mask = gcc;
+
        return num_phy_chans_avail;
 }
 
@@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
                        goto failure;
        }
 
-       base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
-                                           sizeof(struct d40_desc *) *
-                                           D40_LCLA_LINK_PER_EVENT_GRP,
+       base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
+                                           sizeof(d40_backup_regs_chan),
                                            GFP_KERNEL);
+       if (!base->reg_val_backup_chan)
+               goto failure;
+
+       base->lcla_pool.alloc_map =
+               kzalloc(num_phy_chans * sizeof(struct d40_desc *)
+                       * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
        if (!base->lcla_pool.alloc_map)
                goto failure;
 
@@ -2741,9 +3025,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 static void __init d40_hw_init(struct d40_base *base)
 {
 
-       static const struct d40_reg_val dma_init_reg[] = {
+       static struct d40_reg_val dma_init_reg[] = {
                /* Clock every part of the DMA block from start */
-               { .reg = D40_DREG_GCC,    .val = 0x0000ff01},
+               { .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},
 
                /* Interrupts on all logical channels */
                { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
@@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev)
                d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
                goto failure;
        }
+       /* If lcla has to be located in ESRAM we don't need to allocate */
+       if (base->plat_data->use_esram_lcla) {
+               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                                       "lcla_esram");
+               if (!res) {
+                       ret = -ENOENT;
+                       d40_err(&pdev->dev,
+                               "No \"lcla_esram\" memory resource\n");
+                       goto failure;
+               }
+               base->lcla_pool.base = ioremap(res->start,
+                                               resource_size(res));
+               if (!base->lcla_pool.base) {
+                       ret = -ENOMEM;
+                       d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
+                       goto failure;
+               }
+               writel(res->start, base->virtbase + D40_DREG_LCLA);
 
-       ret = d40_lcla_allocate(base);
-       if (ret) {
-               d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
-               goto failure;
+       } else {
+               ret = d40_lcla_allocate(base);
+               if (ret) {
+                       d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
+                       goto failure;
+               }
        }
 
        spin_lock_init(&base->lcla_pool.lock);
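The ESRAM branch above follows the usual named-resource pattern: look the region up by name, then ioremap it. A minimal sketch of that pattern in isolation (foo_map_named and the resource name are assumptions):

/* Map a MEM resource published under a given name. */
static void __iomem *foo_map_named(struct platform_device *pdev,
				   const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!res)
		return NULL;

	return ioremap(res->start, resource_size(res));
}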
@@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev)
                goto failure;
        }
 
+       pm_runtime_irq_safe(base->dev);
+       pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(base->dev);
+       pm_runtime_enable(base->dev);
+       pm_runtime_resume(base->dev);
+
+       if (base->plat_data->use_esram_lcla) {
+
+               base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
+               if (IS_ERR(base->lcpa_regulator)) {
+                       d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+                       base->lcpa_regulator = NULL;
+                       goto failure;
+               }
+
+               ret = regulator_enable(base->lcpa_regulator);
+               if (ret) {
+                       d40_err(&pdev->dev,
+                               "Failed to enable lcpa_regulator\n");
+                       regulator_put(base->lcpa_regulator);
+                       base->lcpa_regulator = NULL;
+                       goto failure;
+               }
+       }
+
+       base->initialized = true;
        err = d40_dmaengine_init(base, num_reserved_chans);
        if (err)
                goto failure;
@@ -2976,6 +3306,11 @@ static int __init d40_probe(struct platform_device *pdev)
                if (base->virtbase)
                        iounmap(base->virtbase);
 
+               if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
+                       iounmap(base->lcla_pool.base);
+                       base->lcla_pool.base = NULL;
+               }
+
                if (base->lcla_pool.dma_addr)
                        dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
                                         SZ_1K * base->num_phy_chans,
@@ -2998,6 +3333,11 @@ static int __init d40_probe(struct platform_device *pdev)
                        clk_put(base->clk);
                }
 
+               if (base->lcpa_regulator) {
+                       regulator_disable(base->lcpa_regulator);
+                       regulator_put(base->lcpa_regulator);
+               }
+
                kfree(base->lcla_pool.alloc_map);
                kfree(base->lookup_log_chans);
                kfree(base->lookup_phy_chans);
@@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name  = D40_NAME,
+               .pm = DMA40_PM_OPS,
        },
 };
 
index b44c455158de3461f31ee5a0b0594cb136b2ad6a..8d3d490968a3a8240b6f91e1631609a2425c3f4f 100644 (file)
@@ -16,6 +16,8 @@
 
 #define D40_TYPE_TO_GROUP(type) (type / 16)
 #define D40_TYPE_TO_EVENT(type) (type % 16)
+#define D40_GROUP_SIZE 8
+#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2)
 
 /* Most bits of the CFG register are the same in log as in phy mode */
 #define D40_SREG_CFG_MST_POS           15
 
 /* DMA Register Offsets */
 #define D40_DREG_GCC           0x000
+#define D40_DREG_GCC_ENA       0x1
+/* This assumes that there are only 4 event groups */
+#define D40_DREG_GCC_ENABLE_ALL        0xff01
+#define D40_DREG_GCC_EVTGRP_POS 8
+#define D40_DREG_GCC_SRC 0
+#define D40_DREG_GCC_DST 1
+#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
+       (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))
+
 #define D40_DREG_PRTYP         0x004
 #define D40_DREG_PRSME         0x008
 #define D40_DREG_PRSMO         0x00C
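The new GCC layout is one master enable bit plus a pair of per-event-group enable bits (source and destination) starting at bit 8. A short worked example of how d40_phy_res_init() in the previous file builds its power-off mask from these macros (channel number 5 chosen arbitrarily):

u32 gcc = D40_DREG_GCC_ENA;
int phy = 5;		/* 5 & 7 = 5, 5 / 2 = event group 2 */

gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(phy), D40_DREG_GCC_SRC);
gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(phy), D40_DREG_GCC_DST);
/* Result: bit 0 (master enable) plus bits 12 and 13 (group 2). */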
index a4a398f2ef61eb5a362ca20c37ebe9a479e2121a..a6f9c1684a0fc1dc4a9c7a2dbd2407fe6252b68d 100644 (file)
@@ -90,7 +90,7 @@ struct timb_dma_chan {
        struct list_head        queue;
        struct list_head        free_list;
        unsigned int            bytes_per_line;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction     direction;
        unsigned int            descs; /* Descriptors to allocate */
        unsigned int            desc_elems; /* number of elems per descriptor */
 };
@@ -166,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
 
        if (single)
                dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
-                       td_chan->direction);
+                       DMA_TO_DEVICE);
        else
                dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
-                       td_chan->direction);
+                       DMA_TO_DEVICE);
 }
 
 static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
@@ -235,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
                "td_chan: %p, chan: %d, membase: %p\n",
                td_chan, td_chan->chan.chan_id, td_chan->membase);
 
-       if (td_chan->direction == DMA_FROM_DEVICE) {
+       if (td_chan->direction == DMA_DEV_TO_MEM) {
 
                /* descriptor address */
                iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
@@ -278,7 +278,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
                txd->cookie);
 
        /* make sure to stop the transfer */
-       if (td_chan->direction == DMA_FROM_DEVICE)
+       if (td_chan->direction == DMA_DEV_TO_MEM)
                iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
 /* Currently no support for stopping DMA transfers
        else
@@ -558,7 +558,7 @@ static void td_issue_pending(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
        struct scatterlist *sgl, unsigned int sg_len,
-       enum dma_data_direction direction, unsigned long flags)
+       enum dma_transfer_direction direction, unsigned long flags)
 {
        struct timb_dma_chan *td_chan =
                container_of(chan, struct timb_dma_chan, chan);
@@ -606,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
        }
 
        dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
-               td_desc->desc_list_len, DMA_TO_DEVICE);
+               td_desc->desc_list_len, DMA_MEM_TO_DEV);
 
        return &td_desc->txd;
 }
@@ -775,8 +775,8 @@ static int __devinit td_probe(struct platform_device *pdev)
                td_chan->descs = pchan->descriptors;
                td_chan->desc_elems = pchan->descriptor_elements;
                td_chan->bytes_per_line = pchan->bytes_per_line;
-               td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
-                       DMA_TO_DEVICE;
+               td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
+                       DMA_MEM_TO_DEV;
 
                td_chan->membase = td->membase +
                        (i / 2) * TIMBDMA_INSTANCE_OFFSET +
@@ -841,17 +841,7 @@ static struct platform_driver td_driver = {
        .remove = __exit_p(td_remove),
 };
 
-static int __init td_init(void)
-{
-       return platform_driver_register(&td_driver);
-}
-module_init(td_init);
-
-static void __exit td_exit(void)
-{
-       platform_driver_unregister(&td_driver);
-}
-module_exit(td_exit);
+module_platform_driver(td_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Timberdale DMA controller driver");
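module_platform_driver() removes exactly the init/exit boilerplate deleted above; roughly, the macro expands to the code it replaces (a sketch of the expansion, not the literal macro output):

static int __init td_driver_init(void)
{
	return platform_driver_register(&td_driver);
}
module_init(td_driver_init);

static void __exit td_driver_exit(void)
{
	platform_driver_unregister(&td_driver);
}
module_exit(td_driver_exit);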
index cbd83e362b5e0a78bcb25dec65f4fb29663710d9..6122c364cf11bb0050fb2b029c083b3c40233722 100644 (file)
@@ -845,7 +845,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 static struct dma_async_tx_descriptor *
 txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags)
 {
        struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
@@ -860,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
        BUG_ON(!ds || !ds->reg_width);
        if (ds->tx_reg)
-               BUG_ON(direction != DMA_TO_DEVICE);
+               BUG_ON(direction != DMA_MEM_TO_DEV);
        else
-               BUG_ON(direction != DMA_FROM_DEVICE);
+               BUG_ON(direction != DMA_DEV_TO_MEM);
        if (unlikely(!sg_len))
                return NULL;
 
@@ -882,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                mem = sg_dma_address(sg);
 
                if (__is_dmac64(ddev)) {
-                       if (direction == DMA_TO_DEVICE) {
+                       if (direction == DMA_MEM_TO_DEV) {
                                desc->hwdesc.SAR = mem;
                                desc->hwdesc.DAR = ds->tx_reg;
                        } else {
@@ -891,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        }
                        desc->hwdesc.CNTR = sg_dma_len(sg);
                } else {
-                       if (direction == DMA_TO_DEVICE) {
+                       if (direction == DMA_MEM_TO_DEV) {
                                desc->hwdesc32.SAR = mem;
                                desc->hwdesc32.DAR = ds->tx_reg;
                        } else {
@@ -900,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        }
                        desc->hwdesc32.CNTR = sg_dma_len(sg);
                }
-               if (direction == DMA_TO_DEVICE) {
+               if (direction == DMA_MEM_TO_DEV) {
                        sai = ds->reg_width;
                        dai = 0;
                } else {
index 27555995f7e4c344f74afdfa14824ecbfac2b912..b5ee3ebfcfca03e2f9fd5973cd5d72b52d1c5f53 100644 (file)
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
+/* Registers (Write-only) */
+#define XREG_INIT         0x00
+#define XREG_RF_FREQ      0x02
+#define XREG_POWER_DOWN   0x08
+
+/* Registers (Read-only) */
+#define XREG_FREQ_ERROR   0x01
+#define XREG_LOCK         0x02
+#define XREG_VERSION      0x04
+#define XREG_PRODUCT_ID   0x08
+#define XREG_HSYNC_FREQ   0x10
+#define XREG_FRAME_LINES  0x20
+#define XREG_SNR          0x40
+
+#define XREG_ADC_ENV      0x0100
 
 static int debug;
 module_param(debug, int, 0644);
@@ -885,7 +900,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
        mutex_lock(&priv->lock);
 
        /* Sync Lock Indicator */
-       rc = xc2028_get_reg(priv, 0x0002, &frq_lock);
+       rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
        if (rc < 0)
                goto ret;
 
@@ -894,7 +909,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
                signal = 1 << 11;
 
        /* Get SNR of the video signal */
-       rc = xc2028_get_reg(priv, 0x0040, &signal);
+       rc = xc2028_get_reg(priv, XREG_SNR, &signal);
        if (rc < 0)
                goto ret;
 
@@ -1019,9 +1034,9 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
 
        /* CMD= Set frequency */
        if (priv->firm_version < 0x0202)
-               rc = send_seq(priv, {0x00, 0x02, 0x00, 0x00});
+               rc = send_seq(priv, {0x00, XREG_RF_FREQ, 0x00, 0x00});
        else
-               rc = send_seq(priv, {0x80, 0x02, 0x00, 0x00});
+               rc = send_seq(priv, {0x80, XREG_RF_FREQ, 0x00, 0x00});
        if (rc < 0)
                goto ret;
 
@@ -1201,9 +1216,9 @@ static int xc2028_sleep(struct dvb_frontend *fe)
        mutex_lock(&priv->lock);
 
        if (priv->firm_version < 0x0202)
-               rc = send_seq(priv, {0x00, 0x08, 0x00, 0x00});
+               rc = send_seq(priv, {0x00, XREG_POWER_DOWN, 0x00, 0x00});
        else
-               rc = send_seq(priv, {0x80, 0x08, 0x00, 0x00});
+               rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
 
        priv->cur_fw.type = 0;  /* need firmware reload */
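The XREG_* names also document that the register number space is direction-dependent, which is why two lists are defined: 0x02 selects XREG_RF_FREQ on a write but XREG_LOCK on a read, and 0x08 selects XREG_POWER_DOWN on a write but XREG_PRODUCT_ID on a read. Illustrated with the calls from the hunks above (send_seq is this driver's own sequence macro):

rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);	/* read  of 0x02 */
rc = send_seq(priv, {0x00, XREG_RF_FREQ, 0x00, 0x00});	/* write of 0x02 */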
 
index d218c1d68c33a622cdc791aa684e5645441965bf..68397110b7d932fec46c3cdd4ecc0b9d2e99ddf4 100644 (file)
@@ -154,6 +154,8 @@ struct xc4000_priv {
 #define XREG_SNR          0x06
 #define XREG_VERSION      0x07
 #define XREG_PRODUCT_ID   0x08
+#define XREG_SIGNAL_LEVEL 0x0A
+#define XREG_NOISE_LEVEL  0x0B
 
 /*
    Basic firmware description. This will remain with
@@ -486,6 +488,16 @@ static int xc_get_quality(struct xc4000_priv *priv, u16 *quality)
        return xc4000_readreg(priv, XREG_QUALITY, quality);
 }
 
+static int xc_get_signal_level(struct xc4000_priv *priv, u16 *signal)
+{
+       return xc4000_readreg(priv, XREG_SIGNAL_LEVEL, signal);
+}
+
+static int xc_get_noise_level(struct xc4000_priv *priv, u16 *noise)
+{
+       return xc4000_readreg(priv, XREG_NOISE_LEVEL, noise);
+}
+
 static u16 xc_wait_for_lock(struct xc4000_priv *priv)
 {
        u16     lock_state = 0;
@@ -1089,6 +1101,8 @@ static void xc_debug_dump(struct xc4000_priv *priv)
        u32     hsync_freq_hz = 0;
        u16     frame_lines;
        u16     quality;
+       u16     signal = 0;
+       u16     noise = 0;
        u8      hw_majorversion = 0, hw_minorversion = 0;
        u8      fw_majorversion = 0, fw_minorversion = 0;
 
@@ -1119,6 +1133,12 @@ static void xc_debug_dump(struct xc4000_priv *priv)
 
        xc_get_quality(priv, &quality);
        dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
+
+       xc_get_signal_level(priv, &signal);
+       dprintk(1, "*** Signal level = -%ddB (%d)\n", signal >> 8, signal);
+
+       xc_get_noise_level(priv, &noise);
+       dprintk(1, "*** Noise level = %ddB (%d)\n", noise >> 8, noise);
 }
 
 static int xc4000_set_params(struct dvb_frontend *fe)
@@ -1432,6 +1452,71 @@ static int xc4000_set_analog_params(struct dvb_frontend *fe,
        return ret;
 }
 
+static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength)
+{
+       struct xc4000_priv *priv = fe->tuner_priv;
+       u16 value = 0;
+       int rc;
+
+       mutex_lock(&priv->lock);
+       rc = xc4000_readreg(priv, XREG_SIGNAL_LEVEL, &value);
+       mutex_unlock(&priv->lock);
+
+       if (rc < 0)
+               goto ret;
+
+       /* Information from real testing of the DVB-T and radio parts:
+          the coefficient for one dB is 0xff.
+        */
+       tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value);
+
+       /* all known digital modes */
+       if ((priv->video_standard == XC4000_DTV6) ||
+           (priv->video_standard == XC4000_DTV7) ||
+           (priv->video_standard == XC4000_DTV7_8) ||
+           (priv->video_standard == XC4000_DTV8))
+               goto digital;
+
+       /* In analog mode the NOISE LEVEL is what matters; the signal
+          level depends only on the gain of the antenna and amplifiers
+          and says nothing about the real quality of reception.
+        */
+       mutex_lock(&priv->lock);
+       rc = xc4000_readreg(priv, XREG_NOISE_LEVEL, &value);
+       mutex_unlock(&priv->lock);
+
+       tuner_dbg("Noise level: %ddB (%05d)\n", value >> 8, value);
+
+       /* highest noise level: 32dB */
+       if (value >= 0x2000) {
+               value = 0;
+       } else {
+               value = ~value << 3;
+       }
+
+       goto ret;
+
+       /* In digital mode the SIGNAL LEVEL is what matters; the real
+          noise level is stored in the demodulator registers.
+        */
+digital:
+       /* best signal: -50dB */
+       if (value <= 0x3200) {
+               value = 0xffff;
+       /* minimum: -114dB - should be 0x7200 but real zero is 0x713A */
+       } else if (value >= 0x713A) {
+               value = 0;
+       } else {
+               value = ~(value - 0x3200) << 2;
+       }
+
+ret:
+       *strength = value;
+
+       return rc;
+}
+
 static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
 {
        struct xc4000_priv *priv = fe->tuner_priv;
@@ -1559,6 +1644,7 @@ static const struct dvb_tuner_ops xc4000_tuner_ops = {
        .set_params        = xc4000_set_params,
        .set_analog_params = xc4000_set_analog_params,
        .get_frequency     = xc4000_get_frequency,
+       .get_rf_strength   = xc4000_get_signal,
        .get_bandwidth     = xc4000_get_bandwidth,
        .get_status        = xc4000_get_status
 };
index b15db4fe347b9218ec8b9e348802f424abb5d72b..fbbe545a74cb5357c3c1cdf445511c80f32dd724 100644 (file)
@@ -904,8 +904,11 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
 {
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        int i;
+       u32 delsys;
 
+       delsys = c->delivery_system;
        memset(c, 0, sizeof(struct dtv_frontend_properties));
+       c->delivery_system = delsys;
 
        c->state = DTV_CLEAR;
 
@@ -1009,25 +1012,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
        _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0),
        _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0),
 
-       _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 0, 0),
-       _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 0, 0),
-       _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 0, 0),
-       _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 0, 0),
-       _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),
-
        _DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),
        _DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0),
 
@@ -1413,6 +1397,15 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system)
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        enum dvbv3_emulation_type type;
 
+       /*
+        * It was reported that some old DVBv5 applications were
+        * filling delivery_system with SYS_UNDEFINED. If this happens,
+        * assume that the application wants to use the first supported
+        * delivery system.
+        */
+       if (c->delivery_system == SYS_UNDEFINED)
+               c->delivery_system = fe->ops.delsys[0];
+
        if (desired_system == SYS_UNDEFINED) {
                /*
                 * A DVBv3 call doesn't know what's the desired system.
@@ -1732,6 +1725,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,
 {
        struct dvb_device *dvbdev = file->private_data;
        struct dvb_frontend *fe = dvbdev->priv;
+       struct dvb_frontend_private *fepriv = fe->frontend_priv;
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        int err = 0;
 
@@ -1798,9 +1792,14 @@ static int dvb_frontend_ioctl_properties(struct file *file,
 
                /*
                 * Fills the cache out struct with the cache contents, plus
-                * the data retrieved from get_frontend.
+                * the data retrieved from get_frontend, if the frontend
+                * is not idle. Otherwise, returns the cached content.
                 */
-               dtv_get_frontend(fe, NULL);
+               if (fepriv->state != FESTATE_IDLE) {
+                       err = dtv_get_frontend(fe, NULL);
+                       if (err < 0)
+                               goto out;
+               }
                for (i = 0; i < tvps->num; i++) {
                        err = dtv_property_process_get(fe, c, tvp + i, file);
                        if (err < 0)
index d66192974d68a0a2de44fd41f156f7b6c9c90a85..1455e2644ab5e4e08a0a5324a0d3e873b9941256 100644 (file)
@@ -877,24 +877,18 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
        case ANYSEE_HW_508T2C: /* 20 */
                /* E7 T2C */
 
+               if (state->fe_id)
+                       break;
+
                /* enable DVB-T/T2/C demod on IOE[5] */
                ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20);
                if (ret)
                        goto error;
 
-               if (state->fe_id == 0)  {
-                       /* DVB-T/T2 */
-                       adap->fe_adap[state->fe_id].fe =
-                               dvb_attach(cxd2820r_attach,
-                               &anysee_cxd2820r_config,
-                               &adap->dev->i2c_adap, NULL);
-               } else {
-                       /* DVB-C */
-                       adap->fe_adap[state->fe_id].fe =
-                               dvb_attach(cxd2820r_attach,
-                               &anysee_cxd2820r_config,
-                               &adap->dev->i2c_adap, adap->fe_adap[0].fe);
-               }
+               /* attach demod */
+               adap->fe_adap[state->fe_id].fe = dvb_attach(cxd2820r_attach,
+                               &anysee_cxd2820r_config, &adap->dev->i2c_adap,
+                               NULL);
 
                state->has_ci = true;
 
index 9bd6d51b3b938b21659dffe90f7ee68877ad6302..7de125c0b36f4576f951ea8e90a87d7c308f0857 100644 (file)
@@ -48,6 +48,8 @@ struct dib0700_state {
        u8 disable_streaming_master_mode;
        u32 fw_version;
        u32 nb_packet_buffer_size;
+       int (*read_status)(struct dvb_frontend *, fe_status_t *);
+       int (*sleep)(struct dvb_frontend* fe);
        u8 buf[255];
 };
 
index 206999476f028238c430797ba11b762b55dee379..070e82aa53f53248f33448d9e61d22b484e6b28a 100644 (file)
@@ -834,6 +834,7 @@ static struct usb_driver dib0700_driver = {
 
 module_usb_driver(dib0700_driver);
 
+MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
 MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
 MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
 MODULE_VERSION("1.0");
index 81ef4b46f790f936129b849e55878e8b8a9ff89e..f9e966aa26e75d19b0ce129221be038bd6787fe5 100644 (file)
@@ -3066,19 +3066,25 @@ static struct dib7000p_config stk7070pd_dib7000p_config[2] = {
        }
 };
 
-static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
+static void stk7070pd_init(struct dvb_usb_device *dev)
 {
-       dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+       dib0700_set_gpio(dev, GPIO6, GPIO_OUT, 1);
        msleep(10);
-       dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
-       dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
-       dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
-       dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+       dib0700_set_gpio(dev, GPIO9, GPIO_OUT, 1);
+       dib0700_set_gpio(dev, GPIO4, GPIO_OUT, 1);
+       dib0700_set_gpio(dev, GPIO7, GPIO_OUT, 1);
+       dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 0);
 
-       dib0700_ctrl_clock(adap->dev, 72, 1);
+       dib0700_ctrl_clock(dev, 72, 1);
 
        msleep(10);
-       dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+       dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 1);
+}
+
+static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
+{
+       stk7070pd_init(adap->dev);
+
        msleep(10);
        dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
 
@@ -3099,6 +3105,77 @@ static int stk7070pd_frontend_attach1(struct dvb_usb_adapter *adap)
        return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
 }
 
+static int novatd_read_status_override(struct dvb_frontend *fe,
+               fe_status_t *stat)
+{
+       struct dvb_usb_adapter *adap = fe->dvb->priv;
+       struct dvb_usb_device *dev = adap->dev;
+       struct dib0700_state *state = dev->priv;
+       int ret;
+
+       ret = state->read_status(fe, stat);
+
+       if (!ret)
+               dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT,
+                               !!(*stat & FE_HAS_LOCK));
+
+       return ret;
+}
+
+static int novatd_sleep_override(struct dvb_frontend* fe)
+{
+       struct dvb_usb_adapter *adap = fe->dvb->priv;
+       struct dvb_usb_device *dev = adap->dev;
+       struct dib0700_state *state = dev->priv;
+
+       /* turn off LED */
+       dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, 0);
+
+       return state->sleep(fe);
+}
+
+/**
+ * novatd_frontend_attach - Nova-TD specific attach
+ *
+ * Nova-TD uses GPIO0, 1 and 2 for its LEDs, so do not touch them except
+ * for status indication.
+ */
+static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
+{
+       struct dvb_usb_device *dev = adap->dev;
+       struct dib0700_state *st = dev->priv;
+
+       if (adap->id == 0) {
+               stk7070pd_init(dev);
+
+               /* turn the power LED on, the other two off (just in case) */
+               dib0700_set_gpio(dev, GPIO0, GPIO_OUT, 0);
+               dib0700_set_gpio(dev, GPIO1, GPIO_OUT, 0);
+               dib0700_set_gpio(dev, GPIO2, GPIO_OUT, 1);
+
+               if (dib7000p_i2c_enumeration(&dev->i2c_adap, 2, 18,
+                                            stk7070pd_dib7000p_config) != 0) {
+                       err("%s: dib7000p_i2c_enumeration failed.  Cannot continue\n",
+                           __func__);
+                       return -ENODEV;
+               }
+       }
+
+       adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &dev->i2c_adap,
+                       adap->id == 0 ? 0x80 : 0x82,
+                       &stk7070pd_dib7000p_config[adap->id]);
+
+       if (adap->fe_adap[0].fe == NULL)
+               return -ENODEV;
+
+       st->read_status = adap->fe_adap[0].fe->ops.read_status;
+       adap->fe_adap[0].fe->ops.read_status = novatd_read_status_override;
+       st->sleep = adap->fe_adap[0].fe->ops.sleep;
+       adap->fe_adap[0].fe->ops.sleep = novatd_sleep_override;
+
+       return 0;
+}
+
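The Nova-TD attach uses a wrap-and-chain override: the demod's original read_status and sleep ops are saved in the device state, and wrappers that drive the LEDs delegate to them. The pattern in isolation, as a minimal sketch (wrapped_read_status and update_lock_led are hypothetical names):

static int (*orig_read_status)(struct dvb_frontend *, fe_status_t *);

static void update_lock_led(bool on)
{
	/* hypothetical LED hook */
}

static int wrapped_read_status(struct dvb_frontend *fe, fe_status_t *stat)
{
	int ret = orig_read_status(fe, stat);	/* chain to the original */

	if (!ret)
		update_lock_led(!!(*stat & FE_HAS_LOCK));
	return ret;
}

/* At attach time, save then replace: */
orig_read_status = fe->ops.read_status;
fe->ops.read_status = wrapped_read_status;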
 /* S5H1411 */
 static struct s5h1411_config pinnacle_801e_config = {
        .output_mode   = S5H1411_PARALLEL_OUTPUT,
@@ -3861,6 +3938,57 @@ struct dvb_usb_device_properties dib0700_devices[] = {
                },
        }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
 
+               .num_adapters = 2,
+               .adapter = {
+                       {
+                       .num_frontends = 1,
+                       .fe = {{
+                               .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+                               .pid_filter_count = 32,
+                               .pid_filter       = stk70x0p_pid_filter,
+                               .pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
+                               .frontend_attach  = novatd_frontend_attach,
+                               .tuner_attach     = dib7070p_tuner_attach,
+
+                               DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+                       }},
+                               .size_of_priv     = sizeof(struct dib0700_adapter_state),
+                       }, {
+                       .num_frontends = 1,
+                       .fe = {{
+                               .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+                               .pid_filter_count = 32,
+                               .pid_filter       = stk70x0p_pid_filter,
+                               .pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
+                               .frontend_attach  = novatd_frontend_attach,
+                               .tuner_attach     = dib7070p_tuner_attach,
+
+                               DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+                       }},
+                               .size_of_priv     = sizeof(struct dib0700_adapter_state),
+                       }
+               },
+
+               .num_device_descs = 1,
+               .devices = {
+                       {   "Hauppauge Nova-TD Stick (52009)",
+                               { &dib0700_usb_id_table[35], NULL },
+                               { NULL },
+                       },
+               },
+
+               .rc.core = {
+                       .rc_interval      = DEFAULT_RC_INTERVAL,
+                       .rc_codes         = RC_MAP_DIB0700_RC5_TABLE,
+                       .module_name      = "dib0700",
+                       .rc_query         = dib0700_rc_query_old_firmware,
+                       .allowed_protos   = RC_TYPE_RC5 |
+                                           RC_TYPE_RC6 |
+                                           RC_TYPE_NEC,
+                       .change_protocol = dib0700_change_protocol,
+               },
+       }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+
                .num_adapters = 2,
                .adapter = {
                        {
@@ -3892,7 +4020,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
                        }
                },
 
-               .num_device_descs = 6,
+               .num_device_descs = 5,
                .devices = {
                        {   "DiBcom STK7070PD reference design",
                                { &dib0700_usb_id_table[17], NULL },
@@ -3902,10 +4030,6 @@ struct dvb_usb_device_properties dib0700_devices[] = {
                                { &dib0700_usb_id_table[18], NULL },
                                { NULL },
                        },
-                       {   "Hauppauge Nova-TD Stick (52009)",
-                               { &dib0700_usb_id_table[35], NULL },
-                               { NULL },
-                       },
                        {   "Hauppauge Nova-TD-500 (84xxx)",
                                { &dib0700_usb_id_table[36], NULL },
                                { NULL },
index 93e1b12e79077ddc851ffd5fd85c8240c795eed7..caae7f79c8379323d1bce9c6b998bc996c63fda4 100644 (file)
@@ -309,9 +309,14 @@ static int cxd2820r_read_status(struct dvb_frontend *fe, fe_status_t *status)
 
 static int cxd2820r_get_frontend(struct dvb_frontend *fe)
 {
+       struct cxd2820r_priv *priv = fe->demodulator_priv;
        int ret;
 
        dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+
+       if (priv->delivery_system == SYS_UNDEFINED)
+               return 0;
+
        switch (fe->dtv_property_cache.delivery_system) {
        case SYS_DVBT:
                ret = cxd2820r_get_frontend_t(fe);
@@ -476,10 +481,10 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
        dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
 
        /* switch between DVB-T and DVB-T2 when tune fails */
-       if (priv->last_tune_failed && (priv->delivery_system != SYS_DVBC_ANNEX_A)) {
+       if (priv->last_tune_failed) {
                if (priv->delivery_system == SYS_DVBT)
                        c->delivery_system = SYS_DVBT2;
-               else
+               else if (priv->delivery_system == SYS_DVBT2)
                        c->delivery_system = SYS_DVBT;
        }
 
@@ -492,6 +497,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
        /* frontend lock wait loop count */
        switch (priv->delivery_system) {
        case SYS_DVBT:
+       case SYS_DVBC_ANNEX_A:
                i = 20;
                break;
        case SYS_DVBT2:
index 938777065de6d1d14effed1ab4f9a01b3f7dc8c7..af65d013db11e8ed843c555fe0dc73d114103729 100644 (file)
@@ -1195,7 +1195,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
 
        for (i = 0; i < 30 ; i++) {
                ds3000_read_status(fe, &status);
-               if (status && FE_HAS_LOCK)
+               if (status & FE_HAS_LOCK)
                        break;
 
                msleep(10);
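The one-character fix above matters because FE_HAS_LOCK is a nonzero constant: with the logical '&&' the condition was true whenever status had any bit set, so the loop broke on the first FE_HAS_SIGNAL rather than on a real lock. A tiny illustration:

fe_status_t status = FE_HAS_SIGNAL | FE_HAS_CARRIER;	/* no lock yet */

if (status && FE_HAS_LOCK)	/* logical AND: any nonzero status passes */
	/* wrongly taken */;

if (status & FE_HAS_LOCK)	/* bitwise AND: tests the 0x10 lock bit */
	/* correctly skipped */;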
index 7fa3e472cdcaf40e0af6d97a4515a6b383dea0dd..fade566927c3ee93d65cba68a14309c798051d7b 100644 (file)
@@ -402,7 +402,7 @@ static int mb86a20s_get_modulation(struct mb86a20s_state *state,
                [2] = 0x8e,     /* Layer C */
        };
 
-       if (layer > ARRAY_SIZE(reg))
+       if (layer >= ARRAY_SIZE(reg))
                return -EINVAL;
        rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
        if (rc < 0)
@@ -435,7 +435,7 @@ static int mb86a20s_get_fec(struct mb86a20s_state *state,
                [2] = 0x8f,     /* Layer C */
        };
 
-       if (layer > ARRAY_SIZE(reg))
+       if (layer >= ARRAY_SIZE(reg))
                return -EINVAL;
        rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
        if (rc < 0)
@@ -470,7 +470,7 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
                [2] = 0x90,     /* Layer C */
        };
 
-       if (layer > ARRAY_SIZE(reg))
+       if (layer >= ARRAY_SIZE(reg))
                return -EINVAL;
        rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
        if (rc < 0)
@@ -494,7 +494,7 @@ static int mb86a20s_get_segment_count(struct mb86a20s_state *state,
                [2] = 0x91,     /* Layer C */
        };
 
-       if (layer > ARRAY_SIZE(reg))
+       if (layer >= ARRAY_SIZE(reg))
                return -EINVAL;
        rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
        if (rc < 0)
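All four hunks fix the same off-by-one: ARRAY_SIZE() yields the element count, so the last valid index is ARRAY_SIZE() - 1 and the guard must reject equality. In isolation (select_layer_reg is a hypothetical condensation of the functions above):

static int select_layer_reg(unsigned int layer)
{
	static const u8 reg[] = { 0x86, 0x8a, 0x8e };	/* indices 0..2 */

	/* 'layer > ARRAY_SIZE(reg)' would still accept layer == 3
	 * and read reg[3], one element past the end. */
	if (layer >= ARRAY_SIZE(reg))
		return -EINVAL;

	return reg[layer];
}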
index 86da3d816498e320772a3a094702d9699b894e19..ad7c72e8f517728539f93adde89f63ddf980d137 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/i2c.h>
-#include <linux/version.h>
 #include <asm/div64.h>
 
 #include "dvb_frontend.h"
index ec859a580651e327475f0ce57f7faba7f5a1d26b..f241702a0f36442aa7e3ab1c8d9fe55213e00d78 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 
 #include <media/as3645a.h>
 #include <media/v4l2-ctrls.h>
index 14cb961c22bdba5ef506ba56ad55e59d2dc1ade5..4bfd865a4106069538b893720622e424b0097915 100644 (file)
@@ -751,20 +751,10 @@ int cx18_v4l2_close(struct file *filp)
 
        CX18_DEBUG_IOCTL("close() of %s\n", s->name);
 
-       v4l2_fh_del(fh);
-       v4l2_fh_exit(fh);
-
-       /* Easy case first: this stream was never claimed by us */
-       if (s->id != id->open_id) {
-               kfree(id);
-               return 0;
-       }
-
-       /* 'Unclaim' this stream */
-
-       /* Stop radio */
        mutex_lock(&cx->serialize_lock);
-       if (id->type == CX18_ENC_STREAM_TYPE_RAD) {
+       /* Stop radio */
+       if (id->type == CX18_ENC_STREAM_TYPE_RAD &&
+                       v4l2_fh_is_singular_file(filp)) {
                /* Closing radio device, return to TV mode */
                cx18_mute(cx);
                /* Mark that the radio is no longer in use */
@@ -781,10 +771,14 @@ int cx18_v4l2_close(struct file *filp)
                }
                /* Done! Unmute and continue. */
                cx18_unmute(cx);
-               cx18_release_stream(s);
-       } else {
-               cx18_stop_capture(id, 0);
        }
+
+       v4l2_fh_del(fh);
+       v4l2_fh_exit(fh);
+
+       /* 'Unclaim' this stream */
+       if (s->id == id->open_id)
+               cx18_stop_capture(id, 0);
        kfree(id);
        mutex_unlock(&cx->serialize_lock);
        return 0;
@@ -810,21 +804,15 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
 
        item->open_id = cx->open_id++;
        filp->private_data = &item->fh;
+       v4l2_fh_add(&item->fh);
 
-       if (item->type == CX18_ENC_STREAM_TYPE_RAD) {
-               /* Try to claim this stream */
-               if (cx18_claim_stream(item, item->type)) {
-                       /* No, it's already in use */
-                       v4l2_fh_exit(&item->fh);
-                       kfree(item);
-                       return -EBUSY;
-               }
-
+       if (item->type == CX18_ENC_STREAM_TYPE_RAD &&
+                       v4l2_fh_is_singular_file(filp)) {
                if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
                        if (atomic_read(&cx->ana_capturing) > 0) {
                                /* switching to radio while capture is
                                   in progress is not polite */
-                               cx18_release_stream(s);
+                               v4l2_fh_del(&item->fh);
                                v4l2_fh_exit(&item->fh);
                                kfree(item);
                                return -EBUSY;
@@ -842,7 +830,6 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
                /* Done! Unmute and continue. */
                cx18_unmute(cx);
        }
-       v4l2_fh_add(&item->fh);
        return 0;
 }
 
index 919ed77b32f2dec7fc6e6b195a9722c6f77f6fbb..875a7ce947361ffb720ced4e811dd909cb03b15b 100644 (file)
@@ -1052,7 +1052,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (dev == NULL) {
                cx231xx_err(DRIVER_NAME ": out of memory!\n");
-               clear_bit(dev->devno, &cx231xx_devused);
+               clear_bit(nr, &cx231xx_devused);
                return -ENOMEM;
        }
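
The cx231xx one-liner is a NULL-dereference fix on an error path: when kzalloc() fails, dev is NULL, so dev->devno must not be read; the slot number nr computed earlier is used to release the device-used bit instead. The shape of the bug, reduced to a sketch (names hypothetical):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct mydrv_dev {
        int devno;
};

static unsigned long mydrv_devused;

static int mydrv_claim_slot(int nr)
{
        struct mydrv_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev) {
                /* WRONG: clear_bit(dev->devno, &mydrv_devused);
                 * dev is NULL on this path.  Release the slot via
                 * the index we already hold instead. */
                clear_bit(nr, &mydrv_devused);
                return -ENOMEM;
        }
        dev->devno = nr;
        /* ... registration continues; dev is freed on disconnect ... */
        return 0;
}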
 
index 3c01be999e353817718ab342c27943fc1a52d78f..19b5499d2624cc9b130e8340062222fd12a465e3 100644 (file)
@@ -213,8 +213,8 @@ struct cx23885_board cx23885_boards[] = {
                .portc          = CX23885_MPEG_DVB,
                .tuner_type     = TUNER_XC4000,
                .tuner_addr     = 0x61,
-               .radio_type     = TUNER_XC4000,
-               .radio_addr     = 0x61,
+               .radio_type     = UNSET,
+               .radio_addr     = ADDR_UNSET,
                .input          = {{
                        .type   = CX23885_VMUX_TELEVISION,
                        .vmux   = CX25840_VIN2_CH1 |
index af8a225763d3dbfc3a41068855625a83bf13d4cd..6835eb1fc09319cb52c006f6af6e299f7fc76833 100644 (file)
@@ -943,6 +943,11 @@ static int dvb_register(struct cx23885_tsport *port)
 
                        fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
                                        &dev->i2c_bus[1].i2c_adap, &cfg);
+                       if (!fe) {
+                               printk(KERN_ERR "%s/2: xc4000 attach failed\n",
+                                      dev->name);
+                               goto frontend_detach;
+                       }
                }
                break;
        case CX23885_BOARD_TBS_6920:
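
dvb_attach() returns NULL when the requested tuner or demod cannot be attached, and the old code carried on and used the frontend regardless; the fix bails out through the shared frontend_detach label. That goto-unwind idiom, as a hedged sketch (all mydrv_* names hypothetical):

#include <linux/errno.h>

struct mydrv_port;

int mydrv_alloc_frontend(struct mydrv_port *port);      /* hypothetical */
void *mydrv_attach_tuner(struct mydrv_port *port);      /* may return NULL */
void mydrv_free_frontend(struct mydrv_port *port);      /* hypothetical */

/* Every attach step that can fail jumps to a label that unwinds
 * exactly what was set up before it. */
int mydrv_register(struct mydrv_port *port)
{
        int ret = mydrv_alloc_frontend(port);

        if (ret)
                return ret;

        if (!mydrv_attach_tuner(port)) {
                ret = -ENODEV;
                goto frontend_detach;
        }
        return 0;

frontend_detach:
        mydrv_free_frontend(port);
        return ret;
}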
index 4bbf9bb97bde984e23c0665fda59a72bc7dbd80a..c654bdc7ccb201dd4e285c0b7ddfe703ab89cb61 100644 (file)
@@ -1550,7 +1550,6 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
        struct v4l2_control ctrl;
        struct videobuf_dvb_frontend *vfe;
        struct dvb_frontend *fe;
-       int err = 0;
 
        struct analog_parameters params = {
                .mode      = V4L2_TUNER_ANALOG_TV,
@@ -1572,8 +1571,10 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
                params.frequency, f->tuner, params.std);
 
        vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1);
-       if (!vfe)
-               err = -EINVAL;
+       if (!vfe) {
+               mutex_unlock(&dev->lock);
+               return -EINVAL;
+       }
 
        fe = vfe->dvb.frontend;
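
The set_freq fix above replaces an error flag that was recorded and then ignored: the old code set err = -EINVAL but fell through and dereferenced the NULL vfe anyway. The repaired shape pairs every early return after mutex_lock() with an unlock, sketched here with hypothetical names:

#include <linux/errno.h>
#include <linux/mutex.h>

struct mydrv_fe;
struct mydrv_dev {
        struct mutex lock;
};

struct mydrv_fe *mydrv_get_frontend(struct mydrv_dev *dev); /* may be NULL */
void mydrv_tune(struct mydrv_fe *vfe);                      /* hypothetical */

int mydrv_set_freq(struct mydrv_dev *dev)
{
        struct mydrv_fe *vfe;

        mutex_lock(&dev->lock);
        vfe = mydrv_get_frontend(dev);
        if (!vfe) {
                /* unlock on the early exit; do not fall through */
                mutex_unlock(&dev->lock);
                return -EINVAL;
        }
        mydrv_tune(vfe);
        mutex_unlock(&dev->lock);
        return 0;
}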
 
index 62c7ad050f9bb0b067a963405ddda6e8cbcd1838..cbd5d119a2c660ebd7f8e9aba9b4206795d290e9 100644 (file)
@@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = {
                .name           = "Pinnacle Hybrid PCTV",
                .tuner_type     = TUNER_XC2028,
                .tuner_addr     = 0x61,
-               .radio_type     = TUNER_XC2028,
-               .radio_addr     = 0x61,
+               .radio_type     = UNSET,
+               .radio_addr     = ADDR_UNSET,
                .input          = { {
                        .type   = CX88_VMUX_TELEVISION,
                        .vmux   = 0,
@@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = {
                .name           = "Leadtek TV2000 XP Global",
                .tuner_type     = TUNER_XC2028,
                .tuner_addr     = 0x61,
-               .radio_type     = TUNER_XC2028,
-               .radio_addr     = 0x61,
+               .radio_type     = UNSET,
+               .radio_addr     = ADDR_UNSET,
                .input          = { {
                        .type   = CX88_VMUX_TELEVISION,
                        .vmux   = 0,
@@ -2115,8 +2115,8 @@ static const struct cx88_board cx88_boards[] = {
                .name           = "Terratec Cinergy HT PCI MKII",
                .tuner_type     = TUNER_XC2028,
                .tuner_addr     = 0x61,
-               .radio_type     = TUNER_XC2028,
-               .radio_addr     = 0x61,
+               .radio_type     = UNSET,
+               .radio_addr     = ADDR_UNSET,
                .input          = { {
                        .type   = CX88_VMUX_TELEVISION,
                        .vmux   = 0,
@@ -2154,9 +2154,9 @@ static const struct cx88_board cx88_boards[] = {
        [CX88_BOARD_WINFAST_DTV1800H] = {
                .name           = "Leadtek WinFast DTV1800 Hybrid",
                .tuner_type     = TUNER_XC2028,
-               .radio_type     = TUNER_XC2028,
+               .radio_type     = UNSET,
                .tuner_addr     = 0x61,
-               .radio_addr     = 0x61,
+               .radio_addr     = ADDR_UNSET,
                /*
                 * GPIO setting
                 *
@@ -2195,9 +2195,9 @@ static const struct cx88_board cx88_boards[] = {
        [CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
                .name           = "Leadtek WinFast DTV1800 H (XC4000)",
                .tuner_type     = TUNER_XC4000,
-               .radio_type     = TUNER_XC4000,
+               .radio_type     = UNSET,
                .tuner_addr     = 0x61,
-               .radio_addr     = 0x61,
+               .radio_addr     = ADDR_UNSET,
                /*
                 * GPIO setting
                 *
@@ -2236,9 +2236,9 @@ static const struct cx88_board cx88_boards[] = {
        [CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
                .name           = "Leadtek WinFast DTV2000 H PLUS",
                .tuner_type     = TUNER_XC4000,
-               .radio_type     = TUNER_XC4000,
+               .radio_type     = UNSET,
                .tuner_addr     = 0x61,
-               .radio_addr     = 0x61,
+               .radio_addr     = ADDR_UNSET,
                /*
                 * GPIO
                 *   2: 1: mute audio
index 544af91cbdc1b0fb9962d6550cb9c4892299a74b..3949b7dc2368370ebe6d28f15b3b83bc2999babe 100644 (file)
@@ -731,9 +731,6 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
 
        init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
 
-       /* start counting open_id at 1 */
-       itv->open_id = 1;
-
        /* Initial settings */
        itv->cxhdl.port = CX2341X_PORT_MEMORY;
        itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
index 8f9cc17b518eaaf6320e092d0a667d8d4876e586..06f3d78389bfbb250f89d655a9d6e57ebe359d24 100644 (file)
@@ -332,7 +332,7 @@ struct ivtv_stream {
        const char *name;               /* name of the stream */
        int type;                       /* stream type */
 
-       u32 id;
+       struct v4l2_fh *fh;             /* pointer to the streaming filehandle */
        spinlock_t qlock;               /* locks access to the queues */
        unsigned long s_flags;          /* status flags, see above */
        int dma;                        /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */
@@ -379,7 +379,6 @@ struct ivtv_stream {
 
 struct ivtv_open_id {
        struct v4l2_fh fh;
-       u32 open_id;                    /* unique ID for this file descriptor */
        int type;                       /* stream type */
        int yuv_frames;                 /* 1: started OUT_UDMA_YUV output mode */
        struct ivtv *itv;
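
Here ivtv stops tracking the claiming file descriptor by numeric open_id and stores a pointer to the claiming struct v4l2_fh instead: "unclaimed" becomes NULL and every ownership test is a plain pointer comparison, which is what the ivtv_claim_stream()/ivtv_release_stream() hunks below switch over to. A minimal sketch of the scheme:

#include <linux/errno.h>
#include <media/v4l2-fh.h>

struct mydrv_stream {
        struct v4l2_fh *fh;     /* claiming file handle, NULL = free */
};

static int mydrv_claim_stream(struct mydrv_stream *s, struct v4l2_fh *fh)
{
        if (s->fh == fh)        /* this handle already owns the stream */
                return 0;
        if (s->fh)              /* another handle does */
                return -EBUSY;
        s->fh = fh;
        return 0;
}

static void mydrv_release_stream(struct mydrv_stream *s)
{
        s->fh = NULL;           /* back to unclaimed */
}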
index 38f052257f4620498d6b23e0bf3b7cfb88aa5ba9..2cd6c89b7d917d618de7791bf919bb1015c899b9 100644 (file)
@@ -50,16 +50,16 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
 
        if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
                /* someone already claimed this stream */
-               if (s->id == id->open_id) {
+               if (s->fh == &id->fh) {
                        /* yes, this file descriptor did. So that's OK. */
                        return 0;
                }
-               if (s->id == -1 && (type == IVTV_DEC_STREAM_TYPE_VBI ||
+               if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
                                         type == IVTV_ENC_STREAM_TYPE_VBI)) {
                        /* VBI is handled already internally, now also assign
                           the file descriptor to this stream for external
                           reading of the stream. */
-                       s->id = id->open_id;
+                       s->fh = &id->fh;
                        IVTV_DEBUG_INFO("Start Read VBI\n");
                        return 0;
                }
@@ -67,7 +67,7 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
                IVTV_DEBUG_INFO("Stream %d is busy\n", type);
                return -EBUSY;
        }
-       s->id = id->open_id;
+       s->fh = &id->fh;
        if (type == IVTV_DEC_STREAM_TYPE_VBI) {
                /* Enable reinsertion interrupt */
                ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
@@ -104,7 +104,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
        struct ivtv *itv = s->itv;
        struct ivtv_stream *s_vbi;
 
-       s->id = -1;
+       s->fh = NULL;
        if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
                test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
                /* this stream is still in use internally */
@@ -136,7 +136,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
                /* was already cleared */
                return;
        }
-       if (s_vbi->id != -1) {
+       if (s_vbi->fh) {
                /* VBI stream still claimed by a file descriptor */
                return;
        }
@@ -268,11 +268,13 @@ static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block,
                }
 
                /* wait for more data to arrive */
+               mutex_unlock(&itv->serialize_lock);
                prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
                /* New buffers might have become available before we were added to the waitqueue */
                if (!s->q_full.buffers)
                        schedule();
                finish_wait(&s->waitq, &wait);
+               mutex_lock(&itv->serialize_lock);
                if (signal_pending(current)) {
                        /* return if a signal was received */
                        IVTV_DEBUG_INFO("User stopped %s\n", s->name);
@@ -357,7 +359,7 @@ static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_co
        size_t tot_written = 0;
        int single_frame = 0;
 
-       if (atomic_read(&itv->capturing) == 0 && s->id == -1) {
+       if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) {
                /* shouldn't happen */
                IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
                return -EIO;
@@ -507,9 +509,7 @@ ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_
 
        IVTV_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name);
 
-       mutex_lock(&itv->serialize_lock);
        rc = ivtv_start_capture(id);
-       mutex_unlock(&itv->serialize_lock);
        if (rc)
                return rc;
        return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
@@ -584,9 +584,7 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
        set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
 
        /* Start decoder (returns 0 if already started) */
-       mutex_lock(&itv->serialize_lock);
        rc = ivtv_start_decoding(id, itv->speed);
-       mutex_unlock(&itv->serialize_lock);
        if (rc) {
                IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);
 
@@ -627,11 +625,13 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
                        break;
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;
+               mutex_unlock(&itv->serialize_lock);
                prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
                /* New buffers might have become free before we were added to the waitqueue */
                if (!s->q_free.buffers)
                        schedule();
                finish_wait(&s->waitq, &wait);
+               mutex_lock(&itv->serialize_lock);
                if (signal_pending(current)) {
                        IVTV_DEBUG_INFO("User stopped %s\n", s->name);
                        return -EINTR;
@@ -686,12 +686,14 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
                        if (mode == OUT_YUV)
                                ivtv_yuv_setup_stream_frame(itv);
 
+                       mutex_unlock(&itv->serialize_lock);
                        prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
                        while (!(got_sig = signal_pending(current)) &&
                                        test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
                                schedule();
                        }
                        finish_wait(&itv->dma_waitq, &wait);
+                       mutex_lock(&itv->serialize_lock);
                        if (got_sig) {
                                IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
                                return -EINTR;
@@ -756,9 +758,7 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
        if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
                int rc;
 
-               mutex_lock(&itv->serialize_lock);
                rc = ivtv_start_capture(id);
-               mutex_unlock(&itv->serialize_lock);
                if (rc) {
                        IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n",
                                        s->name, rc);
@@ -808,7 +808,7 @@ void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
                     id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
                    test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
                        /* Also used internally, don't stop capturing */
-                       s->id = -1;
+                       s->fh = NULL;
                }
                else {
                        ivtv_stop_v4l2_encode_stream(s, gop_end);
@@ -861,20 +861,9 @@ int ivtv_v4l2_close(struct file *filp)
 
        IVTV_DEBUG_FILE("close %s\n", s->name);
 
-       v4l2_fh_del(fh);
-       v4l2_fh_exit(fh);
-
-       /* Easy case first: this stream was never claimed by us */
-       if (s->id != id->open_id) {
-               kfree(id);
-               return 0;
-       }
-
-       /* 'Unclaim' this stream */
-
        /* Stop radio */
-       mutex_lock(&itv->serialize_lock);
-       if (id->type == IVTV_ENC_STREAM_TYPE_RAD) {
+       if (id->type == IVTV_ENC_STREAM_TYPE_RAD &&
+                       v4l2_fh_is_singular_file(filp)) {
                /* Closing radio device, return to TV mode */
                ivtv_mute(itv);
                /* Mark that the radio is no longer in use */
@@ -890,13 +879,25 @@ int ivtv_v4l2_close(struct file *filp)
                if (atomic_read(&itv->capturing) > 0) {
                        /* Undo video mute */
                        ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
-                               v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
-                               (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
+                                       v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
+                                       (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
                }
                /* Done! Unmute and continue. */
                ivtv_unmute(itv);
-               ivtv_release_stream(s);
-       } else if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
+       }
+
+       v4l2_fh_del(fh);
+       v4l2_fh_exit(fh);
+
+       /* Easy case first: this stream was never claimed by us */
+       if (s->fh != &id->fh) {
+               kfree(id);
+               return 0;
+       }
+
+       /* 'Unclaim' this stream */
+
+       if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
                struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT];
 
                ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0);
@@ -911,21 +912,25 @@ int ivtv_v4l2_close(struct file *filp)
                ivtv_stop_capture(id, 0);
        }
        kfree(id);
-       mutex_unlock(&itv->serialize_lock);
        return 0;
 }
 
-static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
+int ivtv_v4l2_open(struct file *filp)
 {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
        struct video_device *vdev = video_devdata(filp);
-#endif
+       struct ivtv_stream *s = video_get_drvdata(vdev);
        struct ivtv *itv = s->itv;
        struct ivtv_open_id *item;
        int res = 0;
 
        IVTV_DEBUG_FILE("open %s\n", s->name);
 
+       if (ivtv_init_on_first_open(itv)) {
+               IVTV_ERR("Failed to initialize on device %s\n",
+                        video_device_node_name(vdev));
+               return -ENXIO;
+       }
+
 #ifdef CONFIG_VIDEO_ADV_DEBUG
        /* Unless ivtv_fw_debug is set, error out if firmware dead. */
        if (ivtv_fw_debug) {
@@ -966,31 +971,19 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
                return -ENOMEM;
        }
        v4l2_fh_init(&item->fh, s->vdev);
-       if (res < 0) {
-               v4l2_fh_exit(&item->fh);
-               kfree(item);
-               return res;
-       }
        item->itv = itv;
        item->type = s->type;
 
-       item->open_id = itv->open_id++;
        filp->private_data = &item->fh;
+       v4l2_fh_add(&item->fh);
 
-       if (item->type == IVTV_ENC_STREAM_TYPE_RAD) {
-               /* Try to claim this stream */
-               if (ivtv_claim_stream(item, item->type)) {
-                       /* No, it's already in use */
-                       v4l2_fh_exit(&item->fh);
-                       kfree(item);
-                       return -EBUSY;
-               }
-
+       if (item->type == IVTV_ENC_STREAM_TYPE_RAD &&
+                       v4l2_fh_is_singular_file(filp)) {
                if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
                        if (atomic_read(&itv->capturing) > 0) {
                                /* switching to radio while capture is
                                   in progress is not polite */
-                               ivtv_release_stream(s);
+                               v4l2_fh_del(&item->fh);
                                v4l2_fh_exit(&item->fh);
                                kfree(item);
                                return -EBUSY;
@@ -1022,32 +1015,9 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
                                1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
                itv->yuv_info.stream_size = 0;
        }
-       v4l2_fh_add(&item->fh);
        return 0;
 }
 
-int ivtv_v4l2_open(struct file *filp)
-{
-       int res;
-       struct ivtv *itv = NULL;
-       struct ivtv_stream *s = NULL;
-       struct video_device *vdev = video_devdata(filp);
-
-       s = video_get_drvdata(vdev);
-       itv = s->itv;
-
-       mutex_lock(&itv->serialize_lock);
-       if (ivtv_init_on_first_open(itv)) {
-               IVTV_ERR("Failed to initialize on device %s\n",
-                        video_device_node_name(vdev));
-               mutex_unlock(&itv->serialize_lock);
-               return -ENXIO;
-       }
-       res = ivtv_serialized_open(s, filp);
-       mutex_unlock(&itv->serialize_lock);
-       return res;
-}
-
 void ivtv_mute(struct ivtv *itv)
 {
        if (atomic_read(&itv->capturing))
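
The ivtv file-operations changes above all follow one pattern: open, ioctl and close are now serialized by the V4L2 core through itv->serialize_lock (see the s->vdev->lock assignment in the ivtv_prep_dev() hunk further down), so the explicit lock/unlock calls disappear from the handlers, and every spot that sleeps on a waitqueue must drop the mutex first or it would stall all other file operations on the device for the duration. The recurring unlock-wait-relock sequence, as a self-contained sketch:

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DEFINE_MUTEX(serialize_lock);
static DECLARE_WAIT_QUEUE_HEAD(data_waitq);
static int data_ready;

/* Sleep for data without holding the serialization mutex: unlock,
 * register on the waitqueue, re-check the condition (it may have
 * become true in between), sleep, then relock. */
static int mydrv_wait_for_data(void)
{
        DEFINE_WAIT(wait);

        mutex_unlock(&serialize_lock);
        prepare_to_wait(&data_waitq, &wait, TASK_INTERRUPTIBLE);
        if (!data_ready)
                schedule();
        finish_wait(&data_waitq, &wait);
        mutex_lock(&serialize_lock);

        return signal_pending(current) ? -EINTR : 0;
}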
index ecafa697326e302803ab85604c0a7ab8aec697f6..c4bc481430985c6405580a461e2cc8e518ae2d73 100644 (file)
@@ -179,6 +179,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
                ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0);
 
                /* Wait for any DMA to finish */
+               mutex_unlock(&itv->serialize_lock);
                prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
                while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                        got_sig = signal_pending(current);
@@ -188,6 +189,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
                        schedule();
                }
                finish_wait(&itv->dma_waitq, &wait);
+               mutex_lock(&itv->serialize_lock);
                if (got_sig)
                        return -EINTR;
 
@@ -1107,6 +1109,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
         * happens within the first 100 lines of the top field.
         * Make 4 attempts to sync to the decoder before giving up.
         */
+       mutex_unlock(&itv->serialize_lock);
        for (f = 0; f < 4; f++) {
                prepare_to_wait(&itv->vsync_waitq, &wait,
                                TASK_UNINTERRUPTIBLE);
@@ -1115,6 +1118,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
                schedule_timeout(msecs_to_jiffies(25));
        }
        finish_wait(&itv->vsync_waitq, &wait);
+       mutex_lock(&itv->serialize_lock);
 
        if (f == 4)
                IVTV_WARN("Mode change failed to sync to decoder\n");
@@ -1842,8 +1846,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
        return 0;
 }
 
-static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
-               unsigned int cmd, unsigned long arg)
+long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        struct video_device *vfd = video_devdata(filp);
        long ret;
@@ -1855,21 +1858,6 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
        return ret;
 }
 
-long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-       struct ivtv_open_id *id = fh2id(filp->private_data);
-       struct ivtv *itv = id->itv;
-       long res;
-
-       /* DQEVENT can block, so this should not run with the serialize lock */
-       if (cmd == VIDIOC_DQEVENT)
-               return ivtv_serialized_ioctl(itv, filp, cmd, arg);
-       mutex_lock(&itv->serialize_lock);
-       res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
-       mutex_unlock(&itv->serialize_lock);
-       return res;
-}
-
 static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
        .vidioc_querycap                    = ivtv_querycap,
        .vidioc_s_audio                     = ivtv_s_audio,
index 9c29e964d400b955d36074487c17c22e1195bbcd..1b3b9578bf47657915b464e9b9513f48b481aa0e 100644 (file)
@@ -288,13 +288,13 @@ static void dma_post(struct ivtv_stream *s)
                        ivtv_process_vbi_data(itv, buf, 0, s->type);
                        s->q_dma.bytesused += buf->bytesused;
                }
-               if (s->id == -1) {
+               if (s->fh == NULL) {
                        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
                        return;
                }
        }
        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
-       if (s->id != -1)
+       if (s->fh)
                wake_up(&s->waitq);
 }
 
index e7794dc1330e50348a5e358714abf9f54702e56d..c6e28b4ebbed6dfe3e821e443dcff04d8e4694e3 100644 (file)
@@ -159,7 +159,6 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
                s->buffers = (itv->options.kilobytes[type] * 1024 + s->buf_size - 1) / s->buf_size;
        spin_lock_init(&s->qlock);
        init_waitqueue_head(&s->waitq);
-       s->id = -1;
        s->sg_handle = IVTV_DMA_UNMAPPED;
        ivtv_queue_init(&s->q_free);
        ivtv_queue_init(&s->q_full);
@@ -214,6 +213,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
        s->vdev->fops = ivtv_stream_info[type].fops;
        s->vdev->release = video_device_release;
        s->vdev->tvnorms = V4L2_STD_ALL;
+       s->vdev->lock = &itv->serialize_lock;
        set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags);
        ivtv_set_funcs(s->vdev);
        return 0;
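
This single assignment is what makes all the removals above safe: once video_device::lock is set, the V4L2 core takes that mutex around every file operation on the node (open, release, ioctl, read, poll, ...), so the handlers run serialized without locking it themselves. The registration side, sketched with a hypothetical driver:

#include <linux/mutex.h>
#include <media/v4l2-dev.h>

static DEFINE_MUTEX(mydrv_serialize_lock);

/* With vdev->lock set, v4l2-dev wraps each file operation in
 * mutex_lock(vdev->lock) / mutex_unlock(vdev->lock). */
static int mydrv_register_node(struct video_device *vdev)
{
        vdev->lock = &mydrv_serialize_lock;
        return video_register_device(vdev, VFL_TYPE_GRABBER, -1);
}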
index dcbab6ad4c26ee8843336f673aeb1f9f7fe7ef80..2ad65eb29832754b54a5d190257cadd5d6a09031 100644 (file)
@@ -1149,23 +1149,37 @@ int ivtv_yuv_udma_stream_frame(struct ivtv *itv, void __user *src)
 {
        struct yuv_playback_info *yi = &itv->yuv_info;
        struct ivtv_dma_frame dma_args;
+       int res;
 
        ivtv_yuv_setup_stream_frame(itv);
 
        /* We only need to supply source addresses for this */
        dma_args.y_source = src;
        dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31);
-       return ivtv_yuv_udma_frame(itv, &dma_args);
+       /* Wait for frame DMA. Note that serialize_lock is locked,
+          so to allow other processes to access the driver while
+          we are waiting unlock first and later lock again. */
+       mutex_unlock(&itv->serialize_lock);
+       res = ivtv_yuv_udma_frame(itv, &dma_args);
+       mutex_lock(&itv->serialize_lock);
+       return res;
 }
 
 /* IVTV_IOC_DMA_FRAME ioctl handler */
 int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
 {
-/*     IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
+       int res;
 
+/*     IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
        ivtv_yuv_next_free(itv);
        ivtv_yuv_setup_frame(itv, args);
-       return ivtv_yuv_udma_frame(itv, args);
+       /* Wait for frame DMA. Note that serialize_lock is locked,
+          so to allow other processes to access the driver while
+          we are waiting unlock first and later lock again. */
+       mutex_unlock(&itv->serialize_lock);
+       res = ivtv_yuv_udma_frame(itv, args);
+       mutex_lock(&itv->serialize_lock);
+       return res;
 }
 
 void ivtv_yuv_close(struct ivtv *itv)
@@ -1174,7 +1188,9 @@ void ivtv_yuv_close(struct ivtv *itv)
        int h_filter, v_filter_1, v_filter_2;
 
        IVTV_DEBUG_YUV("ivtv_yuv_close\n");
+       mutex_unlock(&itv->serialize_lock);
        ivtv_waitq(&itv->vsync_waitq);
+       mutex_lock(&itv->serialize_lock);
 
        yi->running = 0;
        atomic_set(&yi->next_dma_frame, -1);
index 0cb461dd396af39ffbff65b387fdae11742d4ebf..74522773e934c18e417f36b36aed40017b35e02f 100644 (file)
@@ -287,7 +287,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
                sg_dma_len(sg)          = new_size;
 
                txd = ichan->dma_chan.device->device_prep_slave_sg(
-                       &ichan->dma_chan, sg, 1, DMA_FROM_DEVICE,
+                       &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT);
                if (!txd)
                        goto error;
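
This hunk tracks the dmaengine API split that gave slave transfers their own enum dma_transfer_direction: a capture transfer moves data from the peripheral into memory, so the generic dma_data_direction value DMA_FROM_DEVICE becomes DMA_DEV_TO_MEM. A sketch of the prep call with the signature of this era (channel setup omitted, names hypothetical):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Capture DMA: device-to-memory, so the slave direction is
 * DMA_DEV_TO_MEM (it would be DMA_MEM_TO_DEV for playback). */
static struct dma_async_tx_descriptor *
mydrv_prep_capture(struct dma_chan *chan, struct scatterlist *sg)
{
        return chan->device->device_prep_slave_sg(chan, sg, 1,
                                                  DMA_DEV_TO_MEM,
                                                  DMA_PREP_INTERRUPT);
}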
index a277f95091ef2b481b369dca5cacc30cbf04afe4..1fb7d5bd5ec26a3f8955e8a6cdc3e7a6cd3841f9 100644 (file)
@@ -1042,7 +1042,8 @@ static int vidioc_querycap(struct file *file, void *fh,
        strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
        strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
        cap->bus_info[0] = '\0';
-       cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+       cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+               V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
 
        return 0;
 }
@@ -1825,7 +1826,9 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
        ovid = &vout->vid_info;
        ovl = ovid->overlays[0];
 
-       a->flags = 0x0;
+       /* The video overlay must stay within the framebuffer and can't be
+          positioned independently. */
+       a->flags = V4L2_FBUF_FLAG_OVERLAY;
        a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
                | V4L2_FBUF_CAP_SRC_CHROMAKEY;
 
index 905d41d90c6aa816b7162ddf1d0f5e99bee4912a..1f506fde97d0a52433a7477f6ac843cc60af1f2f 100644 (file)
@@ -104,47 +104,16 @@ static struct Nala_table_entry Nala_table[PSZ_MAX][PWC_FPS_MAX_NALA] =
 
 /****************************************************************************/
 
-static int _send_control_msg(struct pwc_device *pdev,
-       u8 request, u16 value, int index, void *buf, int buflen)
-{
-       int rc;
-       void *kbuf = NULL;
-
-       if (buflen) {
-               kbuf = kmemdup(buf, buflen, GFP_KERNEL); /* not allowed on stack */
-               if (kbuf == NULL)
-                       return -ENOMEM;
-       }
-
-       rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
-               request,
-               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-               value,
-               index,
-               kbuf, buflen, USB_CTRL_SET_TIMEOUT);
-
-       kfree(kbuf);
-       return rc;
-}
-
 static int recv_control_msg(struct pwc_device *pdev,
-       u8 request, u16 value, void *buf, int buflen)
+       u8 request, u16 value, int recv_count)
 {
        int rc;
-       void *kbuf = kmalloc(buflen, GFP_KERNEL); /* not allowed on stack */
-
-       if (kbuf == NULL)
-               return -ENOMEM;
 
        rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0),
                request,
                USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-               value,
-               pdev->vcinterface,
-               kbuf, buflen, USB_CTRL_GET_TIMEOUT);
-       memcpy(buf, kbuf, buflen);
-       kfree(kbuf);
-
+               value, pdev->vcinterface,
+               pdev->ctrl_buf, recv_count, USB_CTRL_GET_TIMEOUT);
        if (rc < 0)
                PWC_ERROR("recv_control_msg error %d req %02x val %04x\n",
                          rc, request, value);
@@ -152,27 +121,39 @@ static int recv_control_msg(struct pwc_device *pdev,
 }
 
 static inline int send_video_command(struct pwc_device *pdev,
-       int index, void *buf, int buflen)
+       int index, const unsigned char *buf, int buflen)
 {
-       return _send_control_msg(pdev,
-               SET_EP_STREAM_CTL,
-               VIDEO_OUTPUT_CONTROL_FORMATTER,
-               index,
-               buf, buflen);
+       int rc;
+
+       memcpy(pdev->ctrl_buf, buf, buflen);
+
+       rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
+                       SET_EP_STREAM_CTL,
+                       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                       VIDEO_OUTPUT_CONTROL_FORMATTER, index,
+                       pdev->ctrl_buf, buflen, USB_CTRL_SET_TIMEOUT);
+       if (rc >= 0)
+               memcpy(pdev->cmd_buf, buf, buflen);
+       else
+               PWC_ERROR("send_video_command error %d\n", rc);
+
+       return rc;
 }
 
 int send_control_msg(struct pwc_device *pdev,
        u8 request, u16 value, void *buf, int buflen)
 {
-       return _send_control_msg(pdev,
-               request, value, pdev->vcinterface, buf, buflen);
+       return usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
+                       request,
+                       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                       value, pdev->vcinterface,
+                       buf, buflen, USB_CTRL_SET_TIMEOUT);
 }
 
-static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
-                              int *compression)
+static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt,
+                              int frames, int *compression, int send_to_cam)
 {
-       unsigned char buf[3];
-       int ret, fps;
+       int fps, ret = 0;
        struct Nala_table_entry *pEntry;
        int frames2frames[31] =
        { /* closest match of framerate */
@@ -194,30 +175,29 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
          7              /* 30    */
        };
 
-       if (size < 0 || size > PSZ_CIF || frames < 4 || frames > 25)
+       if (size < 0 || size > PSZ_CIF)
                return -EINVAL;
+       if (frames < 4)
+               frames = 4;
+       else if (frames > 25)
+               frames = 25;
        frames = frames2frames[frames];
        fps = frames2table[frames];
        pEntry = &Nala_table[size][fps];
        if (pEntry->alternate == 0)
                return -EINVAL;
 
-       memcpy(buf, pEntry->mode, 3);
-       ret = send_video_command(pdev, pdev->vendpoint, buf, 3);
-       if (ret < 0) {
-               PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret);
+       if (send_to_cam)
+               ret = send_video_command(pdev, pdev->vendpoint,
+                                        pEntry->mode, 3);
+       if (ret < 0)
                return ret;
-       }
-       if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
-               ret = pwc_dec1_init(pdev, pdev->type, pdev->release, buf);
-               if (ret < 0)
-                       return ret;
-       }
 
-       pdev->cmd_len = 3;
-       memcpy(pdev->cmd_buf, buf, 3);
+       if (pEntry->compressed && pixfmt == V4L2_PIX_FMT_YUV420)
+               pwc_dec1_init(pdev, pEntry->mode);
 
        /* Set various parameters */
+       pdev->pixfmt = pixfmt;
        pdev->vframes = frames;
        pdev->valternate = pEntry->alternate;
        pdev->width  = pwc_image_sizes[size][0];
@@ -243,18 +223,20 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
 }
 
 
-static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
-       int *compression)
+static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt,
+                               int frames, int *compression, int send_to_cam)
 {
-       unsigned char buf[13];
        const struct Timon_table_entry *pChoose;
-       int ret, fps;
+       int fps, ret = 0;
 
-       if (size >= PSZ_MAX || frames < 5 || frames > 30 ||
-           *compression < 0 || *compression > 3)
-               return -EINVAL;
-       if (size == PSZ_VGA && frames > 15)
+       if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
                return -EINVAL;
+       if (frames < 5)
+               frames = 5;
+       else if (size == PSZ_VGA && frames > 15)
+               frames = 15;
+       else if (frames > 30)
+               frames = 30;
        fps = (frames / 5) - 1;
 
        /* Find a supported framerate with progressively higher compression */
@@ -268,22 +250,18 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
        if (pChoose == NULL || pChoose->alternate == 0)
                return -ENOENT; /* Not supported. */
 
-       memcpy(buf, pChoose->mode, 13);
-       ret = send_video_command(pdev, pdev->vendpoint, buf, 13);
+       if (send_to_cam)
+               ret = send_video_command(pdev, pdev->vendpoint,
+                                        pChoose->mode, 13);
        if (ret < 0)
                return ret;
 
-       if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
-               ret = pwc_dec23_init(pdev, pdev->type, buf);
-               if (ret < 0)
-                       return ret;
-       }
-
-       pdev->cmd_len = 13;
-       memcpy(pdev->cmd_buf, buf, 13);
+       if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
+               pwc_dec23_init(pdev, pChoose->mode);
 
        /* Set various parameters */
-       pdev->vframes = frames;
+       pdev->pixfmt = pixfmt;
+       pdev->vframes = (fps + 1) * 5;
        pdev->valternate = pChoose->alternate;
        pdev->width  = pwc_image_sizes[size][0];
        pdev->height = pwc_image_sizes[size][1];
@@ -296,18 +274,20 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
 }
 
 
-static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
-       int *compression)
+static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt,
+                               int frames, int *compression, int send_to_cam)
 {
        const struct Kiara_table_entry *pChoose = NULL;
-       int fps, ret;
-       unsigned char buf[12];
+       int fps, ret = 0;
 
-       if (size >= PSZ_MAX || frames < 5 || frames > 30 ||
-           *compression < 0 || *compression > 3)
-               return -EINVAL;
-       if (size == PSZ_VGA && frames > 15)
+       if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
                return -EINVAL;
+       if (frames < 5)
+               frames = 5;
+       else if (size == PSZ_VGA && frames > 15)
+               frames = 15;
+       else if (frames > 30)
+               frames = 30;
        fps = (frames / 5) - 1;
 
        /* Find a supported framerate with progressively higher compression */
@@ -320,26 +300,18 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
        if (pChoose == NULL || pChoose->alternate == 0)
                return -ENOENT; /* Not supported. */
 
-       PWC_TRACE("Using alternate setting %d.\n", pChoose->alternate);
-
-       /* usb_control_msg won't take staticly allocated arrays as argument?? */
-       memcpy(buf, pChoose->mode, 12);
-
        /* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */
-       ret = send_video_command(pdev, 4 /* pdev->vendpoint */, buf, 12);
+       if (send_to_cam)
+               ret = send_video_command(pdev, 4, pChoose->mode, 12);
        if (ret < 0)
                return ret;
 
-       if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
-               ret = pwc_dec23_init(pdev, pdev->type, buf);
-               if (ret < 0)
-                       return ret;
-       }
+       if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
+               pwc_dec23_init(pdev, pChoose->mode);
 
-       pdev->cmd_len = 12;
-       memcpy(pdev->cmd_buf, buf, 12);
        /* All set and go */
-       pdev->vframes = frames;
+       pdev->pixfmt = pixfmt;
+       pdev->vframes = (fps + 1) * 5;
        pdev->valternate = pChoose->alternate;
        pdev->width  = pwc_image_sizes[size][0];
        pdev->height = pwc_image_sizes[size][1];
@@ -354,22 +326,24 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
 }
 
 int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
-       int frames, int *compression)
+       int pixfmt, int frames, int *compression, int send_to_cam)
 {
        int ret, size;
 
-       PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pdev->pixfmt);
+       PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n",
+                      width, height, frames, pixfmt);
        size = pwc_get_size(pdev, width, height);
        PWC_TRACE("decode_size = %d.\n", size);
 
        if (DEVICE_USE_CODEC1(pdev->type)) {
-               ret = set_video_mode_Nala(pdev, size, frames, compression);
-
+               ret = set_video_mode_Nala(pdev, size, pixfmt, frames,
+                                         compression, send_to_cam);
        } else if (DEVICE_USE_CODEC3(pdev->type)) {
-               ret = set_video_mode_Kiara(pdev, size, frames, compression);
-
+               ret = set_video_mode_Kiara(pdev, size, pixfmt, frames,
+                                          compression, send_to_cam);
        } else {
-               ret = set_video_mode_Timon(pdev, size, frames, compression);
+               ret = set_video_mode_Timon(pdev, size, pixfmt, frames,
+                                          compression, send_to_cam);
        }
        if (ret < 0) {
                PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret);
@@ -436,13 +410,12 @@ unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned i
 int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
 {
        int ret;
-       u8 buf;
 
-       ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
+       ret = recv_control_msg(pdev, request, value, 1);
        if (ret < 0)
                return ret;
 
-       *data = buf;
+       *data = pdev->ctrl_buf[0];
        return 0;
 }
 
@@ -450,7 +423,8 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
 {
        int ret;
 
-       ret = send_control_msg(pdev, request, value, &data, sizeof(data));
+       pdev->ctrl_buf[0] = data;
+       ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 1);
        if (ret < 0)
                return ret;
 
@@ -460,37 +434,34 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
 int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
 {
        int ret;
-       s8 buf;
 
-       ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
+       ret = recv_control_msg(pdev, request, value, 1);
        if (ret < 0)
                return ret;
 
-       *data = buf;
+       *data = ((s8 *)pdev->ctrl_buf)[0];
        return 0;
 }
 
 int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
 {
        int ret;
-       u8 buf[2];
 
-       ret = recv_control_msg(pdev, request, value, buf, sizeof(buf));
+       ret = recv_control_msg(pdev, request, value, 2);
        if (ret < 0)
                return ret;
 
-       *data = (buf[1] << 8) | buf[0];
+       *data = (pdev->ctrl_buf[1] << 8) | pdev->ctrl_buf[0];
        return 0;
 }
 
 int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data)
 {
        int ret;
-       u8 buf[2];
 
-       buf[0] = data & 0xff;
-       buf[1] = data >> 8;
-       ret = send_control_msg(pdev, request, value, buf, sizeof(buf));
+       pdev->ctrl_buf[0] = data & 0xff;
+       pdev->ctrl_buf[1] = data >> 8;
+       ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 2);
        if (ret < 0)
                return ret;
 
@@ -511,7 +482,6 @@ int pwc_button_ctrl(struct pwc_device *pdev, u16 value)
 /* POWER */
 void pwc_camera_power(struct pwc_device *pdev, int power)
 {
-       char buf;
        int r;
 
        if (!pdev->power_save)
@@ -521,13 +491,11 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
                return; /* Not supported by Nala or Timon < release 6 */
 
        if (power)
-               buf = 0x00; /* active */
+               pdev->ctrl_buf[0] = 0x00; /* active */
        else
-               buf = 0xFF; /* power save */
-       r = send_control_msg(pdev,
-               SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER,
-               &buf, sizeof(buf));
-
+               pdev->ctrl_buf[0] = 0xFF; /* power save */
+       r = send_control_msg(pdev, SET_STATUS_CTL,
+               SET_POWER_SAVE_MODE_FORMATTER, pdev->ctrl_buf, 1);
        if (r < 0)
                PWC_ERROR("Failed to power %s camera (%d)\n",
                          power ? "on" : "off", r);
@@ -535,7 +503,6 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
 
 int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
 {
-       unsigned char buf[2];
        int r;
 
        if (pdev->type < 730)
@@ -551,11 +518,11 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
        if (off_value > 0xff)
                off_value = 0xff;
 
-       buf[0] = on_value;
-       buf[1] = off_value;
+       pdev->ctrl_buf[0] = on_value;
+       pdev->ctrl_buf[1] = off_value;
 
        r = send_control_msg(pdev,
-               SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf));
+               SET_STATUS_CTL, LED_FORMATTER, pdev->ctrl_buf, 2);
        if (r < 0)
                PWC_ERROR("Failed to set LED on/off time (%d)\n", r);
 
@@ -565,7 +532,6 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
 #ifdef CONFIG_USB_PWC_DEBUG
 int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
 {
-       unsigned char buf;
        int ret = -1, request;
 
        if (pdev->type < 675)
@@ -575,14 +541,13 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
        else
                request = SENSOR_TYPE_FORMATTER2;
 
-       ret = recv_control_msg(pdev,
-               GET_STATUS_CTL, request, &buf, sizeof(buf));
+       ret = recv_control_msg(pdev, GET_STATUS_CTL, request, 1);
        if (ret < 0)
                return ret;
        if (pdev->type < 675)
-               *sensor = buf | 0x100;
+               *sensor = pdev->ctrl_buf[0] | 0x100;
        else
-               *sensor = buf;
+               *sensor = pdev->ctrl_buf[0];
        return 0;
 }
 #endif
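
The pwc control-message rework above is driven by a USB rule the deleted comments spelled out: usb_control_msg() buffers may be DMA-mapped, so they must be kmalloc'd rather than live on the stack. Instead of allocating on every call (the old kmemdup/kmalloc helpers), the driver now keeps one small pdev->ctrl_buf for the life of the device and funnels all control transfers through it, which is safe because these paths are serialized. A reduced sketch (mydrv names hypothetical):

#include <linux/slab.h>
#include <linux/usb.h>

struct mydrv {
        struct usb_device *udev;
        u8 *ctrl_buf;           /* kmalloc'd once at probe time */
};

/* Read one byte from a vendor control request through the reusable
 * heap buffer; a stack buffer here would violate the USB core's
 * DMA requirements. */
static int mydrv_get_u8(struct mydrv *d, u8 request, u16 value, u8 *out)
{
        int rc = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
                                 request,
                                 USB_DIR_IN | USB_TYPE_VENDOR |
                                 USB_RECIP_DEVICE,
                                 value, 0, d->ctrl_buf, 1,
                                 USB_CTRL_GET_TIMEOUT);

        if (rc < 0)
                return rc;
        *out = d->ctrl_buf[0];
        return 0;
}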
index be0e02cb487f1ff2cba5f32fbccd1826056eb93f..e899036aadf4a4734e0dd611386bf47909fe34bb 100644 (file)
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
-#include "pwc-dec1.h"
+#include "pwc.h"
 
-int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer)
+void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd)
 {
-       struct pwc_dec1_private *pdec;
+       struct pwc_dec1_private *pdec = &pdev->dec1;
 
-       if (pwc->decompress_data == NULL) {
-               pdec = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
-               if (pdec == NULL)
-                       return -ENOMEM;
-               pwc->decompress_data = pdec;
-       }
-       pdec = pwc->decompress_data;
-
-       return 0;
+       pdec->version = pdev->release;
 }
index a57d8601080babf0ca5ed403aef63220f900f2af..c565ef8f52fb3a83e29f5c428744aaf354b16db6 100644 (file)
 #ifndef PWC_DEC1_H
 #define PWC_DEC1_H
 
-#include "pwc.h"
+#include <linux/mutex.h>
+
+struct pwc_device;
 
 struct pwc_dec1_private
 {
        int version;
 };
 
-int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer);
+void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd);
 
 #endif
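
Swapping #include "pwc.h" for a forward declaration breaks an include cycle: with the decoder state now embedded by value in struct pwc_device, pwc.h must include the full pwc-dec1.h, so pwc-dec1.h can only name struct pwc_device without defining it, and that is enough because it only passes pointers. The idiom in isolation (hypothetical names):

/* mydrv-dec.h -- forward-declaration idiom */
#ifndef MYDRV_DEC_H
#define MYDRV_DEC_H

struct mydrv_device;    /* name only; never dereferenced here */

struct mydrv_dec_private {
        int version;
};

/* The full struct mydrv_device definition is needed only by the .c
 * file that implements this, not by includers of this header. */
void mydrv_dec_init(struct mydrv_device *pdev, const unsigned char *cmd);

#endif /* MYDRV_DEC_H */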
index 2c6709112b2f2c4d206e40701505658c73483a38..3792fedff9515e85734200d8ffb22aa1ed15bb93 100644 (file)
@@ -294,22 +294,17 @@ static unsigned char pwc_crop_table[256 + 2*MAX_OUTER_CROP_VALUE];
 
 
 /* If the type or the command change, we rebuild the lookup table */
-int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
+void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd)
 {
        int flags, version, shift, i;
-       struct pwc_dec23_private *pdec;
-
-       if (pwc->decompress_data == NULL) {
-               pdec = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
-               if (pdec == NULL)
-                       return -ENOMEM;
-               pwc->decompress_data = pdec;
-       }
-       pdec = pwc->decompress_data;
+       struct pwc_dec23_private *pdec = &pdev->dec23;
 
        mutex_init(&pdec->lock);
 
-       if (DEVICE_USE_CODEC3(type)) {
+       if (pdec->last_cmd_valid && pdec->last_cmd == cmd[2])
+               return;
+
+       if (DEVICE_USE_CODEC3(pdev->type)) {
                flags = cmd[2] & 0x18;
                if (flags == 8)
                        pdec->nbits = 7;        /* More bits, mean more bits to encode the stream, but better quality */
@@ -356,7 +351,8 @@ int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
                pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255;
 #endif
 
-       return 0;
+       pdec->last_cmd = cmd[2];
+       pdec->last_cmd_valid = 1;
 }
 
 /*
@@ -659,12 +655,12 @@ static void DecompressBand23(struct pwc_dec23_private *pdec,
  * src: raw data
  * dst: image output
  */
-void pwc_dec23_decompress(const struct pwc_device *pwc,
+void pwc_dec23_decompress(struct pwc_device *pdev,
                          const void *src,
                          void *dst)
 {
        int bandlines_left, bytes_per_block;
-       struct pwc_dec23_private *pdec = pwc->decompress_data;
+       struct pwc_dec23_private *pdec = &pdev->dec23;
 
        /* YUV420P image format */
        unsigned char *pout_planar_y;
@@ -674,23 +670,22 @@ void pwc_dec23_decompress(const struct pwc_device *pwc,
 
        mutex_lock(&pdec->lock);
 
-       bandlines_left = pwc->height / 4;
-       bytes_per_block = pwc->width * 4;
-       plane_size = pwc->height * pwc->width;
+       bandlines_left = pdev->height / 4;
+       bytes_per_block = pdev->width * 4;
+       plane_size = pdev->height * pdev->width;
 
        pout_planar_y = dst;
        pout_planar_u = dst + plane_size;
        pout_planar_v = dst + plane_size + plane_size / 4;
 
        while (bandlines_left--) {
-               DecompressBand23(pwc->decompress_data,
-                                src,
+               DecompressBand23(pdec, src,
                                 pout_planar_y, pout_planar_u, pout_planar_v,
-                                pwc->width, pwc->width);
-               src += pwc->vbandlength;
+                                pdev->width, pdev->width);
+               src += pdev->vbandlength;
                pout_planar_y += bytes_per_block;
-               pout_planar_u += pwc->width;
-               pout_planar_v += pwc->width;
+               pout_planar_u += pdev->width;
+               pout_planar_v += pdev->width;
        }
        mutex_unlock(&pdec->lock);
 }
index d64a3c281af6aa0d5fe03471c5cd03d16f010588..c655b1c1e6a9f7d7c7e8ff725672190bb2829e0c 100644 (file)
 #ifndef PWC_DEC23_H
 #define PWC_DEC23_H
 
-#include "pwc.h"
+struct pwc_device;
 
 struct pwc_dec23_private
 {
        struct mutex lock;
 
+       unsigned char last_cmd, last_cmd_valid;
+
   unsigned int scalebits;
   unsigned int nbitsmask, nbits; /* Number of bits of a color in the compressed stream */
 
   unsigned int reservoir;
   unsigned int nbits_in_reservoir;
+
   const unsigned char *stream;
   int temp_colors[16];
 
@@ -51,8 +54,8 @@ struct pwc_dec23_private
 
 };
 
-int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd);
-void pwc_dec23_decompress(const struct pwc_device *pwc,
+void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd);
+void pwc_dec23_decompress(struct pwc_device *pdev,
                          const void *src,
                          void *dst);
 #endif
index 943d37ad0d33dd3983318bbddd0f4d19f185f55b..122fbd0081eb548a3416f2848661cc144066a78b 100644 (file)
@@ -128,18 +128,11 @@ static struct usb_driver pwc_driver = {
 #define MAX_DEV_HINTS  20
 #define MAX_ISOC_ERRORS        20
 
-static int default_fps = 10;
 #ifdef CONFIG_USB_PWC_DEBUG
        int pwc_trace = PWC_DEBUG_LEVEL;
 #endif
 static int power_save = -1;
-static int led_on = 100, led_off; /* defaults to LED that is on while in use */
-static struct {
-       int type;
-       char serial_number[30];
-       int device_node;
-       struct pwc_device *pdev;
-} device_hint[MAX_DEV_HINTS];
+static int leds[2] = { 100, 0 };
 
 /***/
 
@@ -386,8 +379,8 @@ static int pwc_isoc_init(struct pwc_device *pdev)
 retry:
        /* We first try with low compression and then retry with a higher
           compression setting if there is not enough bandwidth. */
-       ret = pwc_set_video_mode(pdev, pdev->width, pdev->height,
-                                pdev->vframes, &compression);
+       ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
+                                pdev->vframes, &compression, 1);
 
        /* Get the current alternate interface, adjust packet size */
        intf = usb_ifnum_to_if(udev, 0);
@@ -597,23 +590,9 @@ int pwc_test_n_set_capt_file(struct pwc_device *pdev, struct file *file)
 static void pwc_video_release(struct v4l2_device *v)
 {
        struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
-       int hint;
-
-       /* search device_hint[] table if we occupy a slot, by any chance */
-       for (hint = 0; hint < MAX_DEV_HINTS; hint++)
-               if (device_hint[hint].pdev == pdev)
-                       device_hint[hint].pdev = NULL;
-
-       /* Free intermediate decompression buffer & tables */
-       if (pdev->decompress_data != NULL) {
-               PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n",
-                                pdev->decompress_data);
-               kfree(pdev->decompress_data);
-               pdev->decompress_data = NULL;
-       }
 
        v4l2_ctrl_handler_free(&pdev->ctrl_handler);
-
+       kfree(pdev->ctrl_buf);
        kfree(pdev);
 }
 
@@ -758,7 +737,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
 
        /* Turn on camera and set LEDS on */
        pwc_camera_power(pdev, 1);
-       pwc_set_leds(pdev, led_on, led_off);
+       pwc_set_leds(pdev, leds[0], leds[1]);
 
        r = pwc_isoc_init(pdev);
        if (r) {
@@ -813,10 +792,9 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        struct usb_device *udev = interface_to_usbdev(intf);
        struct pwc_device *pdev = NULL;
        int vendor_id, product_id, type_id;
-       int hint, rc;
+       int rc;
        int features = 0;
        int compression = 0;
-       int video_nr = -1; /* default: use next available device */
        int my_power_save = power_save;
        char serial_number[30], *name;
 
@@ -1076,7 +1054,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
                return -ENOMEM;
        }
        pdev->type = type_id;
-       pdev->vframes = default_fps;
        pdev->features = features;
        pwc_construct(pdev); /* set min/max sizes correct */
 
@@ -1107,24 +1084,14 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
        PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
 
-       /* Now search device_hint[] table for a match, so we can hint a node number. */
-       for (hint = 0; hint < MAX_DEV_HINTS; hint++) {
-               if (((device_hint[hint].type == -1) || (device_hint[hint].type == pdev->type)) &&
-                    (device_hint[hint].pdev == NULL)) {
-                       /* so far, so good... try serial number */
-                       if ((device_hint[hint].serial_number[0] == '*') || !strcmp(device_hint[hint].serial_number, serial_number)) {
-                               /* match! */
-                               video_nr = device_hint[hint].device_node;
-                               PWC_DEBUG_PROBE("Found hint, will try to register as /dev/video%d\n", video_nr);
-                               break;
-                       }
-               }
+       /* Allocate USB command buffers */
+       pdev->ctrl_buf = kmalloc(sizeof(pdev->cmd_buf), GFP_KERNEL);
+       if (!pdev->ctrl_buf) {
+               PWC_ERROR("Oops, could not allocate memory for pwc_device.\n");
+               rc = -ENOMEM;
+               goto err_free_mem;
        }
 
-       /* occupy slot */
-       if (hint < MAX_DEV_HINTS)
-               device_hint[hint].pdev = pdev;
-
 #ifdef CONFIG_USB_PWC_DEBUG
        /* Query sensor type */
        if (pwc_get_cmos_sensor(pdev, &rc) >= 0) {
@@ -1138,8 +1105,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        pwc_set_leds(pdev, 0, 0);
 
        /* Setup initial videomode */
-       rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, pdev->vframes,
-                               &compression);
+       rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT,
+                               V4L2_PIX_FMT_YUV420, 30, &compression, 1);
        if (rc)
                goto err_free_mem;
 
@@ -1164,7 +1131,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler;
        pdev->vdev.v4l2_dev = &pdev->v4l2_dev;
 
-       rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
+       rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
        if (rc < 0) {
                PWC_ERROR("Failed to register as video device (%d).\n", rc);
                goto err_unregister_v4l2_dev;
@@ -1207,8 +1174,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
 err_free_controls:
        v4l2_ctrl_handler_free(&pdev->ctrl_handler);
 err_free_mem:
-       if (hint < MAX_DEV_HINTS)
-               device_hint[hint].pdev = NULL;
+       kfree(pdev->ctrl_buf);
        kfree(pdev);
        return rc;
 }
@@ -1243,27 +1209,19 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
  * Initialization code & module stuff
  */
 
-static int fps;
-static int leds[2] = { -1, -1 };
 static unsigned int leds_nargs;
-static char *dev_hint[MAX_DEV_HINTS];
-static unsigned int dev_hint_nargs;
 
-module_param(fps, int, 0444);
 #ifdef CONFIG_USB_PWC_DEBUG
 module_param_named(trace, pwc_trace, int, 0644);
 #endif
 module_param(power_save, int, 0644);
 module_param_array(leds, int, &leds_nargs, 0444);
-module_param_array(dev_hint, charp, &dev_hint_nargs, 0444);
 
-MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
 #ifdef CONFIG_USB_PWC_DEBUG
 MODULE_PARM_DESC(trace, "For debugging purposes");
 #endif
 MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off");
 MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
-MODULE_PARM_DESC(dev_hint, "Device node hints");
 
 MODULE_DESCRIPTION("Philips & OEM USB webcam driver");
 MODULE_AUTHOR("Luc Saillard <luc@saillard.org>");
@@ -1273,114 +1231,13 @@ MODULE_VERSION( PWC_VERSION );
 
 static int __init usb_pwc_init(void)
 {
-       int i;
-
-#ifdef CONFIG_USB_PWC_DEBUG
-       PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n");
-       PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n");
-       PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n");
-       PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n");
-
-       if (pwc_trace >= 0) {
-               PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
-       }
-#endif
-
-       if (fps) {
-               if (fps < 4 || fps > 30) {
-                       PWC_ERROR("Framerate out of bounds (4-30).\n");
-                       return -EINVAL;
-               }
-               default_fps = fps;
-               PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps);
-       }
-
-       if (leds[0] >= 0)
-               led_on = leds[0];
-       if (leds[1] >= 0)
-               led_off = leds[1];
-
-       /* Big device node whoopla. Basically, it allows you to assign a
-          device node (/dev/videoX) to a camera, based on its type
-          & serial number. The format is [type[.serialnumber]:]node.
-
-          Any camera that isn't matched by these rules gets the next
-          available free device node.
-        */
-       for (i = 0; i < MAX_DEV_HINTS; i++) {
-               char *s, *colon, *dot;
-
-               /* This loop also initializes the array */
-               device_hint[i].pdev = NULL;
-               s = dev_hint[i];
-               if (s != NULL && *s != '\0') {
-                       device_hint[i].type = -1; /* wildcard */
-                       strcpy(device_hint[i].serial_number, "*");
-
-                       /* parse string: chop at ':' & '/' */
-                       colon = dot = s;
-                       while (*colon != '\0' && *colon != ':')
-                               colon++;
-                       while (*dot != '\0' && *dot != '.')
-                               dot++;
-                       /* Few sanity checks */
-                       if (*dot != '\0' && dot > colon) {
-                               PWC_ERROR("Malformed camera hint: the colon must be after the dot.\n");
-                               return -EINVAL;
-                       }
-
-                       if (*colon == '\0') {
-                               /* No colon */
-                               if (*dot != '\0') {
-                                       PWC_ERROR("Malformed camera hint: no colon + device node given.\n");
-                                       return -EINVAL;
-                               }
-                               else {
-                                       /* No type or serial number specified, just a number. */
-                                       device_hint[i].device_node =
-                                               simple_strtol(s, NULL, 10);
-                               }
-                       }
-                       else {
-                               /* There's a colon, so we have at least a type and a device node */
-                               device_hint[i].type =
-                                       simple_strtol(s, NULL, 10);
-                               device_hint[i].device_node =
-                                       simple_strtol(colon + 1, NULL, 10);
-                               if (*dot != '\0') {
-                                       /* There's a serial number as well */
-                                       int k;
-
-                                       dot++;
-                                       k = 0;
-                                       while (*dot != ':' && k < 29) {
-                                               device_hint[i].serial_number[k++] = *dot;
-                                               dot++;
-                                       }
-                                       device_hint[i].serial_number[k] = '\0';
-                               }
-                       }
-                       PWC_TRACE("device_hint[%d]:\n", i);
-                       PWC_TRACE("  type    : %d\n", device_hint[i].type);
-                       PWC_TRACE("  serial# : %s\n", device_hint[i].serial_number);
-                       PWC_TRACE("  node    : %d\n", device_hint[i].device_node);
-               }
-               else
-                       device_hint[i].type = 0; /* not filled */
-       } /* ..for MAX_DEV_HINTS */
-
-       PWC_DEBUG_PROBE("Registering driver at address 0x%p.\n", &pwc_driver);
        return usb_register(&pwc_driver);
 }
 
 static void __exit usb_pwc_exit(void)
 {
-       PWC_DEBUG_MODULE("Deregistering driver.\n");
        usb_deregister(&pwc_driver);
-       PWC_INFO("Philips webcam module removed.\n");
 }
 
 module_init(usb_pwc_init);
 module_exit(usb_pwc_exit);
-
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
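
The hunks above drop the fps and dev_hint parameters while keeping trace, power_save and leds. For reference, a stripped-down sketch of the module-parameter plumbing that remains, an int knob plus an int array, as set with e.g. "modprobe pwc power_save=1 leds=500,500"; this is illustrative, not the driver source:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int power_save;
module_param(power_save, int, 0644);
MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off");

static int leds[2] = { -1, -1 };
static unsigned int leds_nargs;
module_param_array(leds, int, &leds_nargs, 0444);
MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");

MODULE_LICENSE("GPL");
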
index 23a55b5814fc494ea2f2c95e8fb37b4142769726..9be5adffa874683929ec57d41e17eeb36bf600aa 100644 (file)
@@ -90,5 +90,4 @@ void pwc_construct(struct pwc_device *pdev)
                pdev->frame_header_size = 0;
                pdev->frame_trailer_size = 0;
        }
-       pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
 }
index 80e25842e84a218dc3dada13e96ced27e68c3b39..f495eeb5403aaff31dd66ba582a07d86297752b9 100644 (file)
@@ -493,16 +493,11 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
                        (pixelformat>>24)&255);
 
        ret = pwc_set_video_mode(pdev, f->fmt.pix.width, f->fmt.pix.height,
-                                pdev->vframes, &compression);
+                                pixelformat, 30, &compression, 0);
 
        PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret);
 
-       if (ret == 0) {
-               pdev->pixfmt = pixelformat;
-               pwc_vidioc_fill_fmt(f, pdev->width, pdev->height,
-                                   pdev->pixfmt);
-       }
-
+       pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
 leave:
        mutex_unlock(&pdev->udevlock);
        return ret;
@@ -777,33 +772,33 @@ static int pwc_set_autogain_expo(struct pwc_device *pdev)
 static int pwc_set_motor(struct pwc_device *pdev)
 {
        int ret;
-       u8 buf[4];
 
-       buf[0] = 0;
+       pdev->ctrl_buf[0] = 0;
        if (pdev->motor_pan_reset->is_new)
-               buf[0] |= 0x01;
+               pdev->ctrl_buf[0] |= 0x01;
        if (pdev->motor_tilt_reset->is_new)
-               buf[0] |= 0x02;
+               pdev->ctrl_buf[0] |= 0x02;
        if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) {
                ret = send_control_msg(pdev, SET_MPT_CTL,
-                                      PT_RESET_CONTROL_FORMATTER, buf, 1);
+                                      PT_RESET_CONTROL_FORMATTER,
+                                      pdev->ctrl_buf, 1);
                if (ret < 0)
                        return ret;
        }
 
-       memset(buf, 0, sizeof(buf));
+       memset(pdev->ctrl_buf, 0, 4);
        if (pdev->motor_pan->is_new) {
-               buf[0] = pdev->motor_pan->val & 0xFF;
-               buf[1] = (pdev->motor_pan->val >> 8);
+               pdev->ctrl_buf[0] = pdev->motor_pan->val & 0xFF;
+               pdev->ctrl_buf[1] = (pdev->motor_pan->val >> 8);
        }
        if (pdev->motor_tilt->is_new) {
-               buf[2] = pdev->motor_tilt->val & 0xFF;
-               buf[3] = (pdev->motor_tilt->val >> 8);
+               pdev->ctrl_buf[2] = pdev->motor_tilt->val & 0xFF;
+               pdev->ctrl_buf[3] = (pdev->motor_tilt->val >> 8);
        }
        if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) {
                ret = send_control_msg(pdev, SET_MPT_CTL,
                                       PT_RELATIVE_CONTROL_FORMATTER,
-                                      buf, sizeof(buf));
+                                      pdev->ctrl_buf, 4);
                if (ret < 0)
                        return ret;
        }
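
The rewritten pwc_set_motor() packs each signed 16-bit pan/tilt value little-endian into the shared ctrl_buf. A self-contained sketch of that byte packing, with values invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Low byte first, then high byte, as the camera expects. */
static void pack_pan_tilt(uint8_t buf[4], int16_t pan, int16_t tilt)
{
	buf[0] = pan & 0xFF;
	buf[1] = (uint16_t)pan >> 8;
	buf[2] = tilt & 0xFF;
	buf[3] = (uint16_t)tilt >> 8;
}

int main(void)
{
	uint8_t buf[4];

	pack_pan_tilt(buf, 0x0102, -1);
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;	/* prints: 02 01 ff ff */
}
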
@@ -1094,6 +1089,63 @@ static int pwc_enum_frameintervals(struct file *file, void *fh,
        return 0;
 }
 
+static int pwc_g_parm(struct file *file, void *fh,
+                     struct v4l2_streamparm *parm)
+{
+       struct pwc_device *pdev = video_drvdata(file);
+
+       if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               return -EINVAL;
+
+       memset(parm, 0, sizeof(*parm));
+
+       parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       parm->parm.capture.readbuffers = MIN_FRAMES;
+       parm->parm.capture.capability |= V4L2_CAP_TIMEPERFRAME;
+       parm->parm.capture.timeperframe.denominator = pdev->vframes;
+       parm->parm.capture.timeperframe.numerator = 1;
+
+       return 0;
+}
+
+static int pwc_s_parm(struct file *file, void *fh,
+                     struct v4l2_streamparm *parm)
+{
+       struct pwc_device *pdev = video_drvdata(file);
+       int compression = 0;
+       int ret, fps;
+
+       if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+           parm->parm.capture.timeperframe.numerator == 0)
+               return -EINVAL;
+
+       if (pwc_test_n_set_capt_file(pdev, file))
+               return -EBUSY;
+
+       fps = parm->parm.capture.timeperframe.denominator /
+             parm->parm.capture.timeperframe.numerator;
+
+       mutex_lock(&pdev->udevlock);
+       if (!pdev->udev) {
+               ret = -ENODEV;
+               goto leave;
+       }
+
+       if (pdev->iso_init) {
+               ret = -EBUSY;
+               goto leave;
+       }
+
+       ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
+                                fps, &compression, 0);
+
+       pwc_g_parm(file, fh, parm);
+
+leave:
+       mutex_unlock(&pdev->udevlock);
+       return ret;
+}
+
 static int pwc_log_status(struct file *file, void *priv)
 {
        struct pwc_device *pdev = video_drvdata(file);
@@ -1120,4 +1172,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
        .vidioc_log_status                  = pwc_log_status,
        .vidioc_enum_framesizes             = pwc_enum_framesizes,
        .vidioc_enum_frameintervals         = pwc_enum_frameintervals,
+       .vidioc_g_parm                      = pwc_g_parm,
+       .vidioc_s_parm                      = pwc_s_parm,
 };
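
With vidioc_g_parm/vidioc_s_parm wired up, the frame rate becomes settable from userspace rather than through the removed fps module parameter. A minimal sketch of the caller side, assuming /dev/video0 as an example node:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_streamparm parm;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&parm, 0, sizeof(parm));
	parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	parm.parm.capture.timeperframe.numerator = 1;
	parm.parm.capture.timeperframe.denominator = 15;	/* ask for 15 fps */

	/* The driver rounds to what the hardware supports and reports back. */
	if (ioctl(fd, VIDIOC_S_PARM, &parm) == 0)
		printf("got %u/%u seconds per frame\n",
		       parm.parm.capture.timeperframe.numerator,
		       parm.parm.capture.timeperframe.denominator);
	return 0;
}
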
index 47c518fef179949be25f50c51eac12f95ea7c819..e4d4d711dd1f4df7182d20bcc3591fda9e32f628 100644 (file)
@@ -44,6 +44,8 @@
 #ifdef CONFIG_USB_PWC_INPUT_EVDEV
 #include <linux/input.h>
 #endif
+#include "pwc-dec1.h"
+#include "pwc-dec23.h"
 
 /* Version block */
 #define PWC_VERSION    "10.0.15"
 #define DEVICE_USE_CODEC3(x) ((x)>=700)
 #define DEVICE_USE_CODEC23(x) ((x)>=675)
 
-/* from pwc-dec.h */
-#define PWCX_FLAG_PLANAR        0x0001
-
 /* Request types: video */
 #define SET_LUM_CTL                    0x01
 #define GET_LUM_CTL                    0x02
@@ -248,8 +247,8 @@ struct pwc_device
        char vmirror;           /* for ToUCaM series */
        char power_save;        /* Do powersaving for this cam */
 
-       int cmd_len;
        unsigned char cmd_buf[13];
+       unsigned char *ctrl_buf;
 
        struct urb *urbs[MAX_ISO_BUFS];
        char iso_init;
@@ -272,7 +271,10 @@ struct pwc_device
        int frame_total_size;   /* including header & trailer */
        int drop_frames;
 
-       void *decompress_data;  /* private data for decompression engine */
+       union { /* private data for decompression engine */
+               struct pwc_dec1_private dec1;
+               struct pwc_dec23_private dec23;
+       };
 
        /*
         * We have an 'image' and a 'view', where 'image' is the fixed-size img
@@ -364,7 +366,7 @@ void pwc_construct(struct pwc_device *pdev);
 /** Functions in pwc-ctrl.c */
 /* Request a certain video mode. Returns < 0 if not possible */
 extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
-                             int frames, int *compression);
+       int pixfmt, int frames, int *compression, int send_to_cam);
 extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size);
 extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value);
 extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor);
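
Replacing the decompress_data pointer with an anonymous union of the two decoder states removes a separate allocation; the union costs only its larger member, since only one decoder is ever active. A standalone sketch with invented struct contents:

#include <stdio.h>

struct dec1_private_sketch { int scratch[4]; };
struct dec23_private_sketch { int table[64]; };

struct device_sketch {
	union {		/* only one decoder is active at a time */
		struct dec1_private_sketch dec1;
		struct dec23_private_sketch dec23;
	};
};

int main(void)
{
	printf("union costs %zu bytes, not %zu + %zu\n",
	       sizeof(struct device_sketch),
	       sizeof(struct dec1_private_sketch),
	       sizeof(struct dec23_private_sketch));
	return 0;
}
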
index 510cfab477fff48c57ddd2d9151b53ec144f91ea..a9e9653beeb4581a90ea34eed1b3d96eab2aae5d 100644 (file)
@@ -693,7 +693,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
                        mf->code = 0;
                        continue;
                }
-               if (mf->width != tfmt->width || mf->width != tfmt->width) {
+               if (mf->width != tfmt->width || mf->height != tfmt->height) {
                        u32 fcc = ffmt->fourcc;
                        tfmt->width  = mf->width;
                        tfmt->height = mf->height;
@@ -702,7 +702,8 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
                                               NULL, &fcc, FIMC_SD_PAD_SOURCE);
                        if (ffmt && ffmt->mbus_code)
                                mf->code = ffmt->mbus_code;
-                       if (mf->width != tfmt->width || mf->width != tfmt->width)
+                       if (mf->width != tfmt->width ||
+                           mf->height != tfmt->height)
                                continue;
                        tfmt->code = mf->code;
                }
@@ -710,7 +711,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
                        ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt);
 
                if (mf->code == tfmt->code &&
-                   mf->width == tfmt->width && mf->width == tfmt->width)
+                   mf->width == tfmt->width && mf->height == tfmt->height)
                        break;
        }
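
The three hunks in this file fix the same copy-paste slip: width compared twice, height never. A helper makes the intent harder to get wrong; this sketch, with invented names, shows the shape of the corrected check:

#include <stdbool.h>

struct dim { unsigned int width, height; };

/* Compare both axes once, in one place. */
static bool dim_matches(const struct dim *a, const struct dim *b)
{
	return a->width == b->width && a->height == b->height;
}

int main(void)
{
	struct dim want = { 640, 480 }, got = { 640, 360 };

	return dim_matches(&want, &got) ? 0 : 1;	/* exits 1: heights differ */
}
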
 
index f5cbb8a4c540fc7d6beac2891c36aa812f05f281..81bcbb9492ea0a6d425375a57f3600837082d773 100644 (file)
@@ -848,11 +848,11 @@ int fimc_ctrls_create(struct fimc_ctx *ctx)
        v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
 
        ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
-                                    V4L2_CID_HFLIP, 0, 1, 1, 0);
+                                       V4L2_CID_ROTATE, 0, 270, 90, 0);
        ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
-                                   V4L2_CID_VFLIP, 0, 1, 1, 0);
+                                       V4L2_CID_HFLIP, 0, 1, 1, 0);
        ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
-                                   V4L2_CID_ROTATE, 0, 270, 90, 0);
+                                       V4L2_CID_VFLIP, 0, 1, 1, 0);
        if (variant->has_alpha)
                ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
                                    &fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
index 615c862f0360ef20061e28701d1a99f460d0a7e1..8ea4ee116e46900224a528fa415d300d2c43196b 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/types.h>
 #include <linux/slab.h>
-#include <linux/version.h>
 #include <media/v4l2-ctrls.h>
 #include <media/media-device.h>
 
index c40b0dde188353eb9dba6b9f88e894c7f586d4c3..febaa673d3635c019011ff174c70eee17818de17 100644 (file)
@@ -184,6 +184,7 @@ static int g2d_s_ctrl(struct v4l2_ctrl *ctrl)
                        ctx->rop = ROP4_INVERT;
                else
                        ctx->rop = ROP4_COPY;
+               break;
        default:
                v4l2_err(&ctx->dev->v4l2_dev, "unknown control\n");
                return -EINVAL;
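
The added break keeps the last case from falling through into default, which would have rejected a valid control. A runnable sketch of the failure mode the one-liner fixes:

#include <stdio.h>

int main(void)
{
	int ctrl = 2;	/* stands in for the ROP control id */

	switch (ctrl) {
	case 2:
		printf("ROP configured\n");
		break;	/* without this break, execution falls into default */
	default:
		printf("unknown control\n");
		return 1;
	}
	return 0;
}
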
index f841a3e9845c6f6a00028884beedd27595a0da1e..1105a8749c8b23922c979adf2abe7dee222fe14c 100644 (file)
@@ -989,9 +989,10 @@ static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
  * ============================================================================
  */
 
-static int s5p_jpeg_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
-                               unsigned int *nplanes, unsigned int sizes[],
-                               void *alloc_ctxs[])
+static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
+                          const struct v4l2_format *fmt,
+                          unsigned int *nbuffers, unsigned int *nplanes,
+                          unsigned int sizes[], void *alloc_ctxs[])
 {
        struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
        struct s5p_jpeg_q_data *q_data = NULL;
index e43e128baf5f7c3ec7051337a1574821e9e863a2..83fe461af263529df7502f020886f58d63b68c1c 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/version.h>
 #include <linux/videodev2.h>
 #include <linux/workqueue.h>
 #include <media/videobuf2-core.h>
@@ -475,7 +474,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
                        ctx->mv_size = 0;
                }
                ctx->dpb_count = s5p_mfc_get_dpb_count();
-               if (ctx->img_width == 0 || ctx->img_width == 0)
+               if (ctx->img_width == 0 || ctx->img_height == 0)
                        ctx->state = MFCINST_ERROR;
                else
                        ctx->state = MFCINST_HEAD_PARSED;
index 844a4d7797bc0be85ff01eae08837d5d49dbee69..c25ec022d2678f734e46b1f1f3b5b0d496150c8d 100644 (file)
@@ -165,7 +165,7 @@ static struct mfc_control controls[] = {
                .maximum = 32,
                .step = 1,
                .default_value = 1,
-               .flags = V4L2_CTRL_FLAG_VOLATILE,
+               .is_volatile = 1,
        },
 };
 
index 971591d6450fccc5a7164adb94cb40e3404fb635..5b72da5ce4183c15bf98dd570762f2f8f2ffb9de 100644 (file)
@@ -269,8 +269,6 @@ struct saa7164_board saa7164_boards[] = {
                .portb          = SAA7164_MPEG_DVB,
                .portc          = SAA7164_MPEG_ENCODER,
                .portd          = SAA7164_MPEG_ENCODER,
-               .portc          = SAA7164_MPEG_ENCODER,
-               .portd          = SAA7164_MPEG_ENCODER,
                .porte          = SAA7164_MPEG_VBI,
                .portf          = SAA7164_MPEG_VBI,
                .chiprev        = SAA7164_CHIP_REV3,
@@ -333,8 +331,6 @@ struct saa7164_board saa7164_boards[] = {
                .portd          = SAA7164_MPEG_ENCODER,
                .porte          = SAA7164_MPEG_VBI,
                .portf          = SAA7164_MPEG_VBI,
-               .porte          = SAA7164_MPEG_VBI,
-               .portf          = SAA7164_MPEG_VBI,
                .chiprev        = SAA7164_CHIP_REV3,
                .unit           = {{
                        .id             = 0x28,
index 0a2d75f0406631f582b17b50aaf6c4cd11e074df..4ed1c7c28ae704b4f1e843cb8db0ae7b74981c02 100644 (file)
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
        spin_unlock_irq(&fh->queue_lock);
 
        desc = fh->chan->device->device_prep_slave_sg(fh->chan,
-               buf->sg, sg_elems, DMA_FROM_DEVICE,
+               buf->sg, sg_elems, DMA_DEV_TO_MEM,
                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
        if (!desc) {
                spin_lock_irq(&fh->queue_lock);
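
This hunk and the mmc/nand ones below all track the dmaengine API split: dma_map_sg() keeps taking enum dma_data_direction (the CPU/cache view of the buffers), while slave descriptor preparation now takes enum dma_transfer_direction (DMA_MEM_TO_DEV or DMA_DEV_TO_MEM, i.e. who drives the transfer). A hedged in-kernel sketch of the pairing for a receive, with an invented helper name:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Map with the data direction, prepare with the transfer direction. */
static struct dma_async_tx_descriptor *
prep_rx_sketch(struct dma_chan *chan, struct scatterlist *sg, int sg_len)
{
	int nents;

	nents = dma_map_sg(chan->device->dev, sg, sg_len, DMA_FROM_DEVICE);
	if (nents <= 0)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sg, nents,
			DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
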
index 129f135d5a5fe1deffaa12fdc81611bd2ecd12d4..c096b3f742003bcbcfc40df79b49a2728f8b9539 100644 (file)
@@ -374,7 +374,7 @@ static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev)
 }
 #endif
 
-static bool check_firmware(struct usb_device *udev, int *down_firmware)
+static int check_firmware(struct usb_device *udev, int *down_firmware)
 {
        void *buf;
        int ret;
@@ -398,7 +398,7 @@ static bool check_firmware(struct usb_device *udev, int *down_firmware)
                *down_firmware = 1;
                return firmware_download(udev);
        }
-       return ret;
+       return 0;
 }
 
 static int poseidon_probe(struct usb_interface *interface,
index da1f4c2d2d4b8d217cd2e3dafc716b88a25ef762..cccd42be718ae7cc25d2b21189f9880528f38225 100644 (file)
@@ -465,8 +465,8 @@ const char *v4l2_ctrl_get_name(u32 id)
        case V4L2_CID_CHROMA_GAIN:              return "Chroma Gain";
        case V4L2_CID_ILLUMINATORS_1:           return "Illuminator 1";
        case V4L2_CID_ILLUMINATORS_2:           return "Illuminator 2";
-       case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:  return "Minimum Number of Capture Buffers";
-       case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:   return "Minimum Number of Output Buffers";
+       case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:  return "Min Number of Capture Buffers";
+       case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:   return "Min Number of Output Buffers";
        case V4L2_CID_ALPHA_COMPONENT:          return "Alpha Component";
 
        /* MPEG controls */
@@ -506,25 +506,25 @@ const char *v4l2_ctrl_get_name(u32 id)
        case V4L2_CID_MPEG_VIDEO_MUTE_YUV:      return "Video Mute YUV";
        case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:       return "Decoder Slice Interface";
        case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:  return "MPEG4 Loop Filter Enable";
-       case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:       return "The Number of Intra Refresh MBs";
+       case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:       return "Number of Intra Refresh MBs";
        case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:               return "Frame Level Rate Control Enable";
        case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:                  return "H264 MB Level Rate Control";
        case V4L2_CID_MPEG_VIDEO_HEADER_MODE:                   return "Sequence Header Mode";
-       case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC:                   return "The Max Number of Reference Picture";
+       case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC:                   return "Max Number of Reference Pics";
        case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP:               return "H263 I-Frame QP Value";
-       case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:               return "H263 P frame QP Value";
-       case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:               return "H263 B frame QP Value";
+       case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:               return "H263 P-Frame QP Value";
+       case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:               return "H263 B-Frame QP Value";
        case V4L2_CID_MPEG_VIDEO_H263_MIN_QP:                   return "H263 Minimum QP Value";
        case V4L2_CID_MPEG_VIDEO_H263_MAX_QP:                   return "H263 Maximum QP Value";
        case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:               return "H264 I-Frame QP Value";
-       case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:               return "H264 P frame QP Value";
-       case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:               return "H264 B frame QP Value";
+       case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:               return "H264 P-Frame QP Value";
+       case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:               return "H264 B-Frame QP Value";
        case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:                   return "H264 Maximum QP Value";
        case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:                   return "H264 Minimum QP Value";
        case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:            return "H264 8x8 Transform Enable";
        case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:                 return "H264 CPB Buffer Size";
-       case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:             return "H264 Entorpy Mode";
-       case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:                 return "H264 I Period";
+       case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:             return "H264 Entropy Mode";
+       case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:                 return "H264 I-Frame Period";
        case V4L2_CID_MPEG_VIDEO_H264_LEVEL:                    return "H264 Level";
        case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:        return "H264 Loop Filter Alpha Offset";
        case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:         return "H264 Loop Filter Beta Offset";
@@ -535,16 +535,16 @@ const char *v4l2_ctrl_get_name(u32 id)
        case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:           return "Aspect Ratio VUI Enable";
        case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:              return "VUI Aspect Ratio IDC";
        case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:              return "MPEG4 I-Frame QP Value";
-       case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:              return "MPEG4 P frame QP Value";
-       case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:              return "MPEG4 B frame QP Value";
+       case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:              return "MPEG4 P-Frame QP Value";
+       case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:              return "MPEG4 B-Frame QP Value";
        case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP:                  return "MPEG4 Minimum QP Value";
        case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP:                  return "MPEG4 Maximum QP Value";
        case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:                   return "MPEG4 Level";
        case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:                 return "MPEG4 Profile";
        case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:                    return "Quarter Pixel Search Enable";
-       case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:         return "The Maximum Bytes Per Slice";
-       case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:            return "The Number of MB in a Slice";
-       case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:              return "The Slice Partitioning Method";
+       case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:         return "Maximum Bytes in a Slice";
+       case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:            return "Number of MBs in a Slice";
+       case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:              return "Slice Partitioning Method";
        case V4L2_CID_MPEG_VIDEO_VBV_SIZE:                      return "VBV Buffer Size";
 
        /* CAMERA controls */
@@ -580,7 +580,7 @@ const char *v4l2_ctrl_get_name(u32 id)
        case V4L2_CID_AUDIO_LIMITER_ENABLED:    return "Audio Limiter Feature Enabled";
        case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
        case V4L2_CID_AUDIO_LIMITER_DEVIATION:  return "Audio Limiter Deviation";
-       case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Feature Enabled";
+       case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
        case V4L2_CID_AUDIO_COMPRESSION_GAIN:   return "Audio Compression Gain";
        case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
        case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
@@ -588,24 +588,24 @@ const char *v4l2_ctrl_get_name(u32 id)
        case V4L2_CID_PILOT_TONE_ENABLED:       return "Pilot Tone Feature Enabled";
        case V4L2_CID_PILOT_TONE_DEVIATION:     return "Pilot Tone Deviation";
        case V4L2_CID_PILOT_TONE_FREQUENCY:     return "Pilot Tone Frequency";
-       case V4L2_CID_TUNE_PREEMPHASIS:         return "Pre-emphasis settings";
+       case V4L2_CID_TUNE_PREEMPHASIS:         return "Pre-Emphasis";
        case V4L2_CID_TUNE_POWER_LEVEL:         return "Tune Power Level";
        case V4L2_CID_TUNE_ANTENNA_CAPACITOR:   return "Tune Antenna Capacitor";
 
        /* Flash controls */
-       case V4L2_CID_FLASH_CLASS:              return "Flash controls";
-       case V4L2_CID_FLASH_LED_MODE:           return "LED mode";
-       case V4L2_CID_FLASH_STROBE_SOURCE:      return "Strobe source";
+       case V4L2_CID_FLASH_CLASS:              return "Flash Controls";
+       case V4L2_CID_FLASH_LED_MODE:           return "LED Mode";
+       case V4L2_CID_FLASH_STROBE_SOURCE:      return "Strobe Source";
        case V4L2_CID_FLASH_STROBE:             return "Strobe";
-       case V4L2_CID_FLASH_STROBE_STOP:        return "Stop strobe";
-       case V4L2_CID_FLASH_STROBE_STATUS:      return "Strobe status";
-       case V4L2_CID_FLASH_TIMEOUT:            return "Strobe timeout";
-       case V4L2_CID_FLASH_INTENSITY:          return "Intensity, flash mode";
-       case V4L2_CID_FLASH_TORCH_INTENSITY:    return "Intensity, torch mode";
-       case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, indicator";
+       case V4L2_CID_FLASH_STROBE_STOP:        return "Stop Strobe";
+       case V4L2_CID_FLASH_STROBE_STATUS:      return "Strobe Status";
+       case V4L2_CID_FLASH_TIMEOUT:            return "Strobe Timeout";
+       case V4L2_CID_FLASH_INTENSITY:          return "Intensity, Flash Mode";
+       case V4L2_CID_FLASH_TORCH_INTENSITY:    return "Intensity, Torch Mode";
+       case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
        case V4L2_CID_FLASH_FAULT:              return "Faults";
        case V4L2_CID_FLASH_CHARGE:             return "Charge";
-       case V4L2_CID_FLASH_READY:              return "Ready to strobe";
+       case V4L2_CID_FLASH_READY:              return "Ready to Strobe";
 
        default:
                return NULL;
index 77feeb67e2db389334e186299c882ce4fd87e847..3f623859a337126aa035df7c00817a6ad5da6fee 100644 (file)
@@ -1871,6 +1871,7 @@ static long __video_do_ioctl(struct file *file,
        case VIDIOC_S_FREQUENCY:
        {
                struct v4l2_frequency *p = arg;
+               enum v4l2_tuner_type type;
 
                if (!ops->vidioc_s_frequency)
                        break;
@@ -1878,9 +1879,14 @@ static long __video_do_ioctl(struct file *file,
                        ret = ret_prio;
                        break;
                }
+               type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+                       V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
                dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
                                p->tuner, p->type, p->frequency);
-               ret = ops->vidioc_s_frequency(file, fh, p);
+               if (p->type != type)
+                       ret = -EINVAL;
+               else
+                       ret = ops->vidioc_s_frequency(file, fh, p);
                break;
        }
        case VIDIOC_G_SLICED_VBI_CAP:
index f6d26419445ee8728367fe925ed3f80d2d484092..4c09ab781ec3a3cbf503f2626c64b449e94dabf3 100644 (file)
@@ -1958,7 +1958,6 @@ static int zoran_g_fbuf(struct file *file, void *__fh,
        mutex_unlock(&zr->resource_lock);
        fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
        fb->fmt.field = V4L2_FIELD_INTERLACED;
-       fb->flags = V4L2_FBUF_FLAG_OVERLAY;
        fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
 
        return 0;
index eb5cd28bc6d8d7a1917f7f15d82b267b692315f8..a2d25e4857e31387fc0457b5a47efc4c1e438bd0 100644 (file)
@@ -513,7 +513,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
         * transaction, and then put it under external control
         */
        memset(&config, 0, sizeof(config));
-       config.direction = DMA_TO_DEVICE;
+       config.direction = DMA_MEM_TO_DEV;
        config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
        ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
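
The same enum change reaches struct dma_slave_config, as in the hunk above. A sketch of configuring a channel for memory-to-device transfers; the FIFO address and burst size are placeholders, not values from this driver:

#include <linux/dmaengine.h>
#include <linux/string.h>

static int config_tx_sketch(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct dma_slave_config config;

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;	/* transfer direction, not data direction */
	config.dst_addr = fifo_addr;		/* device FIFO bus address (placeholder) */
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_maxburst = 16;		/* placeholder burst size */

	return dmaengine_slave_config(chan, &config);
}
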
index a7ee5027146528aafc6b18fa8c2e55f32e32a174..fcfe1eb5acc8f421a1e10a3c387c3c139171ef4d 100644 (file)
@@ -823,6 +823,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
        struct scatterlist              *sg;
        unsigned int                    i;
        enum dma_data_direction         direction;
+       enum dma_transfer_direction     slave_dirn;
        unsigned int                    sglen;
        u32 iflags;
 
@@ -860,16 +861,19 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
        if (host->caps.has_dma)
                atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
 
-       if (data->flags & MMC_DATA_READ)
+       if (data->flags & MMC_DATA_READ) {
                direction = DMA_FROM_DEVICE;
-       else
+               slave_dirn = DMA_DEV_TO_MEM;
+       } else {
                direction = DMA_TO_DEVICE;
+               slave_dirn = DMA_MEM_TO_DEV;
+       }
 
        sglen = dma_map_sg(chan->device->dev, data->sg,
                        data->sg_len, direction);
 
        desc = chan->device->device_prep_slave_sg(chan,
-                       data->sg, sglen, direction,
+                       data->sg, sglen, slave_dirn,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                goto unmap_exit;
index ece03b491c7db824fe7a698353f2c14f9f0cb6d2..0d955ffaf44e2c3ec5961f966687da3e675819d9 100644 (file)
@@ -374,6 +374,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
        struct dma_chan *chan;
        struct dma_device *device;
        struct dma_async_tx_descriptor *desc;
+       enum dma_data_direction buffer_dirn;
        int nr_sg;
 
        /* Check if next job is already prepared */
@@ -387,10 +388,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
        }
 
        if (data->flags & MMC_DATA_READ) {
-               conf.direction = DMA_FROM_DEVICE;
+               conf.direction = DMA_DEV_TO_MEM;
+               buffer_dirn = DMA_FROM_DEVICE;
                chan = host->dma_rx_channel;
        } else {
-               conf.direction = DMA_TO_DEVICE;
+               conf.direction = DMA_MEM_TO_DEV;
+               buffer_dirn = DMA_TO_DEVICE;
                chan = host->dma_tx_channel;
        }
 
@@ -403,7 +406,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
                return -EINVAL;
 
        device = chan->device;
-       nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+       nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
        if (nr_sg == 0)
                return -EINVAL;
 
@@ -426,7 +429,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
  unmap_exit:
        if (!next)
                dmaengine_terminate_all(chan);
-       dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+       dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
        return -ENOMEM;
 }
 
index 7088b40f95797b00172edf0bdf4f50437b18947f..4184b7946bbf34fd459c671a921ae6baf659e4d2 100644 (file)
@@ -218,6 +218,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
        unsigned int blksz = data->blksz;
        unsigned int datasize = nob * blksz;
        struct scatterlist *sg;
+       enum dma_transfer_direction slave_dirn;
        int i, nents;
 
        if (data->flags & MMC_DATA_STREAM)
@@ -240,10 +241,13 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
                }
        }
 
-       if (data->flags & MMC_DATA_READ)
+       if (data->flags & MMC_DATA_READ) {
                host->dma_dir = DMA_FROM_DEVICE;
-       else
+               slave_dirn = DMA_DEV_TO_MEM;
+       } else {
                host->dma_dir = DMA_TO_DEVICE;
+               slave_dirn = DMA_MEM_TO_DEV;
+       }
 
        nents = dma_map_sg(host->dma->device->dev, data->sg,
                                     data->sg_len,  host->dma_dir);
@@ -251,7 +255,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
                return -EINVAL;
 
        host->desc = host->dma->device->device_prep_slave_sg(host->dma,
-               data->sg, data->sg_len, host->dma_dir,
+               data->sg, data->sg_len, slave_dirn,
                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
        if (!host->desc) {
index 4e2e019dd5c94e02a8a6b54b413dcf2e162e7777..382c835d217cf23372594b230dcd7923971288d5 100644 (file)
@@ -154,6 +154,7 @@ struct mxs_mmc_host {
        struct dma_chan                 *dmach;
        struct mxs_dma_data             dma_data;
        unsigned int                    dma_dir;
+       enum dma_transfer_direction     slave_dirn;
        u32                             ssp_pio_words[SSP_PIO_NUM];
 
        unsigned int                    version;
@@ -324,7 +325,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
        }
 
        desc = host->dmach->device->device_prep_slave_sg(host->dmach,
-                               sgl, sg_len, host->dma_dir, append);
+                               sgl, sg_len, host->slave_dirn, append);
        if (desc) {
                desc->callback = mxs_mmc_dma_irq_callback;
                desc->callback_param = host;
@@ -356,6 +357,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
        host->ssp_pio_words[1] = cmd0;
        host->ssp_pio_words[2] = cmd1;
        host->dma_dir = DMA_NONE;
+       host->slave_dirn = DMA_TRANS_NONE;
        desc = mxs_mmc_prep_dma(host, 0);
        if (!desc)
                goto out;
@@ -395,6 +397,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
        host->ssp_pio_words[1] = cmd0;
        host->ssp_pio_words[2] = cmd1;
        host->dma_dir = DMA_NONE;
+       host->slave_dirn = DMA_TRANS_NONE;
        desc = mxs_mmc_prep_dma(host, 0);
        if (!desc)
                goto out;
@@ -433,6 +436,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
        int i;
 
        unsigned short dma_data_dir, timeout;
+       enum dma_transfer_direction slave_dirn;
        unsigned int data_size = 0, log2_blksz;
        unsigned int blocks = data->blocks;
 
@@ -448,9 +452,11 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
 
        if (data->flags & MMC_DATA_WRITE) {
                dma_data_dir = DMA_TO_DEVICE;
+               slave_dirn = DMA_MEM_TO_DEV;
                read = 0;
        } else {
                dma_data_dir = DMA_FROM_DEVICE;
+               slave_dirn = DMA_DEV_TO_MEM;
                read = BM_SSP_CTRL0_READ;
        }
 
@@ -510,6 +516,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
        host->ssp_pio_words[1] = cmd0;
        host->ssp_pio_words[2] = cmd1;
        host->dma_dir = DMA_NONE;
+       host->slave_dirn = DMA_TRANS_NONE;
        desc = mxs_mmc_prep_dma(host, 0);
        if (!desc)
                goto out;
@@ -518,6 +525,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
        WARN_ON(host->data != NULL);
        host->data = data;
        host->dma_dir = dma_data_dir;
+       host->slave_dirn = slave_dirn;
        desc = mxs_mmc_prep_dma(host, 1);
        if (!desc)
                goto out;
index 4a2c5b2355f21d81887e0f92d7f3350c1c51f288..f5d8b53be333aa9c997b0e82c1e8e69a8204eb0d 100644 (file)
@@ -286,7 +286,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
        if (ret > 0) {
                host->dma_active = true;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-                       DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+                       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }
 
        if (desc) {
@@ -335,7 +335,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
        if (ret > 0) {
                host->dma_active = true;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-                       DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }
 
        if (desc) {
index 86f259cdfcbcbe620206cc1c479395759fb2b2ad..7a6e6cc8f8b842ddb065208aea07d093809bc79d 100644 (file)
@@ -77,7 +77,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        if (ret > 0)
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-                       DMA_FROM_DEVICE, DMA_CTRL_ACK);
+                       DMA_DEV_TO_MEM, DMA_CTRL_ACK);
 
        if (desc) {
                cookie = dmaengine_submit(desc);
@@ -158,7 +158,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
        if (ret > 0)
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-                       DMA_TO_DEVICE, DMA_CTRL_ACK);
+                       DMA_MEM_TO_DEV, DMA_CTRL_ACK);
 
        if (desc) {
                cookie = dmaengine_submit(desc);
index 2a56fc6f399a871d2794445324a920eff5a0b228..7f680420bfab609f7490c7fa4237271f567cdf56 100644 (file)
@@ -827,7 +827,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
        pio[1] = pio[2] = 0;
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_NONE, 0);
+                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc) {
                pr_err("step 1 error\n");
                return -1;
@@ -839,7 +839,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
        sg_init_one(sgl, this->cmd_buffer, this->command_length);
        dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
        desc = channel->device->device_prep_slave_sg(channel,
-                                       sgl, 1, DMA_TO_DEVICE, 1);
+                                       sgl, 1, DMA_MEM_TO_DEV, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
@@ -872,7 +872,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
        pio[1] = 0;
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_NONE, 0);
+                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc) {
                pr_err("step 1 error\n");
                return -1;
@@ -881,7 +881,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
        /* [2] send DMA request */
        prepare_data_dma(this, DMA_TO_DEVICE);
        desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
-                                               1, DMA_TO_DEVICE, 1);
+                                               1, DMA_MEM_TO_DEV, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
@@ -908,7 +908,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
        pio[1] = 0;
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_NONE, 0);
+                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc) {
                pr_err("step 1 error\n");
                return -1;
@@ -917,7 +917,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
        /* [2] : send DMA request */
        prepare_data_dma(this, DMA_FROM_DEVICE);
        desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
-                                               1, DMA_FROM_DEVICE, 1);
+                                               1, DMA_DEV_TO_MEM, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
@@ -964,7 +964,7 @@ int gpmi_send_page(struct gpmi_nand_data *this,
 
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_NONE, 0);
+                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
@@ -998,7 +998,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
                | BF_GPMI_CTRL0_XFER_COUNT(0);
        pio[1] = 0;
        desc = channel->device->device_prep_slave_sg(channel,
-                               (struct scatterlist *)pio, 2, DMA_NONE, 0);
+                               (struct scatterlist *)pio, 2,
+                               DMA_TRANS_NONE, 0);
        if (!desc) {
                pr_err("step 1 error\n");
                return -1;
@@ -1027,7 +1028,7 @@ int gpmi_read_page(struct gpmi_nand_data *this,
        pio[5] = auxiliary;
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_NONE, 1);
+                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
@@ -1045,7 +1046,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
                | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
        pio[1] = 0;
        desc = channel->device->device_prep_slave_sg(channel,
-                               (struct scatterlist *)pio, 2, DMA_NONE, 1);
+                               (struct scatterlist *)pio, 2,
+                               DMA_TRANS_NONE, 1);
        if (!desc) {
                pr_err("step 3 error\n");
                return -1;
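
In these gpmi-nand hunks, DMA_TRANS_NONE marks descriptors that carry PIO register words, cast to a scatterlist pointer, rather than data; this is an mxs-dma convention, and the words are copied into the DMA command during prep, so a stack array suffices. A hedged sketch of the idiom, with an invented helper name and ctrl0 as a placeholder command word:

#include <linux/dmaengine.h>
#include <linux/kernel.h>

static struct dma_async_tx_descriptor *
prep_pio_sketch(struct dma_chan *chan, u32 ctrl0)
{
	u32 pio[2];

	pio[0] = ctrl0;	/* command word for the controller's CTRL0 register */
	pio[1] = 0;
	/* DMA_TRANS_NONE: the "scatterlist" is really PIO words, which
	 * mxs-dma copies into its command structure during prep. */
	return chan->device->device_prep_slave_sg(chan,
			(struct scatterlist *)pio,
			ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
}
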
index a688b9d975a2576f5285417d3a23ed53a5c3fb5b..f99c6e312a5d49a072a97354941a7547a19698ba 100644 (file)
@@ -365,13 +365,18 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
 
        if (cmd->autoneg == AUTONEG_ENABLE) {
+               u32 an_supported_speed = bp->port.supported[cfg_idx];
+               if (bp->link_params.phy[EXT_PHY1].type ==
+                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+                       an_supported_speed |= (SUPPORTED_100baseT_Half |
+                                              SUPPORTED_100baseT_Full);
                if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
                        DP(NETIF_MSG_LINK, "Autoneg not supported\n");
                        return -EINVAL;
                }
 
                /* advertise the requested speed and duplex if supported */
-               if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+               if (cmd->advertising & ~an_supported_speed) {
                        DP(NETIF_MSG_LINK, "Advertisement parameters "
                                           "are not supported\n");
                        return -EINVAL;
index 4df9505b67b62e1d482460cc83fcfb6999a83746..2091e5dbbcdd9f50d0d0dec422cd2e1c3aac2d4f 100644 (file)
@@ -2502,7 +2502,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
                struct bnx2x_nig_brb_pfc_port_params *nig_params)
 {
        u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
-       u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
+       u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
        u32 pkt_priority_to_cos = 0;
        struct bnx2x *bp = params->bp;
        u8 port = params->port;
@@ -2516,9 +2516,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
         * MAC control frames (that are not pause packets)
         * will be forwarded to the XCM.
         */
-       xcm_mask = REG_RD(bp,
-                               port ? NIG_REG_LLH1_XCM_MASK :
-                               NIG_REG_LLH0_XCM_MASK);
+       xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
+                         NIG_REG_LLH0_XCM_MASK);
        /*
         * nig params will override non PFC params, since it's possible to
         * do transition from PFC to SAFC
@@ -2533,8 +2532,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
                ppp_enable = 1;
                xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
                                     NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
-               xcm0_out_en = 0;
-               p0_hwpfc_enable = 1;
+               xcm_out_en = 0;
+               hwpfc_enable = 1;
        } else  {
                if (nig_params) {
                        llfc_out_en = nig_params->llfc_out_en;
@@ -2545,7 +2544,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
 
                xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
                        NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
-               xcm0_out_en = 1;
+               xcm_out_en = 1;
        }
 
        if (CHIP_IS_E3(bp))
@@ -2564,13 +2563,16 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
        REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
               NIG_REG_LLH0_XCM_MASK, xcm_mask);
 
-       REG_WR(bp,  NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+       REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
+              NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
 
        /* output enable for RX_XCM # IF */
-       REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
+       REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
+              NIG_REG_XCM0_OUT_EN, xcm_out_en);
 
        /* HW PFC TX enable */
-       REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
+       REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE :
+              NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
 
        if (nig_params) {
                u8 i = 0;
@@ -3761,7 +3763,15 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
        /* Advertise pause */
        bnx2x_ext_phy_set_pause(params, phy, vars);
 
-       vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+       /*
+        * Set KR Autoneg Work-Around flag for Warpcore versions older than D108
+        */
+       bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+                       MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
+       if (val16 < 0xd108) {
+               DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
+               vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+       }
 
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                        MDIO_WC_REG_DIGITAL5_MISC7, &val16);
@@ -9266,62 +9276,68 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 /*             BCM8481/BCM84823/BCM84833 PHY SECTION             */
 /******************************************************************/
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
-                                          struct link_params *params)
+                                           struct bnx2x *bp,
+                                           u8 port)
 {
        u16 val, fw_ver1, fw_ver2, cnt;
-       u8 port;
-       struct bnx2x *bp = params->bp;
 
-       port = params->port;
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+               bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
+               bnx2x_save_spirom_version(bp, port,
+                               ((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f),
+                               phy->ver_addr);
+       } else {
+               /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
+               /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+
+               for (cnt = 0; cnt < 100; cnt++) {
+                       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+                       if (val & 1)
+                               break;
+                       udelay(5);
+               }
+               if (cnt == 100) {
+                       DP(NETIF_MSG_LINK, "Unable to read 848xx "
+                                       "phy fw version(1)\n");
+                       bnx2x_save_spirom_version(bp, port, 0,
+                                                 phy->ver_addr);
+                       return;
+               }
 
-       /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
-       /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
 
-       for (cnt = 0; cnt < 100; cnt++) {
-               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
-               if (val & 1)
-                       break;
-               udelay(5);
-       }
-       if (cnt == 100) {
-               DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
-               bnx2x_save_spirom_version(bp, port, 0,
-                                         phy->ver_addr);
-               return;
-       }
+               /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+               for (cnt = 0; cnt < 100; cnt++) {
+                       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+                       if (val & 1)
+                               break;
+                       udelay(5);
+               }
+               if (cnt == 100) {
+                       DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw "
+                                       "version(2)\n");
+                       bnx2x_save_spirom_version(bp, port, 0,
+                                                 phy->ver_addr);
+                       return;
+               }
 
+               /* lower 16 bits of the register SPI_FW_STATUS */
+               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+               /* upper 16 bits of register SPI_FW_STATUS */
+               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
 
-       /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
-       for (cnt = 0; cnt < 100; cnt++) {
-               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
-               if (val & 1)
-                       break;
-               udelay(5);
-       }
-       if (cnt == 100) {
-               DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
-               bnx2x_save_spirom_version(bp, port, 0,
+               bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
                                          phy->ver_addr);
-               return;
        }
 
-       /* lower 16 bits of the register SPI_FW_STATUS */
-       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
-       /* upper 16 bits of register SPI_FW_STATUS */
-       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
-
-       bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
-                                 phy->ver_addr);
 }
-
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
                                struct bnx2x_phy *phy)
 {
@@ -9392,10 +9408,13 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
        u16 tmp_req_line_speed;
 
        tmp_req_line_speed = phy->req_line_speed;
-       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
                if (phy->req_line_speed == SPEED_10000)
                        phy->req_line_speed = SPEED_AUTO_NEG;
-
+       } else {
+               /* Save spirom version */
+               bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+       }
        /*
         * This phy uses the NIG latch mechanism since link indication
         * arrives through its LED4 and not via its LASI signal, so we
@@ -9443,13 +9462,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                         an_1000_val);
 
        /* set 100 speed advertisement */
-       if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+       if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
             (phy->speed_cap_mask &
              (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
-              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) &&
-            (phy->supported &
-             (SUPPORTED_100baseT_Half |
-              SUPPORTED_100baseT_Full)))) {
+              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) {
                an_10_100_val |= (1<<7);
                /* Enable autoneg and restart autoneg for legacy speeds */
                autoneg_val |= (1<<9 | 1<<12);
@@ -9539,9 +9555,6 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                                 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
                                 1);
 
-       /* Save spirom version */
-       bnx2x_save_848xx_spirom_version(phy, params);
-
        phy->req_line_speed = tmp_req_line_speed;
 
        return 0;
@@ -9749,17 +9762,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
        /* Wait for GPHY to come out of reset */
        msleep(50);
-       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
-               /* Bring PHY out of super isolate mode */
-               bnx2x_cl45_read(bp, phy,
-                               MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
-               val &= ~MDIO_84833_SUPER_ISOLATE;
-               bnx2x_cl45_write(bp, phy,
-                               MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
-               bnx2x_84833_pair_swap_cfg(phy, params, vars);
-       } else {
+       if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
                /*
                 * BCM84823 requires that XGXS links up first @ 10G for normal
                 * behavior.
@@ -9816,24 +9819,23 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
                   params->multi_phy_config, val);
 
-       /* AutogrEEEn */
-       if (params->feature_config_flags &
-               FEATURE_CONFIG_AUTOGREEEN_ENABLED)
-               cmd_args[0] = 0x2;
-       else
-               cmd_args[0] = 0x0;
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+               bnx2x_84833_pair_swap_cfg(phy, params, vars);
 
-       cmd_args[1] = 0x0;
-       cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
-       cmd_args[3] = PHY84833_CONSTANT_LATENCY;
-       rc = bnx2x_84833_cmd_hdlr(phy, params,
-               PHY84833_CMD_SET_EEE_MODE, cmd_args);
-       if (rc != 0)
-               DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
+               /* Keep AutogrEEEn disabled. */
+               cmd_args[0] = 0x0;
+               cmd_args[1] = 0x0;
+               cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+               cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+               rc = bnx2x_84833_cmd_hdlr(phy, params,
+                       PHY84833_CMD_SET_EEE_MODE, cmd_args);
+               if (rc != 0)
+                       DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
+       }
        if (initialize)
                rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
        else
-               bnx2x_save_848xx_spirom_version(phy, params);
+               bnx2x_save_848xx_spirom_version(phy, bp, params->port);
        /* 84833 PHY has a better feature and doesn't need to support this. */
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
                cms_enable = REG_RD(bp, params->shmem_base +
@@ -9851,6 +9853,16 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                                 MDIO_CTL_REG_84823_USER_CTRL_REG, val);
        }
 
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+               /* Bring PHY out of super isolate mode as the final step. */
+               bnx2x_cl45_read(bp, phy,
+                               MDIO_CTL_DEVAD,
+                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+               val &= ~MDIO_84833_SUPER_ISOLATE;
+               bnx2x_cl45_write(bp, phy,
+                               MDIO_CTL_DEVAD,
+                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+       }
        return rc;
 }
 
@@ -9988,10 +10000,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
        } else {
                bnx2x_cl45_read(bp, phy,
                                MDIO_CTL_DEVAD,
-                               0x400f, &val16);
+                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16);
+               val16 |= MDIO_84833_SUPER_ISOLATE;
                bnx2x_cl45_write(bp, phy,
-                               MDIO_PMA_DEVAD,
-                               MDIO_PMA_REG_CTRL, 0x800);
+                                MDIO_CTL_DEVAD,
+                                MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16);
        }
 }
 
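
The config-init and link-reset hunks above use the same clause-45 read-modify-write pattern on the STRAP1 register, clearing SUPER_ISOLATE at the end of init and setting it again on reset. A self-contained sketch of the pattern, with the register modeled as a plain variable and the bit position assumed:

#include <stdint.h>
#include <stdio.h>

#define SUPER_ISOLATE (1u << 15)        /* bit position assumed, not verified */

static uint16_t strap1 = SUPER_ISOLATE; /* stands in for the MDIO register */

static void cl45_read(uint16_t *val)  { *val = strap1; }
static void cl45_write(uint16_t val)  { strap1 = val;  }

/* Read-modify-write SUPER_ISOLATE, mirroring the hunks above. */
static void set_super_isolate(int isolate)
{
        uint16_t val;

        cl45_read(&val);
        if (isolate)
                val |= SUPER_ISOLATE;
        else
                val &= ~SUPER_ISOLATE;
        cl45_write(val);
}

int main(void)
{
        set_super_isolate(0);   /* config-init final step: leave isolation */
        printf("strap1 = 0x%04x\n", strap1);
        set_super_isolate(1);   /* link-reset path: isolate the PHY again */
        printf("strap1 = 0x%04x\n", strap1);
        return 0;
}
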
@@ -11516,6 +11529,19 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
        }
        phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
+       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+           (phy->ver_addr)) {
+               /*
+                * Remove 100Mb link support for BCM84833 when the phy fw
+                * version is lower than or equal to 1.39
+                */
+               u32 raw_ver = REG_RD(bp, phy->ver_addr);
+               if (((raw_ver & 0x7F) <= 39) &&
+                   (((raw_ver & 0xF80) >> 7) <= 1))
+                       phy->supported &= ~(SUPPORTED_100baseT_Half |
+                                           SUPPORTED_100baseT_Full);
+       }
+
        /*
         * In case mdc/mdio_access of the external phy is different than the
         * mdc/mdio access of the XGXS, a HW lock must be taken in each access
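
The version test just added reads as a packed major/minor word: bits 6:0 carry the minor number and bits 11:7 the major, so firmware 1.39 would be stored as (1 << 7) | 39. A small sketch of the decode, assuming that encoding:

#include <stdio.h>

int main(void)
{
        unsigned int raw_ver = (1u << 7) | 39;          /* fw 1.39 */
        unsigned int minor = raw_ver & 0x7F;            /* bits 6:0  */
        unsigned int major = (raw_ver & 0xF80) >> 7;    /* bits 11:7 */

        /* Same test as the diff: anything <= 1.39 loses 100Mb modes. */
        printf("fw %u.%u -> strip 100Mb: %s\n", major, minor,
               (minor <= 39 && major <= 1) ? "yes" : "no");
        return 0;
}
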
@@ -12333,55 +12359,69 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
                                                u32 chip_id)
 {
        u8 reset_gpios;
-       struct bnx2x_phy phy;
-       u32 shmem_base, shmem2_base, cnt;
-       s8 port = 0;
-       u16 val;
-
        reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
        bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
        udelay(10);
        bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
        DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
                reset_gpios);
-       for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-               /* This PHY is for E2 and E3. */
-               shmem_base = shmem_base_path[port];
-               shmem2_base = shmem2_base_path[port];
-               /* Extract the ext phy address for the port */
-               if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
-                                      0, &phy) !=
-                   0) {
-                       DP(NETIF_MSG_LINK, "populate_phy failed\n");
-                       return -EINVAL;
-               }
+       return 0;
+}
 
-               /* Wait for FW completing its initialization. */
-               for (cnt = 0; cnt < 1000; cnt++) {
-                       bnx2x_cl45_read(bp, &phy,
+static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
+                                              struct bnx2x_phy *phy)
+{
+       u16 val, cnt;
+       /* Wait for FW to complete its initialization. */
+       for (cnt = 0; cnt < 1500; cnt++) {
+               bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD,
                                MDIO_PMA_REG_CTRL, &val);
-                       if (!(val & (1<<15)))
-                               break;
-                       msleep(1);
-               }
-               if (cnt >= 1000)
-                       DP(NETIF_MSG_LINK,
-                               "84833 Cmn reset timeout (%d)\n", port);
-
-               /* Put the port in super isolate mode. */
-               bnx2x_cl45_read(bp, &phy,
-                               MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
-               val |= MDIO_84833_SUPER_ISOLATE;
-               bnx2x_cl45_write(bp, &phy,
-                               MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+               if (!(val & (1<<15)))
+                       break;
+               msleep(1);
+       }
+       if (cnt >= 1500) {
+               DP(NETIF_MSG_LINK, "84833 reset timeout\n");
+               return -EINVAL;
        }
 
+       /* Put the port in super isolate mode. */
+       bnx2x_cl45_read(bp, phy,
+                       MDIO_CTL_DEVAD,
+                       MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+       val |= MDIO_84833_SUPER_ISOLATE;
+       bnx2x_cl45_write(bp, phy,
+                        MDIO_CTL_DEVAD,
+                        MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+
+       /* Save spirom version */
+       bnx2x_save_848xx_spirom_version(phy, bp, PORT_0);
        return 0;
 }
 
+int bnx2x_pre_init_phy(struct bnx2x *bp,
+                                 u32 shmem_base,
+                                 u32 shmem2_base,
+                                 u32 chip_id)
+{
+       int rc = 0;
+       struct bnx2x_phy phy;
+       bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
+       if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
+                              PORT_0, &phy)) {
+               DP(NETIF_MSG_LINK, "populate_phy failed\n");
+               return -EINVAL;
+       }
+       switch (phy.type) {
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+               rc = bnx2x_84833_pre_init_phy(bp, &phy);
+               break;
+       default:
+               break;
+       }
+       return rc;
+}
 
 static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                                     u32 shmem2_base_path[], u8 phy_index,
index 44609de4e5dc59f7ecbe41d3ea8c0e01f17d837e..dddbcf6e154ec58d8d1f0720706df5ad0076c338 100644 (file)
  * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
  * accommodate the 9 input clients to ETS arbiter. */
 #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB                  0x18684
+#define NIG_REG_P1_HWPFC_ENABLE                                         0x181d0
 #define NIG_REG_P1_MAC_IN_EN                                    0x185c0
 /* [RW 1] Output enable for TX MAC interface */
 #define NIG_REG_P1_MAC_OUT_EN                                   0x185c4
index 076e02a415a09d09878db3ea1c0071c03ac4a685..d529af99157dd6ce752fa6c3d22e0da572b2f7ae 100644 (file)
@@ -8846,9 +8846,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        udelay(100);
 
-       if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
+       if (tg3_flag(tp, USING_MSIX)) {
                val = tr32(MSGINT_MODE);
-               val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+               val |= MSGINT_MODE_ENABLE;
+               if (tp->irq_cnt > 1)
+                       val |= MSGINT_MODE_MULTIVEC_EN;
                if (!tg3_flag(tp, 1SHOT_MSI))
                        val |= MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, val);
@@ -9548,19 +9550,18 @@ static int tg3_request_firmware(struct tg3 *tp)
 
 static bool tg3_enable_msix(struct tg3 *tp)
 {
-       int i, rc, cpus = num_online_cpus();
+       int i, rc;
        struct msix_entry msix_ent[tp->irq_max];
 
-       if (cpus == 1)
-               /* Just fallback to the simpler MSI mode. */
-               return false;
-
-       /*
-        * We want as many rx rings enabled as there are cpus.
-        * The first MSIX vector only deals with link interrupts, etc,
-        * so we add one to the number of vectors we are requesting.
-        */
-       tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
+       tp->irq_cnt = num_online_cpus();
+       if (tp->irq_cnt > 1) {
+               /* We want as many rx rings enabled as there are cpus.
+                * In multiqueue MSI-X mode, the first MSI-X vector
+                * only deals with link interrupts, etc, so we add
+                * one to the number of vectors we are requesting.
+                */
+               tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
+       }
 
        for (i = 0; i < tp->irq_max; i++) {
                msix_ent[i].entry  = i;
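
Under the new logic a single-CPU machine requests exactly one MSI-X vector instead of falling back to MSI, while multi-CPU machines still reserve one extra vector for link interrupts, capped at irq_max. A standalone sketch of the count calculation:

#include <stdio.h>

/* Vector-count rule from the hunk above. */
static unsigned int tg3_vec_count(unsigned int cpus, unsigned int irq_max)
{
        unsigned int cnt = cpus;

        if (cnt > 1)    /* one extra vector for link interrupts, capped */
                cnt = (cnt + 1 < irq_max) ? cnt + 1 : irq_max;
        return cnt;
}

int main(void)
{
        printf("%u\n", tg3_vec_count(1, 5));    /* 1: single vector      */
        printf("%u\n", tg3_vec_count(4, 5));    /* 5: 4 rx rings + link  */
        printf("%u\n", tg3_vec_count(16, 5));   /* 5: capped at irq_max  */
        return 0;
}
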
index 05b7359bde8da1931ac53ebc6fc95310bd398e11..6bdd8e36e564c4868eac9259b63a81c23f345337 100644 (file)
@@ -263,7 +263,7 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
                data[i++] = atomic_read(&port->port_res[k].swqe_avail);
 }
 
-const struct ethtool_ops ehea_ethtool_ops = {
+static const struct ethtool_ops ehea_ethtool_ops = {
        .get_settings = ehea_get_settings,
        .get_drvinfo = ehea_get_drvinfo,
        .get_msglevel = ehea_get_msglevel,
index 3554414eb5e289287e3a9ab174320c7e9fa1bf30..5d5fb2627184f9b7f303d64c22875182c63d88e6 100644 (file)
@@ -94,8 +94,8 @@ static int port_name_cnt;
 static LIST_HEAD(adapter_list);
 static unsigned long ehea_driver_flags;
 static DEFINE_MUTEX(dlpar_mem_lock);
-struct ehea_fw_handle_array ehea_fw_handles;
-struct ehea_bcmc_reg_array ehea_bcmc_regs;
+static struct ehea_fw_handle_array ehea_fw_handles;
+static struct ehea_bcmc_reg_array ehea_bcmc_regs;
 
 
 static int __devinit ehea_probe_adapter(struct platform_device *dev,
@@ -133,7 +133,7 @@ void ehea_dump(void *adr, int len, char *msg)
        }
 }
 
-void ehea_schedule_port_reset(struct ehea_port *port)
+static void ehea_schedule_port_reset(struct ehea_port *port)
 {
        if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
                schedule_work(&port->reset_task);
@@ -1404,7 +1404,7 @@ static int ehea_configure_port(struct ehea_port *port)
        return ret;
 }
 
-int ehea_gen_smrs(struct ehea_port_res *pr)
+static int ehea_gen_smrs(struct ehea_port_res *pr)
 {
        int ret;
        struct ehea_adapter *adapter = pr->port->adapter;
@@ -1426,7 +1426,7 @@ int ehea_gen_smrs(struct ehea_port_res *pr)
        return -EIO;
 }
 
-int ehea_rem_smrs(struct ehea_port_res *pr)
+static int ehea_rem_smrs(struct ehea_port_res *pr)
 {
        if ((ehea_rem_mr(&pr->send_mr)) ||
            (ehea_rem_mr(&pr->recv_mr)))
@@ -2190,7 +2190,7 @@ static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
        return err;
 }
 
-int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
+static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
 {
        int ret = -EIO;
        u64 hret;
@@ -2531,7 +2531,7 @@ static void ehea_flush_sq(struct ehea_port *port)
        }
 }
 
-int ehea_stop_qps(struct net_device *dev)
+static int ehea_stop_qps(struct net_device *dev)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
@@ -2600,7 +2600,7 @@ int ehea_stop_qps(struct net_device *dev)
        return ret;
 }
 
-void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
+static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
 {
        struct ehea_qp qp = *orig_qp;
        struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2633,7 +2633,7 @@ void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
        }
 }
 
-int ehea_restart_qps(struct net_device *dev)
+static int ehea_restart_qps(struct net_device *dev)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
@@ -2824,7 +2824,7 @@ static void ehea_tx_watchdog(struct net_device *dev)
                ehea_schedule_port_reset(port);
 }
 
-int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
+static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
 {
        struct hcp_query_ehea *cb;
        u64 hret;
@@ -2852,7 +2852,7 @@ int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
        return ret;
 }
 
-int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
+static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
 {
        struct hcp_ehea_port_cb4 *cb4;
        u64 hret;
@@ -2966,7 +2966,7 @@ static const struct net_device_ops ehea_netdev_ops = {
        .ndo_tx_timeout         = ehea_tx_watchdog,
 };
 
-struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
+static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
                                         u32 logical_port_id,
                                         struct device_node *dn)
 {
@@ -3237,7 +3237,7 @@ static ssize_t ehea_remove_port(struct device *dev,
 static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
 static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
 
-int ehea_create_device_sysfs(struct platform_device *dev)
+static int ehea_create_device_sysfs(struct platform_device *dev)
 {
        int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
        if (ret)
@@ -3248,7 +3248,7 @@ int ehea_create_device_sysfs(struct platform_device *dev)
        return ret;
 }
 
-void ehea_remove_device_sysfs(struct platform_device *dev)
+static void ehea_remove_device_sysfs(struct platform_device *dev)
 {
        device_remove_file(&dev->dev, &dev_attr_probe_port);
        device_remove_file(&dev->dev, &dev_attr_remove_port);
@@ -3379,7 +3379,7 @@ static int __devexit ehea_remove(struct platform_device *dev)
        return 0;
 }
 
-void ehea_crash_handler(void)
+static void ehea_crash_handler(void)
 {
        int i;
 
@@ -3491,7 +3491,7 @@ static ssize_t ehea_show_capabilities(struct device_driver *drv,
 static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
                   ehea_show_capabilities, NULL);
 
-int __init ehea_module_init(void)
+static int __init ehea_module_init(void)
 {
        int ret;
 
index 95b9f4fa811e7102cc6db2248dbabf1af7e3d1e9..c25b05b94daae698e0b8640e40da8614ea9ad4f7 100644 (file)
@@ -34,9 +34,7 @@
 #include "ehea_phyp.h"
 #include "ehea_qmr.h"
 
-struct ehea_bmap *ehea_bmap = NULL;
-
-
+static struct ehea_bmap *ehea_bmap;
 
 static void *hw_qpageit_get_inc(struct hw_queue *queue)
 {
@@ -212,7 +210,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
        return NULL;
 }
 
-u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
+static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
 {
        u64 hret;
        u64 adapter_handle = cq->adapter->handle;
@@ -337,7 +335,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
        return eqe;
 }
 
-u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
+static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
 {
        u64 hret;
        unsigned long flags;
@@ -381,7 +379,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
 /**
  * allocates memory for a queue and registers pages in phyp
  */
-int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
                           int nr_pages, int wqe_size, int act_nr_sges,
                           struct ehea_adapter *adapter, int h_call_q_selector)
 {
@@ -516,7 +514,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
        return NULL;
 }
 
-u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
+static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
 {
        u64 hret;
        struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
@@ -976,7 +974,7 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
        return 0;
 }
 
-void print_error_data(u64 *data)
+static void print_error_data(u64 *data)
 {
        int length;
        u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
index 75ec87a822b8e2d1ee12486bc6cfcbf0f3e0a248..0a85690a1321ecc17cf24f2398f496d60dc8d175 100644 (file)
@@ -459,7 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
                sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
 
        ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
-               &ctl->sg, 1, DMA_TO_DEVICE,
+               &ctl->sg, 1, DMA_MEM_TO_DEV,
                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
        if (!ctl->adesc)
                return NETDEV_TX_BUSY;
@@ -571,7 +571,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
                sg_dma_len(sg) = DMA_BUFFER_SIZE;
 
                ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
-                       sg, 1, DMA_FROM_DEVICE,
+                       sg, 1, DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
 
                if (!ctl->adesc)
index 6ece4295d78fd8cd16deafd28cf377f937b4aa92..813d41c4a845501bd37e772f094cc356ce029aab 100644 (file)
@@ -1703,7 +1703,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
        mdp->mii_bus->name = "sh_mii";
        mdp->mii_bus->parent = &ndev->dev;
        snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
-               mdp->pdev->name, pdid);
+               mdp->pdev->name, id);
 
        /* PHY IRQ */
        mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
index 88c81c5706b249a0e08a25c28217775463dd53dc..09b8c9dbf78f5e3267ebfadc2d4bed15f9562487 100644 (file)
@@ -557,10 +557,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
                        rxs->rs_status |= ATH9K_RXERR_DECRYPT;
                else if (rxsp->status11 & AR_MichaelErr)
                        rxs->rs_status |= ATH9K_RXERR_MIC;
-               if (rxsp->status11 & AR_KeyMiss)
-                       rxs->rs_status |= ATH9K_RXERR_KEYMISS;
        }
 
+       if (rxsp->status11 & AR_KeyMiss)
+               rxs->rs_status |= ATH9K_RXERR_KEYMISS;
+
        return 0;
 }
 EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
index fd3f19c2e550e4e9991c4adc9b430a91799c9bd4..e196aba77acf568387d7a4b2b5852e316d801e4f 100644 (file)
@@ -618,10 +618,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
                        rs->rs_status |= ATH9K_RXERR_DECRYPT;
                else if (ads.ds_rxstatus8 & AR_MichaelErr)
                        rs->rs_status |= ATH9K_RXERR_MIC;
-               if (ads.ds_rxstatus8 & AR_KeyMiss)
-                       rs->rs_status |= ATH9K_RXERR_KEYMISS;
        }
 
+       if (ads.ds_rxstatus8 & AR_KeyMiss)
+               rs->rs_status |= ATH9K_RXERR_KEYMISS;
+
        return 0;
 }
 EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
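
Both ath9k hunks make the same behavioral change: the key-miss bit is now reported even when the descriptor carried no other PHY or decrypt error, instead of only inside the error branch. A reduced sketch of the new control flow (flag values invented):

#include <stdio.h>

#define ERR_DECRYPT (1 << 0)
#define ERR_KEYMISS (1 << 1)

static int rx_status(int phy_err, int key_miss)
{
        int status = 0;

        if (phy_err)
                status |= ERR_DECRYPT;
        /* new placement: evaluated even when there was no other error */
        if (key_miss)
                status |= ERR_KEYMISS;
        return status;
}

int main(void)
{
        /* the old code would have reported 0 here */
        printf("0x%x\n", rx_status(0, 1));      /* 0x2: key miss visible */
        return 0;
}
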
index 1c6f19393efa72037b7369993bf7af463a7bb51a..b91f28ef1032e40791f4d385d3e92e4646464a3f 100644 (file)
@@ -4852,6 +4852,9 @@ static void b43_op_stop(struct ieee80211_hw *hw)
 
        cancel_work_sync(&(wl->beacon_update_trigger));
 
+       if (!dev)
+               goto out;
+
        mutex_lock(&wl->mutex);
        if (b43_status(dev) >= B43_STAT_STARTED) {
                dev = b43_wireless_core_stop(dev);
@@ -4863,7 +4866,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
 
 out_unlock:
        mutex_unlock(&wl->mutex);
-
+out:
        cancel_work_sync(&(wl->txpower_adjust_work));
 }
 
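
The b43 change guards against a NULL wl->current_dev (for example after firmware loading failed) while still cancelling the txpower work at the end. A minimal sketch of the early-out shape:

#include <stddef.h>
#include <stdio.h>

struct wldev { int status; };

static void op_stop_sketch(struct wldev *dev)
{
        /* cancel_work_sync(beacon_update_trigger) would run here */
        if (!dev)
                goto out;       /* skip the mutex-protected core teardown */

        printf("stopping core, status %d\n", dev->status);
out:
        /* cancel_work_sync(txpower_adjust_work) still always runs */
        printf("txpower work cancelled\n");
}

int main(void)
{
        op_stop_sketch(NULL);   /* no dereference, teardown skipped */
        return 0;
}
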
index f23b0c3e4ea3d94985cbdc4b61d6f2688ef52164..bf11850a20f11b1201179c7c7659a16397f2d4cc 100644 (file)
@@ -2475,7 +2475,7 @@ static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv)
        return err;
 }
 
-static void brcmf_delay(u32 ms)
+static __always_inline void brcmf_delay(u32 ms)
 {
        if (ms < 1000 / HZ) {
                cond_resched();
index d106576ce338980b4645ced6084139084f74fc21..448ab9c4eb47b00db5ff558db6a4ab978a4f4f77 100644 (file)
@@ -1128,14 +1128,7 @@ static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
        return 0;
 }
 
-static int brcms_pci_suspend(struct pci_dev *pdev)
-{
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       return pci_set_power_state(pdev, PCI_D3hot);
-}
-
-static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
+static int brcms_suspend(struct bcma_device *pdev)
 {
        struct brcms_info *wl;
        struct ieee80211_hw *hw;
@@ -1153,40 +1146,15 @@ static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
        wl->pub->hw_up = false;
        spin_unlock_bh(&wl->lock);
 
-       /* temporarily do suspend ourselves */
-       return brcms_pci_suspend(pdev->bus->host_pci);
-}
-
-static int brcms_pci_resume(struct pci_dev *pdev)
-{
-       int err = 0;
-       uint val;
-
-       err = pci_set_power_state(pdev, PCI_D0);
-       if (err)
-               return err;
-
-       pci_restore_state(pdev);
-
-       err = pci_enable_device(pdev);
-       if (err)
-               return err;
-
-       pci_set_master(pdev);
-
-       pci_read_config_dword(pdev, 0x40, &val);
-       if ((val & 0x0000ff00) != 0)
-               pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+       pr_debug("brcms_suspend ok\n");
 
        return 0;
 }
 
 static int brcms_resume(struct bcma_device *pdev)
 {
-       /*
-       *  just do pci resume for now until bcma supports it.
-       */
-       return brcms_pci_resume(pdev->bus->host_pci);
+       pr_debug("brcms_resume ok\n");
+       return 0;
 }
 
 static struct bcma_driver brcms_bcma_driver = {
index 018a8deb88a83b5b2ab2febcc2487b89eeb4ba4f..4fcdac63a3007aff3822e8dd17ccd5db57ce69a6 100644 (file)
@@ -7848,7 +7848,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
         * more efficiently than we can parse it. ORDER MATTERS HERE */
        struct ipw_rt_hdr *ipw_rt;
 
-       short len = le16_to_cpu(pkt->u.frame.length);
+       unsigned short len = le16_to_cpu(pkt->u.frame.length);
 
        /* We received data from the HW, so stop the watchdog */
        dev->trans_start = jiffies;
@@ -8023,7 +8023,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
        s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
        s8 noise = (s8) le16_to_cpu(frame->noise);
        u8 rate = frame->rate;
-       short len = le16_to_cpu(pkt->u.frame.length);
+       unsigned short len = le16_to_cpu(pkt->u.frame.length);
        struct sk_buff *skb;
        int hdr_only = 0;
        u16 filter = priv->prom_priv->filter;
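
The signed-to-unsigned change matters because le16_to_cpu() can yield values up to 0xFFFF; stored in a plain short, anything at or above 0x8000 turns negative and later length arithmetic misbehaves. A tiny illustration (two's-complement behavior assumed):

#include <stdio.h>

int main(void)
{
        unsigned int wire_len = 0x8004; /* 16-bit length from the hardware */
        short s = (short)wire_len;                      /* old type */
        unsigned short u = (unsigned short)wire_len;    /* new type */

        printf("signed: %d, unsigned: %u\n", s, u);     /* -32764 vs 32772 */
        return 0;
}
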
index 084aa2c4ccfb272dab28d57fbc9969644360f314..a6454726737e04b207d0c9ae710098f98cef6c63 100644 (file)
@@ -569,7 +569,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        struct iwl_scan_cmd *scan;
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
        u32 rate_flags = 0;
-       u16 cmd_len;
+       u16 cmd_len = 0;
        u16 rx_chain = 0;
        enum ieee80211_band band;
        u8 n_probes = 0;
index 7becea3dec654de21ab5090fc55b8a5068b7703f..dd5aeaff44ba3483804df003bd5779cbf0138794 100644 (file)
@@ -2777,7 +2777,7 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
        else if (channel->band == IEEE80211_BAND_5GHZ)
                cmd->band = cpu_to_le16(0x4);
 
-       cmd->channel = channel->hw_value;
+       cmd->channel = cpu_to_le16(channel->hw_value);
 
        if (conf->channel_type == NL80211_CHAN_NO_HT ||
            conf->channel_type == NL80211_CHAN_HT20) {
@@ -4066,7 +4066,7 @@ static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
                goto done;
 
        if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-                       WLAN_CIPHER_SUITE_WEP104)
+                       key->cipher == WLAN_CIPHER_SUITE_WEP104)
                mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
 
        cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
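
Besides the cpu_to_le16() endianness fix above, the remove-key hunk repairs a classic precedence slip: "key->cipher == WLAN_CIPHER_SUITE_WEP40 || WLAN_CIPHER_SUITE_WEP104" ORs in a nonzero constant, so the old test was always true. A minimal demonstration, using the standard 802.11 suite-selector values:

#include <stdio.h>

#define CIPHER_WEP40  0x000FAC01u
#define CIPHER_WEP104 0x000FAC05u
#define CIPHER_CCMP   0x000FAC04u

int main(void)
{
        unsigned int cipher = CIPHER_CCMP;      /* neither WEP variant */

        /* old form: "|| CIPHER_WEP104" is a nonzero constant, always true */
        if (cipher == CIPHER_WEP40 || CIPHER_WEP104)
                printf("buggy test fires even for CCMP\n");

        /* fixed form from the diff */
        if (cipher == CIPHER_WEP40 || cipher == CIPHER_WEP104)
                printf("fixed test fires\n");
        else
                printf("fixed test correctly skips CCMP\n");
        return 0;
}
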
index 4941a1a2321907fb93cc8a2fe08a3ce87d8ecf16..dc88baefa72e88bd2b2f60b3ea2e8f7174ff34ed 100644 (file)
@@ -422,7 +422,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
 static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
                                 enum dev_state state)
 {
-       int mask = (state == STATE_RADIO_IRQ_ON);
        u32 reg;
        unsigned long flags;
 
@@ -436,25 +435,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        }
 
        spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
-       rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
+       reg = 0;
+       if (state == STATE_RADIO_IRQ_ON) {
+               rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
+               rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
+               rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
+               rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+               rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
+       }
        rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
 
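
Rather than read-modify-writing INT_MASK_CSR and zeroing a dozen fields one by one, the rewritten helper starts from 0 and sets only the five wanted interrupt sources when the radio IRQ is on. A reduced sketch of that construction, with bit positions invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Bit positions are placeholders, not the rt2800 register layout. */
#define RX_DONE        (1u << 0)
#define TBTT           (1u << 1)
#define PRE_TBTT       (1u << 2)
#define TX_FIFO_STATUS (1u << 3)
#define AUTO_WAKEUP    (1u << 4)

int main(void)
{
        int irq_on = 1;
        uint32_t reg = 0;       /* everything masked by default */

        if (irq_on)
                reg |= RX_DONE | TBTT | PRE_TBTT |
                       TX_FIFO_STATUS | AUTO_WAKEUP;

        printf("INT_MASK_CSR image: 0x%08x\n", (unsigned int)reg);
        return 0;
}
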
index 06ea3bcfdd2a3064f462592bbed67ca4b24e9c7e..16570aa84aac0e1e420a541d7224ecfd0e567bbc 100644 (file)
@@ -830,16 +830,11 @@ config SCSI_ISCI
        tristate "Intel(R) C600 Series Chipset SAS Controller"
        depends on PCI && SCSI
        depends on X86
-       # (temporary): known alpha quality driver
-       depends on EXPERIMENTAL
        select SCSI_SAS_LIBSAS
-       select SCSI_SAS_HOST_SMP
        ---help---
          This driver supports the 6Gb/s SAS capabilities of the storage
          control unit found in the Intel(R) C600 series chipset.
 
-         The experimental tag will be removed after the driver exits alpha
-
 config SCSI_GENERIC_NCR5380
        tristate "Generic NCR5380/53c400 SCSI PIO support"
        depends on ISA && SCSI
index 78963be2c4fb308f09df6f38b3cf5097b3bb5f53..cb07c628b2f1856a3a26dadba052e2587edd3ae7 100644 (file)
@@ -673,12 +673,7 @@ struct bfa_itnim_iostats_s {
        u32     tm_iocdowns;            /*  TM cleaned-up due to IOC down   */
        u32     tm_cleanups;            /*  TM cleanup requests */
        u32     tm_cleanup_comps;       /*  TM cleanup completions      */
-       u32     lm_lun_across_sg;       /*  LM lun is across sg data buf */
-       u32     lm_lun_not_sup;         /*  LM lun not supported */
-       u32     lm_rpl_data_changed;    /*  LM report-lun data changed */
-       u32     lm_wire_residue_changed; /* LM report-lun rsp residue changed */
-       u32     lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
-       u32     lm_lun_not_rdy;         /* LM lun not ready */
+       u32     rsvd[6];
 };
 
 /* Modify char* port_stt[] in bfal_port.c if a new state was added */
index 50b6a1c86195ac6d6c394295fcdd8150dbd43852..8d0b88f67a382e3582c40382d7dfe3892a19f3c8 100644 (file)
@@ -56,161 +56,6 @@ struct scsi_cdb_s {
 
 #define SCSI_MAX_ALLOC_LEN      0xFF    /* maximum allocation length */
 
-#define SCSI_SENSE_CUR_ERR     0x70
-#define SCSI_SENSE_DEF_ERR     0x71
-
-/*
- * SCSI additional sense codes
- */
-#define SCSI_ASC_LUN_NOT_READY         0x04
-#define SCSI_ASC_LUN_NOT_SUPPORTED     0x25
-#define SCSI_ASC_TOCC                  0x3F
-
-/*
- * SCSI additional sense code qualifiers
- */
-#define SCSI_ASCQ_MAN_INTR_REQ         0x03    /* manual intervention req */
-#define SCSI_ASCQ_RL_DATA_CHANGED      0x0E    /* report luns data changed */
-
-/*
- * Methods of reporting informational exceptions
- */
-#define SCSI_MP_IEC_UNIT_ATTN          0x2     /* generate unit attention */
-
-struct scsi_report_luns_data_s {
-       u32             lun_list_length;        /* length of LUN list length */
-       u32             reserved;
-       struct scsi_lun lun[1];                 /* first LUN in lun list */
-};
-
-struct scsi_inquiry_vendor_s {
-       u8      vendor_id[8];
-};
-
-struct scsi_inquiry_prodid_s {
-       u8      product_id[16];
-};
-
-struct scsi_inquiry_prodrev_s {
-       u8      product_rev[4];
-};
-
-struct scsi_inquiry_data_s {
-#ifdef __BIG_ENDIAN
-       u8              peripheral_qual:3;      /* peripheral qualifier */
-       u8              device_type:5;          /* peripheral device type */
-       u8              rmb:1;                  /* removable medium bit */
-       u8              device_type_mod:7;      /* device type modifier */
-       u8              version;
-       u8              aenc:1;         /* async evt notification capability */
-       u8              trm_iop:1;      /* terminate I/O process */
-       u8              norm_aca:1;     /* normal ACA supported */
-       u8              hi_support:1;   /* SCSI-3: supports REPORT LUNS */
-       u8              rsp_data_format:4;
-       u8              additional_len;
-       u8              sccs:1;
-       u8              reserved1:7;
-       u8              reserved2:1;
-       u8              enc_serv:1;     /* enclosure service component */
-       u8              reserved3:1;
-       u8              multi_port:1;   /* multi-port device */
-       u8              m_chngr:1;      /* device in medium transport element */
-       u8              ack_req_q:1;    /* SIP specific bit */
-       u8              addr32:1;       /* SIP specific bit */
-       u8              addr16:1;       /* SIP specific bit */
-       u8              rel_adr:1;      /* relative address */
-       u8              w_bus32:1;
-       u8              w_bus16:1;
-       u8              synchronous:1;
-       u8              linked_commands:1;
-       u8              trans_dis:1;
-       u8              cmd_queue:1;    /* command queueing supported */
-       u8              soft_reset:1;   /* soft reset alternative (VS) */
-#else
-       u8              device_type:5;  /* peripheral device type */
-       u8              peripheral_qual:3; /* peripheral qualifier */
-       u8              device_type_mod:7; /* device type modifier */
-       u8              rmb:1;          /* removable medium bit */
-       u8              version;
-       u8              rsp_data_format:4;
-       u8              hi_support:1;   /* SCSI-3: supports REPORT LUNS */
-       u8              norm_aca:1;     /* normal ACA supported */
-       u8              terminate_iop:1;/* terminate I/O process */
-       u8              aenc:1;         /* async evt notification capability */
-       u8              additional_len;
-       u8              reserved1:7;
-       u8              sccs:1;
-       u8              addr16:1;       /* SIP specific bit */
-       u8              addr32:1;       /* SIP specific bit */
-       u8              ack_req_q:1;    /* SIP specific bit */
-       u8              m_chngr:1;      /* device in medium transport element */
-       u8              multi_port:1;   /* multi-port device */
-       u8              reserved3:1;    /* TBD - Vendor Specific */
-       u8              enc_serv:1;     /* enclosure service component */
-       u8              reserved2:1;
-       u8              soft_seset:1;   /* soft reset alternative (VS) */
-       u8              cmd_queue:1;    /* command queueing supported */
-       u8              trans_dis:1;
-       u8              linked_commands:1;
-       u8              synchronous:1;
-       u8              w_bus16:1;
-       u8              w_bus32:1;
-       u8              rel_adr:1;      /* relative address */
-#endif
-       struct scsi_inquiry_vendor_s    vendor_id;
-       struct scsi_inquiry_prodid_s    product_id;
-       struct scsi_inquiry_prodrev_s   product_rev;
-       u8              vendor_specific[20];
-       u8              reserved4[40];
-};
-
-/*
- *     SCSI sense data format
- */
-struct scsi_sense_s {
-#ifdef __BIG_ENDIAN
-       u8              valid:1;
-       u8              rsp_code:7;
-#else
-       u8              rsp_code:7;
-       u8              valid:1;
-#endif
-       u8              seg_num;
-#ifdef __BIG_ENDIAN
-       u8              file_mark:1;
-       u8              eom:1;          /* end of media */
-       u8              ili:1;          /* incorrect length indicator */
-       u8              reserved:1;
-       u8              sense_key:4;
-#else
-       u8              sense_key:4;
-       u8              reserved:1;
-       u8              ili:1;          /* incorrect length indicator */
-       u8              eom:1;          /* end of media */
-       u8              file_mark:1;
-#endif
-       u8              information[4]; /* device-type or cmd specific info */
-       u8              add_sense_length; /* additional sense length */
-       u8              command_info[4];/* command specific information */
-       u8              asc;            /* additional sense code */
-       u8              ascq;           /* additional sense code qualifier */
-       u8              fru_code;       /* field replaceable unit code */
-#ifdef __BIG_ENDIAN
-       u8              sksv:1;         /* sense key specific valid */
-       u8              c_d:1;          /* command/data bit */
-       u8              res1:2;
-       u8              bpv:1;          /* bit pointer valid */
-       u8              bpointer:3;     /* bit pointer */
-#else
-       u8              bpointer:3;     /* bit pointer */
-       u8              bpv:1;          /* bit pointer valid */
-       u8              res1:2;
-       u8              c_d:1;          /* command/data bit */
-       u8              sksv:1;         /* sense key specific valid */
-#endif
-       u8              fpointer[2];    /* field pointer */
-};
-
 /*
  * Fibre Channel Header Structure (FCHS) definition
  */
index e07bd4745d8ba5b968ded24e81785096c1535b84..f0f80e282e39cc023a72dacef7ee23b09c06a79e 100644 (file)
@@ -24,8 +24,6 @@ BFA_TRC_FILE(HAL, FCPIM);
  *  BFA ITNIM Related definitions
  */
 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
-static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
-static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
 static void bfa_ioim_lm_init(struct bfa_s *bfa);
 
 #define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
@@ -60,14 +58,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
        }                                                               \
 } while (0)
 
-#define bfa_ioim_rp_wwn(__ioim)                                                \
-       (((struct bfa_fcs_rport_s *)                                    \
-        (__ioim)->itnim->rport->rport_drv)->pwwn)
-
-#define bfa_ioim_lp_wwn(__ioim)                                                \
-       ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa),                  \
-       (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn)              \
-
 #define bfa_itnim_sler_cb(__itnim) do {                                        \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);      \
@@ -77,13 +67,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
        }                                                               \
 } while (0)
 
-enum bfa_ioim_lm_status {
-       BFA_IOIM_LM_PRESENT = 1,
-       BFA_IOIM_LM_LUN_NOT_SUP = 2,
-       BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
-       BFA_IOIM_LM_LUN_NOT_RDY = 4,
-};
-
 enum bfa_ioim_lm_ua_status {
        BFA_IOIM_LM_UA_RESET = 0,
        BFA_IOIM_LM_UA_SET = 1,
@@ -145,9 +128,6 @@ enum bfa_ioim_event {
        BFA_IOIM_SM_TMDONE      = 16,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_HWFAIL      = 17,   /*  IOC h/w failure event */
        BFA_IOIM_SM_IOTOV       = 18,   /*  ITN offline TOV */
-       BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/*  lunmask lun not supported */
-       BFA_IOIM_SM_LM_RPL_DC = 20,     /*  lunmask report-lun data changed */
-       BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/*  lunmask lun not ready */
 };
 
 
@@ -245,9 +225,6 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
 static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
 static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
 static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
-static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
 
 /*
  * forward declaration of BFA IO state machine
@@ -445,12 +422,6 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
        bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
-       bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
-       bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
-       bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
-       bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
-       bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
-       bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
 }
 
 bfa_status_t
@@ -1580,27 +1551,6 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
                        __bfa_cb_ioim_abort, ioim);
                break;
 
-       case BFA_IOIM_SM_LM_LUN_NOT_SUP:
-               bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-               bfa_ioim_move_to_comp_q(ioim);
-               bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-                       __bfa_cb_ioim_lm_lun_not_sup, ioim);
-               break;
-
-       case BFA_IOIM_SM_LM_RPL_DC:
-               bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-               bfa_ioim_move_to_comp_q(ioim);
-               bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-                               __bfa_cb_ioim_lm_rpl_dc, ioim);
-               break;
-
-       case BFA_IOIM_SM_LM_LUN_NOT_RDY:
-               bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-               bfa_ioim_move_to_comp_q(ioim);
-               bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-                       __bfa_cb_ioim_lm_lun_not_rdy, ioim);
-               break;
-
        default:
                bfa_sm_fault(ioim->bfa, event);
        }
@@ -2160,243 +2110,6 @@ bfa_ioim_lm_init(struct bfa_s *bfa)
        }
 }
 
-/*
- * Validate LUN for LUN masking
- */
-static enum bfa_ioim_lm_status
-bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
-               struct bfa_rport_s *rp, struct scsi_lun lun)
-{
-       u8 i;
-       struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
-       struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-       struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
-
-       if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
-           (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
-               ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
-               return BFA_IOIM_LM_PRESENT;
-       }
-
-       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
-
-               if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
-                       continue;
-
-               if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
-                   scsilun_to_int((struct scsi_lun *)&lun))
-                   && (rp->rport_tag == lun_list[i].rp_tag)
-                   && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
-                                               lun_list[i].lp_tag)) {
-                       bfa_trc(ioim->bfa, lun_list[i].rp_tag);
-                       bfa_trc(ioim->bfa, lun_list[i].lp_tag);
-                       bfa_trc(ioim->bfa, scsilun_to_int(
-                               (struct scsi_lun *)&lun_list[i].lun));
-
-                       if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
-                           ((cdb->scsi_cdb[0] != INQUIRY) ||
-                           (cdb->scsi_cdb[0] != REPORT_LUNS))) {
-                               lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
-                               return BFA_IOIM_LM_RPL_DATA_CHANGED;
-                       }
-
-                       if (cdb->scsi_cdb[0] == REPORT_LUNS)
-                               ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
-
-                       return BFA_IOIM_LM_PRESENT;
-               }
-       }
-
-       if ((cdb->scsi_cdb[0] == INQUIRY) &&
-           (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
-               ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
-               return BFA_IOIM_LM_PRESENT;
-       }
-
-       if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
-               return BFA_IOIM_LM_LUN_NOT_RDY;
-
-       return BFA_IOIM_LM_LUN_NOT_SUP;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
-{
-       return BFA_TRUE;
-}
-
-static void
-bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
-               int buf_lun_cnt)
-{
-       struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
-       struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
-       struct scsi_lun lun;
-       int i, j;
-
-       bfa_trc(ioim->bfa, buf_lun_cnt);
-       for (j = 0; j < buf_lun_cnt; j++) {
-               lun = *((struct scsi_lun *)(lun_data + j));
-               for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
-                       if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
-                               continue;
-                       if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
-                           (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
-                           (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
-                               == scsilun_to_int((struct scsi_lun *)&lun))) {
-                               lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
-                               break;
-                       }
-               } /* next lun in mask DB */
-       } /* next lun in buf */
-}
-
-static int
-bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
-               struct scsi_report_luns_data_s *rl)
-{
-       struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-       struct scatterlist *sg = scsi_sglist(cmnd);
-       struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
-       struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
-       int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
-       int lun_across_sg_bytes, bytes_from_next_buf;
-       u64     last_lun, temp_last_lun;
-
-       /* fetch luns from the first sg element */
-       bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
-                       (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
-
-       /* fetch luns from multiple sg elements */
-       scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
-               if (sgeid == 0) {
-                       prev_sg_len = sg_dma_len(sg);
-                       prev_rl_data = (struct scsi_lun *)
-                                       phys_to_virt(sg_dma_address(sg));
-                       continue;
-               }
-
-               /* if the buf is having more data */
-               lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
-               if (lun_across_sg_bytes) {
-                       bfa_trc(ioim->bfa, lun_across_sg_bytes);
-                       bfa_stats(ioim->itnim, lm_lun_across_sg);
-                       bytes_from_next_buf = sizeof(struct scsi_lun) -
-                                             lun_across_sg_bytes;
-
-                       /* from next buf take higher bytes */
-                       temp_last_lun = *((u64 *)
-                                         phys_to_virt(sg_dma_address(sg)));
-                       last_lun |= temp_last_lun >>
-                                   (lun_across_sg_bytes * BITS_PER_BYTE);
-
-                       /* from prev buf take higher bytes */
-                       temp_last_lun = *((u64 *)(prev_rl_data +
-                                         (prev_sg_len - lun_across_sg_bytes)));
-                       temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
-                       last_lun = last_lun | (temp_last_lun <<
-                                  (bytes_from_next_buf * BITS_PER_BYTE));
-
-                       bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
-               } else
-                       bytes_from_next_buf = 0;
-
-               *pgdlen += sg_dma_len(sg);
-               prev_sg_len = sg_dma_len(sg);
-               prev_rl_data = (struct scsi_lun *)
-                               phys_to_virt(sg_dma_address(sg));
-               bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
-                               bytes_from_next_buf,
-                               sg_dma_len(sg) / sizeof(struct scsi_lun));
-       }
-
-       /* update the report luns data - based on fetched luns */
-       sg = scsi_sglist(cmnd);
-       base_rl_data = (struct scsi_lun *)rl->lun;
-       base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
-       for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
-               if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
-                       base_rl_data[j] = lun_list[i].lun;
-                       lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
-                       j++;
-                       lun_fetched_cnt++;
-               }
-
-               if (j > base_count) {
-                       j = 0;
-                       sg = sg_next(sg);
-                       base_rl_data = (struct scsi_lun *)
-                                       phys_to_virt(sg_dma_address(sg));
-                       base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
-               }
-       }
-
-       bfa_trc(ioim->bfa, lun_fetched_cnt);
-       return lun_fetched_cnt;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
-{
-       struct scsi_inquiry_data_s *inq;
-       struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
-
-       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
-       inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
-
-       bfa_trc(ioim->bfa, inq->device_type);
-       inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
-       return 0;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
-{
-       struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-       struct scatterlist *sg = scsi_sglist(cmnd);
-       struct bfi_ioim_rsp_s *m;
-       struct scsi_report_luns_data_s *rl = NULL;
-       int lun_count = 0, lun_fetched_cnt = 0;
-       u32 residue, pgdlen = 0;
-
-       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
-       if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
-               return BFA_TRUE;
-
-       m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
-       if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
-               return BFA_TRUE;
-
-       pgdlen = sg_dma_len(sg);
-       bfa_trc(ioim->bfa, pgdlen);
-       rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
-       lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
-       lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
-
-       if (lun_count == lun_fetched_cnt)
-               return BFA_TRUE;
-
-       bfa_trc(ioim->bfa, lun_count);
-       bfa_trc(ioim->bfa, lun_fetched_cnt);
-       bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
-
-       if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
-               rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
-                                     sizeof(struct scsi_lun);
-       else
-               bfa_stats(ioim->itnim, lm_small_buf_addresidue);
-
-       bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
-       bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
-
-       residue = be32_to_cpu(m->residue);
-       residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
-       bfa_stats(ioim->itnim, lm_wire_residue_changed);
-       m->residue = be32_to_cpu(residue);
-       bfa_trc(ioim->bfa, ioim->nsges);
-       return BFA_FALSE;
-}
-
 static void
 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
 {
@@ -2454,83 +2167,6 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
                          m->scsi_status, sns_len, snsinfo, residue);
 }
 
-static void
-__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
-{
-       struct bfa_ioim_s *ioim = cbarg;
-       int sns_len = 0xD;
-       u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
-       struct scsi_sense_s *snsinfo;
-
-       if (!complete) {
-               bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-               return;
-       }
-
-       snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
-                                       ioim->fcpim->fcp, ioim->iotag);
-       snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
-       snsinfo->add_sense_length = 0xa;
-       snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
-       snsinfo->sense_key = ILLEGAL_REQUEST;
-       bfa_trc(ioim->bfa, residue);
-       bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
-                       SCSI_STATUS_CHECK_CONDITION, sns_len,
-                       (u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
-{
-       struct bfa_ioim_s *ioim = cbarg;
-       int sns_len = 0xD;
-       u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
-       struct scsi_sense_s *snsinfo;
-
-       if (!complete) {
-               bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-               return;
-       }
-
-       snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
-                                                      ioim->iotag);
-       snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
-       snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
-       snsinfo->asc = SCSI_ASC_TOCC;
-       snsinfo->add_sense_length = 0x6;
-       snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
-       bfa_trc(ioim->bfa, residue);
-       bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
-                       SCSI_STATUS_CHECK_CONDITION, sns_len,
-                       (u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
-{
-       struct bfa_ioim_s *ioim = cbarg;
-       int sns_len = 0xD;
-       u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
-       struct scsi_sense_s *snsinfo;
-
-       if (!complete) {
-               bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-               return;
-       }
-
-       snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
-                                       ioim->fcpim->fcp, ioim->iotag);
-       snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
-       snsinfo->add_sense_length = 0xa;
-       snsinfo->sense_key = NOT_READY;
-       snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
-       snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
-       bfa_trc(ioim->bfa, residue);
-       bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
-                       SCSI_STATUS_CHECK_CONDITION, sns_len,
-                       (u8 *)snsinfo, residue);
-}
-
 void
 bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
                        u16 rp_tag, u8 lp_tag)
@@ -2647,7 +2283,8 @@ bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
        if (port) {
                *pwwn = port->port_cfg.pwwn;
                rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
-               rp = rp_fcs->bfa_rport;
+               if (rp_fcs)
+                       rp = rp_fcs->bfa_rport;
        }
 
        lunm_list = bfa_get_lun_mask_list(bfa);
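
Both lunmask hunks add the same guard: bfa_fcs_lport_get_rport_by_pwwn() can return NULL when the remote port is not found, and the old code dereferenced the result unconditionally. A minimal sketch of the guarded lookup (types and lookup stubbed):

#include <stddef.h>
#include <stdio.h>

struct rport_fcs { int bfa_rport; };

/* Stub for bfa_fcs_lport_get_rport_by_pwwn(), which can return NULL. */
static struct rport_fcs *lookup_rport(int found)
{
        static struct rport_fcs r = { 42 };
        return found ? &r : NULL;
}

int main(void)
{
        struct rport_fcs *rp_fcs = lookup_rport(0);
        int rp = 0;

        if (rp_fcs)             /* the guard both hunks add */
                rp = rp_fcs->bfa_rport;
        printf("rp = %d\n", rp);
        return 0;
}
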
@@ -2715,7 +2352,8 @@ bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
                if (port) {
                        *pwwn = port->port_cfg.pwwn;
                        rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
-                       rp = rp_fcs->bfa_rport;
+                       if (rp_fcs)
+                               rp = rp_fcs->bfa_rport;
                }
        }
 
@@ -2757,7 +2395,6 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
                return;
        }
 
-       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
        bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
                          0, 0, NULL, 0);
 }
@@ -2773,7 +2410,6 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
                return;
        }
 
-       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
        bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
                          0, 0, NULL, 0);
 }
@@ -2788,7 +2424,6 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
                return;
        }
 
-       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
        bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
 }
 
@@ -3132,7 +2767,6 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
                ioim->bfa     = fcpim->bfa;
                ioim->fcpim   = fcpim;
                ioim->iosp    = iosp;
-               ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
                INIT_LIST_HEAD(&ioim->sgpg_q);
                bfa_reqq_winit(&ioim->iosp->reqq_wait,
                                   bfa_ioim_qresume, ioim);
@@ -3170,7 +2804,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                        evt = BFA_IOIM_SM_DONE;
                else
                        evt = BFA_IOIM_SM_COMP;
-               ioim->proc_rsp_data(ioim);
                break;
 
        case BFI_IOIM_STS_TIMEDOUT:
@@ -3206,7 +2839,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                if (rsp->abort_tag != ioim->abort_tag) {
                        bfa_trc(ioim->bfa, rsp->abort_tag);
                        bfa_trc(ioim->bfa, ioim->abort_tag);
-                       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
                        return;
                }
 
@@ -3225,7 +2857,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                WARN_ON(1);
        }
 
-       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
        bfa_sm_send_event(ioim, evt);
 }
 
@@ -3244,15 +2875,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 
        bfa_ioim_cb_profile_comp(fcpim, ioim);
 
-       if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED)  {
-               bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
-               return;
-       }
-
-       if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
-               bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
-       else
-               bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
+       bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
 }
 
 /*
@@ -3364,35 +2987,6 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
 void
 bfa_ioim_start(struct bfa_ioim_s *ioim)
 {
-       struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-       struct bfa_lps_s        *lps;
-       enum bfa_ioim_lm_status status;
-       struct scsi_lun scsilun;
-
-       if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
-               lps = BFA_IOIM_TO_LPS(ioim);
-               int_to_scsilun(cmnd->device->lun, &scsilun);
-               status = bfa_ioim_lm_check(ioim, lps,
-                               ioim->itnim->rport, scsilun);
-               if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
-                       bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
-                       bfa_stats(ioim->itnim, lm_lun_not_rdy);
-                       return;
-               }
-
-               if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
-                       bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
-                       bfa_stats(ioim->itnim, lm_lun_not_sup);
-                       return;
-               }
-
-               if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
-                       bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
-                       bfa_stats(ioim->itnim, lm_rpl_data_changed);
-                       return;
-               }
-       }
-
        bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
 
        /*
index 1080bcb81cb73a2caf409d800da3b78deed8234f..36f26da80f76c1c496ec20bc1e42ae93e878df11 100644 (file)
@@ -110,7 +110,6 @@ struct bfad_ioim_s;
 struct bfad_tskim_s;
 
 typedef void    (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
-typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
 
 struct bfa_fcpim_s {
        struct bfa_s            *bfa;
@@ -124,7 +123,6 @@ struct bfa_fcpim_s {
        u32                     path_tov;
        u16                     q_depth;
        u8                      reqq;           /*  Request queue to be used */
-       u8                      lun_masking_pending;
        struct list_head        itnim_q;        /*  queue of active itnim */
        struct list_head        ioim_resfree_q; /*  IOs waiting for f/w */
        struct list_head        ioim_comp_q;    /*  IO global comp Q    */
@@ -181,7 +179,6 @@ struct bfa_ioim_s {
        u8                      reqq;           /*  Request queue for I/O */
        u8                      mode;           /*  IO is passthrough or not */
        u64                     start_time;     /*  IO's Profile start val */
-       bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
 };
 
 struct bfa_ioim_sp_s {
@@ -261,10 +258,6 @@ struct bfa_itnim_s {
        (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET;              \
 } while (0)
 
-#define BFA_IOIM_TO_LPS(__ioim)                \
-       BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa),      \
-               __ioim->itnim->rport->rport_info.lp_tag)
-
 static inline bfa_boolean_t
 bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 {
index 95adb86d3769d477bdbf0b652aa704621eca665b..b52cbb6bcd5a3b6b4c7623753df79630892686ea 100644 (file)
@@ -582,11 +582,6 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
 #define BFA_LP_TAG_INVALID     0xff
 void   bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
 void   bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
-bfa_boolean_t  bfa_rport_lunmask_active(struct bfa_rport_s *rp);
-wwn_t  bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
-struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
-                                        wwn_t *lpwwn, wwn_t rpwwn);
-void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
 
 /*
  * bfa fcxp API functions
index 66fb72531b34caab0323797761d68ac2ab0e6fbc..404fd10ddb21cd6b89821694807e402d30e3bf21 100644 (file)
@@ -674,6 +674,7 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
 
        spin_lock_irqsave(&bfad->bfad_lock, flags);
        bfa_fcs_vport_start(&vport->fcs_vport);
+       list_add_tail(&vport->list_entry, &bfad->vport_list);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
        return BFA_STATUS_OK;
@@ -1404,6 +1405,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
        bfad->ref_count = 0;
        bfad->pport.bfad = bfad;
        INIT_LIST_HEAD(&bfad->pbc_vport_list);
+       INIT_LIST_HEAD(&bfad->vport_list);
 
        /* Setup the debugfs node for this bfad */
        if (bfa_debugfs_enable)
index 9d95844ab463ededc29b22e417d413e912b7cc48..1938fe0473e99b9aa24a5ee6e50a4e6fe9e9ac4d 100644 (file)
@@ -491,7 +491,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
 
 free_scsi_host:
        bfad_scsi_host_free(bfad, im_port);
-
+       list_del(&vport->list_entry);
        kfree(vport);
 
        return 0;
index 06fc00caeb41f725750a1ddafd44b5731e7eaa4a..530de2b1200a20c58b0e88a49c299ed97c2d1126 100644 (file)
@@ -2394,6 +2394,21 @@ bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
        return 0;
 }
 
+/* Function to reset the LUN SCAN mode */
+static void
+bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
+{
+       struct bfad_im_port_s *pport_im = bfad->pport.im_port;
+       struct bfad_vport_s *vport = NULL;
+
+       /* Set the scsi device LUN SCAN flags for base port */
+       bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
+
+       /* Set the scsi device LUN SCAN flags for the vports */
+       list_for_each_entry(vport, &bfad->vport_list, list_entry)
+               bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
+}
+
 int
 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
 {
@@ -2401,11 +2416,17 @@ bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
        unsigned long   flags;
 
        spin_lock_irqsave(&bfad->bfad_lock, flags);
-       if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+       if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
                iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
-       else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+               /* Set the LUN Scanning mode to be Sequential scan */
+               if (iocmd->status == BFA_STATUS_OK)
+                       bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
+       } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
                iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
-       else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+               /* Set the LUN Scanning mode to default REPORT_LUNS scan */
+               if (iocmd->status == BFA_STATUS_OK)
+                       bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
+       } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
                iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        return 0;
index 5e19a5f820ec6d515773ec942ff4ba1d9184608e..dc5b9d99c4505f1356f7e0bed16d05bd992285ef 100644 (file)
@@ -43,6 +43,7 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_devinfo.h>
 
 #include "bfa_modules.h"
 #include "bfa_fcs.h"
@@ -227,6 +228,7 @@ struct bfad_s {
        struct list_head        active_aen_q;
        struct bfa_aen_entry_s  aen_list[BFA_AEN_MAX_ENTRY];
        spinlock_t              bfad_aen_spinlock;
+       struct list_head        vport_list;
 };
 
 /* BFAD state machine events */
index e5db649e8eb757dbe79241645c024734d76dd21a..3153923f5b6027f1c16d806e14092e1df5356218 100644 (file)
@@ -917,6 +917,37 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
        return NULL;
 }
 
+/*
+ * Invoked from the SCSI Host Template slave_alloc() entry point. Queries
+ * the LUN Mask database to check whether this LUN needs to be made
+ * visible to the SCSI mid-layer.
+ *
+ * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
+ * Returns -ENXIO to tell the SCSI mid-layer not to add this LUN.
+ */
+static int
+bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
+                                 struct fc_rport *rport)
+{
+       struct bfad_itnim_data_s *itnim_data =
+                               (struct bfad_itnim_data_s *) rport->dd_data;
+       struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
+       struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
+       struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
+       int i = 0, ret = -ENXIO;
+
+       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+               if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
+                   scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
+                   lun_list[i].rp_tag == bfa_rport->rport_tag &&
+                   lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
+                       ret = BFA_STATUS_OK;
+                       break;
+               }
+       }
+       return ret;
+}
+
 /*
  * Scsi_Host template entry slave_alloc
  */
@@ -924,10 +955,33 @@ static int
 bfad_im_slave_alloc(struct scsi_device *sdev)
 {
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+       struct bfad_itnim_data_s *itnim_data =
+                               (struct bfad_itnim_data_s *) rport->dd_data;
+       struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
 
        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;
 
+       if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
+               /*
+                * We should not mask LUN 0, since that would leave the
+                * SCSI mid-layer with no LUN/TARGET and nothing to scan.
+                */
+               if (sdev->lun == 0) {
+                       sdev->sdev_bflags |= BLIST_NOREPORTLUN |
+                                            BLIST_SPARSELUN;
+                       goto done;
+               }
+
+               /*
+                * Query the LUN Mask configuration to decide whether to
+                * expose this LUN to the SCSI mid-layer or to mask it.
+                */
+               if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
+                                                       BFA_STATUS_OK)
+                       return -ENXIO;
+       }
+done:
        sdev->hostdata = rport->dd_data;
 
        return 0;
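
The slave_alloc() hunk above reduces LUN-mask visibility to a linear scan for an active (lun, rport tag, lport tag) tuple. A minimal standalone sketch of that decision, assuming a simplified table entry (struct lun_mask_entry below is hypothetical, not the driver's bfa_lun_mask_s):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LUN_MASK_CFG 16            /* table size is an assumption */
#define LUN_MASK_ACTIVE  1

struct lun_mask_entry {                /* hypothetical, simplified */
        int      state;
        uint64_t lun;
        uint16_t rp_tag;
        uint8_t  lp_tag;
};

/* True if (lun, rp_tag, lp_tag) has an active entry in the table;
 * the driver maps "false" to -ENXIO so the LUN stays hidden. */
static bool lun_is_visible(const struct lun_mask_entry *tbl,
                           uint64_t lun, uint16_t rp_tag, uint8_t lp_tag)
{
        for (int i = 0; i < MAX_LUN_MASK_CFG; i++)
                if (tbl[i].state == LUN_MASK_ACTIVE &&
                    tbl[i].lun == lun &&
                    tbl[i].rp_tag == rp_tag &&
                    tbl[i].lp_tag == lp_tag)
                        return true;
        return false;
}

int main(void)
{
        struct lun_mask_entry tbl[MAX_LUN_MASK_CFG] = {
                { LUN_MASK_ACTIVE, 1, 7, 2 },
        };

        printf("%d\n", lun_is_visible(tbl, 1, 7, 2));  /* 1: exposed */
        printf("%d\n", lun_is_visible(tbl, 2, 7, 2));  /* 0: masked  */
        return 0;
}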
@@ -1037,6 +1091,8 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
            && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
                itnim->scsi_tgt_id = fc_rport->scsi_target_id;
 
+       itnim->channel = fc_rport->channel;
+
        return;
 }
 
index 004b6cf848d943288934452237c1cfa2ef8b8cfd..0814367ef101a1c075c0cfd4f5a52bc34dea920d 100644 (file)
@@ -91,6 +91,7 @@ struct bfad_itnim_s {
        struct fc_rport *fc_rport;
        struct bfa_itnim_s *bfa_itnim;
        u16        scsi_tgt_id;
+       u16        channel;
        u16        queue_work;
        unsigned long   last_ramp_up_time;
        unsigned long   last_queue_full_time;
@@ -166,4 +167,30 @@ irqreturn_t bfad_intx(int irq, void *dev_id);
 int bfad_im_bsg_request(struct fc_bsg_job *job);
 int bfad_im_bsg_timeout(struct fc_bsg_job *job);
 
+/*
+ * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
+ * SCSI mid-layer to choose the LUN scanning mode: REPORT_LUNS vs.
+ * sequential scan.
+ *
+ * Internally iterates over all the ITNIMs that are part of the im_port and
+ * sets the sdev_bflags for the scsi_device associated with LUN #0.
+ */
+#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do {          \
+       struct scsi_device *__sdev = NULL;                              \
+       struct bfad_itnim_s *__itnim = NULL;                            \
+       u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;           \
+       list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list), \
+                           list_entry) {                               \
+               __sdev = scsi_device_lookup((__im_port)->shost,         \
+                                           __itnim->channel,           \
+                                           __itnim->scsi_tgt_id, 0);   \
+               if (__sdev) {                                           \
+                       if ((__lunmask_cfg) == BFA_TRUE)                \
+                               __sdev->sdev_bflags |= scan_flags;      \
+                       else                                            \
+                               __sdev->sdev_bflags &= ~scan_flags;     \
+                       scsi_device_put(__sdev);                        \
+               }                                                       \
+       }                                                               \
+} while (0)
+
 #endif
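
The effect of bfad_reset_sdev_bflags() on each mapped target's LUN 0 is a set-or-clear of two BLIST flags. A minimal userspace model of that toggle (the flag values below are illustrative; the real BLIST_* definitions live in scsi_devinfo.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real BLIST_* flags come from
 * include/scsi/scsi_devinfo.h. */
#define BLIST_NOREPORTLUN (1u << 0)
#define BLIST_SPARSELUN   (1u << 1)

static uint32_t reset_sdev_bflags(uint32_t bflags, int lunmask_on)
{
        uint32_t scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;

        if (lunmask_on)
                return bflags | scan_flags;   /* force sequential scan */
        return bflags & ~scan_flags;          /* back to REPORT_LUNS   */
}

int main(void)
{
        uint32_t f = 0;

        f = reset_sdev_bflags(f, 1);
        printf("enabled:  0x%x\n", f);        /* 0x3 */
        f = reset_sdev_bflags(f, 0);
        printf("disabled: 0x%x\n", f);        /* 0x0 */
        return 0;
}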
index c5360ffb4bed35ae9bd06fa1a0195f2517b82778..d3ff9cd40234f5d5ea478020ecbf0a04bac1503e 100644 (file)
@@ -1868,8 +1868,9 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
 
        tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
        if (!tdata->skb) {
-               pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
-                       cdev->skb_tx_rsvd, headroom, opcode);
+               struct cxgbi_sock *csk = cconn->cep->csk;
+               struct net_device *ndev = cdev->ports[csk->port_id];
+               ndev->stats.tx_dropped++;
                return -ENOMEM;
        }
 
index 4ef021291a4d06d2ecd4340f5ba1c35658a5c9d1..04c5cea47a2258a2156f692a6441253db31a8506 100644 (file)
@@ -466,6 +466,11 @@ static int alua_check_sense(struct scsi_device *sdev,
                         * Power On, Reset, or Bus Device Reset, just retry.
                         */
                        return ADD_TO_MLQUEUE;
+               if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
+                       /*
+                        * Mode Parameters Changed
+                        */
+                       return ADD_TO_MLQUEUE;
                if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
                        /*
                         * ALUA state changed
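
The alua_check_sense() hunk extends a common pattern: on UNIT ATTENTION, match known (ASC, ASCQ) pairs and requeue the command instead of failing it. A compact standalone sketch of that dispatch, with a local result enum standing in for the SCSI mid-layer's disposition codes:

#include <stdio.h>

enum disposition { FAIL_IO, ADD_TO_MLQUEUE };  /* local stand-ins */

/* Requeue transient unit attentions; 0x2a/0x01 is the case the
 * hunk above adds. */
static enum disposition check_unit_attention(unsigned char asc,
                                             unsigned char ascq)
{
        if (asc == 0x29)                      /* power on, reset, ... */
                return ADD_TO_MLQUEUE;
        if (asc == 0x2a && ascq == 0x01)      /* mode params changed  */
                return ADD_TO_MLQUEUE;
        if (asc == 0x2a && ascq == 0x06)      /* ALUA state changed   */
                return ADD_TO_MLQUEUE;
        return FAIL_IO;
}

int main(void)
{
        printf("%d\n", check_unit_attention(0x2a, 0x01));  /* 1: retry */
        return 0;
}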
index 841ebf4a6788fc3895d5ddd31b8a67f7ce49ab55..53a31c753cb1e682ead92268901bc966845a1996 100644 (file)
@@ -953,6 +953,8 @@ static int __init rdac_init(void)
        if (!kmpath_rdacd) {
                scsi_unregister_device_handler(&rdac_dh);
                printk(KERN_ERR "kmpath_rdacd creation failed.\n");
+
+               r = -EINVAL;
        }
 done:
        return r;
index 8d67467dd9cec100f52b51803fbe943192421a58..e9599600aa230b8b6315c405730352f99e6a7d9a 100644 (file)
@@ -58,7 +58,11 @@ module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "     \
                 "Direct Data Placement (DDP).");
 
-DEFINE_MUTEX(fcoe_config_mutex);
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+static DEFINE_MUTEX(fcoe_config_mutex);
 
 static struct workqueue_struct *fcoe_wq;
 
@@ -67,8 +71,8 @@ static DECLARE_COMPLETION(fcoe_flush_completion);
 
 /* fcoe host list */
 /* must only by accessed under the RTNL mutex */
-LIST_HEAD(fcoe_hostlist);
-DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
+static LIST_HEAD(fcoe_hostlist);
+static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
 
 /* Function Prototypes */
 static int fcoe_reset(struct Scsi_Host *);
@@ -157,7 +161,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
        .lport_set_port_id = fcoe_set_port_id,
 };
 
-struct fc_function_template fcoe_nport_fc_functions = {
+static struct fc_function_template fcoe_nport_fc_functions = {
        .show_host_node_name = 1,
        .show_host_port_name = 1,
        .show_host_supported_classes = 1,
@@ -197,7 +201,7 @@ struct fc_function_template fcoe_nport_fc_functions = {
        .bsg_request = fc_lport_bsg_request,
 };
 
-struct fc_function_template fcoe_vport_fc_functions = {
+static struct fc_function_template fcoe_vport_fc_functions = {
        .show_host_node_name = 1,
        .show_host_port_name = 1,
        .show_host_supported_classes = 1,
@@ -433,7 +437,7 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
  *
  * Caller must be holding the RTNL mutex
  */
-void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 {
        struct net_device *netdev = fcoe->netdev;
        struct fcoe_ctlr *fip = &fcoe->ctlr;
@@ -748,7 +752,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
  *
  * Returns: True for read types I/O, otherwise returns false.
  */
-bool fcoe_oem_match(struct fc_frame *fp)
+static bool fcoe_oem_match(struct fc_frame *fp)
 {
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        struct fcp_cmnd *fcp;
@@ -756,11 +760,12 @@ bool fcoe_oem_match(struct fc_frame *fp)
        if (fc_fcp_is_read(fr_fsp(fp)) &&
            (fr_fsp(fp)->data_len > fcoe_ddp_min))
                return true;
-       else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
+       else if ((fr_fsp(fp) == NULL) &&
+                (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
+                (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
                fcp = fc_frame_payload_get(fp, sizeof(*fcp));
-               if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
-                   fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
-                   (fcp->fc_flags & FCP_CFL_WRDATA))
+               if ((fcp->fc_flags & FCP_CFL_WRDATA) &&
+                   (ntohl(fcp->fc_dl) > fcoe_ddp_min))
                        return true;
        }
        return false;
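
After this change fcoe_oem_match() claims two cases for the offload exchange manager: large reads, and unsolicited FCP write commands whose burst length exceeds ddp_min. A condensed predicate sketch with the frame fields flattened into a hypothetical plain struct:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DDP_MIN 4096u  /* illustrative threshold, not the module default */

struct frame_info {            /* hypothetical flattening of the frame */
        bool     is_read;      /* fc_fcp_is_read() on the local fsp    */
        bool     unsol_cmd;    /* FC_RCTL_DD_UNSOL_CMD, rx_id unknown  */
        bool     write_data;   /* FCP_CFL_WRDATA                       */
        uint32_t data_len;     /* data_len or fc_dl                    */
};

static bool oem_match(const struct frame_info *f)
{
        if (f->is_read && f->data_len > DDP_MIN)
                return true;
        return f->unsol_cmd && f->write_data && f->data_len > DDP_MIN;
}

int main(void)
{
        struct frame_info wr = { false, true, true, 65536 };

        printf("%d\n", oem_match(&wr));  /* 1: claimed by offload EM */
        return 0;
}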
@@ -1106,7 +1111,7 @@ static int __init fcoe_if_init(void)
  *
  * Returns: 0 on success
  */
-int __exit fcoe_if_exit(void)
+static int __exit fcoe_if_exit(void)
 {
        fc_release_transport(fcoe_nport_scsi_transport);
        fc_release_transport(fcoe_vport_scsi_transport);
@@ -1295,7 +1300,7 @@ static inline unsigned int fcoe_select_cpu(void)
  *
  * Returns: 0 for success
  */
-int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
+static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
             struct packet_type *ptype, struct net_device *olddev)
 {
        struct fc_lport *lport;
@@ -1451,7 +1456,7 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
  *
  * Return: 0 for success
  */
-int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
+static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 {
        int wlen;
        u32 crc;
@@ -1671,8 +1676,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
                        skb->dev ? skb->dev->name : "<NULL>");
 
        port = lport_priv(lport);
-       if (skb_is_nonlinear(skb))
-               skb_linearize(skb);     /* not ideal */
+       skb_linearize(skb); /* the skb_is_nonlinear() check is done inside skb_linearize() */
 
        /*
         * Frame length checks and setting up the header pointers
@@ -1728,7 +1732,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
  *
  * Return: 0 for success
  */
-int fcoe_percpu_receive_thread(void *arg)
+static int fcoe_percpu_receive_thread(void *arg)
 {
        struct fcoe_percpu_s *p = arg;
        struct sk_buff *skb;
@@ -2146,7 +2150,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
  * Returns: 0 if the ethtool query was successful
  *          -1 if the ethtool query failed
  */
-int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_link_speed_update(struct fc_lport *lport)
 {
        struct net_device *netdev = fcoe_netdev(lport);
        struct ethtool_cmd ecmd;
@@ -2180,7 +2184,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
  * Returns: 0 if link is UP and OK, -1 if not
  *
  */
-int fcoe_link_ok(struct fc_lport *lport)
+static int fcoe_link_ok(struct fc_lport *lport)
 {
        struct net_device *netdev = fcoe_netdev(lport);
 
@@ -2200,7 +2204,7 @@ int fcoe_link_ok(struct fc_lport *lport)
  * there no packets that will be handled by the lport, but also that any
  * threads already handling packet have returned.
  */
-void fcoe_percpu_clean(struct fc_lport *lport)
+static void fcoe_percpu_clean(struct fc_lport *lport)
 {
        struct fcoe_percpu_s *pp;
        struct fcoe_rcv_info *fr;
@@ -2251,7 +2255,7 @@ void fcoe_percpu_clean(struct fc_lport *lport)
  *
  * Returns: Always 0 (return value required by FC transport template)
  */
-int fcoe_reset(struct Scsi_Host *shost)
+static int fcoe_reset(struct Scsi_Host *shost)
 {
        struct fc_lport *lport = shost_priv(shost);
        struct fcoe_port *port = lport_priv(lport);
index 6c6884bcf84004e7f792ccccc71bf9728cc24095..bcc89e63949573173e20fac339765d72633b96de 100644 (file)
@@ -40,9 +40,7 @@
 #define FCOE_MIN_XID           0x0000  /* the min xid supported by fcoe_sw */
 #define FCOE_MAX_XID           0x0FFF  /* the max xid supported by fcoe_sw */
 
-unsigned int fcoe_debug_logging;
-module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+extern unsigned int fcoe_debug_logging;
 
 #define FCOE_LOGGING       0x01 /* General logging, not categorized */
 #define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
index 5140f5d0fd6be610f5038368fdec6627be4e9cea..b96962c394492604cd2fc2a73ce80731db28882d 100644 (file)
@@ -4271,7 +4271,9 @@ static void stop_controller_lockup_detector(struct ctlr_info *h)
        remove_ctlr_from_lockup_detector_list(h);
        /* If the list of ctlr's to monitor is empty, stop the thread */
        if (list_empty(&hpsa_ctlr_list)) {
+               spin_unlock_irqrestore(&lockup_detector_lock, flags);
                kthread_stop(hpsa_lockup_detector);
+               spin_lock_irqsave(&lockup_detector_lock, flags);
                hpsa_lockup_detector = NULL;
        }
        spin_unlock_irqrestore(&lockup_detector_lock, flags);
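
kthread_stop() blocks until the thread exits, so it must not be called with the spinlock held; the fix drops and retakes the lock around the call. A minimal pthread model of the same discipline (pthread_join plays the role of the blocking kthread_stop):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t detector;
static int have_detector;

static void *detector_fn(void *arg)
{
        (void)arg;
        return NULL;
}

/* Drop the lock around the blocking join (kthread_stop in the driver),
 * then retake it before clearing the shared handle. */
static void stop_detector(void)
{
        pthread_mutex_lock(&lock);
        if (have_detector) {
                pthread_mutex_unlock(&lock);
                pthread_join(detector, NULL);     /* may block/sleep */
                pthread_mutex_lock(&lock);
                have_detector = 0;
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_create(&detector, NULL, detector_fn, NULL);
        have_detector = 1;
        stop_detector();
        puts("stopped");
        return 0;
}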
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
deleted file mode 100644 (file)
index 5f54461..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-# Makefile for create_fw
-#
-CC=gcc
-CFLAGS=-c -Wall -O2 -g
-LDFLAGS=
-SOURCES=create_fw.c
-OBJECTS=$(SOURCES:.cpp=.o)
-EXECUTABLE=create_fw
-
-all: $(SOURCES) $(EXECUTABLE)
-
-$(EXECUTABLE): $(OBJECTS)
-       $(CC) $(LDFLAGS) $(OBJECTS) -o $@
-
-.c.o:
-       $(CC) $(CFLAGS) $< -O $@
-
-clean:
-       rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
deleted file mode 100644 (file)
index 8056d2b..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-This defines the temporary binary blow we are to pass to the SCU
-driver to emulate the binary firmware that we will eventually be
-able to access via NVRAM on the SCU controller.
-
-The current size of the binary blob is expected to be 149 bytes or larger
-
-Header Types:
-0x1: Phy Masks
-0x2: Phy Gens
-0x3: SAS Addrs
-0xff: End of Data
-
-ID string - u8[12]: "#SCU MAGIC#\0"
-Version - u8: 1
-SubVersion - u8: 0
-
-Header Type - u8: 0x1
-Size - u8: 8
-Phy Mask - u32[8]
-
-Header Type - u8: 0x2
-Size - u8: 8
-Phy Gen - u32[8]
-
-Header Type - u8: 0x3
-Size - u8: 8
-Sas Addr - u64[8]
-
-Header Type - u8: 0xf
-
-
-==============================================================================
-
-Place isci_firmware.bin in /lib/firmware
-Be sure to recreate the initramfs image to include the firmware.
-
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
deleted file mode 100644 (file)
index c7a2887..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <string.h>
-#include <errno.h>
-#include <asm/types.h>
-#include <strings.h>
-#include <stdint.h>
-
-#include "create_fw.h"
-#include "../probe_roms.h"
-
-int write_blob(struct isci_orom *isci_orom)
-{
-       FILE *fd;
-       int err;
-       size_t count;
-
-       fd = fopen(blob_name, "w+");
-       if (!fd) {
-               perror("Open file for write failed");
-               fclose(fd);
-               return -EIO;
-       }
-
-       count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
-       if (count != 1) {
-               perror("Write data failed");
-               fclose(fd);
-               return -EIO;
-       }
-
-       fclose(fd);
-
-       return 0;
-}
-
-void set_binary_values(struct isci_orom *isci_orom)
-{
-       int ctrl_idx, phy_idx, port_idx;
-
-       /* setting OROM signature */
-       strncpy(isci_orom->hdr.signature, sig, strlen(sig));
-       isci_orom->hdr.version = version;
-       isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
-       isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
-       isci_orom->hdr.num_elements = num_elements;
-
-       for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
-               isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
-               isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
-                       max_num_concurrent_dev_spin_up;
-               isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
-                       enable_ssc;
-
-               for (port_idx = 0; port_idx < 4; port_idx++)
-                       isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
-                               phy_mask[ctrl_idx][port_idx];
-
-               for (phy_idx = 0; phy_idx < 4; phy_idx++) {
-                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
-                               (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
-                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
-                               (__u32)(sas_addr[ctrl_idx][phy_idx]);
-
-                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
-                               afe_tx_amp_control0;
-                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
-                               afe_tx_amp_control1;
-                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
-                               afe_tx_amp_control2;
-                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
-                               afe_tx_amp_control3;
-               }
-       }
-}
-
-int main(void)
-{
-       int err;
-       struct isci_orom *isci_orom;
-
-       isci_orom = malloc(sizeof(struct isci_orom));
-       memset(isci_orom, 0, sizeof(struct isci_orom));
-
-       set_binary_values(isci_orom);
-
-       err = write_blob(isci_orom);
-       if (err < 0) {
-               free(isci_orom);
-               return err;
-       }
-
-       free(isci_orom);
-       return 0;
-}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
deleted file mode 100644 (file)
index 5f29882..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef _CREATE_FW_H_
-#define _CREATE_FW_H_
-#include "../probe_roms.h"
-
-
-/* we are configuring for 2 SCUs */
-static const int num_elements = 2;
-
-/*
- * For all defined arrays:
- * elements 0-3 are for SCU0, ports 0-3
- * elements 4-7 are for SCU1, ports 0-3
- *
- * valid configurations for one SCU are:
- *  P0  P1  P2  P3
- * ----------------
- * 0xF,0x0,0x0,0x0 # 1 x4 port
- * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
- *                 # ports
- * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
- *                 # port
- * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
- * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
- *
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value assigned to UNINIT_PARAM (255).
- */
-
-/* discovery mode type (port auto config mode by default ) */
-
-/*
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value "0000000000000000". SAS address of zero's is
- * considered invalid and will not be used.
- */
-#ifdef MPC
-static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
-                                    {1, 2, 4, 8} };
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
-                                                    0x5FCFFFFFF0000002ULL,
-                                                    0x5FCFFFFFF0000003ULL,
-                                                    0x5FCFFFFFF0000004ULL },
-                                                  { 0x5FCFFFFFF0000005ULL,
-                                                    0x5FCFFFFFF0000006ULL,
-                                                    0x5FCFFFFFF0000007ULL,
-                                                    0x5FCFFFFFF0000008ULL } };
-#else  /* APC (default) */
-static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4];
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
-                                                    0x5FCFFFFF00000001ULL,
-                                                    0x5FCFFFFF00000001ULL,
-                                                    0x5FCFFFFF00000001ULL },
-                                                  { 0x5FCFFFFF00000002ULL,
-                                                    0x5FCFFFFF00000002ULL,
-                                                    0x5FCFFFFF00000002ULL,
-                                                    0x5FCFFFFF00000002ULL } };
-#endif
-
-/* Maximum number of concurrent device spin up */
-static const int max_num_concurrent_dev_spin_up = 1;
-
-/* enable of ssc operation */
-static const int enable_ssc;
-
-/* AFE_TX_AMP_CONTROL */
-static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
-static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
-static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
-static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
-
-static const char blob_name[] = "isci_firmware.bin";
-static const char sig[] = "ISCUOEMB";
-static const unsigned char version = 0x10;
-
-#endif
index e7fe9c4c85b84d7098850eb16288caaae8b20578..1a65d6514237dd59ac24c8deb89f1afce27a4e3d 100644 (file)
@@ -899,7 +899,8 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
                         */
                        if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
                            (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
-                           (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+                           (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+                           (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
                                is_controller_start_complete = false;
                                break;
                        }
@@ -1666,6 +1667,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
        /* Default to no SSC operation. */
        ihost->oem_parameters.controller.do_enable_ssc = false;
 
+       /* Default to short cables on all phys. */
+       ihost->oem_parameters.controller.cable_selection_mask = 0;
+
        /* Initialize all of the port parameter information to narrow ports. */
        for (index = 0; index < SCI_MAX_PORTS; index++) {
                ihost->oem_parameters.ports[index].phy_mask = 0;
@@ -1673,8 +1677,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
 
        /* Initialize all of the phy parameter information. */
        for (index = 0; index < SCI_MAX_PHYS; index++) {
-               /* Default to 6G (i.e. Gen 3) for now. */
-               ihost->user_parameters.phys[index].max_speed_generation = 3;
+               /* Default to 3G (i.e. Gen 2). */
+               ihost->user_parameters.phys[index].max_speed_generation =
+                       SCIC_SDS_PARM_GEN2_SPEED;
 
                /* the frequencies cannot be 0 */
                ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
@@ -1694,7 +1699,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
        ihost->user_parameters.ssp_inactivity_timeout = 5;
        ihost->user_parameters.stp_max_occupancy_timeout = 5;
        ihost->user_parameters.ssp_max_occupancy_timeout = 20;
-       ihost->user_parameters.no_outbound_task_timeout = 20;
+       ihost->user_parameters.no_outbound_task_timeout = 2;
 }
 
 static void controller_timeout(unsigned long data)
@@ -1759,7 +1764,7 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
        return sci_controller_reset(ihost);
 }
 
-int sci_oem_parameters_validate(struct sci_oem_params *oem)
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
 {
        int i;
 
@@ -1791,18 +1796,61 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem)
            oem->controller.max_concurr_spin_up < 1)
                return -EINVAL;
 
+       if (oem->controller.do_enable_ssc) {
+               if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
+                       return -EINVAL;
+
+               if (version >= ISCI_ROM_VER_1_1) {
+                       u8 test = oem->controller.ssc_sata_tx_spread_level;
+
+                       switch (test) {
+                       case 0:
+                       case 2:
+                       case 3:
+                       case 6:
+                       case 7:
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+
+                       test = oem->controller.ssc_sas_tx_spread_level;
+                       if (oem->controller.ssc_sas_tx_type == 0) {
+                               switch (test) {
+                               case 0:
+                               case 2:
+                               case 3:
+                                       break;
+                               default:
+                                       return -EINVAL;
+                               }
+                       } else if (oem->controller.ssc_sas_tx_type == 1) {
+                               switch (test) {
+                               case 0:
+                               case 3:
+                               case 6:
+                                       break;
+                               default:
+                                       return -EINVAL;
+                               }
+                       }
+               }
+       }
+
        return 0;
 }
 
 static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
 {
        u32 state = ihost->sm.current_state_id;
+       struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
 
        if (state == SCIC_RESET ||
            state == SCIC_INITIALIZING ||
            state == SCIC_INITIALIZED) {
 
-               if (sci_oem_parameters_validate(&ihost->oem_parameters))
+               if (sci_oem_parameters_validate(&ihost->oem_parameters,
+                                               pci_info->orom->hdr.version))
                        return SCI_FAILURE_INVALID_PARAMETER_VALUE;
 
                return SCI_SUCCESS;
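
The version-gated SSC validation accepts only fixed sets of spread levels per TX type. A standalone encoding of those whitelists, with the values taken directly from the hunk above:

#include <stdbool.h>
#include <stdio.h>

static bool valid_sata_spread(unsigned char v)
{
        return v == 0 || v == 2 || v == 3 || v == 6 || v == 7;
}

static bool valid_sas_spread(unsigned char tx_type, unsigned char v)
{
        if (tx_type == 0)
                return v == 0 || v == 2 || v == 3;
        if (tx_type == 1)
                return v == 0 || v == 3 || v == 6;
        return true;  /* other TX types are not constrained here */
}

int main(void)
{
        printf("%d %d\n", valid_sata_spread(6), valid_sas_spread(1, 2));
        /* prints: 1 0 */
        return 0;
}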
@@ -1857,6 +1905,31 @@ static void power_control_timeout(unsigned long data)
                ihost->power_control.phys_waiting--;
                ihost->power_control.phys_granted_power++;
                sci_phy_consume_power_handler(iphy);
+
+               if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+                       u8 j;
+
+                       for (j = 0; j < SCI_MAX_PHYS; j++) {
+                               struct isci_phy *requester = ihost->power_control.requesters[j];
+
+                               /*
+                                * Search the power_control queue to see if there are other phys
+                                * attached to the same remote device. If found, take all of
+                                * them out of await_sas_power state.
+                                */
+                               if (requester != NULL && requester != iphy) {
+                                       u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
+                                                         iphy->frame_rcvd.iaf.sas_addr,
+                                                         sizeof(requester->frame_rcvd.iaf.sas_addr));
+
+                                       if (other == 0) {
+                                               ihost->power_control.requesters[j] = NULL;
+                                               ihost->power_control.phys_waiting--;
+                                               sci_phy_consume_power_handler(requester);
+                                       }
+                               }
+                       }
+               }
        }
 
        /*
@@ -1891,9 +1964,34 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
                ihost->power_control.timer_started = true;
 
        } else {
-               /* Add the phy in the waiting list */
-               ihost->power_control.requesters[iphy->phy_index] = iphy;
-               ihost->power_control.phys_waiting++;
+               /*
+                * If a phy attached to the same SAS address as this phy is
+                * already in the READY state, this phy does not need to wait.
+                */
+               u8 i;
+               struct isci_phy *current_phy;
+
+               for (i = 0; i < SCI_MAX_PHYS; i++) {
+                       u8 other;
+                       current_phy = &ihost->phys[i];
+
+                       other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
+                                      iphy->frame_rcvd.iaf.sas_addr,
+                                      sizeof(current_phy->frame_rcvd.iaf.sas_addr));
+
+                       if (current_phy->sm.current_state_id == SCI_PHY_READY &&
+                           current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
+                           other == 0) {
+                               sci_phy_consume_power_handler(iphy);
+                               break;
+                       }
+               }
+
+               if (i == SCI_MAX_PHYS) {
+                       /* Add the phy in the waiting list */
+                       ihost->power_control.requesters[iphy->phy_index] = iphy;
+                       ihost->power_control.phys_waiting++;
+               }
        }
 }
 
@@ -1908,162 +2006,250 @@ void sci_controller_power_control_queue_remove(struct isci_host *ihost,
        ihost->power_control.requesters[iphy->phy_index] = NULL;
 }
 
+static int is_long_cable(int phy, unsigned char selection_byte)
+{
+       return !!(selection_byte & (1 << phy));
+}
+
+static int is_medium_cable(int phy, unsigned char selection_byte)
+{
+       return !!(selection_byte & (1 << (phy + 4)));
+}
+
+static enum cable_selections decode_selection_byte(
+       int phy,
+       unsigned char selection_byte)
+{
+       return ((selection_byte & (1 << phy)) ? 1 : 0)
+               + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
+}
+
+static unsigned char *to_cable_select(struct isci_host *ihost)
+{
+       if (is_cable_select_overridden())
+               return ((unsigned char *)&cable_selection_override)
+                       + ihost->id;
+       else
+               return &ihost->oem_parameters.controller.cable_selection_mask;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
+{
+       return decode_selection_byte(phy, *to_cable_select(ihost));
+}
+
+char *lookup_cable_names(enum cable_selections selection)
+{
+       static char *cable_names[] = {
+               [short_cable]     = "short",
+               [long_cable]      = "long",
+               [medium_cable]    = "medium",
+               [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
+       };
+       return (selection <= undefined_cable) ? cable_names[selection]
+                                             : cable_names[undefined_cable];
+}
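
Per phy, bit `phy` of the selection byte flags a long cable and bit `phy + 4` a medium one, so decode_selection_byte() yields 0 (short), 1 (long), 2 (medium), or 3 (undefined). A standalone check of that encoding (decode() below restates the same arithmetic with shifts):

#include <stdio.h>

enum cable_selections {
        short_cable = 0, long_cable = 1, medium_cable = 2, undefined_cable = 3
};

static enum cable_selections decode(int phy, unsigned char sel)
{
        return ((sel >> phy) & 1) + 2 * ((sel >> (phy + 4)) & 1);
}

int main(void)
{
        unsigned char sel = 0x12;  /* bit 1: phy 1 long; bit 4: phy 0 medium */

        printf("phy0=%d phy1=%d phy2=%d\n",
               decode(0, sel), decode(1, sel), decode(2, sel));
        /* prints: phy0=2 phy1=1 phy2=0 */
        return 0;
}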
+
 #define AFE_REGISTER_WRITE_DELAY 10
 
-/* Initialize the AFE for this phy index. We need to read the AFE setup from
- * the OEM parameters
- */
 static void sci_controller_afe_initialization(struct isci_host *ihost)
 {
+       struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
        const struct sci_oem_params *oem = &ihost->oem_parameters;
        struct pci_dev *pdev = ihost->pdev;
        u32 afe_status;
        u32 phy_id;
+       unsigned char cable_selection_mask = *to_cable_select(ihost);
 
        /* Clear DFX Status registers */
-       writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+       writel(0x0081000f, &afe->afe_dfx_master_control0);
        udelay(AFE_REGISTER_WRITE_DELAY);
 
-       if (is_b0(pdev)) {
+       if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
                /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
-                * Timer, PM Stagger Timer */
-               writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+                * Timer, PM Stagger Timer
+                */
+               writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
                udelay(AFE_REGISTER_WRITE_DELAY);
        }
 
        /* Configure bias currents to normal */
        if (is_a2(pdev))
-               writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+               writel(0x00005A00, &afe->afe_bias_control);
        else if (is_b0(pdev) || is_c0(pdev))
-               writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+               writel(0x00005F00, &afe->afe_bias_control);
+       else if (is_c1(pdev))
+               writel(0x00005500, &afe->afe_bias_control);
 
        udelay(AFE_REGISTER_WRITE_DELAY);
 
        /* Enable PLL */
-       if (is_b0(pdev) || is_c0(pdev))
-               writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
-       else
-               writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+       if (is_a2(pdev))
+               writel(0x80040908, &afe->afe_pll_control0);
+       else if (is_b0(pdev) || is_c0(pdev))
+               writel(0x80040A08, &afe->afe_pll_control0);
+       else if (is_c1(pdev)) {
+               writel(0x80000B08, &afe->afe_pll_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+               writel(0x00000B08, &afe->afe_pll_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+               writel(0x80000B08, &afe->afe_pll_control0);
+       }
 
        udelay(AFE_REGISTER_WRITE_DELAY);
 
        /* Wait for the PLL to lock */
        do {
-               afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+               afe_status = readl(&afe->afe_common_block_status);
                udelay(AFE_REGISTER_WRITE_DELAY);
        } while ((afe_status & 0x00001000) == 0);
 
        if (is_a2(pdev)) {
-               /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
-               writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+               /* Shorten SAS SNW lock time (RxLock timer value from 76
+                * us to 50 us)
+                */
+               writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
                udelay(AFE_REGISTER_WRITE_DELAY);
        }
 
        for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+               struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
                const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+               int cable_length_long =
+                       is_long_cable(phy_id, cable_selection_mask);
+               int cable_length_medium =
+                       is_medium_cable(phy_id, cable_selection_mask);
 
-               if (is_b0(pdev)) {
-                        /* Configure transmitter SSC parameters */
-                       writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+               if (is_a2(pdev)) {
+                       /* All defaults, except the Receive Word
+                        * Alignment/Comma Detect Enable....(0xe800)
+                        */
+                       writel(0x00004512, &xcvr->afe_xcvr_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(0x0050100F, &xcvr->afe_xcvr_control1);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               } else if (is_b0(pdev)) {
+                       /* Configure transmitter SSC parameters */
+                       writel(0x00030000, &xcvr->afe_tx_ssc_control);
                        udelay(AFE_REGISTER_WRITE_DELAY);
                } else if (is_c0(pdev)) {
-                        /* Configure transmitter SSC parameters */
-                       writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+                       /* Configure transmitter SSC parameters */
+                       writel(0x00010202, &xcvr->afe_tx_ssc_control);
                        udelay(AFE_REGISTER_WRITE_DELAY);
 
-                       /*
-                        * All defaults, except the Receive Word Alignament/Comma Detect
-                        * Enable....(0xe800) */
-                       writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+                       /* All defaults, except the Receive Word
+                        * Alignment/Comma Detect Enable....(0xe800)
+                        */
+                       writel(0x00014500, &xcvr->afe_xcvr_control0);
                        udelay(AFE_REGISTER_WRITE_DELAY);
-               } else {
-                       /*
-                        * All defaults, except the Receive Word Alignament/Comma Detect
-                        * Enable....(0xe800) */
-                       writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+               } else if (is_c1(pdev)) {
+                       /* Configure transmitter SSC parameters */
+                       writel(0x00010202, &xcvr->afe_tx_ssc_control);
                        udelay(AFE_REGISTER_WRITE_DELAY);
 
-                       writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+                       /* All defaults, except the Receive Word
+                        * Alignment/Comma Detect Enable....(0xe800)
+                        */
+                       writel(0x0001C500, &xcvr->afe_xcvr_control0);
                        udelay(AFE_REGISTER_WRITE_DELAY);
                }
 
-               /*
-                * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
-                * & increase TX int & ext bias 20%....(0xe85c) */
+               /* Power up TX and RX out from power down (PWRDNTX and
+                * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
+                */
                if (is_a2(pdev))
-                       writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       writel(0x000003F0, &xcvr->afe_channel_control);
                else if (is_b0(pdev)) {
-                        /* Power down TX and RX (PWRDNTX and PWRDNRX) */
-                       writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       writel(0x000003D7, &xcvr->afe_channel_control);
                        udelay(AFE_REGISTER_WRITE_DELAY);
 
-                       /*
-                        * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
-                        * & increase TX int & ext bias 20%....(0xe85c) */
-                       writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
-               } else {
-                       writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       writel(0x000003D4, &xcvr->afe_channel_control);
+               } else if (is_c0(pdev)) {
+                       writel(0x000001E7, &xcvr->afe_channel_control);
                        udelay(AFE_REGISTER_WRITE_DELAY);
 
-                       /*
-                        * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
-                        * & increase TX int & ext bias 20%....(0xe85c) */
-                       writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       writel(0x000001E4, &xcvr->afe_channel_control);
+               } else if (is_c1(pdev)) {
+                       writel(cable_length_long ? 0x000002F7 : 0x000001F7,
+                              &xcvr->afe_channel_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(cable_length_long ? 0x000002F4 : 0x000001F4,
+                              &xcvr->afe_channel_control);
                }
                udelay(AFE_REGISTER_WRITE_DELAY);
 
                if (is_a2(pdev)) {
                        /* Enable TX equalization (0xe824) */
-                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+                       writel(0x00040000, &xcvr->afe_tx_control);
                        udelay(AFE_REGISTER_WRITE_DELAY);
                }
 
-               /*
-                * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
-                * RDD=0x0(RX Detect Enabled) ....(0xe800) */
-               writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+               if (is_a2(pdev) || is_b0(pdev))
+                       /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
+                        * TPD=0x0(TX Power On), RDD=0x0(RX Detect
+                        * Enabled) ....(0xe800)
+                        */
+                       writel(0x00004100, &xcvr->afe_xcvr_control0);
+               else if (is_c0(pdev))
+                       writel(0x00014100, &xcvr->afe_xcvr_control0);
+               else if (is_c1(pdev))
+                       writel(0x0001C100, &xcvr->afe_xcvr_control0);
                udelay(AFE_REGISTER_WRITE_DELAY);
 
                /* Leave DFE/FFE on */
                if (is_a2(pdev))
-                       writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+                       writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
                else if (is_b0(pdev)) {
-                       writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+                       writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
                        udelay(AFE_REGISTER_WRITE_DELAY);
                        /* Enable TX equalization (0xe824) */
-                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
-               } else {
-                       writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+                       writel(0x00040000, &xcvr->afe_tx_control);
+               } else if (is_c0(pdev)) {
+                       writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /* Enable TX equalization (0xe824) */
+                       writel(0x00040000, &xcvr->afe_tx_control);
+               } else if (is_c1(pdev)) {
+                       writel(cable_length_long ? 0x01500C0C :
+                              cable_length_medium ? 0x01400C0D : 0x02400C0D,
+                              &xcvr->afe_xcvr_control1);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
                        udelay(AFE_REGISTER_WRITE_DELAY);
 
-                       writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+                       writel(cable_length_long ? 0x33091C1F :
+                              cable_length_medium ? 0x3315181F : 0x2B17161F,
+                              &xcvr->afe_rx_ssc_control0);
                        udelay(AFE_REGISTER_WRITE_DELAY);
 
                        /* Enable TX equalization (0xe824) */
-                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+                       writel(0x00040000, &xcvr->afe_tx_control);
                }
 
                udelay(AFE_REGISTER_WRITE_DELAY);
 
-               writel(oem_phy->afe_tx_amp_control0,
-                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+               writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
                udelay(AFE_REGISTER_WRITE_DELAY);
 
-               writel(oem_phy->afe_tx_amp_control1,
-                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+               writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
                udelay(AFE_REGISTER_WRITE_DELAY);
 
-               writel(oem_phy->afe_tx_amp_control2,
-                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+               writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
                udelay(AFE_REGISTER_WRITE_DELAY);
 
-               writel(oem_phy->afe_tx_amp_control3,
-                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+               writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
                udelay(AFE_REGISTER_WRITE_DELAY);
        }
 
        /* Transfer control to the PEs */
-       writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+       writel(0x00010f00, &afe->afe_dfx_master_control0);
        udelay(AFE_REGISTER_WRITE_DELAY);
 }
 
index 646051afd3cbd07e2ab6761a18edef7355cf39f6..5477f0fa8233198e691945a08c14eba074be2f21 100644 (file)
@@ -435,11 +435,36 @@ static inline bool is_b0(struct pci_dev *pdev)
 
 static inline bool is_c0(struct pci_dev *pdev)
 {
-       if (pdev->revision >= 5)
+       if (pdev->revision == 5)
                return true;
        return false;
 }
 
+static inline bool is_c1(struct pci_dev *pdev)
+{
+       if (pdev->revision >= 6)
+               return true;
+       return false;
+}
+
+enum cable_selections {
+       short_cable     = 0,
+       long_cable      = 1,
+       medium_cable    = 2,
+       undefined_cable = 3
+};
+
+#define CABLE_OVERRIDE_DISABLED (0x10000)
+
+static inline int is_cable_select_overridden(void)
+{
+       return cable_selection_override < CABLE_OVERRIDE_DISABLED;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
+void validate_cable_selections(struct isci_host *ihost);
+char *lookup_cable_names(enum cable_selections);
+
 /* set hw control for 'activity', even though active enclosures seem to drive
  * the activity led on their own.  Skip setting FSENG control on 'status' due
  * to unexpected operation and 'error' due to not being a supported automatic
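The three cable helpers above are declared in this hunk but not defined here. As a rough sketch only (not the in-tree implementation), a decoder consistent with the cable_selection_mask bit layout documented later in this diff (bits 7-4 flag a phy as medium, bits 3-0 as long, neither bit set means short) could look like this; decode_cable_mask is an illustrative name:

/* Sketch under the documented bit layout; a phy with both its medium
 * and long bits set falls into the "undefined" case called out in the
 * OEM parameter comment.
 */
static enum cable_selections decode_cable_mask(u8 mask, int phy)
{
	bool medium = mask & (1 << (phy + 4));
	bool is_long = mask & (1 << phy);

	if (medium && is_long)
		return undefined_cable;
	if (medium)
		return medium_cable;
	if (is_long)
		return long_cable;
	return short_cable;
}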
index a97edabcb85a29e96e1741171fcce2ea4c7cb4c0..17c4c2c89c2e5d9fe56f7e3b08010fc5ded3d71b 100644 (file)
@@ -65,7 +65,7 @@
 #include "probe_roms.h"
 
 #define MAJ 1
-#define MIN 0
+#define MIN 1
 #define BUILD 0
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
        __stringify(BUILD)
@@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
 
 /* linux isci specific settings */
 
-unsigned char no_outbound_task_to = 20;
+unsigned char no_outbound_task_to = 2;
 module_param(no_outbound_task_to, byte, 0);
 MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
 
@@ -114,7 +114,7 @@ u16 stp_inactive_to = 5;
 module_param(stp_inactive_to, ushort, 0);
 MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
 
-unsigned char phy_gen = 3;
+unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
 module_param(phy_gen, byte, 0);
 MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
 
@@ -122,6 +122,14 @@ unsigned char max_concurr_spinup;
 module_param(max_concurr_spinup, byte, 0);
 MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
 
+uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
+module_param(cable_selection_override, uint, 0);
+
+MODULE_PARM_DESC(cable_selection_override,
+                "This field indicates the length of the SAS/SATA cable between "
+                "host and device. If any bits > 15 are set (the default), "
+                "platform defaults are used");
+
 static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
@@ -412,6 +420,14 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
                return NULL;
        isci_host->shost = shost;
 
+       dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
+                "{%s, %s, %s, %s}\n",
+                (is_cable_select_overridden() ? "* " : ""), isci_host->id,
+                lookup_cable_names(decode_cable_selection(isci_host, 3)),
+                lookup_cable_names(decode_cable_selection(isci_host, 2)),
+                lookup_cable_names(decode_cable_selection(isci_host, 1)),
+                lookup_cable_names(decode_cable_selection(isci_host, 0)));
+
        err = isci_host_init(isci_host);
        if (err)
                goto err_shost;
@@ -466,7 +482,8 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
                orom = isci_request_oprom(pdev);
 
        for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
-               if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+               if (sci_oem_parameters_validate(&orom->ctrl[i],
+                                               orom->hdr.version)) {
                        dev_warn(&pdev->dev,
                                 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
                        devm_kfree(&pdev->dev, orom);
index 8efeb6b083213bb20d15ca3a9ca0a954515ec23f..234ab46fce3346948300d9ae95364e33103792ee 100644 (file)
@@ -480,6 +480,7 @@ extern u16 ssp_inactive_to;
 extern u16 stp_inactive_to;
 extern unsigned char phy_gen;
 extern unsigned char max_concurr_spinup;
+extern uint cable_selection_override;
 
 irqreturn_t isci_msix_isr(int vec, void *data);
 irqreturn_t isci_intx_isr(int vec, void *data);
index 35f50c2183e18a4c6460b07db081bb7d82ff6beb..fe18acfd6eb3ad9b62a775665a1dbedecb32e011 100644 (file)
@@ -91,22 +91,23 @@ sci_phy_transport_layer_initialization(struct isci_phy *iphy,
 
 static enum sci_status
 sci_phy_link_layer_initialization(struct isci_phy *iphy,
-                                 struct scu_link_layer_registers __iomem *reg)
+                                 struct scu_link_layer_registers __iomem *llr)
 {
        struct isci_host *ihost = iphy->owning_port->owning_controller;
+       struct sci_phy_user_params *phy_user;
+       struct sci_phy_oem_params *phy_oem;
        int phy_idx = iphy->phy_index;
-       struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
-       struct sci_phy_oem_params *phy_oem =
-               &ihost->oem_parameters.phys[phy_idx];
-       u32 phy_configuration;
        struct sci_phy_cap phy_cap;
+       u32 phy_configuration;
        u32 parity_check = 0;
        u32 parity_count = 0;
        u32 llctl, link_rate;
        u32 clksm_value = 0;
        u32 sp_timeouts = 0;
 
-       iphy->link_layer_registers = reg;
+       phy_user = &ihost->user_parameters.phys[phy_idx];
+       phy_oem = &ihost->oem_parameters.phys[phy_idx];
+       iphy->link_layer_registers = llr;
 
        /* Set our IDENTIFY frame data */
        #define SCI_END_DEVICE 0x01
@@ -116,32 +117,26 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
               SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
               SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
               SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
-              &iphy->link_layer_registers->transmit_identification);
+              &llr->transmit_identification);
 
        /* Write the device SAS Address */
-       writel(0xFEDCBA98,
-              &iphy->link_layer_registers->sas_device_name_high);
-       writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+       writel(0xFEDCBA98, &llr->sas_device_name_high);
+       writel(phy_idx, &llr->sas_device_name_low);
 
        /* Write the source SAS Address */
-       writel(phy_oem->sas_address.high,
-               &iphy->link_layer_registers->source_sas_address_high);
-       writel(phy_oem->sas_address.low,
-               &iphy->link_layer_registers->source_sas_address_low);
+       writel(phy_oem->sas_address.high, &llr->source_sas_address_high);
+       writel(phy_oem->sas_address.low, &llr->source_sas_address_low);
 
        /* Clear and Set the PHY Identifier */
-       writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
-       writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
-               &iphy->link_layer_registers->identify_frame_phy_id);
+       writel(0, &llr->identify_frame_phy_id);
+       writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id);
 
        /* Change the initial state of the phy configuration register */
-       phy_configuration =
-               readl(&iphy->link_layer_registers->phy_configuration);
+       phy_configuration = readl(&llr->phy_configuration);
 
        /* Hold OOB state machine in reset */
        phy_configuration |=  SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
-       writel(phy_configuration,
-               &iphy->link_layer_registers->phy_configuration);
+       writel(phy_configuration, &llr->phy_configuration);
 
        /* Configure the SNW capabilities */
        phy_cap.all = 0;
@@ -149,15 +144,64 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
        phy_cap.gen3_no_ssc = 1;
        phy_cap.gen2_no_ssc = 1;
        phy_cap.gen1_no_ssc = 1;
-       if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+       if (ihost->oem_parameters.controller.do_enable_ssc) {
+               struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
+               struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx];
+               struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+               bool en_sas = false;
+               bool en_sata = false;
+               u32 sas_type = 0;
+               u32 sata_spread = 0x2;
+               u32 sas_spread = 0x2;
+
                phy_cap.gen3_ssc = 1;
                phy_cap.gen2_ssc = 1;
                phy_cap.gen1_ssc = 1;
+
+               if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1)
+                       en_sas = en_sata = true;
+               else {
+                       sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level;
+                       sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level;
+
+                       if (sata_spread)
+                               en_sata = true;
+
+                       if (sas_spread) {
+                               en_sas = true;
+                               sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type;
+                       }
+
+               }
+
+               if (en_sas) {
+                       u32 reg;
+
+                       reg = readl(&xcvr->afe_xcvr_control0);
+                       reg |= (0x00100000 | (sas_type << 19));
+                       writel(reg, &xcvr->afe_xcvr_control0);
+
+                       reg = readl(&xcvr->afe_tx_ssc_control);
+                       reg |= sas_spread << 8;
+                       writel(reg, &xcvr->afe_tx_ssc_control);
+               }
+
+               if (en_sata) {
+                       u32 reg;
+
+                       reg = readl(&xcvr->afe_tx_ssc_control);
+                       reg |= sata_spread;
+                       writel(reg, &xcvr->afe_tx_ssc_control);
+
+                       reg = readl(&llr->stp_control);
+                       reg |= 1 << 12;
+                       writel(reg, &llr->stp_control);
+               }
        }
 
-       /*
-        * The SAS specification indicates that the phy_capabilities that
-        * are transmitted shall have an even parity.  Calculate the parity. */
+       /* The SAS specification indicates that the phy_capabilities that
+        * are transmitted shall have an even parity.  Calculate the parity.
+        */
        parity_check = phy_cap.all;
        while (parity_check != 0) {
                if (parity_check & 0x1)
@@ -165,20 +209,20 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
                parity_check >>= 1;
        }
 
-       /*
-        * If parity indicates there are an odd number of bits set, then
-        * set the parity bit to 1 in the phy capabilities. */
+       /* If parity indicates there are an odd number of bits set, then
+        * set the parity bit to 1 in the phy capabilities.
+        */
        if ((parity_count % 2) != 0)
                phy_cap.parity = 1;
 
-       writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+       writel(phy_cap.all, &llr->phy_capabilities);
 
        /* Set the enable spinup period but disable the ability to send
         * notify enable spinup
         */
        writel(SCU_ENSPINUP_GEN_VAL(COUNT,
                        phy_user->notify_enable_spin_up_insertion_frequency),
-               &iphy->link_layer_registers->notify_enable_spinup_control);
+               &llr->notify_enable_spinup_control);
 
        /* Write the ALIGN Insertion Frequency for connected phy and
         * independent of connected state
@@ -189,11 +233,13 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
        clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
                        phy_user->align_insertion_frequency);
 
-       writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+       writel(clksm_value, &llr->clock_skew_management);
 
-       /* @todo Provide a way to write this register correctly */
-       writel(0x02108421,
-               &iphy->link_layer_registers->afe_lookup_table_control);
+       if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) {
+               writel(0x04210400, &llr->afe_lookup_table_control);
+               writel(0x020A7C05, &llr->sas_primitive_timeout);
+       } else
+               writel(0x02108421, &llr->afe_lookup_table_control);
 
        llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
                (u8)ihost->user_parameters.no_outbound_task_timeout);
@@ -210,9 +256,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
                break;
        }
        llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
-       writel(llctl, &iphy->link_layer_registers->link_layer_control);
+       writel(llctl, &llr->link_layer_control);
 
-       sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
+       sp_timeouts = readl(&llr->sas_phy_timeouts);
 
        /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
        sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
@@ -222,20 +268,23 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
         */
        sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
 
-       writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
+       writel(sp_timeouts, &llr->sas_phy_timeouts);
 
        if (is_a2(ihost->pdev)) {
-               /* Program the max ARB time for the PHY to 700us so we inter-operate with
-                * the PMC expander which shuts down PHYs if the expander PHY generates too
-                * many breaks.  This time value will guarantee that the initiator PHY will
-                * generate the break.
+               /* Program the max ARB time for the PHY to 700us so we
+                * inter-operate with the PMC expander which shuts down
+                * PHYs if the expander PHY generates too many breaks.
+                * This time value will guarantee that the initiator PHY
+                * will generate the break.
                 */
                writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
-                       &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+                      &llr->maximum_arbitration_wait_timer_timeout);
        }
 
-       /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
-       writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+       /* Disable link layer hang detection, rely on the OS timeout for
+        * I/O timeouts.
+        */
+       writel(0, &llr->link_layer_hang_detection_timeout);
 
        /* We can exit the initial state to the stopped state */
        sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
@@ -1049,24 +1098,25 @@ static void scu_link_layer_stop_protocol_engine(
        writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
 }
 
-/**
- *
- *
- * This method will start the OOB/SN state machine for this struct isci_phy object.
- */
-static void scu_link_layer_start_oob(
-       struct isci_phy *iphy)
+static void scu_link_layer_start_oob(struct isci_phy *iphy)
 {
-       u32 scu_sas_pcfg_value;
-
-       scu_sas_pcfg_value =
-               readl(&iphy->link_layer_registers->phy_configuration);
-       scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
-       scu_sas_pcfg_value &=
-               ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
-               SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
-       writel(scu_sas_pcfg_value,
-              &iphy->link_layer_registers->phy_configuration);
+       struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
+       u32 val;
+
+       /* Reset OOB sequence - start */
+       val = readl(&ll->phy_configuration);
+       val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+                SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+       writel(val, &ll->phy_configuration);
+       readl(&ll->phy_configuration); /* flush */
+       /* Reset OOB sequence - end */
+
+       /* Start OOB sequence - start */
+       val = readl(&ll->phy_configuration);
+       val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+       writel(val, &ll->phy_configuration);
+       readl(&ll->phy_configuration); /* flush */
+       /* Start OOB sequence - end */
 }
 
 /**
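The parity loop in the hunk above implements the SAS rule that the transmitted phy_capabilities word must carry even parity. For cross-checking, the same predicate can be written with the kernel's population-count helper; a minimal sketch, assuming only hweight32() from <linux/bitops.h> and leaving the parity flag's bit position to the sci_phy_cap definition:

#include <linux/bitops.h>
#include <linux/types.h>

/* True when the capabilities word has an odd number of bits set and
 * therefore needs phy_cap.parity = 1 to make the total count even.
 */
static bool phy_cap_needs_parity_bit(u32 cap)
{
	return hweight32(cap) & 1;
}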
index ac7f27749f975761a1beefc7f1b1741a5c02e0bc..7c6ac58a5c4c45c37af504f8e10bfa84cb7df3ee 100644 (file)
@@ -114,7 +114,7 @@ static u32 sci_port_get_phys(struct isci_port *iport)
  * value is returned if the specified port is not valid.  When this value is
  * returned, no data is copied to the properties output parameter.
  */
-static enum sci_status sci_port_get_properties(struct isci_port *iport,
+enum sci_status sci_port_get_properties(struct isci_port *iport,
                                                struct sci_port_properties *prop)
 {
        if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
@@ -647,19 +647,26 @@ void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
        }
 }
 
-static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
-                                 bool do_notify_user)
+static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+       sci_phy_resume(iphy);
+       iport->enabled_phy_mask |= 1 << iphy->phy_index;
+}
+
+static void sci_port_activate_phy(struct isci_port *iport,
+                                 struct isci_phy *iphy,
+                                 u8 flags)
 {
        struct isci_host *ihost = iport->owning_controller;
 
-       if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+       if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
                sci_phy_resume(iphy);
 
        iport->active_phy_mask |= 1 << iphy->phy_index;
 
        sci_controller_clear_invalid_phy(ihost, iphy);
 
-       if (do_notify_user == true)
+       if (flags & PF_NOTIFY)
                isci_port_link_up(ihost, iport, iphy);
 }
 
@@ -669,14 +676,19 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
        struct isci_host *ihost = iport->owning_controller;
 
        iport->active_phy_mask &= ~(1 << iphy->phy_index);
+       iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
        if (!iport->active_phy_mask)
                iport->last_active_phy = iphy->phy_index;
 
        iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
 
-       /* Re-assign the phy back to the LP as if it were a narrow port */
-       writel(iphy->phy_index,
-               &iport->port_pe_configuration_register[iphy->phy_index]);
+       /* Re-assign the phy back to the LP as if it were a narrow port for APC
+        * mode. For MPC mode, the phy will remain in the port.
+        */
+       if (iport->owning_controller->oem_parameters.controller.mode_type ==
+               SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
+               writel(iphy->phy_index,
+                       &iport->port_pe_configuration_register[iphy->phy_index]);
 
        if (do_notify_user == true)
                isci_port_link_down(ihost, iphy, iport);
@@ -701,18 +713,16 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
  * sci_port_general_link_up_handler - phy can be assigned to port?
  * @sci_port: the sci_port object which has a phy that has gone link up.
  * @sci_phy: This is the struct isci_phy object that has gone link up.
- * @do_notify_user: This parameter specifies whether to inform the user (via
- *    sci_port_link_up()) as to the fact that a new phy as become ready.
+ * @flags: PF_RESUME and/or PF_NOTIFY, passed on to sci_port_activate_phy()
  *
- * Determine if this phy can be assigned to this
- * port . If the phy is not a valid PHY for
- * this port then the function will notify the user. A PHY can only be
- * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in
- * the same port. none
+ * Determine if this phy can be assigned to this port. If the phy is
+ * not a valid PHY for this port then the function will notify the user.
+ * A PHY can only be part of a port if its attached SAS ADDRESS is the
+ * same as all other PHYs in the same port.
  */
 static void sci_port_general_link_up_handler(struct isci_port *iport,
-                                                 struct isci_phy *iphy,
-                                                 bool do_notify_user)
+                                            struct isci_phy *iphy,
+                                            u8 flags)
 {
        struct sci_sas_address port_sas_address;
        struct sci_sas_address phy_sas_address;
@@ -730,7 +740,7 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
            iport->active_phy_mask == 0) {
                struct sci_base_state_machine *sm = &iport->sm;
 
-               sci_port_activate_phy(iport, iphy, do_notify_user);
+               sci_port_activate_phy(iport, iphy, flags);
                if (sm->current_state_id == SCI_PORT_RESETTING)
                        port_state_machine_change(iport, SCI_PORT_READY);
        } else
@@ -781,11 +791,16 @@ bool sci_port_link_detected(
        struct isci_phy *iphy)
 {
        if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
-           (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
-           sci_port_is_wide(iport)) {
-               sci_port_invalid_link_up(iport, iphy);
-
-               return false;
+           (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
+               if (sci_port_is_wide(iport)) {
+                       sci_port_invalid_link_up(iport, iphy);
+                       return false;
+               } else {
+                       struct isci_host *ihost = iport->owning_controller;
+                       struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
+                       writel(iphy->phy_index,
+                              &dst_port->port_pe_configuration_register[iphy->phy_index]);
+               }
        }
 
        return true;
@@ -975,6 +990,13 @@ static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine
        }
 }
 
+static void scic_sds_port_ready_substate_waiting_exit(
+                                       struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+       sci_port_resume_port_task_scheduler(iport);
+}
+
 static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
 {
        u32 index;
@@ -988,13 +1010,13 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
                        writel(iport->physical_port_index,
                                &iport->port_pe_configuration_register[
                                        iport->phy_table[index]->phy_index]);
+                       if (((iport->active_phy_mask ^ iport->enabled_phy_mask) & (1 << index)) != 0)
+                               sci_port_resume_phy(iport, iport->phy_table[index]);
                }
        }
 
        sci_port_update_viit_entry(iport);
 
-       sci_port_resume_port_task_scheduler(iport);
-
        /*
         * Post the dummy task for the port so the hardware can schedule
         * io correctly
@@ -1061,20 +1083,9 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
        if (iport->active_phy_mask == 0) {
                isci_port_not_ready(ihost, iport);
 
-               port_state_machine_change(iport,
-                                         SCI_PORT_SUB_WAITING);
-       } else if (iport->started_request_count == 0)
-               port_state_machine_change(iport,
-                                         SCI_PORT_SUB_OPERATIONAL);
-}
-
-static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
-{
-       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
-
-       sci_port_suspend_port_task_scheduler(iport);
-       if (iport->ready_exit)
-               sci_port_invalidate_dummy_remote_node(iport);
+               port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
+       } else
+               port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
 }
 
 enum sci_status sci_port_start(struct isci_port *iport)
@@ -1252,7 +1263,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
                if (status != SCI_SUCCESS)
                        return status;
 
-               sci_port_general_link_up_handler(iport, iphy, true);
+               sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
                iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
                port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
 
@@ -1262,7 +1273,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
 
                if (status != SCI_SUCCESS)
                        return status;
-               sci_port_general_link_up_handler(iport, iphy, true);
+               sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
 
                /* Re-enter the configuring state since this may be the last phy in
                 * the port.
@@ -1338,13 +1349,13 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
                /* Since this is the first phy going link up for the port we
                 * can just enable it and continue
                 */
-               sci_port_activate_phy(iport, iphy, true);
+               sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
 
                port_state_machine_change(iport,
                                          SCI_PORT_SUB_OPERATIONAL);
                return SCI_SUCCESS;
        case SCI_PORT_SUB_OPERATIONAL:
-               sci_port_general_link_up_handler(iport, iphy, true);
+               sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
                return SCI_SUCCESS;
        case SCI_PORT_RESETTING:
                /* TODO We should  make  sure  that  the phy  that  has gone
@@ -1361,7 +1372,7 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
                /* In the resetting state we don't notify the user regarding
                 * link up and link down notifications.
                 */
-               sci_port_general_link_up_handler(iport, iphy, false);
+               sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
                return SCI_SUCCESS;
        default:
                dev_warn(sciport_to_dev(iport),
@@ -1584,14 +1595,14 @@ static const struct sci_base_state sci_port_state_table[] = {
        },
        [SCI_PORT_SUB_WAITING] = {
                .enter_state = sci_port_ready_substate_waiting_enter,
+               .exit_state  = scic_sds_port_ready_substate_waiting_exit,
        },
        [SCI_PORT_SUB_OPERATIONAL] = {
                .enter_state = sci_port_ready_substate_operational_enter,
                .exit_state  = sci_port_ready_substate_operational_exit
        },
        [SCI_PORT_SUB_CONFIGURING] = {
-               .enter_state = sci_port_ready_substate_configuring_enter,
-               .exit_state  = sci_port_ready_substate_configuring_exit
+               .enter_state = sci_port_ready_substate_configuring_enter
        },
        [SCI_PORT_RESETTING] = {
                .exit_state  = sci_port_resetting_state_exit
@@ -1609,6 +1620,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
        iport->logical_port_index  = SCIC_SDS_DUMMY_PORT;
        iport->physical_port_index = index;
        iport->active_phy_mask     = 0;
+       iport->enabled_phy_mask    = 0;
        iport->last_active_phy     = 0;
        iport->ready_exit          = false;
 
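In the operational-enter hunk above, the XOR of active_phy_mask and enabled_phy_mask selects exactly the phys that are port members but have not been resumed yet. A standalone illustration of that mask arithmetic, with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t active_phy_mask  = 0x0b;	/* phys 0, 1 and 3 are members */
	uint8_t enabled_phy_mask = 0x03;	/* phys 0 and 1 already resumed */
	int index;

	for (index = 0; index < 4; index++)
		if (((active_phy_mask ^ enabled_phy_mask) & (1 << index)) != 0)
			printf("phy %d still needs a resume\n", index);
	return 0;	/* prints: phy 3 still needs a resume */
}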
index cb5ffbc386038136812da1e596ca5059ff391615..08116090eb7015dbe7c4b3862748a20601e6d765 100644 (file)
@@ -63,6 +63,9 @@
 
 #define SCIC_SDS_DUMMY_PORT   0xFF
 
+#define PF_NOTIFY (1 << 0)
+#define PF_RESUME (1 << 1)
+
 struct isci_phy;
 struct isci_host;
 
@@ -83,6 +86,8 @@ enum isci_status {
  * @logical_port_index: software port index
  * @physical_port_index: hardware port index
  * @active_phy_mask: identifies phy members
+ * @enabled_phy_mask: identifies phys that have been enabled (resumed)
+ *                    and are already part of the port
  * @reserved_tag:
  * @reserved_rni: reserved for port task scheduler workaround
  * @started_request_count: reference count for outstanding commands
@@ -104,6 +109,7 @@ struct isci_port {
        u8 logical_port_index;
        u8 physical_port_index;
        u8 active_phy_mask;
+       u8 enabled_phy_mask;
        u8 last_active_phy;
        u16 reserved_rni;
        u16 reserved_tag;
@@ -250,6 +256,10 @@ bool sci_port_link_detected(
        struct isci_port *iport,
        struct isci_phy *iphy);
 
+enum sci_status sci_port_get_properties(
+       struct isci_port *iport,
+       struct sci_port_properties *prop);
+
 enum sci_status sci_port_link_up(struct isci_port *iport,
                                      struct isci_phy *iphy);
 enum sci_status sci_port_link_down(struct isci_port *iport,
index 38a99d2811411d102220a96bdf918579fb47546b..6d1e9544cbe5c03b7f84059b7651c1b9d560ffb2 100644 (file)
@@ -57,7 +57,7 @@
 
 #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT    (10)
 #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT    (10)
-#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION  (100)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION  (250)
 
 enum SCIC_SDS_APC_ACTIVITY {
        SCIC_SDS_APC_SKIP_PHY,
@@ -466,6 +466,23 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
        return sci_port_configuration_agent_validate_ports(ihost, port_agent);
 }
 
+/*
+ * This routine will restart the automatic port configuration timeout
+ * timer for the next time period. This could be caused by either a link
+ * down event or a link up event where we cannot yet tell to which port
+ * a phy belongs.
+ */
+static void sci_apc_agent_start_timer(
+       struct sci_port_configuration_agent *port_agent,
+       u32 timeout)
+{
+       if (port_agent->timer_pending)
+               sci_del_timer(&port_agent->timer);
+
+       port_agent->timer_pending = true;
+       sci_mod_timer(&port_agent->timer, timeout);
+}
+
 static void sci_apc_agent_configure_ports(struct isci_host *ihost,
                                               struct sci_port_configuration_agent *port_agent,
                                               struct isci_phy *iphy,
@@ -565,17 +582,8 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
                break;
 
        case SCIC_SDS_APC_START_TIMER:
-               /*
-                * This can occur for either a link down event, or a link
-                * up event where we cannot yet tell the port to which a
-                * phy belongs.
-                */
-               if (port_agent->timer_pending)
-                       sci_del_timer(&port_agent->timer);
-
-               port_agent->timer_pending = true;
-               sci_mod_timer(&port_agent->timer,
-                             SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+               sci_apc_agent_start_timer(port_agent,
+                                         SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
                break;
 
        case SCIC_SDS_APC_SKIP_PHY:
@@ -607,7 +615,8 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
        if (!iport) {
                /* the phy is not the part of this port */
                port_agent->phy_ready_mask |= 1 << phy_index;
-               sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+               sci_apc_agent_start_timer(port_agent,
+                                         SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
        } else {
                /* the phy is already the part of the port */
                u32 port_state = iport->sm.current_state_id;
index b5f4341de2434ead30763b4228e49ff5ee2edc67..9b8117b9d7569e7a8bda7623ec2aa3a07fcfffb6 100644 (file)
@@ -147,7 +147,7 @@ struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmw
 
        memcpy(orom, fw->data, fw->size);
 
-       if (is_c0(pdev))
+       if (is_c0(pdev) || is_c1(pdev))
                goto out;
 
        /*
index 2c75248ca326ea38e3a9256da8e966fddd516d41..bb0e9d4d97c9a8f0437e51aee3440c8ff0060004 100644 (file)
@@ -152,7 +152,7 @@ struct sci_user_parameters {
 #define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
 
 struct sci_oem_params;
-int sci_oem_parameters_validate(struct sci_oem_params *oem);
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
 
 struct isci_orom;
 struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
@@ -191,6 +191,11 @@ struct isci_oem_hdr {
                        0x1a, 0x04, 0xc6)
 #define ISCI_EFI_VAR_NAME      "RstScuO"
 
+#define ISCI_ROM_VER_1_0       0x10
+#define ISCI_ROM_VER_1_1       0x11
+#define ISCI_ROM_VER_1_3       0x13
+#define ISCI_ROM_VER_LATEST    ISCI_ROM_VER_1_3
+
 /* Allowed PORT configuration modes APC Automatic PORT configuration mode is
  * defined by the OEM configuration parameters providing no PHY_MASK parameters
  * for any PORT. i.e. There are no phys assigned to any of the ports at start.
@@ -220,8 +225,86 @@ struct sci_oem_params {
        struct {
                uint8_t mode_type;
                uint8_t max_concurr_spin_up;
-               uint8_t do_enable_ssc;
-               uint8_t reserved;
+               /*
+                * This bitfield indicates the OEM's desired default Tx
+                * Spread Spectrum Clocking (SSC) settings for SATA and SAS.
+                * NOTE: Default SSC modulation frequency is 31.5 kHz.
+                */
+               union {
+                       struct {
+                       /*
+                        * NOTE: Max spread for SATA is +0 / -5000 PPM.
+                        * Down-spreading SSC (only method allowed for SATA):
+                        *  SATA SSC Tx Disabled                    = 0x0
+                        *  SATA SSC Tx at +0 / -1419 PPM Spread    = 0x2
+                        *  SATA SSC Tx at +0 / -2129 PPM Spread    = 0x3
+                        *  SATA SSC Tx at +0 / -4257 PPM Spread    = 0x6
+                        *  SATA SSC Tx at +0 / -4967 PPM Spread    = 0x7
+                        */
+                               uint8_t ssc_sata_tx_spread_level:4;
+                       /*
+                        * SAS SSC Tx Disabled                     = 0x0
+                        *
+                        * NOTE: Max spread for SAS down-spreading +0 /
+                        *       -2300 PPM
+                        * Down-spreading SSC:
+                        *  SAS SSC Tx at +0 / -1419 PPM Spread     = 0x2
+                        *  SAS SSC Tx at +0 / -2129 PPM Spread     = 0x3
+                        *
+                        * NOTE: Max spread for SAS center-spreading +2300 /
+                        *       -2300 PPM
+                        * Center-spreading SSC:
+                        *  SAS SSC Tx at +1064 / -1064 PPM Spread  = 0x3
+                        *  SAS SSC Tx at +2129 / -2129 PPM Spread  = 0x6
+                        */
+                               uint8_t ssc_sas_tx_spread_level:3;
+                       /*
+                        * NOTE: Refer to the SSC section of the SAS 2.x
+                        * Specification for proper setting of this field.
+                        * For standard SAS Initiator SAS PHY operation it
+                        * should be 0 for Down-spreading.
+                        * SAS SSC Tx spread type:
+                        *  Down-spreading SSC      = 0
+                        *  Center-spreading SSC    = 1
+                        */
+                               uint8_t ssc_sas_tx_type:1;
+                       };
+                       uint8_t do_enable_ssc;
+               };
+               /*
+                * This field indicates the length of the SAS/SATA cable
+                * between the host and device. It is used to relate the
+                * analog parameters of the phy in the silicon to the
+                * length of the cable.
+                * Supported cable attenuation levels:
+                * "short" - up to 3m, "medium" - 3m to 6m, and "long" -
+                * more than 6m.
+                *
+                * This is a bit mask field:
+                *
+                * BIT:      (MSB) 7     6     5     4
+                * ASSIGNMENT:   <phy3><phy2><phy1><phy0>  - Medium cable
+                *                                           length assignment
+                * BIT:            3     2     1     0  (LSB)
+                * ASSIGNMENT:   <phy3><phy2><phy1><phy0>  - Long cable length
+                *                                           assignment
+                *
+                * BITS 7-4 are set when the cable length is assigned to medium
+                * BITS 3-0 are set when the cable length is assigned to long
+                *
+                * The BIT positions are clear when the cable length is
+                * assigned to short.
+                *
+                * Setting the bits for both long and medium cable length is
+                * undefined.
+                *
+                * A value of 0x84 would assign
+                *    phy3 - medium
+                *    phy2 - long
+                *    phy1 - short
+                *    phy0 - short
+                */
+               uint8_t cable_selection_mask;
        } controller;
 
        struct {
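To make the 0x84 example above concrete, here is how it would decode with the hypothetical decode_cable_mask() sketched earlier in this diff (again, an illustrative helper, not a driver symbol):

#include <assert.h>

/* 0x84 = 1000 0100b: bit 7 marks phy3 as medium and bit 2 marks phy2
 * as long; phys 1 and 0 have neither bit set and decode as short.
 */
void cable_mask_example(void)
{
	assert(decode_cable_mask(0x84, 3) == medium_cable);
	assert(decode_cable_mask(0x84, 2) == long_cable);
	assert(decode_cable_mask(0x84, 1) == short_cable);
	assert(decode_cable_mask(0x84, 0) == short_cable);
}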
index b207cd3b15a0514da4579e9e869556d6b95dc961..dd74b6ceeb823df92bd69e69a0964069f1b5011a 100644 (file)
@@ -53,6 +53,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #include <scsi/sas.h>
+#include <linux/bitops.h>
 #include "isci.h"
 #include "port.h"
 #include "remote_device.h"
@@ -1101,6 +1102,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
                                                       struct isci_remote_device *idev)
 {
        enum sci_status status;
+       struct sci_port_properties properties;
        struct domain_device *dev = idev->domain_dev;
 
        sci_remote_device_construct(iport, idev);
@@ -1110,6 +1112,11 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
         * entries will be needed to store the remote node.
         */
        idev->is_direct_attached = true;
+
+       sci_port_get_properties(iport, &properties);
+       /* Get accurate port width from port's phy mask for a DA device. */
+       idev->device_port_width = hweight32(properties.phy_mask);
+
        status = sci_controller_allocate_remote_node_context(iport->owning_controller,
                                                                  idev,
                                                                  &idev->rnc.remote_node_index);
@@ -1125,9 +1132,6 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
 
        idev->connection_rate = sci_port_get_max_allowed_speed(iport);
 
-       /* / @todo Should I assign the port width by reading all of the phys on the port? */
-       idev->device_port_width = 1;
-
        return SCI_SUCCESS;
 }
 
index 66ad3dc89498a3ab305de95bc179ecaf42afa2f3..f5a3f7d2bdab29059af48d4b333d39b91c57d3d0 100644 (file)
@@ -496,7 +496,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
                }
        }
 
-       isci_print_tmf(tmf);
+       isci_print_tmf(ihost, tmf);
 
        if (tmf->status == SCI_SUCCESS)
                ret =  TMF_RESP_FUNC_COMPLETE;
index bc78c0a41d5cac86df69d6a23af08e6dac949552..1b27b3797c6c9cce8c1160c11d43240ccc4a52fe 100644 (file)
@@ -106,7 +106,6 @@ struct isci_tmf {
        } resp;
        unsigned char lun[8];
        u16 io_tag;
-       struct isci_remote_device *device;
        enum isci_tmf_function_codes tmf_code;
        int status;
 
@@ -120,10 +119,10 @@ struct isci_tmf {
 
 };
 
-static inline void isci_print_tmf(struct isci_tmf *tmf)
+static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
 {
        if (SAS_PROTOCOL_SATA == tmf->proto)
-               dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+               dev_dbg(&ihost->pdev->dev,
                        "%s: status = %x\n"
                        "tmf->resp.d2h_fis.status = %x\n"
                        "tmf->resp.d2h_fis.error = %x\n",
@@ -132,7 +131,7 @@ static inline void isci_print_tmf(struct isci_tmf *tmf)
                        tmf->resp.d2h_fis.status,
                        tmf->resp.d2h_fis.error);
        else
-               dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+               dev_dbg(&ihost->pdev->dev,
                        "%s: status = %x\n"
                        "tmf->resp.resp_iu.data_present = %x\n"
                        "tmf->resp.resp_iu.status = %x\n"
index 7269e928824a07f93efb1e570eb136f6cbedfbeb..1d1b0c9da29ba684100515b337431ccfb8be2b08 100644 (file)
@@ -61,7 +61,7 @@ static void fc_disc_restart(struct fc_disc *);
  * Locking Note: This function expects that the lport mutex is locked before
  * calling it.
  */
-void fc_disc_stop_rports(struct fc_disc *disc)
+static void fc_disc_stop_rports(struct fc_disc *disc)
 {
        struct fc_lport *lport;
        struct fc_rport_priv *rdata;
@@ -682,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
  * fc_disc_stop() - Stop discovery for a given lport
  * @lport: The local port that discovery should stop on
  */
-void fc_disc_stop(struct fc_lport *lport)
+static void fc_disc_stop(struct fc_lport *lport)
 {
        struct fc_disc *disc = &lport->disc;
 
@@ -698,7 +698,7 @@ void fc_disc_stop(struct fc_lport *lport)
  * This function will block until discovery has been
  * completely stopped and all rports have been deleted.
  */
-void fc_disc_stop_final(struct fc_lport *lport)
+static void fc_disc_stop_final(struct fc_lport *lport)
 {
        fc_disc_stop(lport);
        lport->tt.rport_flush_queue();
index fb9161dc4ca67411f07413018562286fcaeff017..e17a28d324d04ccdf903043ae4549e04c94ccb85 100644 (file)
@@ -28,6 +28,7 @@
 #include <scsi/fc/fc_els.h>
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
+#include "fc_libfc.h"
 
 /**
  * fc_elsct_send() - Send an ELS or CT frame
index 9de9db27e87401b1f5cf244a15e9320011faa276..4d70d96fa5dc5730016a00a3c1753370ca5e980e 100644 (file)
@@ -91,7 +91,7 @@ struct fc_exch_pool {
  * It manages the allocation of exchange IDs.
  */
 struct fc_exch_mgr {
-       struct fc_exch_pool *pool;
+       struct fc_exch_pool __percpu *pool;
        mempool_t       *ep_pool;
        enum fc_class   class;
        struct kref     kref;
index 221875ec3d7c64de19c4f6404961be441fb16f34..f607314810accf03b38bee7b1f36fde6d4c2da9f 100644 (file)
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
                fsp->xfer_ddp = FC_XID_UNKNOWN;
                atomic_set(&fsp->ref_cnt, 1);
                init_timer(&fsp->timer);
+               fsp->timer.data = (unsigned long)fsp;
                INIT_LIST_HEAD(&fsp->list);
                spin_lock_init(&fsp->scsi_pkt_lock);
        }
@@ -1850,9 +1851,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
        }
        put_cpu();
 
-       init_timer(&fsp->timer);
-       fsp->timer.data = (unsigned long)fsp;
-
        /*
         * send it to the lower layer
         * if we get -1 return then put the request in the pending
index e77094a587ed62aa17e1c1479d6a1ba04ad2af06..83750ebb527f5e2841f26a06d70a063658d8603f 100644 (file)
@@ -677,7 +677,8 @@ EXPORT_SYMBOL(fc_set_mfs);
  * @lport: The local port receiving the event
  * @event: The discovery event
  */
-void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
+static void fc_lport_disc_callback(struct fc_lport *lport,
+                                  enum fc_disc_event event)
 {
        switch (event) {
        case DISC_EV_SUCCESS:
@@ -1568,7 +1569,7 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
  */
-void fc_lport_enter_flogi(struct fc_lport *lport)
+static void fc_lport_enter_flogi(struct fc_lport *lport)
 {
        struct fc_frame *fp;
 
index b9e434844a69bb93fe12e153896973a1c8df2968..83aa1efec875999d5c980ab27dcb93cfc71e734c 100644 (file)
@@ -391,7 +391,7 @@ static void fc_rport_work(struct work_struct *work)
  * If it appears we are already logged in, ADISC is used to verify
  * the setup.
  */
-int fc_rport_login(struct fc_rport_priv *rdata)
+static int fc_rport_login(struct fc_rport_priv *rdata)
 {
        mutex_lock(&rdata->rp_mutex);
 
@@ -451,7 +451,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
  */
-int fc_rport_logoff(struct fc_rport_priv *rdata)
+static int fc_rport_logoff(struct fc_rport_priv *rdata)
 {
        mutex_lock(&rdata->rp_mutex);
 
@@ -653,8 +653,8 @@ static int fc_rport_login_complete(struct fc_rport_priv *rdata,
  * @fp:            The FLOGI response frame
  * @rp_arg: The remote port that received the FLOGI response
  */
-void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
-                        void *rp_arg)
+static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+                               void *rp_arg)
 {
        struct fc_rport_priv *rdata = rp_arg;
        struct fc_lport *lport = rdata->local_port;
@@ -1520,7 +1520,7 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
  *
  * Locking Note: Called with the lport lock held.
  */
-void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
 {
        struct fc_seq_els_data els_data;
 
index 5c1776406c963f996e16aac4781fe4540112d250..15eefa1d61fd8dfbd25d696bb519a28570005454 100644 (file)
@@ -306,19 +306,22 @@ mega_query_adapter(adapter_t *adapter)
        adapter->host->sg_tablesize = adapter->sglen;
 
 
-       /* use HP firmware and bios version encoding */
+       /* use HP firmware and bios version encoding
+          Note: fw_version[0|1] and bios_version[0|1] were originally
+          shifted right 8 bits, making them zero. That 0 value is now
+          hardcoded to silence sparse warnings. */
        if (adapter->product_info.subsysvid == HP_SUBSYS_VID) {
                sprintf (adapter->fw_version, "%c%d%d.%d%d",
                         adapter->product_info.fw_version[2],
-                        adapter->product_info.fw_version[1] >> 8,
+                        0,
                         adapter->product_info.fw_version[1] & 0x0f,
-                        adapter->product_info.fw_version[0] >> 8,
+                        0,
                         adapter->product_info.fw_version[0] & 0x0f);
                sprintf (adapter->bios_version, "%c%d%d.%d%d",
                         adapter->product_info.bios_version[2],
-                        adapter->product_info.bios_version[1] >> 8,
+                        0,
                         adapter->product_info.bios_version[1] & 0x0f,
-                        adapter->product_info.bios_version[0] >> 8,
+                        0,
                         adapter->product_info.bios_version[0] & 0x0f);
        } else {
                memcpy(adapter->fw_version,
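To make the version-encoding note above concrete, a tiny standalone rendering of the HP format string with made-up version bytes (values are illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned char fw_version[4] = { 0x23, 0x11, 'A', 0 };
	char buf[16];

	/* Only the low nibble of bytes 1 and 0 contributes a digit; the
	 * high digit is the hardcoded 0 described in the note above.
	 */
	sprintf(buf, "%c%d%d.%d%d", fw_version[2],
		0, fw_version[1] & 0x0f,
		0, fw_version[0] & 0x0f);
	printf("%s\n", buf);	/* prints: A01.03 */
	return 0;
}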
index dd94c7d574fb8b8027f574657cbdc43b46fa906f..e5f416f8042d45620cab8ced05fc56738cc8764a 100644 (file)
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "00.00.06.12-rc1"
-#define MEGASAS_RELDATE                                "Oct. 5, 2011"
-#define MEGASAS_EXT_VERSION                    "Wed. Oct. 5 17:00:00 PDT 2011"
+#define MEGASAS_VERSION                                "00.00.06.14-rc1"
+#define MEGASAS_RELDATE                                "Jan. 6, 2012"
+#define MEGASAS_EXT_VERSION                    "Fri. Jan. 6 17:00:00 PDT 2012"
 
 /*
  * Device IDs
@@ -773,7 +773,6 @@ struct megasas_ctrl_info {
 
 #define MFI_OB_INTR_STATUS_MASK                        0x00000002
 #define MFI_POLL_TIMEOUT_SECS                  60
-#define MEGASAS_COMPLETION_TIMER_INTERVAL      (HZ/10)
 
 #define MFI_REPLY_1078_MESSAGE_INTERRUPT       0x80000000
 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT       0x00000001
@@ -1353,7 +1352,6 @@ struct megasas_instance {
        u32 mfiStatus;
        u32 last_seq_num;
 
-       struct timer_list io_completion_timer;
        struct list_head internal_reset_pending_q;
 
        /* Ptr to hba specific information */
index 29a994f9c4f1e6a9c0973ecd3cebe413f4937c7b..8b300be442849336d296768c69a44e126e24a40d 100644 (file)
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : v00.00.06.12-rc1
+ *  Version : v00.00.06.14-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
 #include "megaraid_sas_fusion.h"
 #include "megaraid_sas.h"
 
-/*
- * poll_mode_io:1- schedule complete completion from q cmd
- */
-static unsigned int poll_mode_io;
-module_param_named(poll_mode_io, poll_mode_io, int, 0);
-MODULE_PARM_DESC(poll_mode_io,
-       "Complete cmds from IO path, (default=0)");
-
 /*
  * Number of sectors per IO command
  * Will be set in megasas_init_mfi if user does not provide
@@ -1439,11 +1431,6 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
 
        instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
                                cmd->frame_count-1, instance->reg_set);
-       /*
-        * Check if we have pend cmds to be completed
-        */
-       if (poll_mode_io && atomic_read(&instance->fw_outstanding))
-               tasklet_schedule(&instance->isr_tasklet);
 
        return 0;
 out_return_cmd:
@@ -3370,47 +3357,6 @@ megasas_issue_init_mfi(struct megasas_instance *instance)
        return -EINVAL;
 }
 
-/**
- * megasas_start_timer - Initializes a timer object
- * @instance:          Adapter soft state
- * @timer:             timer object to be initialized
- * @fn:                        timer function
- * @interval:          time interval between timer function call
- */
-static inline void
-megasas_start_timer(struct megasas_instance *instance,
-                       struct timer_list *timer,
-                       void *fn, unsigned long interval)
-{
-       init_timer(timer);
-       timer->expires = jiffies + interval;
-       timer->data = (unsigned long)instance;
-       timer->function = fn;
-       add_timer(timer);
-}
-
-/**
- * megasas_io_completion_timer - Timer fn
- * @instance_addr:     Address of adapter soft state
- *
- * Schedules tasklet for cmd completion
- * if poll_mode_io is set
- */
-static void
-megasas_io_completion_timer(unsigned long instance_addr)
-{
-       struct megasas_instance *instance =
-                       (struct megasas_instance *)instance_addr;
-
-       if (atomic_read(&instance->fw_outstanding))
-               tasklet_schedule(&instance->isr_tasklet);
-
-       /* Restart timer */
-       if (poll_mode_io)
-               mod_timer(&instance->io_completion_timer,
-                       jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
-}
-
 static u32
 megasas_init_adapter_mfi(struct megasas_instance *instance)
 {
@@ -3638,11 +3584,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
        tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
                (unsigned long)instance);
 
-       /* Initialize the cmd completion timer */
-       if (poll_mode_io)
-               megasas_start_timer(instance, &instance->io_completion_timer,
-                               megasas_io_completion_timer,
-                               MEGASAS_COMPLETION_TIMER_INTERVAL);
        return 0;
 
 fail_init_adapter:
@@ -4369,9 +4310,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
        host = instance->host;
        instance->unload = 1;
 
-       if (poll_mode_io)
-               del_timer_sync(&instance->io_completion_timer);
-
        megasas_flush_cache(instance);
        megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
 
@@ -4511,12 +4449,6 @@ megasas_resume(struct pci_dev *pdev)
        }
 
        instance->instancet->enable_intr(instance->reg_set);
-
-       /* Initialize the cmd completion timer */
-       if (poll_mode_io)
-               megasas_start_timer(instance, &instance->io_completion_timer,
-                               megasas_io_completion_timer,
-                               MEGASAS_COMPLETION_TIMER_INTERVAL);
        instance->unload = 0;
 
        /*
@@ -4570,9 +4502,6 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
        host = instance->host;
        fusion = instance->ctrl_context;
 
-       if (poll_mode_io)
-               del_timer_sync(&instance->io_completion_timer);
-
        scsi_remove_host(instance->host);
        megasas_flush_cache(instance);
        megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4773,6 +4702,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
        memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
        cmd->frame->hdr.context = cmd->index;
        cmd->frame->hdr.pad_0 = 0;
+       cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
+                                  MFI_FRAME_SENSE64);
 
        /*
         * The management interface between applications and the fw uses
@@ -5219,60 +5150,6 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
 static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
                megasas_sysfs_set_dbg_lvl);
 
-static ssize_t
-megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
-{
-       return sprintf(buf, "%u\n", poll_mode_io);
-}
-
-static ssize_t
-megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
-                               const char *buf, size_t count)
-{
-       int retval = count;
-       int tmp = poll_mode_io;
-       int i;
-       struct megasas_instance *instance;
-
-       if (sscanf(buf, "%u", &poll_mode_io) < 1) {
-               printk(KERN_ERR "megasas: could not set poll_mode_io\n");
-               retval = -EINVAL;
-       }
-
-       /*
-        * Check if poll_mode_io is already set or is same as previous value
-        */
-       if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
-               goto out;
-
-       if (poll_mode_io) {
-               /*
-                * Start timers for all adapters
-                */
-               for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-                       instance = megasas_mgmt_info.instance[i];
-                       if (instance) {
-                               megasas_start_timer(instance,
-                                       &instance->io_completion_timer,
-                                       megasas_io_completion_timer,
-                                       MEGASAS_COMPLETION_TIMER_INTERVAL);
-                       }
-               }
-       } else {
-               /*
-                * Delete timers for all adapters
-                */
-               for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-                       instance = megasas_mgmt_info.instance[i];
-                       if (instance)
-                               del_timer_sync(&instance->io_completion_timer);
-               }
-       }
-
-out:
-       return retval;
-}
-
 static void
 megasas_aen_polling(struct work_struct *work)
 {
@@ -5502,11 +5379,6 @@ megasas_aen_polling(struct work_struct *work)
        kfree(ev);
 }
 
-
-static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
-               megasas_sysfs_show_poll_mode_io,
-               megasas_sysfs_set_poll_mode_io);
-
 /**
  * megasas_init - Driver load entry point
  */
@@ -5565,11 +5437,6 @@ static int __init megasas_init(void)
                                  &driver_attr_dbg_lvl);
        if (rval)
                goto err_dcf_dbg_lvl;
-       rval = driver_create_file(&megasas_pci_driver.driver,
-                                 &driver_attr_poll_mode_io);
-       if (rval)
-               goto err_dcf_poll_mode_io;
-
        rval = driver_create_file(&megasas_pci_driver.driver,
                                &driver_attr_support_device_change);
        if (rval)
@@ -5578,10 +5445,6 @@ static int __init megasas_init(void)
        return rval;
 
 err_dcf_support_device_change:
-       driver_remove_file(&megasas_pci_driver.driver,
-                 &driver_attr_poll_mode_io);
-
-err_dcf_poll_mode_io:
        driver_remove_file(&megasas_pci_driver.driver,
                           &driver_attr_dbg_lvl);
 err_dcf_dbg_lvl:
@@ -5606,8 +5469,6 @@ static int __init megasas_init(void)
  */
 static void __exit megasas_exit(void)
 {
-       driver_remove_file(&megasas_pci_driver.driver,
-                          &driver_attr_poll_mode_io);
        driver_remove_file(&megasas_pci_driver.driver,
                           &driver_attr_dbg_lvl);
        driver_remove_file(&megasas_pci_driver.driver,
index 5255dd688aca49b807f430ffa26dd80a41fcf42e..294abb0defa66e4b34bb8eacf9d91d534bfe144f 100644 (file)
@@ -282,7 +282,9 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
        else {
                *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
                if ((raid->level >= 5) &&
-                   (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER))
+                   ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) ||
+                    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER &&
+                     raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
                        pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
                else if (raid->level == 1) {
                        /* Get alternate Pd. */
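The widened condition above can be read more easily after a small boolean
simplification: once (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER)
is false, the device is known to be an Invader, so the repeated equality
test in the second disjunct is redundant. A behavior-equivalent sketch,
illustrative only and not part of the patch:

    /* Take an exclusive region lock for RAID level >= 5 unless the
     * controller is an Invader whose read region type is unused. */
    if ((raid->level >= 5) &&
        ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) ||
         (raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
            pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;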
index 22a3ff02e48a419d0e9353d72e53f8d13a3e41f0..bfe68545203ff9123a58f1f9ae896b3b59293811 100644 (file)
 #define QL4_SESS_RECOVERY_TMO          120     /* iSCSI session */
                                                /* recovery timeout */
 
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+#define LSW(x) ((uint16_t)(x))
 #define LSDW(x) ((u32)((u64)(x)))
 #define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
 
@@ -671,6 +673,7 @@ struct scsi_qla_host {
        uint16_t pri_ddb_idx;
        uint16_t sec_ddb_idx;
        int is_reset;
+       uint16_t temperature;
 };
 
 struct ql4_task_data {
index 1bdfa8120ac888c65c304c28dc3f3aba806ea403..90614f38b55d54541bba90d114d37e1ae4c553d7 100644 (file)
@@ -697,6 +697,9 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
                        writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
                               &ha->reg->ctrl_status);
                        readl(&ha->reg->ctrl_status);
+                       writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
+                              &ha->reg->ctrl_status);
+                       readl(&ha->reg->ctrl_status);
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
                        if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
                                DEBUG2(printk("scsi%ld: %s: Get firmware "
index c2593782fbbef8c203148b1661c92a3e1dbe35f6..e1e66a45e4d06f59ab7e35a4b46999d3bba90106 100644 (file)
@@ -219,6 +219,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                ha->mailbox_timeout_count++;
                mbx_sts[0] = (-1);
                set_bit(DPC_RESET_HA, &ha->dpc_flags);
+               if (is_qla8022(ha)) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "disabling pause transmit on port 0 & 1.\n");
+                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                                       CRB_NIU_XG_PAUSE_CTL_P0 |
+                                       CRB_NIU_XG_PAUSE_CTL_P1);
+               }
                goto mbox_exit;
        }
 
index 8d6bc1b2ff17266a45d66dfb923f5c7757cf1493..78f1111158d75379d4d1c1ef04a139889c280d3d 100644 (file)
@@ -1875,6 +1875,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
 int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
 {
        int retval;
+
+       /* clear the interrupt */
+       writel(0, &ha->qla4_8xxx_reg->host_int);
+       readl(&ha->qla4_8xxx_reg->host_int);
+
        retval = qla4_8xxx_device_state_handler(ha);
 
        if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
index 35376a1c3f1bc109fb8bea9deb54cec9b7acf0c8..dc45ac92369150eea7821fac2bacc24c8fe11311 100644 (file)
 #define PHAN_PEG_RCV_INITIALIZED       0xff01
 
 /*CRB_RELATED*/
-#define QLA82XX_CRB_BASE       QLA82XX_CAM_RAM(0x200)
-#define QLA82XX_REG(X)         (QLA82XX_CRB_BASE+(X))
-
+#define QLA82XX_CRB_BASE               (QLA82XX_CAM_RAM(0x200))
+#define QLA82XX_REG(X)                 (QLA82XX_CRB_BASE+(X))
 #define CRB_CMDPEG_STATE               QLA82XX_REG(0x50)
 #define CRB_RCVPEG_STATE               QLA82XX_REG(0x13c)
 #define CRB_DMA_SHIFT                  QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE                 QLA82XX_REG(0x1b4)
+
+#define qla82xx_get_temp_val(x)                ((x) >> 16)
+#define qla82xx_get_temp_state(x)      ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state)        (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+       QLA82XX_TEMP_NORMAL = 0x1,      /* Normal operating range */
+       QLA82XX_TEMP_WARN,      /* Sound alert, temperature getting high */
+       QLA82XX_TEMP_PANIC      /* Fatal error, hardware has shut down. */
+};
+
+#define CRB_NIU_XG_PAUSE_CTL_P0                0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1                0x8
 
 #define QLA82XX_HW_H0_CH_HUB_ADR       0x05
 #define QLA82XX_HW_H1_CH_HUB_ADR       0x0E
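The word read back from CRB_TEMP_STATE packs the temperature value in the
upper 16 bits and the state in the lower 16 bits, which is what the three
macros above encode and decode. A worked example with illustrative values:

    /* Encode 85 degrees C in the WARN state, then decode it again. */
    uint32_t temp  = qla82xx_encode_temp(85, QLA82XX_TEMP_WARN); /* 0x00550002 */
    uint32_t val   = qla82xx_get_temp_val(temp);   /* 85 */
    uint32_t state = qla82xx_get_temp_state(temp); /* QLA82XX_TEMP_WARN (2) */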
index ec393a00c03816f52515fc7e294187a2527e2bf0..ce6d3b7f0c616a3adc14677c27996e99711d4f75 100644 (file)
@@ -35,43 +35,44 @@ static struct kmem_cache *srb_cachep;
 int ql4xdisablesysfsboot = 1;
 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdisablesysfsboot,
-               "Set to disable exporting boot targets to sysfs\n"
-               " 0 - Export boot targets\n"
-               " 1 - Do not export boot targets (Default)");
+                " Set to disable exporting boot targets to sysfs.\n"
+                "\t\t  0 - Export boot targets\n"
+                "\t\t  1 - Do not export boot targets (Default)");
 
 int ql4xdontresethba = 0;
 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdontresethba,
-               "Don't reset the HBA for driver recovery \n"
-               " 0 - It will reset HBA (Default)\n"
-               " 1 - It will NOT reset HBA");
+                " Don't reset the HBA for driver recovery.\n"
+                "\t\t  0 - It will reset HBA (Default)\n"
+                "\t\t  1 - It will NOT reset HBA");
 
-int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
+int ql4xextended_error_logging;
 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xextended_error_logging,
-                "Option to enable extended error logging, "
-                "Default is 0 - no logging, 1 - debug logging");
+                " Option to enable extended error logging.\n"
+                "\t\t  0 - no logging (Default)\n"
+                "\t\t  2 - debug logging");
 
 int ql4xenablemsix = 1;
 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql4xenablemsix,
-               "Set to enable MSI or MSI-X interrupt mechanism.\n"
-               " 0 = enable INTx interrupt mechanism.\n"
-               " 1 = enable MSI-X interrupt mechanism (Default).\n"
-               " 2 = enable MSI interrupt mechanism.");
+                " Set to enable MSI or MSI-X interrupt mechanism.\n"
+                "\t\t  0 = enable INTx interrupt mechanism.\n"
+                "\t\t  1 = enable MSI-X interrupt mechanism (Default).\n"
+                "\t\t  2 = enable MSI interrupt mechanism.");
 
 #define QL4_DEF_QDEPTH 32
 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xmaxqdepth,
-               "Maximum queue depth to report for target devices.\n"
-               " Default: 32.");
+                " Maximum queue depth to report for target devices.\n"
+                "\t\t  Default: 32.");
 
 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
                "Target Session Recovery Timeout.\n"
-               " Default: 120 sec.");
+               "\t\t  Default: 120 sec.");
 
 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
 /*
@@ -1630,7 +1631,9 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
 
        /* Update timers after login */
        ddb_entry->default_relogin_timeout =
-                               le16_to_cpu(fw_ddb_entry->def_timeout);
+               (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
+                (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
+                le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
        ddb_entry->default_time2wait =
                                le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
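The update above guards against bogus firmware values: the DDB's
def_timeout is trusted only when it lies strictly between LOGIN_TOV and
LOGIN_TOV * 10, otherwise LOGIN_TOV is used. The same pattern recurs in
qla4xxx_setup_flash_ddb_entry() and qla4xxx_build_ddb_list() below; a
hypothetical helper, not part of this patch, would capture it as:

    /* Hypothetical helper: trust the firmware timeout only inside the
     * sane window (LOGIN_TOV, LOGIN_TOV * 10); else fall back. */
    static inline uint16_t qla4xxx_sane_def_timeout(uint16_t def_timeout)
    {
            return (def_timeout > LOGIN_TOV && def_timeout < LOGIN_TOV * 10) ?
                    def_timeout : LOGIN_TOV;
    }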
 
@@ -1969,6 +1972,42 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
        return QLA_ERROR;
 }
 
+/**
+ * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * @ha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
+{
+       uint32_t temp, temp_state, temp_val;
+       int status = QLA_SUCCESS;
+
+       temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
+
+       temp_state = qla82xx_get_temp_state(temp);
+       temp_val = qla82xx_get_temp_val(temp);
+
+       if (temp_state == QLA82XX_TEMP_PANIC) {
+               ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
+                          " exceeds maximum allowed. Hardware has been shut"
+                          " down.\n", temp_val);
+               status = QLA_ERROR;
+       } else if (temp_state == QLA82XX_TEMP_WARN) {
+               if (ha->temperature == QLA82XX_TEMP_NORMAL)
+                       ql4_printk(KERN_WARNING, ha, "Device temperature %d"
+                                  " degrees C exceeds operating range."
+                                  " Immediate action needed.\n", temp_val);
+       } else {
+               if (ha->temperature == QLA82XX_TEMP_WARN)
+                       ql4_printk(KERN_INFO, ha, "Device temperature is"
+                                  " now %d degrees C in normal range.\n",
+                                  temp_val);
+       }
+       ha->temperature = temp_state;
+       return status;
+}
+
 /**
  * qla4_8xxx_check_fw_alive  - Check firmware health
  * @ha: Pointer to host adapter structure.
@@ -2040,7 +2079,16 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
            test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
            test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
                dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-               if (dev_state == QLA82XX_DEV_NEED_RESET &&
+
+               if (qla4_8xxx_check_temp(ha)) {
+                       ql4_printk(KERN_INFO, ha, "disabling pause"
+                                  " transmit on port 0 & 1.\n");
+                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                                       CRB_NIU_XG_PAUSE_CTL_P0 |
+                                       CRB_NIU_XG_PAUSE_CTL_P1);
+                       set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+                       qla4xxx_wake_dpc(ha);
+               } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
                    !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
                        if (!ql4xdontresethba) {
                                ql4_printk(KERN_INFO, ha, "%s: HW State: "
@@ -2057,9 +2105,21 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
                } else  {
                        /* Check firmware health */
                        if (qla4_8xxx_check_fw_alive(ha)) {
+                               ql4_printk(KERN_INFO, ha, "disabling pause"
+                                          " transmit on port 0 & 1.\n");
+                               qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                                               CRB_NIU_XG_PAUSE_CTL_P0 |
+                                               CRB_NIU_XG_PAUSE_CTL_P1);
                                halt_status = qla4_8xxx_rd_32(ha,
                                                QLA82XX_PEG_HALT_STATUS1);
 
+                               if (LSW(MSB(halt_status)) == 0x67)
+                                       ql4_printk(KERN_ERR, ha, "%s:"
+                                                  " Firmware aborted with"
+                                                  " error code 0x00006700."
+                                                  " Device is being reset\n",
+                                                  __func__);
+
                                /* Since we cannot change dev_state in interrupt
                                 * context, set appropriate DPC flag then wakeup
                                 * DPC */
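The 0x67 comparison above leans on the MSB()/LSW() macros added earlier in
this series: MSB() extracts bits 15:8 of the low word, so a halt status of
0x00006700 yields 0x67, and LSW() applied to that single byte is a no-op.
A worked decode with an illustrative value:

    uint32_t halt_status = 0x00006700;   /* firmware abort code */
    uint8_t  code = MSB(halt_status);    /* (0x6700 >> 8) == 0x67 */
    uint16_t word = LSW(code);           /* still 0x67 */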
@@ -2078,7 +2138,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
        }
 }
 
-void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
 {
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;
@@ -3826,16 +3886,14 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
        return ret;
 }
 
-static void qla4xxx_free_nt_list(struct list_head *list_nt)
+static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
 {
-       struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+       struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
 
-       /* Free up the normaltargets list */
-       list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
-               list_del_init(&nt_ddb_idx->list);
-               vfree(nt_ddb_idx);
+       list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+               list_del_init(&ddb_idx->list);
+               vfree(ddb_idx);
        }
-
 }
 
 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
@@ -3884,6 +3942,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
                                          struct ddb_entry *ddb_entry)
 {
+       uint16_t def_timeout;
+
        ddb_entry->ddb_type = FLASH_DDB;
        ddb_entry->fw_ddb_index = INVALID_ENTRY;
        ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
@@ -3894,9 +3954,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
        atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
        atomic_set(&ddb_entry->relogin_timer, 0);
        atomic_set(&ddb_entry->relogin_retry_count, 0);
-
+       def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
        ddb_entry->default_relogin_timeout =
-               le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+               (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
+               def_timeout : LOGIN_TOV;
        ddb_entry->default_time2wait =
                le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
 }
@@ -3934,7 +3995,6 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
                            ip_state == IP_ADDRSTATE_DEPRICATED ||
                            ip_state == IP_ADDRSTATE_DISABLING)
                                ip_idx[idx] = -1;
-
                }
 
                /* Break if all IP states checked */
@@ -3947,58 +4007,37 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
        } while (time_after(wtime, jiffies));
 }
 
-void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
+                                 struct list_head *list_st)
 {
+       struct qla_ddb_index  *st_ddb_idx;
        int max_ddbs;
+       int fw_idx_size;
+       struct dev_db_entry *fw_ddb_entry;
+       dma_addr_t fw_ddb_dma;
        int ret;
        uint32_t idx = 0, next_idx = 0;
        uint32_t state = 0, conn_err = 0;
-       uint16_t conn_id;
-       struct dev_db_entry *fw_ddb_entry;
-       struct ddb_entry *ddb_entry = NULL;
-       dma_addr_t fw_ddb_dma;
-       struct iscsi_cls_session *cls_sess;
-       struct iscsi_session *sess;
-       struct iscsi_cls_conn *cls_conn;
-       struct iscsi_endpoint *ep;
-       uint16_t cmds_max = 32, tmo = 0;
-       uint32_t initial_cmdsn = 0;
-       struct list_head list_st, list_nt; /* List of sendtargets */
-       struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
-       int fw_idx_size;
-       unsigned long wtime;
-       struct qla_ddb_index  *nt_ddb_idx;
-
-       if (!test_bit(AF_LINK_UP, &ha->flags)) {
-               set_bit(AF_BUILD_DDB_LIST, &ha->flags);
-               ha->is_reset = is_reset;
-               return;
-       }
-       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
-                                    MAX_DEV_DB_ENTRIES;
+       uint16_t conn_id = 0;
 
        fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
                                      &fw_ddb_dma);
        if (fw_ddb_entry == NULL) {
                DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
-               goto exit_ddb_list;
+               goto exit_st_list;
        }
 
-       INIT_LIST_HEAD(&list_st);
-       INIT_LIST_HEAD(&list_nt);
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
        fw_idx_size = sizeof(struct qla_ddb_index);
 
        for (idx = 0; idx < max_ddbs; idx = next_idx) {
-               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
-                                             fw_ddb_dma, NULL,
-                                             &next_idx, &state, &conn_err,
-                                             NULL, &conn_id);
+               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+                                             NULL, &next_idx, &state,
+                                             &conn_err, NULL, &conn_id);
                if (ret == QLA_ERROR)
                        break;
 
-               if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
-                       goto continue_next_st;
-
                /* Check if ST, add to the list_st */
                if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
                        goto continue_next_st;
@@ -4009,59 +4048,155 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
 
                st_ddb_idx->fw_ddb_idx = idx;
 
-               list_add_tail(&st_ddb_idx->list, &list_st);
+               list_add_tail(&st_ddb_idx->list, list_st);
 continue_next_st:
                if (next_idx == 0)
                        break;
        }
 
-       /* Before issuing conn open mbox, ensure all IPs states are configured
-        * Note, conn open fails if IPs are not configured
+exit_st_list:
+       if (fw_ddb_entry)
+               dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
+ * @ha: pointer to adapter structure
+ * @list_ddb: list from which failed DDBs are to be removed
+ *
+ * Iterate over the list of DDBs and remove those that are in the
+ * no-connection-active or session-failed state.
+ **/
+static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
+                                     struct list_head *list_ddb)
+{
+       struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
+       uint32_t next_idx = 0;
+       uint32_t state = 0, conn_err = 0;
+       int ret;
+
+       list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+               ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
+                                             NULL, 0, NULL, &next_idx, &state,
+                                             &conn_err, NULL, NULL);
+               if (ret == QLA_ERROR)
+                       continue;
+
+               if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+                   state == DDB_DS_SESSION_FAILED) {
+                       list_del_init(&ddb_idx->list);
+                       vfree(ddb_idx);
+               }
+       }
+}
+
+static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
+                                  struct dev_db_entry *fw_ddb_entry,
+                                  int is_reset)
+{
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_session *sess;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_endpoint *ep;
+       uint16_t cmds_max = 32;
+       uint16_t conn_id = 0;
+       uint32_t initial_cmdsn = 0;
+       int ret = QLA_SUCCESS;
+
+       struct ddb_entry *ddb_entry = NULL;
+
+       /* Create session object, with INVALID_ENTRY,
+        * the target_id would get set when we issue the login
         */
-       qla4xxx_wait_for_ip_configuration(ha);
+       cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
+                                      cmds_max, sizeof(struct ddb_entry),
+                                      sizeof(struct ql4_task_data),
+                                      initial_cmdsn, INVALID_ENTRY);
+       if (!cls_sess) {
+               ret = QLA_ERROR;
+               goto exit_setup;
+       }
 
-       /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
-       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
-               qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+       /*
+        * iscsi_session_setup() increments the driver reference
+        * count, which would prevent the driver from being unloaded.
+        * So call module_put() to decrement the reference count.
+        **/
+       module_put(qla4xxx_iscsi_transport.owner);
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ddb_entry->sess = cls_sess;
+
+       cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+       memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+              sizeof(struct dev_db_entry));
+
+       qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+       cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
+
+       if (!cls_conn) {
+               ret = QLA_ERROR;
+               goto exit_setup;
        }
 
-       /* Wait to ensure all sendtargets are done for min 12 sec wait */
-       tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
-       DEBUG2(ql4_printk(KERN_INFO, ha,
-                         "Default time to wait for build ddb %d\n", tmo));
+       ddb_entry->conn = cls_conn;
 
-       wtime = jiffies + (HZ * tmo);
-       do {
-               list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
-                                        list) {
-                       ret = qla4xxx_get_fwddb_entry(ha,
-                                                     st_ddb_idx->fw_ddb_idx,
-                                                     NULL, 0, NULL, &next_idx,
-                                                     &state, &conn_err, NULL,
-                                                     NULL);
-                       if (ret == QLA_ERROR)
-                               continue;
+       /* Setup ep, for displaying attributes in sysfs */
+       ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+       if (ep) {
+               ep->conn = cls_conn;
+               cls_conn->ep = ep;
+       } else {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
+               ret = QLA_ERROR;
+               goto exit_setup;
+       }
 
-                       if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
-                           state == DDB_DS_SESSION_FAILED) {
-                               list_del_init(&st_ddb_idx->list);
-                               vfree(st_ddb_idx);
-                       }
-               }
-               schedule_timeout_uninterruptible(HZ / 10);
-       } while (time_after(wtime, jiffies));
+       /* Update sess/conn params */
+       qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
 
-       /* Free up the sendtargets list */
-       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
-               list_del_init(&st_ddb_idx->list);
-               vfree(st_ddb_idx);
+       if (is_reset == RESET_ADAPTER) {
+               iscsi_block_session(cls_sess);
+               /* Use the relogin path to discover new devices
+                *  by short-circuiting the logic of setting
+                *  timer to relogin - instead set the flags
+                *  to initiate login right away.
+                */
+               set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+               set_bit(DF_RELOGIN, &ddb_entry->flags);
        }
 
+exit_setup:
+       return ret;
+}
+
+static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
+                                 struct list_head *list_nt, int is_reset)
+{
+       struct dev_db_entry *fw_ddb_entry;
+       dma_addr_t fw_ddb_dma;
+       int max_ddbs;
+       int fw_idx_size;
+       int ret;
+       uint32_t idx = 0, next_idx = 0;
+       uint32_t state = 0, conn_err = 0;
+       uint16_t conn_id = 0;
+       struct qla_ddb_index  *nt_ddb_idx;
+
+       fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+                                     &fw_ddb_dma);
+       if (fw_ddb_entry == NULL) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+               goto exit_nt_list;
+       }
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+       fw_idx_size = sizeof(struct qla_ddb_index);
+
        for (idx = 0; idx < max_ddbs; idx = next_idx) {
-               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
-                                             fw_ddb_dma, NULL,
-                                             &next_idx, &state, &conn_err,
-                                             NULL, &conn_id);
+               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+                                             NULL, &next_idx, &state,
+                                             &conn_err, NULL, &conn_id);
                if (ret == QLA_ERROR)
                        break;
 
@@ -4072,107 +4207,113 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
                if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
                        goto continue_next_nt;
 
-               if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
-                   state == DDB_DS_SESSION_FAILED) {
-                       DEBUG2(ql4_printk(KERN_INFO, ha,
-                                         "Adding  DDB to session = 0x%x\n",
-                                         idx));
-                       if (is_reset == INIT_ADAPTER) {
-                               nt_ddb_idx = vmalloc(fw_idx_size);
-                               if (!nt_ddb_idx)
-                                       break;
-
-                               nt_ddb_idx->fw_ddb_idx = idx;
-
-                               memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
-                                      sizeof(struct dev_db_entry));
-
-                               if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
-                                               fw_ddb_entry) == QLA_SUCCESS) {
-                                       vfree(nt_ddb_idx);
-                                       goto continue_next_nt;
-                               }
-                               list_add_tail(&nt_ddb_idx->list, &list_nt);
-                       } else if (is_reset == RESET_ADAPTER) {
-                               if (qla4xxx_is_session_exists(ha,
-                                                  fw_ddb_entry) == QLA_SUCCESS)
-                                       goto continue_next_nt;
-                       }
+               if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
+                   state == DDB_DS_SESSION_FAILED))
+                       goto continue_next_nt;
 
-                       /* Create session object, with INVALID_ENTRY,
-                        * the targer_id would get set when we issue the login
-                        */
-                       cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
-                                               ha->host, cmds_max,
-                                               sizeof(struct ddb_entry),
-                                               sizeof(struct ql4_task_data),
-                                               initial_cmdsn, INVALID_ENTRY);
-                       if (!cls_sess)
-                               goto exit_ddb_list;
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Adding DDB to session = 0x%x\n", idx));
+               if (is_reset == INIT_ADAPTER) {
+                       nt_ddb_idx = vmalloc(fw_idx_size);
+                       if (!nt_ddb_idx)
+                               break;
 
-                       /*
-                        * iscsi_session_setup increments the driver reference
-                        * count which wouldn't let the driver to be unloaded.
-                        * so calling module_put function to decrement the
-                        * reference count.
-                        **/
-                       module_put(qla4xxx_iscsi_transport.owner);
-                       sess = cls_sess->dd_data;
-                       ddb_entry = sess->dd_data;
-                       ddb_entry->sess = cls_sess;
+                       nt_ddb_idx->fw_ddb_idx = idx;
 
-                       cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
-                       memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+                       memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
                               sizeof(struct dev_db_entry));
 
-                       qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
-
-                       cls_conn = iscsi_conn_setup(cls_sess,
-                                                   sizeof(struct qla_conn),
-                                                   conn_id);
-                       if (!cls_conn)
-                               goto exit_ddb_list;
-
-                       ddb_entry->conn = cls_conn;
-
-                       /* Setup ep, for displaying attributes in sysfs */
-                       ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
-                       if (ep) {
-                               ep->conn = cls_conn;
-                               cls_conn->ep = ep;
-                       } else {
-                               DEBUG2(ql4_printk(KERN_ERR, ha,
-                                                 "Unable to get ep\n"));
-                       }
-
-                       /* Update sess/conn params */
-                       qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
-                                                cls_conn);
-
-                       if (is_reset == RESET_ADAPTER) {
-                               iscsi_block_session(cls_sess);
-                               /* Use the relogin path to discover new devices
-                                *  by short-circuting the logic of setting
-                                *  timer to relogin - instead set the flags
-                                *  to initiate login right away.
-                                */
-                               set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
-                               set_bit(DF_RELOGIN, &ddb_entry->flags);
+                       if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
+                                       fw_ddb_entry) == QLA_SUCCESS) {
+                               vfree(nt_ddb_idx);
+                               goto continue_next_nt;
                        }
+                       list_add_tail(&nt_ddb_idx->list, list_nt);
+               } else if (is_reset == RESET_ADAPTER) {
+                       if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
+                                                               QLA_SUCCESS)
+                               goto continue_next_nt;
                }
+
+               ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
+               if (ret == QLA_ERROR)
+                       goto exit_nt_list;
+
 continue_next_nt:
                if (next_idx == 0)
                        break;
        }
-exit_ddb_list:
-       qla4xxx_free_nt_list(&list_nt);
+
+exit_nt_list:
        if (fw_ddb_entry)
                dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_build_ddb_list - Build ddb list and setup sessions
+ * @ha: pointer to adapter structure
+ * @is_reset: Is this init path or reset path
+ *
+ * Create a list of sendtargets (st) from the firmware DDBs and issue send
+ * targets using connection open, then create a list of normal targets (nt)
+ * from the firmware DDBs. Based on the nt list, set up session and
+ * connection objects.
+ **/
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+       uint16_t tmo = 0;
+       struct list_head list_st, list_nt;
+       struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
+       unsigned long wtime;
+
+       if (!test_bit(AF_LINK_UP, &ha->flags)) {
+               set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+               ha->is_reset = is_reset;
+               return;
+       }
+
+       INIT_LIST_HEAD(&list_st);
+       INIT_LIST_HEAD(&list_nt);
+
+       qla4xxx_build_st_list(ha, &list_st);
+
+       /* Before issuing conn open mbox, ensure all IPs states are configured
+        * Note, conn open fails if IPs are not configured
+        */
+       qla4xxx_wait_for_ip_configuration(ha);
+
+       /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
+       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+               qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+       }
+
+       /* Wait to ensure all sendtargets are done for min 12 sec wait */
+       tmo = ((ha->def_timeout > LOGIN_TOV) &&
+              (ha->def_timeout < LOGIN_TOV * 10) ?
+              ha->def_timeout : LOGIN_TOV);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Default time to wait for build ddb %d\n", tmo));
+
+       wtime = jiffies + (HZ * tmo);
+       do {
+               if (list_empty(&list_st))
+                       break;
+
+               qla4xxx_remove_failed_ddb(ha, &list_st);
+               schedule_timeout_uninterruptible(HZ / 10);
+       } while (time_after(wtime, jiffies));
+
+       /* Free up the sendtargets list */
+       qla4xxx_free_ddb_list(&list_st);
+
+       qla4xxx_build_nt_list(ha, &list_nt, is_reset);
+
+       qla4xxx_free_ddb_list(&list_nt);
 
        qla4xxx_free_ddb_index(ha);
 }
 
-
 /**
  * qla4xxx_probe_adapter - callback function to probe HBA
  * @pdev: pointer to pci_dev structure
index 26a3fa34a33c0594c3dfd4fa78f73c75db0fb28c..133989b3a9f4da0d7deed83682c6924c3a654330 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k10"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k12"
index f85cfa6c47b5212982bcbfcd9065936c27360f3d..b2c95dbe9d651d3a82b9e7a269ec3b5bd7e7c167 100644 (file)
@@ -1316,15 +1316,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
        }
 
        if (scsi_target_is_busy(starget)) {
-               if (list_empty(&sdev->starved_entry))
-                       list_add_tail(&sdev->starved_entry,
-                                     &shost->starved_list);
+               list_move_tail(&sdev->starved_entry, &shost->starved_list);
                return 0;
        }
 
-       /* We're OK to process the command, so we can't be starved */
-       if (!list_empty(&sdev->starved_entry))
-               list_del_init(&sdev->starved_entry);
        return 1;
 }
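list_move_tail() folds the old open-coded sequence into one call: it
unlinks the entry first (a self-linked, "empty" entry simply unlinks from
itself) and then appends it to the new tail, so starved_entry ends up on
shost->starved_list whether or not it was already queued somewhere. A
minimal sketch of those semantics, not driver code:

    #include <linux/list.h>

    static void demo_list_move_tail(void)
    {
            LIST_HEAD(starved_list);
            struct list_head entry;

            INIT_LIST_HEAD(&entry);                 /* self-linked, "empty" */
            list_move_tail(&entry, &starved_list);  /* unlink is a no-op */
            list_move_tail(&entry, &starved_list);  /* re-queues at the tail */
    }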
 
index 1b214910b71414804f6d0c6dee0932ead1f98156..f59d4a05ecd74168910790d2051673508d3a40e4 100644 (file)
@@ -3048,7 +3048,8 @@ fc_remote_port_rolechg(struct fc_rport  *rport, u32 roles)
 
                spin_lock_irqsave(shost->host_lock, flags);
                rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
-                                 FC_RPORT_DEVLOSS_PENDING);
+                                 FC_RPORT_DEVLOSS_PENDING |
+                                 FC_RPORT_DEVLOSS_CALLBK_DONE);
                spin_unlock_irqrestore(shost->host_lock, flags);
 
                /* ensure any stgt delete functions are done */
index 02d99982a74d9048edfb73359ed2e38d08b163f8..eacd46bb36b95fe2b5caf6e82f3940427a250910 100644 (file)
@@ -2368,16 +2368,15 @@ static ssize_t
 sg_proc_write_adio(struct file *filp, const char __user *buffer,
                   size_t count, loff_t *off)
 {
-       int num;
-       char buff[11];
+       int err;
+       unsigned long num;
 
        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
-       num = (count < 10) ? count : 10;
-       if (copy_from_user(buff, buffer, num))
-               return -EFAULT;
-       buff[num] = '\0';
-       sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
+       err = kstrtoul_from_user(buffer, count, 0, &num);
+       if (err)
+               return err;
+       sg_allow_dio = num ? 1 : 0;
        return count;
 }
 
@@ -2390,17 +2389,15 @@ static ssize_t
 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
                     size_t count, loff_t *off)
 {
-       int num;
+       int err;
        unsigned long k = ULONG_MAX;
-       char buff[11];
 
        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
-       num = (count < 10) ? count : 10;
-       if (copy_from_user(buff, buffer, num))
-               return -EFAULT;
-       buff[num] = '\0';
-       k = simple_strtoul(buff, NULL, 10);
+
+       err = kstrtoul_from_user(buffer, count, 0, &k);
+       if (err)
+               return err;
        if (k <= 1048576) {     /* limit "big buff" to 1 MB */
                sg_big_buff = k;
                return count;
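Both writers now use kstrtoul_from_user(), which copies from user space
and parses in a single step, returning -EFAULT, -EINVAL or -ERANGE instead
of the old simple_strtoul() path that silently treated unparsable input as
zero. A minimal sketch of the pattern, with illustrative names:

    #include <linux/kernel.h>

    static ssize_t example_parse_write(const char __user *buffer,
                                       size_t count, unsigned long *out)
    {
            unsigned long val;
            int err;

            err = kstrtoul_from_user(buffer, count, 0, &val); /* base 0: auto */
            if (err)
                    return err;     /* -EFAULT, -EINVAL or -ERANGE */

            *out = val;
            return count;
    }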
index b4543f575f466fc3c7fed5f3edad05be9595d0cd..36d1ed7817ebf9d52631c970023d31b31ff494e0 100644 (file)
@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
        struct sym_lcb *lp = sym_lp(tp, sdev->lun);
        unsigned long flags;
 
+       /* if slave_alloc returned before allocating a sym_lcb, return */
+       if (!lp)
+               return;
+
        spin_lock_irqsave(np->s.host->host_lock, flags);
 
        if (lp->busy_itlq || lp->busy_itl) {
index e743a45ee92c265bd8a2e13ecec9f98cf3eb926c..8418eb03665121db4a6caf809ea8efc4f12b170c 100644 (file)
@@ -131,7 +131,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
        rxchan = dws->rxchan;
 
        /* 2. Prepare the TX dma transfer */
-       txconf.direction = DMA_TO_DEVICE;
+       txconf.direction = DMA_MEM_TO_DEV;
        txconf.dst_addr = dws->dma_addr;
        txconf.dst_maxburst = LNW_DMA_MSIZE_16;
        txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -147,13 +147,13 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
        txdesc = txchan->device->device_prep_slave_sg(txchan,
                                &dws->tx_sgl,
                                1,
-                               DMA_TO_DEVICE,
+                               DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
        txdesc->callback = dw_spi_dma_done;
        txdesc->callback_param = dws;
 
        /* 3. Prepare the RX dma transfer */
-       rxconf.direction = DMA_FROM_DEVICE;
+       rxconf.direction = DMA_DEV_TO_MEM;
        rxconf.src_addr = dws->dma_addr;
        rxconf.src_maxburst = LNW_DMA_MSIZE_16;
        rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -169,7 +169,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
        rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
                                &dws->rx_sgl,
                                1,
-                               DMA_FROM_DEVICE,
+                               DMA_DEV_TO_MEM,
                                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
        rxdesc->callback = dw_spi_dma_done;
        rxdesc->callback_param = dws;
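This and the following hunks are part of a tree-wide move of slave-DMA
users from enum dma_data_direction (DMA_TO_DEVICE / DMA_FROM_DEVICE) to
the dedicated enum dma_transfer_direction (DMA_MEM_TO_DEV / DMA_DEV_TO_MEM)
in dma_slave_config and device_prep_slave_sg(). Note that dma_unmap_sg()
keeps DMA_TO_DEVICE, since mapping still takes a dma_data_direction. A
minimal TX configuration sketch, with the channel and FIFO address assumed
to come from the driver:

    #include <linux/dmaengine.h>

    /* Illustrative slave TX setup using the new direction enum. */
    static int example_slave_tx_config(struct dma_chan *chan,
                                       dma_addr_t fifo_addr)
    {
            struct dma_slave_config conf = {
                    .direction      = DMA_MEM_TO_DEV,  /* was DMA_TO_DEVICE */
                    .dst_addr       = fifo_addr,
                    .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                    .dst_maxburst   = 16,
            };

            return dmaengine_slave_config(chan, &conf);
    }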
index 0a282e5fcc9c3fe4931a871acd6d89708162bd0d..d46e55c720b7f71717d5772b034c5b7f3e159dc4 100644 (file)
@@ -551,6 +551,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
        struct dma_async_tx_descriptor *txd;
        enum dma_slave_buswidth buswidth;
        struct dma_slave_config conf;
+       enum dma_transfer_direction slave_dirn;
        struct scatterlist *sg;
        struct sg_table *sgt;
        struct dma_chan *chan;
@@ -573,6 +574,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
 
                conf.src_addr = espi->sspdr_phys;
                conf.src_addr_width = buswidth;
+               slave_dirn = DMA_DEV_TO_MEM;
        } else {
                chan = espi->dma_tx;
                buf = t->tx_buf;
@@ -580,6 +582,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
 
                conf.dst_addr = espi->sspdr_phys;
                conf.dst_addr_width = buswidth;
+               slave_dirn = DMA_MEM_TO_DEV;
        }
 
        ret = dmaengine_slave_config(chan, &conf);
@@ -631,7 +634,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
                return ERR_PTR(-ENOMEM);
 
        txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
-                                                dir, DMA_CTRL_ACK);
+                                                slave_dirn, DMA_CTRL_ACK);
        if (!txd) {
                dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
                return ERR_PTR(-ENOMEM);
@@ -979,7 +982,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
        dma_cap_set(DMA_SLAVE, mask);
 
        espi->dma_rx_data.port = EP93XX_DMA_SSP;
-       espi->dma_rx_data.direction = DMA_FROM_DEVICE;
+       espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
        espi->dma_rx_data.name = "ep93xx-spi-rx";
 
        espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
@@ -990,7 +993,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
        }
 
        espi->dma_tx_data.port = EP93XX_DMA_SSP;
-       espi->dma_tx_data.direction = DMA_TO_DEVICE;
+       espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
        espi->dma_tx_data.name = "ep93xx-spi-tx";
 
        espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
index f1f5efbc3404aefc4aa68dba8ded5a4c72275bdc..2f9cb43a239870b6db396b8507fb6a67893f1f37 100644 (file)
@@ -900,11 +900,11 @@ static int configure_dma(struct pl022 *pl022)
 {
        struct dma_slave_config rx_conf = {
                .src_addr = SSP_DR(pl022->phybase),
-               .direction = DMA_FROM_DEVICE,
+               .direction = DMA_DEV_TO_MEM,
        };
        struct dma_slave_config tx_conf = {
                .dst_addr = SSP_DR(pl022->phybase),
-               .direction = DMA_TO_DEVICE,
+               .direction = DMA_MEM_TO_DEV,
        };
        unsigned int pages;
        int ret;
@@ -1041,7 +1041,7 @@ static int configure_dma(struct pl022 *pl022)
        rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
                                      pl022->sgt_rx.sgl,
                                      rx_sglen,
-                                     DMA_FROM_DEVICE,
+                                     DMA_DEV_TO_MEM,
                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!rxdesc)
                goto err_rxdesc;
@@ -1049,7 +1049,7 @@ static int configure_dma(struct pl022 *pl022)
        txdesc = txchan->device->device_prep_slave_sg(txchan,
                                      pl022->sgt_tx.sgl,
                                      tx_sglen,
-                                     DMA_TO_DEVICE,
+                                     DMA_MEM_TO_DEV,
                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!txdesc)
                goto err_txdesc;
index 7086583b910708e80cf1b133187eb9be236ee4ff..2a6429d8c363e52b73430b9943716e273dc31438 100644 (file)
@@ -1079,7 +1079,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
        }
        sg = dma->sg_rx_p;
        desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
-                                       num, DMA_FROM_DEVICE,
+                                       num, DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_rx) {
                dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
@@ -1124,7 +1124,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
        }
        sg = dma->sg_tx_p;
        desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
-                                       sg, num, DMA_TO_DEVICE,
+                                       sg, num, DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_tx) {
                dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
index 6958594f2fc09a4c35eb65ce6037f937b323251e..9ae024025ff356618221717d3fae6b5d296e32db 100644 (file)
@@ -268,7 +268,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
        struct dma_slave_config tx_conf = {
                .dst_addr = uap->port.mapbase + UART01x_DR,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
-               .direction = DMA_TO_DEVICE,
+               .direction = DMA_MEM_TO_DEV,
                .dst_maxburst = uap->fifosize >> 1,
        };
        struct dma_chan *chan;
@@ -301,7 +301,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
                struct dma_slave_config rx_conf = {
                        .src_addr = uap->port.mapbase + UART01x_DR,
                        .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
-                       .direction = DMA_FROM_DEVICE,
+                       .direction = DMA_DEV_TO_MEM,
                        .src_maxburst = uap->fifosize >> 1,
                };
 
@@ -480,7 +480,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
                return -EBUSY;
        }
 
-       desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE,
+       desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
@@ -676,7 +676,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
                &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
        dma_dev = rxchan->device;
        desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
-                                       DMA_FROM_DEVICE,
+                                       DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        /*
         * If the DMA engine is busy and cannot prepare a
index de0f613ed6f56051df309abc3ad25ea804081006..17ae65762d1a465f83ae92d9e64128419c2e077c 100644 (file)
@@ -764,7 +764,7 @@ static int dma_handle_rx(struct eg20t_port *priv)
        sg_dma_address(sg) = priv->rx_buf_dma;
 
        desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx,
-                       sg, 1, DMA_FROM_DEVICE,
+                       sg, 1, DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
        if (!desc)
@@ -923,7 +923,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
        }
 
        desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx,
-                                       priv->sg_tx_p, nent, DMA_TO_DEVICE,
+                                       priv->sg_tx_p, nent, DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n",
index 9e62349b3d9f22cfeb59d2fcea49da4b068aac34..75085795528edd172568dedd2a82f5b160246029 100644 (file)
@@ -1339,7 +1339,7 @@ static void sci_submit_rx(struct sci_port *s)
                struct dma_async_tx_descriptor *desc;
 
                desc = chan->device->device_prep_slave_sg(chan,
-                       sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
+                       sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 
                if (desc) {
                        s->desc_rx[i] = desc;
@@ -1454,7 +1454,7 @@ static void work_fn_tx(struct work_struct *work)
        BUG_ON(!sg_dma_len(sg));
 
        desc = chan->device->device_prep_slave_sg(chan,
-                       sg, s->sg_len_tx, DMA_TO_DEVICE,
+                       sg, s->sg_len_tx, DMA_MEM_TO_DEV,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                /* switch to PIO */
index 32793ce3d9e9dd4a32588204d5a59680046374e6..9c2cc4633894b152047ed587b2cf9494d5fdc684 100644 (file)
@@ -183,7 +183,7 @@ static int __devinit ehci_hcd_xilinx_of_probe(struct platform_device *op)
        }
 
        irq = irq_of_parse_and_map(dn, 0);
-       if (irq == NO_IRQ) {
+       if (!irq) {
                printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
                rv = -EBUSY;
                goto err_irq;
index a163632877afa5a0a68fdbeb297c721ef2308273..97cb45916c4351110e3a21fb2613c2954855ee9b 100644 (file)
@@ -84,7 +84,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
        struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
        struct dma_chan *dma_chan = ux500_channel->dma_chan;
        struct dma_async_tx_descriptor *dma_desc;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        struct scatterlist sg;
        struct dma_slave_config slave_conf;
        enum dma_slave_buswidth addr_width;
@@ -104,7 +104,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
        sg_dma_address(&sg) = dma_addr;
        sg_dma_len(&sg) = len;
 
-       direction = ux500_channel->is_tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+       direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
        addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
                                        DMA_SLAVE_BUSWIDTH_4_BYTES;
 
index b51fcd80d244b57ca75a540276ff45b1c1c3c8c0..72339bd6fcab862565ee222aa4ead450f1d2945d 100644 (file)
@@ -772,10 +772,10 @@ static void usbhsf_dma_prepare_tasklet(unsigned long data)
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
        struct device *dev = usbhs_priv_to_dev(priv);
-       enum dma_data_direction dir;
+       enum dma_transfer_direction dir;
        dma_cookie_t cookie;
 
-       dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+       dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
 
        sg_init_table(&sg, 1);
        sg_set_page(&sg, virt_to_page(pkt->dma),
index 882a51fe7b3c8cd000dddc5f6b2214536f7b7636..9dab1f51dd43b3bac6e275eac77f995fb245d48d 100644 (file)
@@ -856,9 +856,9 @@ static const struct file_operations vhost_net_fops = {
 };
 
 static struct miscdevice vhost_net_misc = {
-       MISC_DYNAMIC_MINOR,
-       "vhost-net",
-       &vhost_net_fops,
+       .minor = VHOST_NET_MINOR,
+       .name = "vhost-net",
+       .fops = &vhost_net_fops,
 };
 
 static int vhost_net_init(void)
@@ -879,3 +879,5 @@ MODULE_VERSION("0.0.1");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Michael S. Tsirkin");
 MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
+MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
+MODULE_ALIAS("devname:vhost-net");
index e3406ab313059fcde53187f7bd9ea3f6352aff11..727a5149d81806be71c7b902be7d4a20b3309a2e 100644 (file)
@@ -245,6 +245,7 @@ struct mx3fb_data {
 
        uint32_t                h_start_width;
        uint32_t                v_start_width;
+       enum disp_data_mapping  disp_data_fmt;
 };
 
 struct dma_chan_request {
@@ -287,11 +288,14 @@ static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long r
        __raw_writel(value, mx3fb->reg_base + reg);
 }
 
-static const uint32_t di_mappings[] = {
-       0x1600AAAA, 0x00E05555, 0x00070000, 3,  /* RGB888 */
-       0x0005000F, 0x000B000F, 0x0011000F, 1,  /* RGB666 */
-       0x0011000F, 0x000B000F, 0x0005000F, 1,  /* BGR666 */
-       0x0004003F, 0x000A000F, 0x000F003F, 1   /* RGB565 */
+struct di_mapping {
+       uint32_t b0, b1, b2;
+};
+
+static const struct di_mapping di_mappings[] = {
+       [IPU_DISP_DATA_MAPPING_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f },
+       [IPU_DISP_DATA_MAPPING_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f },
+       [IPU_DISP_DATA_MAPPING_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 },
 };
 
 static void sdc_fb_init(struct mx3fb_info *fbi)
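The rewrite replaces the flat uint32_t table (four words per pixel format,
selected by a switch in sdc_init_panel()) with a struct array whose
designated indices are the enum disp_data_mapping values, so the lookup
becomes a direct index and the platform data can be range-checked against
ARRAY_SIZE(), as init_fb_chan() does further down. The shape of the
pattern, with illustrative names:

    /* Designated-index array keyed by an enum. */
    enum example_fmt { EXAMPLE_FMT_A, EXAMPLE_FMT_B };

    struct example_map { uint32_t b0, b1, b2; };

    static const struct example_map example_maps[] = {
            [EXAMPLE_FMT_A] = { 0x1, 0x2, 0x3 },
            [EXAMPLE_FMT_B] = { 0x4, 0x5, 0x6 },
    };

    /* Before indexing: if (fmt >= ARRAY_SIZE(example_maps)) return -EINVAL; */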
@@ -334,7 +338,7 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
        /* This enables the channel */
        if (mx3_fbi->cookie < 0) {
                mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
-                     &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+                     &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
                if (!mx3_fbi->txd) {
                        dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
                                dma_chan->chan_id);
@@ -425,7 +429,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
  * @pixel_clk:         desired pixel clock frequency in Hz.
  * @width:             width of panel in pixels.
  * @height:            height of panel in pixels.
- * @pixel_fmt:         pixel format of buffer as FOURCC ASCII code.
  * @h_start_width:     number of pixel clocks between the HSYNC signal pulse
  *                     and the start of valid data.
  * @h_sync_width:      width of the HSYNC signal in units of pixel clocks.
@@ -442,7 +445,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
 static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
                          uint32_t pixel_clk,
                          uint16_t width, uint16_t height,
-                         enum pixel_fmt pixel_fmt,
                          uint16_t h_start_width, uint16_t h_sync_width,
                          uint16_t h_end_width, uint16_t v_start_width,
                          uint16_t v_sync_width, uint16_t v_end_width,
@@ -453,6 +455,7 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
        uint32_t old_conf;
        uint32_t div;
        struct clk *ipu_clk;
+       const struct di_mapping *map;
 
        dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height);
 
@@ -540,36 +543,10 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
                sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
        mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
 
-       switch (pixel_fmt) {
-       case IPU_PIX_FMT_RGB24:
-               mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP);
-               mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-                            ((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC);
-               break;
-       case IPU_PIX_FMT_RGB666:
-               mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP);
-               mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-                            ((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC);
-               break;
-       case IPU_PIX_FMT_BGR666:
-               mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP);
-               mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-                            ((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC);
-               break;
-       default:
-               mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP);
-               mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-                            ((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC);
-               break;
-       }
+       map = &di_mappings[mx3fb->disp_data_fmt];
+       mx3fb_write_reg(mx3fb, map->b0, DI_DISP3_B0_MAP);
+       mx3fb_write_reg(mx3fb, map->b1, DI_DISP3_B1_MAP);
+       mx3fb_write_reg(mx3fb, map->b2, DI_DISP3_B2_MAP);
 
        spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
 
@@ -780,8 +757,6 @@ static int __set_par(struct fb_info *fbi, bool lock)
                if (sdc_init_panel(mx3fb, mode,
                                   (PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
                                   fbi->var.xres, fbi->var.yres,
-                                  (fbi->var.sync & FB_SYNC_SWAP_RGB) ?
-                                  IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666,
                                   fbi->var.left_margin,
                                   fbi->var.hsync_len,
                                   fbi->var.right_margin +
@@ -1117,7 +1092,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
                async_tx_ack(mx3_fbi->txd);
 
        txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg +
-               mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+               mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!txd) {
                dev_err(fbi->device,
                        "Error preparing a DMA transaction descriptor.\n");
@@ -1349,6 +1324,12 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
        const struct fb_videomode *mode;
        int ret, num_modes;
 
+       if (mx3fb_pdata->disp_data_fmt >= ARRAY_SIZE(di_mappings)) {
+               dev_err(dev, "Illegal display data format %d\n",
+                               mx3fb_pdata->disp_data_fmt);
+               return -EINVAL;
+       }
+
        ichan->client = mx3fb;
        irq = ichan->eof_irq;
 
@@ -1402,6 +1383,8 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
        mx3fbi->mx3fb           = mx3fb;
        mx3fbi->blank           = FB_BLANK_NORMAL;
 
+       mx3fb->disp_data_fmt    = mx3fb_pdata->disp_data_fmt;
+
        init_completion(&mx3fbi->flip_cmpl);
        disable_irq(ichan->eof_irq);
        dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);
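
The switch on the pixel format is gone: sdc_init_panel() now indexes the di_mappings table with the platform-supplied disp_data_fmt, and init_fb_chan() rejects out-of-range values up front. A minimal user-space sketch of that table-plus-bounds-check pattern (all names hypothetical):

#include <stdio.h>

enum data_fmt { FMT_RGB666, FMT_RGB565, FMT_RGB888, FMT_COUNT };

struct mapping { unsigned int b0, b1, b2; };

static const struct mapping mappings[FMT_COUNT] = {
        [FMT_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f },
        [FMT_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f },
        [FMT_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 },
};

/* validate once at setup time, as init_fb_chan() does with ARRAY_SIZE() */
static const struct mapping *lookup(unsigned int fmt)
{
        if (fmt >= FMT_COUNT)   /* reject bogus platform data */
                return NULL;
        return &mappings[fmt];
}

int main(void)
{
        const struct mapping *m = lookup(FMT_RGB565);

        if (m)
                printf("b0=%#x b1=%#x b2=%#x\n", m->b0, m->b1, m->b2);
        return 0;
}
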
index ba6eda4b51433e20e87f4ae977cdd565f7d3c300..18c1bb6ffce34dec32c4ba3ec3e6a96459fc771b 100644 (file)
@@ -11,3 +11,4 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
        return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
                ((mfn1 == mfn2) || ((mfn1+1) == mfn2));
 }
+EXPORT_SYMBOL(xen_biovec_phys_mergeable);
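
EXPORT_SYMBOL() makes xen_biovec_phys_mergeable resolvable from loadable modules; without it, a modular caller would fail to load with an unknown-symbol error. A hedged sketch of such a hypothetical caller:

#include <linux/module.h>
#include <linux/bio.h>

/* declaration repeated here only for the sketch */
extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
                                      const struct bio_vec *vec2);

static int __init merge_demo_init(void)
{
        /* the module loader resolved this symbol via the export above */
        pr_info("xen_biovec_phys_mergeable at %pS\n",
                (void *)xen_biovec_phys_mergeable);
        return 0;
}

static void __exit merge_demo_exit(void)
{
}

module_init(merge_demo_init);
module_exit(merge_demo_exit);
MODULE_LICENSE("GPL");
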
index 3832e303c33aca5ceabf0bb1540e02a28b7765bc..596e6a7b17d68bc3ebf424cc11344da1cc32f1e5 100644 (file)
@@ -221,7 +221,7 @@ static int register_balloon(struct device *dev)
 {
        int i, error;
 
-       error = bus_register(&balloon_subsys);
+       error = subsys_system_register(&balloon_subsys, NULL);
        if (error)
                return error;
 
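subsys_system_register() is part of the sysdev-to-regular-device conversion: besides registering the bus, it creates the /sys/devices/system/<name> root device that the old sysdev class used to provide. A minimal sketch of the new-style registration (names hypothetical):

#include <linux/device.h>
#include <linux/module.h>

static struct bus_type demo_subsys = {
        .name           = "demo",
        .dev_name       = "demo",
};

static int __init demo_subsys_init(void)
{
        /* registers the bus and its /sys/devices/system/demo root device */
        return subsys_system_register(&demo_subsys, NULL);
}
module_init(demo_subsys_init);
MODULE_LICENSE("GPL");
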
index 5f43bfba3c7a76906244b47fce383ab310c08c5b..0d15a3d113a2c77bb119913f6fa2229a7c0605fa 100644 (file)
@@ -82,7 +82,6 @@ fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \
 fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
 fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
                                      advansys/3550.bin advansys/38C0800.bin
-fw-shipped-$(CONFIG_SCSI_ISCI) += isci/isci_firmware.bin
 fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \
                                         qlogic/12160.bin
 fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
diff --git a/firmware/isci/isci_firmware.bin.ihex b/firmware/isci/isci_firmware.bin.ihex
deleted file mode 100644 (file)
index 2e66195..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-:10000000495343554F454D42E80018100002000087
-:1000100000000000000000000101000000000000DE
-:10002000FFFFCF5F0100000008DD0B0000FC0F00A8
-:10003000097C0B006EFC0A00FFFFCF5F010000008F
-:1000400008DD0B0000FC0F00097C0B006EFC0A00B1
-:10005000FFFFCF5F0100000008DD0B0000FC0F0078
-:10006000097C0B006EFC0A00FFFFCF5F010000005F
-:1000700008DD0B0000FC0F00097C0B006EFC0A0081
-:100080000101000000000000FFFFCF5F0200000040
-:1000900008DD0B0000FC0F00097C0B006EFC0A0061
-:1000A000FFFFCF5F0200000008DD0B0000FC0F0027
-:1000B000097C0B006EFC0A00FFFFCF5F020000000E
-:1000C00008DD0B0000FC0F00097C0B006EFC0A0031
-:1000D000FFFFCF5F0200000008DD0B0000FC0F00F7
-:0800E000097C0B006EFC0A0014
-:00000001FF
index ecb9fd3be1433838911f4c947627436d816cb136..d33f01c08b60b329247fe4d8729a387af7c263e4 100644 (file)
@@ -31,3 +31,22 @@ config BTRFS_FS_POSIX_ACL
          Linux website <http://acl.bestbits.at/>.
 
          If you don't know what Access Control Lists are, say N
+
+config BTRFS_FS_CHECK_INTEGRITY
+       bool "Btrfs with integrity check tool compiled in (DANGEROUS)"
+       depends on BTRFS_FS
+       help
+         Adds code that examines all block write requests (including
+         writes of the super block). The goal is to verify that the
+         state of the filesystem on disk is always consistent, i.e.,
+         after a power-loss or kernel panic event the filesystem is
+         in a consistent state.
+
+         If the integrity check tool is included and activated in
+         the mount options, plenty of kernel memory is used, and
+         plenty of additional CPU cycles are spent. Enabling this
+         functionality is not intended for normal use.
+
+         In most cases, unless you are a btrfs developer who needs
+         to verify the integrity of (super)-block write requests
+         during the run of a regression test, say N
index c0ddfd29c5e5a348464d5c3d15a77a7708fd8d79..0c4fa2befae793f1a6845322d7ba71aaa5da4374 100644 (file)
@@ -8,6 +8,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
           extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
           export.o tree-log.o free-space-cache.o zlib.o lzo.o \
           compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
-          reada.o backref.o
+          reada.o backref.o ulist.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
+btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
index 22c64fff1bd524b213ce8b13a233f861680d8424..b9a843226de859c34c8e17f9172aeab0ff0f3be1 100644 (file)
 #include "ctree.h"
 #include "disk-io.h"
 #include "backref.h"
+#include "ulist.h"
+#include "transaction.h"
+#include "delayed-ref.h"
 
-struct __data_ref {
+/*
+ * this structure records all encountered refs on the way up to the root
+ */
+struct __prelim_ref {
        struct list_head list;
-       u64 inum;
-       u64 root;
-       u64 extent_data_item_offset;
+       u64 root_id;
+       struct btrfs_key key;
+       int level;
+       int count;
+       u64 parent;
+       u64 wanted_disk_byte;
 };
 
-struct __shared_ref {
-       struct list_head list;
+static int __add_prelim_ref(struct list_head *head, u64 root_id,
+                           struct btrfs_key *key, int level, u64 parent,
+                           u64 wanted_disk_byte, int count)
+{
+       struct __prelim_ref *ref;
+
+       /* when adding delayed refs, the caller holds the delayed-refs spinlock */
+       ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
+       if (!ref)
+               return -ENOMEM;
+
+       ref->root_id = root_id;
+       if (key)
+               ref->key = *key;
+       else
+               memset(&ref->key, 0, sizeof(ref->key));
+
+       ref->level = level;
+       ref->count = count;
+       ref->parent = parent;
+       ref->wanted_disk_byte = wanted_disk_byte;
+       list_add_tail(&ref->list, head);
+
+       return 0;
+}
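
Note the GFP_ATOMIC: as the comment says, some callers hold the delayed-refs spinlock, so the allocation must not sleep. A minimal sketch of that constraint (names hypothetical):

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void *alloc_under_lock(size_t size)
{
        void *p;

        spin_lock(&demo_lock);
        /* GFP_KERNEL/GFP_NOFS may sleep to reclaim memory, which is
         * forbidden while atomic; GFP_ATOMIC never sleeps */
        p = kmalloc(size, GFP_ATOMIC);
        spin_unlock(&demo_lock);
        return p;
}
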
+
+static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+                               struct ulist *parents,
+                               struct extent_buffer *eb, int level,
+                               u64 wanted_objectid, u64 wanted_disk_byte)
+{
+       int ret;
+       int slot;
+       struct btrfs_file_extent_item *fi;
+       struct btrfs_key key;
        u64 disk_byte;
-};
+
+add_parent:
+       ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
+       if (ret < 0)
+               return ret;
+
+       if (level != 0)
+               return 0;
+
+       /*
+        * if the current leaf is full of EXTENT_DATA items, we must check
+        * whether the next one holds a reference as well.
+        * ref->count cannot be used to skip this check.
+        * repeat this until we don't find any additional EXTENT_DATA items.
+        */
+       while (1) {
+               ret = btrfs_next_leaf(root, path);
+               if (ret < 0)
+                       return ret;
+               if (ret)
+                       return 0;
+
+               eb = path->nodes[0];
+               for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
+                       btrfs_item_key_to_cpu(eb, &key, slot);
+                       if (key.objectid != wanted_objectid ||
+                           key.type != BTRFS_EXTENT_DATA_KEY)
+                               return 0;
+                       fi = btrfs_item_ptr(eb, slot,
+                                               struct btrfs_file_extent_item);
+                       disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+                       if (disk_byte == wanted_disk_byte)
+                               goto add_parent;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * resolve an indirect backref in the form (root_id, key, level)
+ * to a logical address
+ */
+static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+                                       struct __prelim_ref *ref,
+                                       struct ulist *parents)
+{
+       struct btrfs_path *path;
+       struct btrfs_root *root;
+       struct btrfs_key root_key;
+       struct btrfs_key key = {0};
+       struct extent_buffer *eb;
+       int ret = 0;
+       int root_level;
+       int level = ref->level;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       root_key.objectid = ref->root_id;
+       root_key.type = BTRFS_ROOT_ITEM_KEY;
+       root_key.offset = (u64)-1;
+       root = btrfs_read_fs_root_no_name(fs_info, &root_key);
+       if (IS_ERR(root)) {
+               ret = PTR_ERR(root);
+               goto out;
+       }
+
+       rcu_read_lock();
+       root_level = btrfs_header_level(root->node);
+       rcu_read_unlock();
+
+       if (root_level + 1 == level)
+               goto out;
+
+       path->lowest_level = level;
+       ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0);
+       pr_debug("search slot in root %llu (level %d, ref count %d) returned "
+                "%d for key (%llu %u %llu)\n",
+                (unsigned long long)ref->root_id, level, ref->count, ret,
+                (unsigned long long)ref->key.objectid, ref->key.type,
+                (unsigned long long)ref->key.offset);
+       if (ret < 0)
+               goto out;
+
+       eb = path->nodes[level];
+       if (!eb) {
+               WARN_ON(1);
+               ret = 1;
+               goto out;
+       }
+
+       if (level == 0) {
+               if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret)
+                               goto out;
+                       eb = path->nodes[0];
+               }
+
+               btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
+       }
+
+       /* the last two parameters will only be used for level == 0 */
+       ret = add_all_parents(root, path, parents, eb, level, key.objectid,
+                               ref->wanted_disk_byte);
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
+/*
+ * resolve all indirect backrefs from the list
+ */
+static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
+                                  struct list_head *head)
+{
+       int err;
+       int ret = 0;
+       struct __prelim_ref *ref;
+       struct __prelim_ref *ref_safe;
+       struct __prelim_ref *new_ref;
+       struct ulist *parents;
+       struct ulist_node *node;
+
+       parents = ulist_alloc(GFP_NOFS);
+       if (!parents)
+               return -ENOMEM;
+
+       /*
+        * _safe allows us to insert directly after the current item without
+        * iterating over the newly inserted items.
+        * we're also allowed to re-assign ref during iteration.
+        */
+       list_for_each_entry_safe(ref, ref_safe, head, list) {
+               if (ref->parent)        /* already direct */
+                       continue;
+               if (ref->count == 0)
+                       continue;
+               err = __resolve_indirect_ref(fs_info, ref, parents);
+               if (err) {
+                       if (ret == 0)
+                               ret = err;
+                       continue;
+               }
+
+               /* we put the first parent into the ref at hand */
+               node = ulist_next(parents, NULL);
+               ref->parent = node ? node->val : 0;
+
+               /* additional parents require new refs being added here */
+               while ((node = ulist_next(parents, node))) {
+                       new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
+                       if (!new_ref) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+                       memcpy(new_ref, ref, sizeof(*ref));
+                       new_ref->parent = node->val;
+                       list_add(&new_ref->list, &ref->list);
+               }
+               ulist_reinit(parents);
+       }
+
+       ulist_free(parents);
+       return ret;
+}
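
The loop above leans on a property of list_for_each_entry_safe(): the successor is sampled before the body runs, so nodes inserted directly after the cursor (the refs for additional parents) are not revisited in the same pass. A minimal sketch of the idiom (names hypothetical):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
        struct list_head list;
        int val;
};

static void expand(struct list_head *head)
{
        struct item *it, *next;

        /* 'next' is captured up front, so a node added right after
         * 'it' is skipped by this iteration of the walk */
        list_for_each_entry_safe(it, next, head, list) {
                if (it->val > 1) {
                        struct item *clone;

                        clone = kmalloc(sizeof(*clone), GFP_NOFS);
                        if (!clone)
                                return;
                        clone->val = it->val - 1;
                        list_add(&clone->list, &it->list);
                }
        }
}
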
+
+/*
+ * merge two lists of backrefs and adjust counts accordingly
+ *
+ * mode = 1: merge identical keys, if key is set
+ * mode = 2: merge identical parents
+ */
+static int __merge_refs(struct list_head *head, int mode)
+{
+       struct list_head *pos1;
+
+       list_for_each(pos1, head) {
+               struct list_head *n2;
+               struct list_head *pos2;
+               struct __prelim_ref *ref1;
+
+               ref1 = list_entry(pos1, struct __prelim_ref, list);
+
+               if (mode == 1 && ref1->key.type == 0)
+                       continue;
+               for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
+                    pos2 = n2, n2 = pos2->next) {
+                       struct __prelim_ref *ref2;
+
+                       ref2 = list_entry(pos2, struct __prelim_ref, list);
+
+                       if (mode == 1) {
+                               if (memcmp(&ref1->key, &ref2->key,
+                                          sizeof(ref1->key)) ||
+                                   ref1->level != ref2->level ||
+                                   ref1->root_id != ref2->root_id)
+                                       continue;
+                               ref1->count += ref2->count;
+                       } else {
+                               if (ref1->parent != ref2->parent)
+                                       continue;
+                               ref1->count += ref2->count;
+                       }
+                       list_del(&ref2->list);
+                       kfree(ref2);
+               }
+
+       }
+       return 0;
+}
+
+/*
+ * add to the list all delayed refs currently queued on this head whose
+ * seq nr is smaller than or equal to seq
+ */
+static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
+                             struct btrfs_key *info_key,
+                             struct list_head *prefs)
+{
+       struct btrfs_delayed_extent_op *extent_op = head->extent_op;
+       struct rb_node *n = &head->node.rb_node;
+       int sgn;
+       int ret;
+
+       if (extent_op && extent_op->update_key)
+               btrfs_disk_key_to_cpu(info_key, &extent_op->key);
+
+       while ((n = rb_prev(n))) {
+               struct btrfs_delayed_ref_node *node;
+               node = rb_entry(n, struct btrfs_delayed_ref_node,
+                               rb_node);
+               if (node->bytenr != head->node.bytenr)
+                       break;
+               WARN_ON(node->is_head);
+
+               if (node->seq > seq)
+                       continue;
+
+               switch (node->action) {
+               case BTRFS_ADD_DELAYED_EXTENT:
+               case BTRFS_UPDATE_DELAYED_HEAD:
+                       WARN_ON(1);
+                       continue;
+               case BTRFS_ADD_DELAYED_REF:
+                       sgn = 1;
+                       break;
+               case BTRFS_DROP_DELAYED_REF:
+                       sgn = -1;
+                       break;
+               default:
+                       BUG_ON(1);
+               }
+               switch (node->type) {
+               case BTRFS_TREE_BLOCK_REF_KEY: {
+                       struct btrfs_delayed_tree_ref *ref;
+
+                       ref = btrfs_delayed_node_to_tree_ref(node);
+                       ret = __add_prelim_ref(prefs, ref->root, info_key,
+                                              ref->level + 1, 0, node->bytenr,
+                                              node->ref_mod * sgn);
+                       break;
+               }
+               case BTRFS_SHARED_BLOCK_REF_KEY: {
+                       struct btrfs_delayed_tree_ref *ref;
+
+                       ref = btrfs_delayed_node_to_tree_ref(node);
+                       ret = __add_prelim_ref(prefs, ref->root, info_key,
+                                              ref->level + 1, ref->parent,
+                                              node->bytenr,
+                                              node->ref_mod * sgn);
+                       break;
+               }
+               case BTRFS_EXTENT_DATA_REF_KEY: {
+                       struct btrfs_delayed_data_ref *ref;
+                       struct btrfs_key key;
+
+                       ref = btrfs_delayed_node_to_data_ref(node);
+
+                       key.objectid = ref->objectid;
+                       key.type = BTRFS_EXTENT_DATA_KEY;
+                       key.offset = ref->offset;
+                       ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
+                                              node->bytenr,
+                                              node->ref_mod * sgn);
+                       break;
+               }
+               case BTRFS_SHARED_DATA_REF_KEY: {
+                       struct btrfs_delayed_data_ref *ref;
+                       struct btrfs_key key;
+
+                       ref = btrfs_delayed_node_to_data_ref(node);
+
+                       key.objectid = ref->objectid;
+                       key.type = BTRFS_EXTENT_DATA_KEY;
+                       key.offset = ref->offset;
+                       ret = __add_prelim_ref(prefs, ref->root, &key, 0,
+                                              ref->parent, node->bytenr,
+                                              node->ref_mod * sgn);
+                       break;
+               }
+               default:
+                       WARN_ON(1);
+               }
+               BUG_ON(ret);
+       }
+
+       return 0;
+}
+
+/*
+ * add all inline backrefs for bytenr to the list
+ */
+static int __add_inline_refs(struct btrfs_fs_info *fs_info,
+                            struct btrfs_path *path, u64 bytenr,
+                            struct btrfs_key *info_key, int *info_level,
+                            struct list_head *prefs)
+{
+       int ret;
+       int slot;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+       unsigned long ptr;
+       unsigned long end;
+       struct btrfs_extent_item *ei;
+       u64 flags;
+       u64 item_size;
+
+       /*
+        * enumerate all inline refs
+        */
+       leaf = path->nodes[0];
+       slot = path->slots[0] - 1;
+
+       item_size = btrfs_item_size_nr(leaf, slot);
+       BUG_ON(item_size < sizeof(*ei));
+
+       ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
+       flags = btrfs_extent_flags(leaf, ei);
+
+       ptr = (unsigned long)(ei + 1);
+       end = (unsigned long)ei + item_size;
+
+       if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+               struct btrfs_tree_block_info *info;
+               struct btrfs_disk_key disk_key;
+
+               info = (struct btrfs_tree_block_info *)ptr;
+               *info_level = btrfs_tree_block_level(leaf, info);
+               btrfs_tree_block_key(leaf, info, &disk_key);
+               btrfs_disk_key_to_cpu(info_key, &disk_key);
+               ptr += sizeof(struct btrfs_tree_block_info);
+               BUG_ON(ptr > end);
+       } else {
+               BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
+       }
+
+       while (ptr < end) {
+               struct btrfs_extent_inline_ref *iref;
+               u64 offset;
+               int type;
+
+               iref = (struct btrfs_extent_inline_ref *)ptr;
+               type = btrfs_extent_inline_ref_type(leaf, iref);
+               offset = btrfs_extent_inline_ref_offset(leaf, iref);
+
+               switch (type) {
+               case BTRFS_SHARED_BLOCK_REF_KEY:
+                       ret = __add_prelim_ref(prefs, 0, info_key,
+                                               *info_level + 1, offset,
+                                               bytenr, 1);
+                       break;
+               case BTRFS_SHARED_DATA_REF_KEY: {
+                       struct btrfs_shared_data_ref *sdref;
+                       int count;
+
+                       sdref = (struct btrfs_shared_data_ref *)(iref + 1);
+                       count = btrfs_shared_data_ref_count(leaf, sdref);
+                       ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
+                                              bytenr, count);
+                       break;
+               }
+               case BTRFS_TREE_BLOCK_REF_KEY:
+                       ret = __add_prelim_ref(prefs, offset, info_key,
+                                              *info_level + 1, 0, bytenr, 1);
+                       break;
+               case BTRFS_EXTENT_DATA_REF_KEY: {
+                       struct btrfs_extent_data_ref *dref;
+                       int count;
+                       u64 root;
+
+                       dref = (struct btrfs_extent_data_ref *)(&iref->offset);
+                       count = btrfs_extent_data_ref_count(leaf, dref);
+                       key.objectid = btrfs_extent_data_ref_objectid(leaf,
+                                                                     dref);
+                       key.type = BTRFS_EXTENT_DATA_KEY;
+                       key.offset = btrfs_extent_data_ref_offset(leaf, dref);
+                       root = btrfs_extent_data_ref_root(leaf, dref);
+                       ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr,
+                                               count);
+                       break;
+               }
+               default:
+                       WARN_ON(1);
+               }
+               BUG_ON(ret);
+               ptr += btrfs_extent_inline_ref_size(type);
+       }
+
+       return 0;
+}
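
Inline refs are variable-size records packed directly behind the extent item, hence the raw ptr/end walk that advances by btrfs_extent_inline_ref_size(type). A user-space sketch of the same walk over a hypothetical type/length record format:

#include <stdio.h>

struct rec_hdr {
        unsigned char type;
        unsigned char len;      /* payload bytes that follow */
};

static void walk(const unsigned char *buf, unsigned long size)
{
        unsigned long ptr = (unsigned long)buf;
        unsigned long end = ptr + size;

        while (ptr < end) {
                const struct rec_hdr *h = (const struct rec_hdr *)ptr;

                printf("type %u, %u payload bytes\n", h->type, h->len);
                /* advance by the full size of this record */
                ptr += sizeof(*h) + h->len;
        }
}

int main(void)
{
        unsigned char buf[] = { 1, 2, 0xaa, 0xbb, 3, 1, 0xcc };

        walk(buf, sizeof(buf));
        return 0;
}
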
+
+/*
+ * add all non-inline backrefs for bytenr to the list
+ */
+static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
+                           struct btrfs_path *path, u64 bytenr,
+                           struct btrfs_key *info_key, int info_level,
+                           struct list_head *prefs)
+{
+       struct btrfs_root *extent_root = fs_info->extent_root;
+       int ret;
+       int slot;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+
+       while (1) {
+               ret = btrfs_next_item(extent_root, path);
+               if (ret < 0)
+                       break;
+               if (ret) {
+                       ret = 0;
+                       break;
+               }
+
+               slot = path->slots[0];
+               leaf = path->nodes[0];
+               btrfs_item_key_to_cpu(leaf, &key, slot);
+
+               if (key.objectid != bytenr)
+                       break;
+               if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
+                       continue;
+               if (key.type > BTRFS_SHARED_DATA_REF_KEY)
+                       break;
+
+               switch (key.type) {
+               case BTRFS_SHARED_BLOCK_REF_KEY:
+                       ret = __add_prelim_ref(prefs, 0, info_key,
+                                               info_level + 1, key.offset,
+                                               bytenr, 1);
+                       break;
+               case BTRFS_SHARED_DATA_REF_KEY: {
+                       struct btrfs_shared_data_ref *sdref;
+                       int count;
+
+                       sdref = btrfs_item_ptr(leaf, slot,
+                                             struct btrfs_shared_data_ref);
+                       count = btrfs_shared_data_ref_count(leaf, sdref);
+                       ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
+                                               bytenr, count);
+                       break;
+               }
+               case BTRFS_TREE_BLOCK_REF_KEY:
+                       ret = __add_prelim_ref(prefs, key.offset, info_key,
+                                               info_level + 1, 0, bytenr, 1);
+                       break;
+               case BTRFS_EXTENT_DATA_REF_KEY: {
+                       struct btrfs_extent_data_ref *dref;
+                       int count;
+                       u64 root;
+
+                       dref = btrfs_item_ptr(leaf, slot,
+                                             struct btrfs_extent_data_ref);
+                       count = btrfs_extent_data_ref_count(leaf, dref);
+                       key.objectid = btrfs_extent_data_ref_objectid(leaf,
+                                                                     dref);
+                       key.type = BTRFS_EXTENT_DATA_KEY;
+                       key.offset = btrfs_extent_data_ref_offset(leaf, dref);
+                       root = btrfs_extent_data_ref_root(leaf, dref);
+                       ret = __add_prelim_ref(prefs, root, &key, 0, 0,
+                                               bytenr, count);
+                       break;
+               }
+               default:
+                       WARN_ON(1);
+               }
+               BUG_ON(ret);
+       }
+
+       return ret;
+}
+
+/*
+ * this adds all existing backrefs (inline backrefs, keyed backrefs and
+ * delayed refs) for the given bytenr to the refs list, merges duplicates
+ * and resolves indirect refs to their parent bytenr.
+ * When roots are found, they're added to the roots list.
+ *
+ * FIXME some caching might speed things up
+ */
+static int find_parent_nodes(struct btrfs_trans_handle *trans,
+                            struct btrfs_fs_info *fs_info, u64 bytenr,
+                            u64 seq, struct ulist *refs, struct ulist *roots)
+{
+       struct btrfs_key key;
+       struct btrfs_path *path;
+       struct btrfs_key info_key = { 0 };
+       struct btrfs_delayed_ref_root *delayed_refs = NULL;
+       struct btrfs_delayed_ref_head *head = NULL;
+       int info_level = 0;
+       int ret;
+       struct list_head prefs_delayed;
+       struct list_head prefs;
+       struct __prelim_ref *ref;
+
+       INIT_LIST_HEAD(&prefs);
+       INIT_LIST_HEAD(&prefs_delayed);
+
+       key.objectid = bytenr;
+       key.type = BTRFS_EXTENT_ITEM_KEY;
+       key.offset = (u64)-1;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       /*
+        * grab both a lock on the path and a lock on the delayed ref head.
+        * We need both to get a consistent picture of how the refs look
+        * at a specified point in time
+        */
+again:
+       ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
+       if (ret < 0)
+               goto out;
+       BUG_ON(ret == 0);
+
+       /*
+        * check whether updates for this ref are queued and lock the head
+        */
+       delayed_refs = &trans->transaction->delayed_refs;
+       spin_lock(&delayed_refs->lock);
+       head = btrfs_find_delayed_ref_head(trans, bytenr);
+       if (head) {
+               if (!mutex_trylock(&head->mutex)) {
+                       atomic_inc(&head->node.refs);
+                       spin_unlock(&delayed_refs->lock);
+
+                       btrfs_release_path(path);
+
+                       /*
+                        * Mutex was contended, block until it's
+                        * released and try again
+                        */
+                       mutex_lock(&head->mutex);
+                       mutex_unlock(&head->mutex);
+                       btrfs_put_delayed_ref(&head->node);
+                       goto again;
+               }
+               ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
+               if (ret)
+                       goto out;
+       }
+       spin_unlock(&delayed_refs->lock);
+
+       if (path->slots[0]) {
+               struct extent_buffer *leaf;
+               int slot;
+
+               leaf = path->nodes[0];
+               slot = path->slots[0] - 1;
+               btrfs_item_key_to_cpu(leaf, &key, slot);
+               if (key.objectid == bytenr &&
+                   key.type == BTRFS_EXTENT_ITEM_KEY) {
+                       ret = __add_inline_refs(fs_info, path, bytenr,
+                                               &info_key, &info_level, &prefs);
+                       if (ret)
+                               goto out;
+                       ret = __add_keyed_refs(fs_info, path, bytenr, &info_key,
+                                              info_level, &prefs);
+                       if (ret)
+                               goto out;
+               }
+       }
+       btrfs_release_path(path);
+
+       /*
+        * when adding the delayed refs above, the info_key might not have
+        * been known yet. Go over the list and replace the missing keys
+        */
+       list_for_each_entry(ref, &prefs_delayed, list) {
+               if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
+                       memcpy(&ref->key, &info_key, sizeof(ref->key));
+       }
+       list_splice_init(&prefs_delayed, &prefs);
+
+       ret = __merge_refs(&prefs, 1);
+       if (ret)
+               goto out;
+
+       ret = __resolve_indirect_refs(fs_info, &prefs);
+       if (ret)
+               goto out;
+
+       ret = __merge_refs(&prefs, 2);
+       if (ret)
+               goto out;
+
+       while (!list_empty(&prefs)) {
+               ref = list_first_entry(&prefs, struct __prelim_ref, list);
+               list_del(&ref->list);
+               WARN_ON(ref->count < 0);
+               if (ref->count && ref->root_id && ref->parent == 0) {
+                       /* no parent == root of tree */
+                       ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
+                       BUG_ON(ret < 0);
+               }
+               if (ref->count && ref->parent) {
+                       ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
+                       BUG_ON(ret < 0);
+               }
+               kfree(ref);
+       }
+
+out:
+       if (head)
+               mutex_unlock(&head->mutex);
+       btrfs_free_path(path);
+       while (!list_empty(&prefs)) {
+               ref = list_first_entry(&prefs, struct __prelim_ref, list);
+               list_del(&ref->list);
+               kfree(ref);
+       }
+       while (!list_empty(&prefs_delayed)) {
+               ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
+                                      list);
+               list_del(&ref->list);
+               kfree(ref);
+       }
+
+       return ret;
+}
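
The dance around head->mutex deserves a note: the code must not sleep on the mutex while holding the delayed-refs spinlock, so on contention it pins the head with a reference, drops the spinlock, waits for the holder by locking and immediately unlocking the mutex, and restarts the whole lookup. A minimal pthreads sketch of that trylock-and-retry pattern (the refcounting that keeps the head alive is elided):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* spinlock stand-in */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* head->mutex stand-in */

static void with_both_locks(void (*op)(void))
{
again:
        pthread_mutex_lock(&outer);
        if (pthread_mutex_trylock(&inner) != 0) {
                /* may not block on 'inner' while holding 'outer': back
                 * out, wait for the holder to finish, then start over */
                pthread_mutex_unlock(&outer);
                pthread_mutex_lock(&inner);
                pthread_mutex_unlock(&inner);
                goto again;
        }
        op();
        pthread_mutex_unlock(&inner);
        pthread_mutex_unlock(&outer);
}

static void op(void)
{
        puts("both locks held");
}

int main(void)
{
        with_both_locks(op);
        return 0;
}
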
+
+/*
+ * Finds all leaves with a reference to the specified combination of bytenr
+ * and offset. The leaves are stored in the leafs ulist, which must be freed
+ * with ulist_free.
+ *
+ * returns 0 on success, <0 on error
+ */
+static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
+                               struct btrfs_fs_info *fs_info, u64 bytenr,
+                               u64 num_bytes, u64 seq, struct ulist **leafs)
+{
+       struct ulist *tmp;
+       int ret;
+
+       tmp = ulist_alloc(GFP_NOFS);
+       if (!tmp)
+               return -ENOMEM;
+       *leafs = ulist_alloc(GFP_NOFS);
+       if (!*leafs) {
+               ulist_free(tmp);
+               return -ENOMEM;
+       }
+
+       ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp);
+       ulist_free(tmp);
+
+       if (ret < 0 && ret != -ENOENT) {
+               ulist_free(*leafs);
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * walk all backrefs for a given extent to find all roots that reference this
+ * extent. Walking a backref means finding all extents that reference this
+ * extent and, in turn, walking the backrefs of those, too. Naturally this is a
+ * recursive process, but here it is implemented in an iterative fashion: We
+ * find all referencing extents for the extent in question and put them on a
+ * list. In turn, we find all referencing extents for those, further appending
+ * to the list. The way we iterate the list allows adding more elements after
+ * the current while iterating. The process stops when we reach the end of the
+ * list. Found roots are added to the roots list.
+ *
+ * returns 0 on success, < 0 on error.
+ */
+int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+                               struct btrfs_fs_info *fs_info, u64 bytenr,
+                               u64 num_bytes, u64 seq, struct ulist **roots)
+{
+       struct ulist *tmp;
+       struct ulist_node *node = NULL;
+       int ret;
+
+       tmp = ulist_alloc(GFP_NOFS);
+       if (!tmp)
+               return -ENOMEM;
+       *roots = ulist_alloc(GFP_NOFS);
+       if (!*roots) {
+               ulist_free(tmp);
+               return -ENOMEM;
+       }
+
+       while (1) {
+               ret = find_parent_nodes(trans, fs_info, bytenr, seq,
+                                       tmp, *roots);
+               if (ret < 0 && ret != -ENOENT) {
+                       ulist_free(tmp);
+                       ulist_free(*roots);
+                       return ret;
+               }
+               node = ulist_next(tmp, node);
+               if (!node)
+                       break;
+               bytenr = node->val;
+       }
+
+       ulist_free(tmp);
+       return 0;
+}
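
The iterative walk described in the comment is easy to see in miniature: a worklist that filters duplicates may safely grow while it is scanned by index, and the scan terminates once no new entries appear. A user-space sketch (the edge function is hypothetical):

#include <stdio.h>

#define MAX 64

static unsigned long list[MAX];
static int count;

/* like ulist_add(): returns 0 for a known value, 1 when appended */
static int add_unique(unsigned long v)
{
        int i;

        for (i = 0; i < count; i++)
                if (list[i] == v)
                        return 0;
        if (count == MAX)
                return -1;      /* table full */
        list[count++] = v;
        return 1;
}

/* hypothetical edge function: which extent references 'v'? */
static unsigned long parent_of(unsigned long v)
{
        return v / 2;
}

int main(void)
{
        int i;

        add_unique(40);
        for (i = 0; i < count; i++)     /* 'count' may grow underneath us */
                if (list[i])
                        add_unique(parent_of(list[i]));
        for (i = 0; i < count; i++)
                printf("%lu\n", list[i]);
        return 0;
}
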
+
 
 static int __inode_info(u64 inum, u64 ioff, u8 key_type,
                        struct btrfs_root *fs_root, struct btrfs_path *path,
@@ -181,8 +952,11 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
        btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
        if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
            found_key->objectid > logical ||
-           found_key->objectid + found_key->offset <= logical)
+           found_key->objectid + found_key->offset <= logical) {
+               pr_debug("logical %llu is not within any extent\n",
+                        (unsigned long long)logical);
                return -ENOENT;
+       }
 
        eb = path->nodes[0];
        item_size = btrfs_item_size_nr(eb, path->slots[0]);
@@ -191,6 +965,13 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        flags = btrfs_extent_flags(eb, ei);
 
+       pr_debug("logical %llu is at position %llu within the extent (%llu "
+                "EXTENT_ITEM %llu) flags %#llx size %u\n",
+                (unsigned long long)logical,
+                (unsigned long long)(logical - found_key->objectid),
+                (unsigned long long)found_key->objectid,
+                (unsigned long long)found_key->offset,
+                (unsigned long long)flags, item_size);
        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
                return BTRFS_EXTENT_FLAG_TREE_BLOCK;
        if (flags & BTRFS_EXTENT_FLAG_DATA)
@@ -287,128 +1068,11 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
        return 0;
 }
 
-static int __data_list_add(struct list_head *head, u64 inum,
-                               u64 extent_data_item_offset, u64 root)
-{
-       struct __data_ref *ref;
-
-       ref = kmalloc(sizeof(*ref), GFP_NOFS);
-       if (!ref)
-               return -ENOMEM;
-
-       ref->inum = inum;
-       ref->extent_data_item_offset = extent_data_item_offset;
-       ref->root = root;
-       list_add_tail(&ref->list, head);
-
-       return 0;
-}
-
-static int __data_list_add_eb(struct list_head *head, struct extent_buffer *eb,
-                               struct btrfs_extent_data_ref *dref)
-{
-       return __data_list_add(head, btrfs_extent_data_ref_objectid(eb, dref),
-                               btrfs_extent_data_ref_offset(eb, dref),
-                               btrfs_extent_data_ref_root(eb, dref));
-}
-
-static int __shared_list_add(struct list_head *head, u64 disk_byte)
-{
-       struct __shared_ref *ref;
-
-       ref = kmalloc(sizeof(*ref), GFP_NOFS);
-       if (!ref)
-               return -ENOMEM;
-
-       ref->disk_byte = disk_byte;
-       list_add_tail(&ref->list, head);
-
-       return 0;
-}
-
-static int __iter_shared_inline_ref_inodes(struct btrfs_fs_info *fs_info,
-                                          u64 logical, u64 inum,
-                                          u64 extent_data_item_offset,
-                                          u64 extent_offset,
-                                          struct btrfs_path *path,
-                                          struct list_head *data_refs,
-                                          iterate_extent_inodes_t *iterate,
-                                          void *ctx)
-{
-       u64 ref_root;
-       u32 item_size;
-       struct btrfs_key key;
-       struct extent_buffer *eb;
-       struct btrfs_extent_item *ei;
-       struct btrfs_extent_inline_ref *eiref;
-       struct __data_ref *ref;
-       int ret;
-       int type;
-       int last;
-       unsigned long ptr = 0;
-
-       WARN_ON(!list_empty(data_refs));
-       ret = extent_from_logical(fs_info, logical, path, &key);
-       if (ret & BTRFS_EXTENT_FLAG_DATA)
-               ret = -EIO;
-       if (ret < 0)
-               goto out;
-
-       eb = path->nodes[0];
-       ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
-       item_size = btrfs_item_size_nr(eb, path->slots[0]);
-
-       ret = 0;
-       ref_root = 0;
-       /*
-        * as done in iterate_extent_inodes, we first build a list of refs to
-        * iterate, then free the path and then iterate them to avoid deadlocks.
-        */
-       do {
-               last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
-                                               &eiref, &type);
-               if (last < 0) {
-                       ret = last;
-                       goto out;
-               }
-               if (type == BTRFS_TREE_BLOCK_REF_KEY ||
-                   type == BTRFS_SHARED_BLOCK_REF_KEY) {
-                       ref_root = btrfs_extent_inline_ref_offset(eb, eiref);
-                       ret = __data_list_add(data_refs, inum,
-                                               extent_data_item_offset,
-                                               ref_root);
-               }
-       } while (!ret && !last);
-
-       btrfs_release_path(path);
-
-       if (ref_root == 0) {
-               printk(KERN_ERR "btrfs: failed to find tree block ref "
-                       "for shared data backref %llu\n", logical);
-               WARN_ON(1);
-               ret = -EIO;
-       }
-
-out:
-       while (!list_empty(data_refs)) {
-               ref = list_first_entry(data_refs, struct __data_ref, list);
-               list_del(&ref->list);
-               if (!ret)
-                       ret = iterate(ref->inum, extent_offset +
-                                       ref->extent_data_item_offset,
-                                       ref->root, ctx);
-               kfree(ref);
-       }
-
-       return ret;
-}
-
-static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
-                                   u64 logical, u64 orig_extent_item_objectid,
-                                   u64 extent_offset, struct btrfs_path *path,
-                                   struct list_head *data_refs,
-                                   iterate_extent_inodes_t *iterate,
-                                   void *ctx)
+static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
+                               struct btrfs_path *path, u64 logical,
+                               u64 orig_extent_item_objectid,
+                               u64 extent_item_pos, u64 root,
+                               iterate_extent_inodes_t *iterate, void *ctx)
 {
        u64 disk_byte;
        struct btrfs_key key;
@@ -416,8 +1080,10 @@ static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
        struct extent_buffer *eb;
        int slot;
        int nritems;
-       int ret;
-       int found = 0;
+       int ret = 0;
+       int extent_type;
+       u64 data_offset;
+       u64 data_len;
 
        eb = read_tree_block(fs_info->tree_root, logical,
                                fs_info->tree_root->leafsize, 0);
@@ -435,149 +1101,99 @@ static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
                if (key.type != BTRFS_EXTENT_DATA_KEY)
                        continue;
                fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
-               if (!fi) {
-                       free_extent_buffer(eb);
-                       return -EIO;
-               }
+               extent_type = btrfs_file_extent_type(eb, fi);
+               if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+                       continue;
+               /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
                disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
-               if (disk_byte != orig_extent_item_objectid) {
-                       if (found)
-                               break;
-                       else
-                               continue;
-               }
-               ++found;
-               ret = __iter_shared_inline_ref_inodes(fs_info, logical,
-                                                       key.objectid,
-                                                       key.offset,
-                                                       extent_offset, path,
-                                                       data_refs,
-                                                       iterate, ctx);
-               if (ret)
-                       break;
-       }
+               if (disk_byte != orig_extent_item_objectid)
+                       continue;
 
-       if (!found) {
-               printk(KERN_ERR "btrfs: failed to follow shared data backref "
-                       "to parent %llu\n", logical);
-               WARN_ON(1);
-               ret = -EIO;
+               data_offset = btrfs_file_extent_offset(eb, fi);
+               data_len = btrfs_file_extent_num_bytes(eb, fi);
+
+               if (extent_item_pos < data_offset ||
+                   extent_item_pos >= data_offset + data_len)
+                       continue;
+
+               pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), "
+                               "root %llu\n", orig_extent_item_objectid,
+                               key.objectid, key.offset, root);
+               ret = iterate(key.objectid,
+                               key.offset + (extent_item_pos - data_offset),
+                               root, ctx);
+               if (ret) {
+                       pr_debug("stopping iteration because ret=%d\n", ret);
+                       break;
+               }
        }
 
        free_extent_buffer(eb);
+
        return ret;
 }
 
 /*
  * calls iterate() for every inode that references the extent identified by
- * the given parameters. will use the path given as a parameter and return it
- * released.
+ * the given parameters.
  * when the iterator function returns a non-zero value, iteration stops.
+ * path is guaranteed to be in released state when iterate() is called.
  */
 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
                                struct btrfs_path *path,
-                               u64 extent_item_objectid,
-                               u64 extent_offset,
+                               u64 extent_item_objectid, u64 extent_item_pos,
                                iterate_extent_inodes_t *iterate, void *ctx)
 {
-       unsigned long ptr = 0;
-       int last;
        int ret;
-       int type;
-       u64 logical;
-       u32 item_size;
-       struct btrfs_extent_inline_ref *eiref;
-       struct btrfs_extent_data_ref *dref;
-       struct extent_buffer *eb;
-       struct btrfs_extent_item *ei;
-       struct btrfs_key key;
        struct list_head data_refs = LIST_HEAD_INIT(data_refs);
        struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
-       struct __data_ref *ref_d;
-       struct __shared_ref *ref_s;
-
-       eb = path->nodes[0];
-       ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
-       item_size = btrfs_item_size_nr(eb, path->slots[0]);
-
-       /* first we iterate the inline refs, ... */
-       do {
-               last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
-                                               &eiref, &type);
-               if (last == -ENOENT) {
-                       ret = 0;
-                       break;
-               }
-               if (last < 0) {
-                       ret = last;
-                       break;
-               }
+       struct btrfs_trans_handle *trans;
+       struct ulist *refs;
+       struct ulist *roots;
+       struct ulist_node *ref_node = NULL;
+       struct ulist_node *root_node = NULL;
+       struct seq_list seq_elem;
+       struct btrfs_delayed_ref_root *delayed_refs;
+
+       trans = btrfs_join_transaction(fs_info->extent_root);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
+
+       pr_debug("resolving all inodes for extent %llu\n",
+                       extent_item_objectid);
+
+       delayed_refs = &trans->transaction->delayed_refs;
+       spin_lock(&delayed_refs->lock);
+       btrfs_get_delayed_seq(delayed_refs, &seq_elem);
+       spin_unlock(&delayed_refs->lock);
+
+       ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
+                                  extent_item_pos, seq_elem.seq,
+                                  &refs);
 
-               if (type == BTRFS_EXTENT_DATA_REF_KEY) {
-                       dref = (struct btrfs_extent_data_ref *)(&eiref->offset);
-                       ret = __data_list_add_eb(&data_refs, eb, dref);
-               } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
-                       logical = btrfs_extent_inline_ref_offset(eb, eiref);
-                       ret = __shared_list_add(&shared_refs, logical);
-               }
-       } while (!ret && !last);
+       if (ret)
+               goto out;
 
-       /* ... then we proceed to in-tree references and ... */
-       while (!ret) {
-               ++path->slots[0];
-               if (path->slots[0] > btrfs_header_nritems(eb)) {
-                       ret = btrfs_next_leaf(fs_info->extent_root, path);
-                       if (ret) {
-                               if (ret == 1)
-                                       ret = 0; /* we're done */
-                               break;
-                       }
-                       eb = path->nodes[0];
-               }
-               btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
-               if (key.objectid != extent_item_objectid)
+       while (!ret && (ref_node = ulist_next(refs, ref_node))) {
+               ret = btrfs_find_all_roots(trans, fs_info, ref_node->val, -1,
+                                               seq_elem.seq, &roots);
+               if (ret)
                        break;
-               if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
-                       dref = btrfs_item_ptr(eb, path->slots[0],
-                                               struct btrfs_extent_data_ref);
-                       ret = __data_list_add_eb(&data_refs, eb, dref);
-               } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
-                       ret = __shared_list_add(&shared_refs, key.offset);
+               while (!ret && (root_node = ulist_next(roots, root_node))) {
+                       pr_debug("root %llu references leaf %llu\n",
+                                       root_node->val, ref_node->val);
+                       ret = iterate_leaf_refs(fs_info, path, ref_node->val,
+                                               extent_item_objectid,
+                                               extent_item_pos, root_node->val,
+                                               iterate, ctx);
                }
        }
 
-       btrfs_release_path(path);
-
-       /*
-        * ... only at the very end we can process the refs we found. this is
-        * because the iterator function we call is allowed to make tree lookups
-        * and we have to avoid deadlocks. additionally, we need more tree
-        * lookups ourselves for shared data refs.
-        */
-       while (!list_empty(&data_refs)) {
-               ref_d = list_first_entry(&data_refs, struct __data_ref, list);
-               list_del(&ref_d->list);
-               if (!ret)
-                       ret = iterate(ref_d->inum, extent_offset +
-                                       ref_d->extent_data_item_offset,
-                                       ref_d->root, ctx);
-               kfree(ref_d);
-       }
-
-       while (!list_empty(&shared_refs)) {
-               ref_s = list_first_entry(&shared_refs, struct __shared_ref,
-                                       list);
-               list_del(&ref_s->list);
-               if (!ret)
-                       ret = __iter_shared_inline_ref(fs_info,
-                                                       ref_s->disk_byte,
-                                                       extent_item_objectid,
-                                                       extent_offset, path,
-                                                       &data_refs,
-                                                       iterate, ctx);
-               kfree(ref_s);
-       }
-
+       ulist_free(refs);
+       ulist_free(roots);
+out:
+       btrfs_put_delayed_seq(delayed_refs, &seq_elem);
+       btrfs_end_transaction(trans, fs_info->extent_root);
        return ret;
 }
 
@@ -586,19 +1202,20 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
                                iterate_extent_inodes_t *iterate, void *ctx)
 {
        int ret;
-       u64 offset;
+       u64 extent_item_pos;
        struct btrfs_key found_key;
 
        ret = extent_from_logical(fs_info, logical, path,
                                        &found_key);
+       btrfs_release_path(path);
        if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
                ret = -EINVAL;
        if (ret < 0)
                return ret;
 
-       offset = logical - found_key.objectid;
+       extent_item_pos = logical - found_key.objectid;
        ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
-                                       offset, iterate, ctx);
+                                       extent_item_pos, iterate, ctx);
 
        return ret;
 }
@@ -643,6 +1260,10 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
                for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
                        name_len = btrfs_inode_ref_name_len(eb, iref);
                        /* path must be released before calling iterate()! */
+                       pr_debug("following ref at offset %u for inode %llu in "
+                                "tree %llu\n", cur,
+                                (unsigned long long)found_key.objectid,
+                                (unsigned long long)fs_root->objectid);
                        ret = iterate(parent, iref, eb, ctx);
                        if (ret) {
                                free_extent_buffer(eb);
@@ -683,10 +1304,14 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
                return PTR_ERR(fspath);
 
        if (fspath > fspath_min) {
+               pr_debug("path resolved: %s\n", fspath);
                ipath->fspath->val[i] = (u64)(unsigned long)fspath;
                ++ipath->fspath->elem_cnt;
                ipath->fspath->bytes_left = fspath - fspath_min;
        } else {
+               pr_debug("missed path, not enough space. missing bytes: %lu, "
+                        "constructed so far: %s\n",
+                        (unsigned long)(fspath_min - fspath), fspath_min);
                ++ipath->fspath->elem_missed;
                ipath->fspath->bytes_missing += fspath_min - fspath;
                ipath->fspath->bytes_left = 0;
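
fspath is built right to left: each name is prepended in front of a moving cursor, and on underflow the shortfall is booked in bytes_missing instead of failing the whole lookup. A user-space sketch of the backward construction (names hypothetical):

#include <stdio.h>
#include <string.h>

/* prepend '/' + name in front of pos; NULL on overflow, which is the
 * case the "missed path" branch above accounts for */
static char *prepend(char *buf, char *pos, const char *name)
{
        size_t len = strlen(name);

        if ((size_t)(pos - buf) < len + 1)
                return NULL;
        pos -= len;
        memcpy(pos, name, len);
        *--pos = '/';
        return pos;
}

int main(void)
{
        char buf[32];
        char *pos = buf + sizeof(buf) - 1;

        *pos = '\0';
        pos = prepend(buf, pos, "file");
        if (pos)
                pos = prepend(buf, pos, "dir");
        if (pos)
                printf("%s\n", pos);    /* prints "/dir/file" */
        return 0;
}
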
index 92618837cb8f94a3a799ea3d08093d9b33aab336..d00dfa9ca9342c96f5057af09fb06418cd943cb6 100644 (file)
@@ -20,6 +20,7 @@
 #define __BTRFS_BACKREF__
 
 #include "ioctl.h"
+#include "ulist.h"
 
 struct inode_fs_paths {
        struct btrfs_path               *btrfs_path;
@@ -54,6 +55,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
 
 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
 
+int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+                               struct btrfs_fs_info *fs_info, u64 bytenr,
+                               u64 num_bytes, u64 seq, struct ulist **roots);
+
 struct btrfs_data_container *init_data_container(u32 total_bytes);
 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
                                        struct btrfs_path *path);
index 634608d2a6d03b5d8741e573de0b142ff8cb6310..9b9b15fd5204347c5ef2931fb186af679cb0d369 100644 (file)
@@ -51,6 +51,9 @@ struct btrfs_inode {
        /* held while logging the inode in tree-log.c */
        struct mutex log_mutex;
 
+       /* held while doing delalloc reservations */
+       struct mutex delalloc_mutex;
+
        /* used to order data wrt metadata */
        struct btrfs_ordered_inode_tree ordered_tree;
 
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
new file mode 100644 (file)
index 0000000..ad0b3ba
--- /dev/null
@@ -0,0 +1,3068 @@
+/*
+ * Copyright (C) STRATO AG 2011.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+/*
+ * This module can be used to catch cases when the btrfs kernel
+ * code executes write requests to the disk that bring the file
+ * system into an inconsistent state. In such a state, a power-loss
+ * or kernel panic event could cause the data on disk to be lost
+ * or at least damaged.
+ *
+ * Code is added that examines all block write requests during
+ * runtime (including writes of the super block). Three rules
+ * are verified and an error is printed on violation of the
+ * rules:
+ * 1. It is not allowed to write a disk block which is
+ *    currently referenced by the super block (either directly
+ *    or indirectly).
+ * 2. When a super block is written, it is verified that all
+ *    referenced (directly or indirectly) blocks fulfill the
+ *    following requirements:
+ *    2a. All referenced blocks have either been present when
+ *        the file system was mounted (i.e., they were already
+ *        referenced by the super block), or they have been
+ *        written since then, the write completion callback
+ *        was called, and a FLUSH request to the device where
+ *        these blocks are located was received and completed.
+ *    2b. All referenced blocks need to have a generation
+ *        number which is equal to the parent's number.
+ *
+ * One issue that was found using this module was that the log
+ * tree on disk became temporarily corrupted because disk blocks
+ * that had been in use for the log tree had been freed and
+ * reused too early, while being referenced by the written super
+ * block.
+ *
+ * The search term in the kernel log that can be used to filter
+ * on the existence of detected integrity issues is
+ * "btrfs: attempt".
+ *
+ * The integrity check is enabled via mount options. These
+ * mount options are only available if the integrity check
+ * tool is compiled in by defining BTRFS_FS_CHECK_INTEGRITY.
+ *
+ * Example #1, apply integrity checks to all metadata:
+ * mount /dev/sdb1 /mnt -o check_int
+ *
+ * Example #2, apply integrity checks to all metadata and
+ * to data extents:
+ * mount /dev/sdb1 /mnt -o check_int_data
+ *
+ * Example #3, apply integrity checks to all metadata and dump
+ * the tree that the super block references to kernel messages
+ * each time after a super block was written:
+ * mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263
+ *
+ * If the integrity check tool is included and activated in
+ * the mount options, considerable kernel memory is used, and
+ * considerable additional CPU cycles are spent. This
+ * functionality is not intended for normal use. Unless you
+ * are a btrfs developer who needs to verify the integrity
+ * of (super)-block write requests, do not enable the config
+ * option BTRFS_FS_CHECK_INTEGRITY to include and compile
+ * the integrity check tool.
+ */
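+
+/*
+ * For example, after running a workload on a mount with check_int
+ * enabled, detected violations can be filtered from the kernel log
+ * with:
+ *   dmesg | grep "btrfs: attempt"
+ */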
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/mutex.h>
+#include <linux/crc32c.h>
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "extent_io.h"
+#include "disk-io.h"
+#include "volumes.h"
+#include "print-tree.h"
+#include "locking.h"
+#include "check-integrity.h"
+
+#define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
+#define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
+#define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100
+#define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051
+#define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807
+#define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530
+#define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300
+#define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6)   /* in characters,
+                                                        * excluding " [...]" */
+#define BTRFSIC_BLOCK_SIZE PAGE_SIZE
+
+#define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)
+
+/*
+ * The definition of the bitmask fields for the print_mask.
+ * They are specified with the mount option check_integrity_print_mask.
+ */
+#define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE                    0x00000001
+#define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION                0x00000002
+#define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE                 0x00000004
+#define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE                        0x00000008
+#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH                       0x00000010
+#define BTRFSIC_PRINT_MASK_END_IO_BIO_BH                       0x00000020
+#define BTRFSIC_PRINT_MASK_VERBOSE                             0x00000040
+#define BTRFSIC_PRINT_MASK_VERY_VERBOSE                                0x00000080
+#define BTRFSIC_PRINT_MASK_INITIAL_TREE                                0x00000100
+#define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES                   0x00000200
+#define BTRFSIC_PRINT_MASK_INITIAL_DATABASE                    0x00000400
+#define BTRFSIC_PRINT_MASK_NUM_COPIES                          0x00000800
+#define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS               0x00001000
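+
+/*
+ * For reference, the mask value 263 used in example #3 above equals
+ * 0x107, i.e. BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE (0x001) |
+ * BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION (0x002) |
+ * BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE (0x004) |
+ * BTRFSIC_PRINT_MASK_INITIAL_TREE (0x100).
+ */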
+
+struct btrfsic_dev_state;
+struct btrfsic_state;
+
+struct btrfsic_block {
+       u32 magic_num;          /* only used for debug purposes */
+       unsigned int is_metadata:1;     /* if it is metadata, not file data */
+       unsigned int is_superblock:1;   /* if it is one of the superblocks */
+       unsigned int is_iodone:1;       /* if I/O is done by lower subsystem */
+       unsigned int iodone_w_error:1;  /* error was indicated to endio */
+       unsigned int never_written:1;   /* block was added because it was
+                                        * referenced, not because it was
+                                        * written */
+       unsigned int mirror_num:2;      /* large enough to hold
+                                        * BTRFS_SUPER_MIRROR_MAX */
+       struct btrfsic_dev_state *dev_state;
+       u64 dev_bytenr;         /* key, physical byte num on disk */
+       u64 logical_bytenr;     /* logical byte num on disk */
+       u64 generation;
+       struct btrfs_disk_key disk_key; /* extra info to print in case of
+                                        * issues, will not always be correct */
+       struct list_head collision_resolving_node;      /* list node */
+       struct list_head all_blocks_node;       /* list node */
+
+       /* the following two lists contain block_link items */
+       struct list_head ref_to_list;   /* list */
+       struct list_head ref_from_list; /* list */
+       struct btrfsic_block *next_in_same_bio;
+       void *orig_bio_bh_private;
+       union {
+               bio_end_io_t *bio;
+               bh_end_io_t *bh;
+       } orig_bio_bh_end_io;
+       int submit_bio_bh_rw;
+       u64 flush_gen; /* only valid if !never_written */
+};
+
+/*
+ * Elements of this type are allocated dynamically and are required because
+ * each block object can refer to and can be referenced from multiple blocks.
+ * The key used to look them up in the hashtable is the dev_bytenr of
+ * the block referred to combined with the dev_bytenr of the referring block.
+ * The fact that they are searchable via a hashtable and that a
+ * ref_cnt is maintained is not required for the btrfs integrity
+ * check algorithm itself; it is only used to make the output more
+ * readable in case an error is detected (an error is defined
+ * as a write operation to a block while that block is still referenced).
+ */
+struct btrfsic_block_link {
+       u32 magic_num;          /* only used for debug purposes */
+       u32 ref_cnt;
+       struct list_head node_ref_to;   /* list node */
+       struct list_head node_ref_from; /* list node */
+       struct list_head collision_resolving_node;      /* list node */
+       struct btrfsic_block *block_ref_to;
+       struct btrfsic_block *block_ref_from;
+       u64 parent_generation;
+};
+
+struct btrfsic_dev_state {
+       u32 magic_num;          /* only used for debug purposes */
+       struct block_device *bdev;
+       struct btrfsic_state *state;
+       struct list_head collision_resolving_node;      /* list node */
+       struct btrfsic_block dummy_block_for_bio_bh_flush;
+       u64 last_flush_gen;
+       char name[BDEVNAME_SIZE];
+};
+
+struct btrfsic_block_hashtable {
+       struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE];
+};
+
+struct btrfsic_block_link_hashtable {
+       struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE];
+};
+
+struct btrfsic_dev_state_hashtable {
+       struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE];
+};
+
+struct btrfsic_block_data_ctx {
+       u64 start;              /* virtual bytenr */
+       u64 dev_bytenr;         /* physical bytenr on device */
+       u32 len;
+       struct btrfsic_dev_state *dev;
+       char *data;
+       struct buffer_head *bh; /* do not use if NULL */
+};
+
+/* This structure is used to implement recursion without occupying
+ * any kernel stack space; see btrfsic_process_metablock() */
+struct btrfsic_stack_frame {
+       u32 magic;
+       u32 nr;
+       int error;
+       int i;
+       int limit_nesting;
+       int num_copies;
+       int mirror_num;
+       struct btrfsic_block *block;
+       struct btrfsic_block_data_ctx *block_ctx;
+       struct btrfsic_block *next_block;
+       struct btrfsic_block_data_ctx next_block_ctx;
+       struct btrfs_header *hdr;
+       struct btrfsic_stack_frame *prev;
+};
+
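+/*
+ * Note on the recursion replacement in btrfsic_process_metablock():
+ * instead of calling itself for a child tree block, the function
+ * allocates a new stack frame, chains it to the current one via ->prev
+ * and jumps back to the top of its loop; when a frame is finished, the
+ * chain is popped and the parent resumes at its saved index ->i.
+ */
+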
+/* Some state per mounted filesystem */
+struct btrfsic_state {
+       u32 print_mask;
+       int include_extent_data;
+       int csum_size;
+       struct list_head all_blocks_list;
+       struct btrfsic_block_hashtable block_hashtable;
+       struct btrfsic_block_link_hashtable block_link_hashtable;
+       struct btrfs_root *root;
+       u64 max_superblock_generation;
+       struct btrfsic_block *latest_superblock;
+};
+
+static void btrfsic_block_init(struct btrfsic_block *b);
+static struct btrfsic_block *btrfsic_block_alloc(void);
+static void btrfsic_block_free(struct btrfsic_block *b);
+static void btrfsic_block_link_init(struct btrfsic_block_link *n);
+static struct btrfsic_block_link *btrfsic_block_link_alloc(void);
+static void btrfsic_block_link_free(struct btrfsic_block_link *n);
+static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds);
+static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void);
+static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds);
+static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h);
+static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
+                                       struct btrfsic_block_hashtable *h);
+static void btrfsic_block_hashtable_remove(struct btrfsic_block *b);
+static struct btrfsic_block *btrfsic_block_hashtable_lookup(
+               struct block_device *bdev,
+               u64 dev_bytenr,
+               struct btrfsic_block_hashtable *h);
+static void btrfsic_block_link_hashtable_init(
+               struct btrfsic_block_link_hashtable *h);
+static void btrfsic_block_link_hashtable_add(
+               struct btrfsic_block_link *l,
+               struct btrfsic_block_link_hashtable *h);
+static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l);
+static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
+               struct block_device *bdev_ref_to,
+               u64 dev_bytenr_ref_to,
+               struct block_device *bdev_ref_from,
+               u64 dev_bytenr_ref_from,
+               struct btrfsic_block_link_hashtable *h);
+static void btrfsic_dev_state_hashtable_init(
+               struct btrfsic_dev_state_hashtable *h);
+static void btrfsic_dev_state_hashtable_add(
+               struct btrfsic_dev_state *ds,
+               struct btrfsic_dev_state_hashtable *h);
+static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds);
+static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
+               struct block_device *bdev,
+               struct btrfsic_dev_state_hashtable *h);
+static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void);
+static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf);
+static int btrfsic_process_superblock(struct btrfsic_state *state,
+                                     struct btrfs_fs_devices *fs_devices);
+static int btrfsic_process_metablock(struct btrfsic_state *state,
+                                    struct btrfsic_block *block,
+                                    struct btrfsic_block_data_ctx *block_ctx,
+                                    struct btrfs_header *hdr,
+                                    int limit_nesting, int force_iodone_flag);
+static int btrfsic_create_link_to_next_block(
+               struct btrfsic_state *state,
+               struct btrfsic_block *block,
+               struct btrfsic_block_data_ctx
+               *block_ctx, u64 next_bytenr,
+               int limit_nesting,
+               struct btrfsic_block_data_ctx *next_block_ctx,
+               struct btrfsic_block **next_blockp,
+               int force_iodone_flag,
+               int *num_copiesp, int *mirror_nump,
+               struct btrfs_disk_key *disk_key,
+               u64 parent_generation);
+static int btrfsic_handle_extent_data(struct btrfsic_state *state,
+                                     struct btrfsic_block *block,
+                                     struct btrfsic_block_data_ctx *block_ctx,
+                                     u32 item_offset, int force_iodone_flag);
+static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
+                            struct btrfsic_block_data_ctx *block_ctx_out,
+                            int mirror_num);
+static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
+                                 u32 len, struct block_device *bdev,
+                                 struct btrfsic_block_data_ctx *block_ctx_out);
+static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
+static int btrfsic_read_block(struct btrfsic_state *state,
+                             struct btrfsic_block_data_ctx *block_ctx);
+static void btrfsic_dump_database(struct btrfsic_state *state);
+static int btrfsic_test_for_metadata(struct btrfsic_state *state,
+                                    const u8 *data, unsigned int size);
+static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
+                                         u64 dev_bytenr, u8 *mapped_data,
+                                         unsigned int len, struct bio *bio,
+                                         int *bio_is_patched,
+                                         struct buffer_head *bh,
+                                         int submit_bio_bh_rw);
+static int btrfsic_process_written_superblock(
+               struct btrfsic_state *state,
+               struct btrfsic_block *const block,
+               struct btrfs_super_block *const super_hdr);
+static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status);
+static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate);
+static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state,
+                                             const struct btrfsic_block *block,
+                                             int recursion_level);
+static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
+                                       struct btrfsic_block *const block,
+                                       int recursion_level);
+static void btrfsic_print_add_link(const struct btrfsic_state *state,
+                                  const struct btrfsic_block_link *l);
+static void btrfsic_print_rem_link(const struct btrfsic_state *state,
+                                  const struct btrfsic_block_link *l);
+static char btrfsic_get_block_type(const struct btrfsic_state *state,
+                                  const struct btrfsic_block *block);
+static void btrfsic_dump_tree(const struct btrfsic_state *state);
+static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
+                                 const struct btrfsic_block *block,
+                                 int indent_level);
+static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
+               struct btrfsic_state *state,
+               struct btrfsic_block_data_ctx *next_block_ctx,
+               struct btrfsic_block *next_block,
+               struct btrfsic_block *from_block,
+               u64 parent_generation);
+static struct btrfsic_block *btrfsic_block_lookup_or_add(
+               struct btrfsic_state *state,
+               struct btrfsic_block_data_ctx *block_ctx,
+               const char *additional_string,
+               int is_metadata,
+               int is_iodone,
+               int never_written,
+               int mirror_num,
+               int *was_created);
+static int btrfsic_process_superblock_dev_mirror(
+               struct btrfsic_state *state,
+               struct btrfsic_dev_state *dev_state,
+               struct btrfs_device *device,
+               int superblock_mirror_num,
+               struct btrfsic_dev_state **selected_dev_state,
+               struct btrfs_super_block *selected_super);
+static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
+               struct block_device *bdev);
+static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
+                                          u64 bytenr,
+                                          struct btrfsic_dev_state *dev_state,
+                                          u64 dev_bytenr, char *data);
+
+static struct mutex btrfsic_mutex;
+static int btrfsic_is_initialized;
+static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable;
+
+
+static void btrfsic_block_init(struct btrfsic_block *b)
+{
+       b->magic_num = BTRFSIC_BLOCK_MAGIC_NUMBER;
+       b->dev_state = NULL;
+       b->dev_bytenr = 0;
+       b->logical_bytenr = 0;
+       b->generation = BTRFSIC_GENERATION_UNKNOWN;
+       b->disk_key.objectid = 0;
+       b->disk_key.type = 0;
+       b->disk_key.offset = 0;
+       b->is_metadata = 0;
+       b->is_superblock = 0;
+       b->is_iodone = 0;
+       b->iodone_w_error = 0;
+       b->never_written = 0;
+       b->mirror_num = 0;
+       b->next_in_same_bio = NULL;
+       b->orig_bio_bh_private = NULL;
+       b->orig_bio_bh_end_io.bio = NULL;
+       INIT_LIST_HEAD(&b->collision_resolving_node);
+       INIT_LIST_HEAD(&b->all_blocks_node);
+       INIT_LIST_HEAD(&b->ref_to_list);
+       INIT_LIST_HEAD(&b->ref_from_list);
+       b->submit_bio_bh_rw = 0;
+       b->flush_gen = 0;
+}
+
+static struct btrfsic_block *btrfsic_block_alloc(void)
+{
+       struct btrfsic_block *b;
+
+       b = kzalloc(sizeof(*b), GFP_NOFS);
+       if (NULL != b)
+               btrfsic_block_init(b);
+
+       return b;
+}
+
+static void btrfsic_block_free(struct btrfsic_block *b)
+{
+       BUG_ON(!(NULL == b || BTRFSIC_BLOCK_MAGIC_NUMBER == b->magic_num));
+       kfree(b);
+}
+
+static void btrfsic_block_link_init(struct btrfsic_block_link *l)
+{
+       l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER;
+       l->ref_cnt = 1;
+       INIT_LIST_HEAD(&l->node_ref_to);
+       INIT_LIST_HEAD(&l->node_ref_from);
+       INIT_LIST_HEAD(&l->collision_resolving_node);
+       l->block_ref_to = NULL;
+       l->block_ref_from = NULL;
+}
+
+static struct btrfsic_block_link *btrfsic_block_link_alloc(void)
+{
+       struct btrfsic_block_link *l;
+
+       l = kzalloc(sizeof(*l), GFP_NOFS);
+       if (NULL != l)
+               btrfsic_block_link_init(l);
+
+       return l;
+}
+
+static void btrfsic_block_link_free(struct btrfsic_block_link *l)
+{
+       BUG_ON(!(NULL == l || BTRFSIC_BLOCK_LINK_MAGIC_NUMBER == l->magic_num));
+       kfree(l);
+}
+
+static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds)
+{
+       ds->magic_num = BTRFSIC_DEV2STATE_MAGIC_NUMBER;
+       ds->bdev = NULL;
+       ds->state = NULL;
+       ds->name[0] = '\0';
+       INIT_LIST_HEAD(&ds->collision_resolving_node);
+       ds->last_flush_gen = 0;
+       btrfsic_block_init(&ds->dummy_block_for_bio_bh_flush);
+       ds->dummy_block_for_bio_bh_flush.is_iodone = 1;
+       ds->dummy_block_for_bio_bh_flush.dev_state = ds;
+}
+
+static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void)
+{
+       struct btrfsic_dev_state *ds;
+
+       ds = kzalloc(sizeof(*ds), GFP_NOFS);
+       if (NULL != ds)
+               btrfsic_dev_state_init(ds);
+
+       return ds;
+}
+
+static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds)
+{
+       BUG_ON(!(NULL == ds ||
+                BTRFSIC_DEV2STATE_MAGIC_NUMBER == ds->magic_num));
+       kfree(ds);
+}
+
+static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h)
+{
+       int i;
+
+       for (i = 0; i < BTRFSIC_BLOCK_HASHTABLE_SIZE; i++)
+               INIT_LIST_HEAD(h->table + i);
+}
+
+static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
+                                       struct btrfsic_block_hashtable *h)
+{
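+       /*
+        * The hash combines the physical byte number on the device with
+        * the block_device pointer; the final mask folds it into the
+        * table size, which is a power of two.
+        */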
+       const unsigned int hashval =
+           (((unsigned int)(b->dev_bytenr >> 16)) ^
+            ((unsigned int)((uintptr_t)b->dev_state->bdev))) &
+            (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
+
+       list_add(&b->collision_resolving_node, h->table + hashval);
+}
+
+static void btrfsic_block_hashtable_remove(struct btrfsic_block *b)
+{
+       list_del(&b->collision_resolving_node);
+}
+
+static struct btrfsic_block *btrfsic_block_hashtable_lookup(
+               struct block_device *bdev,
+               u64 dev_bytenr,
+               struct btrfsic_block_hashtable *h)
+{
+       const unsigned int hashval =
+           (((unsigned int)(dev_bytenr >> 16)) ^
+            ((unsigned int)((uintptr_t)bdev))) &
+            (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
+       struct list_head *elem;
+
+       list_for_each(elem, h->table + hashval) {
+               struct btrfsic_block *const b =
+                   list_entry(elem, struct btrfsic_block,
+                              collision_resolving_node);
+
+               if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
+                       return b;
+       }
+
+       return NULL;
+}
+
+static void btrfsic_block_link_hashtable_init(
+               struct btrfsic_block_link_hashtable *h)
+{
+       int i;
+
+       for (i = 0; i < BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE; i++)
+               INIT_LIST_HEAD(h->table + i);
+}
+
+static void btrfsic_block_link_hashtable_add(
+               struct btrfsic_block_link *l,
+               struct btrfsic_block_link_hashtable *h)
+{
+       const unsigned int hashval =
+           (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^
+            ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^
+            ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^
+            ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev)))
+            & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
+
+       BUG_ON(NULL == l->block_ref_to);
+       BUG_ON(NULL == l->block_ref_from);
+       list_add(&l->collision_resolving_node, h->table + hashval);
+}
+
+static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l)
+{
+       list_del(&l->collision_resolving_node);
+}
+
+static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
+               struct block_device *bdev_ref_to,
+               u64 dev_bytenr_ref_to,
+               struct block_device *bdev_ref_from,
+               u64 dev_bytenr_ref_from,
+               struct btrfsic_block_link_hashtable *h)
+{
+       const unsigned int hashval =
+           (((unsigned int)(dev_bytenr_ref_to >> 16)) ^
+            ((unsigned int)(dev_bytenr_ref_from >> 16)) ^
+            ((unsigned int)((uintptr_t)bdev_ref_to)) ^
+            ((unsigned int)((uintptr_t)bdev_ref_from))) &
+            (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
+       struct list_head *elem;
+
+       list_for_each(elem, h->table + hashval) {
+               struct btrfsic_block_link *const l =
+                   list_entry(elem, struct btrfsic_block_link,
+                              collision_resolving_node);
+
+               BUG_ON(NULL == l->block_ref_to);
+               BUG_ON(NULL == l->block_ref_from);
+               if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
+                   l->block_ref_to->dev_bytenr == dev_bytenr_ref_to &&
+                   l->block_ref_from->dev_state->bdev == bdev_ref_from &&
+                   l->block_ref_from->dev_bytenr == dev_bytenr_ref_from)
+                       return l;
+       }
+
+       return NULL;
+}
+
+static void btrfsic_dev_state_hashtable_init(
+               struct btrfsic_dev_state_hashtable *h)
+{
+       int i;
+
+       for (i = 0; i < BTRFSIC_DEV2STATE_HASHTABLE_SIZE; i++)
+               INIT_LIST_HEAD(h->table + i);
+}
+
+static void btrfsic_dev_state_hashtable_add(
+               struct btrfsic_dev_state *ds,
+               struct btrfsic_dev_state_hashtable *h)
+{
+       const unsigned int hashval =
+           (((unsigned int)((uintptr_t)ds->bdev)) &
+            (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
+
+       list_add(&ds->collision_resolving_node, h->table + hashval);
+}
+
+static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds)
+{
+       list_del(&ds->collision_resolving_node);
+}
+
+static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
+               struct block_device *bdev,
+               struct btrfsic_dev_state_hashtable *h)
+{
+       const unsigned int hashval =
+           (((unsigned int)((uintptr_t)bdev)) &
+            (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
+       struct list_head *elem;
+
+       list_for_each(elem, h->table + hashval) {
+               struct btrfsic_dev_state *const ds =
+                   list_entry(elem, struct btrfsic_dev_state,
+                              collision_resolving_node);
+
+               if (ds->bdev == bdev)
+                       return ds;
+       }
+
+       return NULL;
+}
+
+static int btrfsic_process_superblock(struct btrfsic_state *state,
+                                     struct btrfs_fs_devices *fs_devices)
+{
+       int ret;
+       struct btrfs_super_block *selected_super;
+       struct list_head *dev_head = &fs_devices->devices;
+       struct btrfs_device *device;
+       struct btrfsic_dev_state *selected_dev_state = NULL;
+       int pass;
+
+       BUG_ON(NULL == state);
+       selected_super = kmalloc(sizeof(*selected_super), GFP_NOFS);
+       if (NULL == selected_super) {
+               printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+               return -1;
+       }
+
+       list_for_each_entry(device, dev_head, dev_list) {
+               int i;
+               struct btrfsic_dev_state *dev_state;
+
+               if (!device->bdev || !device->name)
+                       continue;
+
+               dev_state = btrfsic_dev_state_lookup(device->bdev);
+               BUG_ON(NULL == dev_state);
+               for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+                       ret = btrfsic_process_superblock_dev_mirror(
+                                       state, dev_state, device, i,
+                                       &selected_dev_state, selected_super);
+                       if (0 != ret && 0 == i) {
+                               kfree(selected_super);
+                               return ret;
+                       }
+               }
+       }
+
+       if (NULL == state->latest_superblock) {
+               printk(KERN_INFO "btrfsic: no superblock found!\n");
+               kfree(selected_super);
+               return -1;
+       }
+
+       state->csum_size = btrfs_super_csum_size(selected_super);
+
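+       /* pass 0: root tree, pass 1: chunk tree, pass 2: log tree (if any) */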
+       for (pass = 0; pass < 3; pass++) {
+               int num_copies;
+               int mirror_num;
+               u64 next_bytenr;
+
+               switch (pass) {
+               case 0:
+                       next_bytenr = btrfs_super_root(selected_super);
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
+                               printk(KERN_INFO "root@%llu\n",
+                                      (unsigned long long)next_bytenr);
+                       break;
+               case 1:
+                       next_bytenr = btrfs_super_chunk_root(selected_super);
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
+                               printk(KERN_INFO "chunk@%llu\n",
+                                      (unsigned long long)next_bytenr);
+                       break;
+               case 2:
+                       next_bytenr = btrfs_super_log_root(selected_super);
+                       if (0 == next_bytenr)
+                               continue;
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
+                               printk(KERN_INFO "log@%llu\n",
+                                      (unsigned long long)next_bytenr);
+                       break;
+               }
+
+               num_copies =
+                   btrfs_num_copies(&state->root->fs_info->mapping_tree,
+                                    next_bytenr, PAGE_SIZE);
+               if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
+                       printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+                              (unsigned long long)next_bytenr, num_copies);
+
+               for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
+                       struct btrfsic_block *next_block;
+                       struct btrfsic_block_data_ctx tmp_next_block_ctx;
+                       struct btrfsic_block_link *l;
+                       struct btrfs_header *hdr;
+
+                       ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+                                               &tmp_next_block_ctx,
+                                               mirror_num);
+                       if (ret) {
+                               printk(KERN_INFO "btrfsic:"
+                                      " btrfsic_map_block(root @%llu,"
+                                      " mirror %d) failed!\n",
+                                      (unsigned long long)next_bytenr,
+                                      mirror_num);
+                               kfree(selected_super);
+                               return -1;
+                       }
+
+                       next_block = btrfsic_block_hashtable_lookup(
+                                       tmp_next_block_ctx.dev->bdev,
+                                       tmp_next_block_ctx.dev_bytenr,
+                                       &state->block_hashtable);
+                       BUG_ON(NULL == next_block);
+
+                       l = btrfsic_block_link_hashtable_lookup(
+                                       tmp_next_block_ctx.dev->bdev,
+                                       tmp_next_block_ctx.dev_bytenr,
+                                       state->latest_superblock->dev_state->
+                                       bdev,
+                                       state->latest_superblock->dev_bytenr,
+                                       &state->block_link_hashtable);
+                       BUG_ON(NULL == l);
+
+                       ret = btrfsic_read_block(state, &tmp_next_block_ctx);
+                       if (ret < (int)BTRFSIC_BLOCK_SIZE) {
+                               printk(KERN_INFO
+                                      "btrfsic: read @logical %llu failed!\n",
+                                      (unsigned long long)
+                                      tmp_next_block_ctx.start);
+                               btrfsic_release_block_ctx(&tmp_next_block_ctx);
+                               kfree(selected_super);
+                               return -1;
+                       }
+
+                       hdr = (struct btrfs_header *)tmp_next_block_ctx.data;
+                       ret = btrfsic_process_metablock(state,
+                                                       next_block,
+                                                       &tmp_next_block_ctx,
+                                                       hdr,
+                                                       BTRFS_MAX_LEVEL + 3, 1);
+                       btrfsic_release_block_ctx(&tmp_next_block_ctx);
+               }
+       }
+
+       kfree(selected_super);
+       return ret;
+}
+
+static int btrfsic_process_superblock_dev_mirror(
+               struct btrfsic_state *state,
+               struct btrfsic_dev_state *dev_state,
+               struct btrfs_device *device,
+               int superblock_mirror_num,
+               struct btrfsic_dev_state **selected_dev_state,
+               struct btrfs_super_block *selected_super)
+{
+       struct btrfs_super_block *super_tmp;
+       u64 dev_bytenr;
+       struct buffer_head *bh;
+       struct btrfsic_block *superblock_tmp;
+       int pass;
+       struct block_device *const superblock_bdev = device->bdev;
+
+       /* super block bytenr is always the unmapped device bytenr */
+       dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
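+       /* read the 4 KiB buffer that contains this superblock copy; the
+        * superblock starts at offset (dev_bytenr & 4095) within it */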
+       bh = __bread(superblock_bdev, dev_bytenr / 4096, 4096);
+       if (NULL == bh)
+               return -1;
+       super_tmp = (struct btrfs_super_block *)
+           (bh->b_data + (dev_bytenr & 4095));
+
+       if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
+           strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
+                   sizeof(super_tmp->magic)) ||
+           memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE)) {
+               brelse(bh);
+               return 0;
+       }
+
+       superblock_tmp =
+           btrfsic_block_hashtable_lookup(superblock_bdev,
+                                          dev_bytenr,
+                                          &state->block_hashtable);
+       if (NULL == superblock_tmp) {
+               superblock_tmp = btrfsic_block_alloc();
+               if (NULL == superblock_tmp) {
+                       printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+                       brelse(bh);
+                       return -1;
+               }
+               /* for superblock, only the dev_bytenr makes sense */
+               superblock_tmp->dev_bytenr = dev_bytenr;
+               superblock_tmp->dev_state = dev_state;
+               superblock_tmp->logical_bytenr = dev_bytenr;
+               superblock_tmp->generation = btrfs_super_generation(super_tmp);
+               superblock_tmp->is_metadata = 1;
+               superblock_tmp->is_superblock = 1;
+               superblock_tmp->is_iodone = 1;
+               superblock_tmp->never_written = 0;
+               superblock_tmp->mirror_num = 1 + superblock_mirror_num;
+               if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
+                       printk(KERN_INFO "New initial S-block (bdev %p, %s)"
+                              " @%llu (%s/%llu/%d)\n",
+                              superblock_bdev, device->name,
+                              (unsigned long long)dev_bytenr,
+                              dev_state->name,
+                              (unsigned long long)dev_bytenr,
+                              superblock_mirror_num);
+               list_add(&superblock_tmp->all_blocks_node,
+                        &state->all_blocks_list);
+               btrfsic_block_hashtable_add(superblock_tmp,
+                                           &state->block_hashtable);
+       }
+
+       /* select the one with the highest generation field */
+       if (btrfs_super_generation(super_tmp) >
+           state->max_superblock_generation ||
+           0 == state->max_superblock_generation) {
+               memcpy(selected_super, super_tmp, sizeof(*selected_super));
+               *selected_dev_state = dev_state;
+               state->max_superblock_generation =
+                   btrfs_super_generation(super_tmp);
+               state->latest_superblock = superblock_tmp;
+       }
+
+       for (pass = 0; pass < 3; pass++) {
+               u64 next_bytenr;
+               int num_copies;
+               int mirror_num;
+               const char *additional_string = NULL;
+               struct btrfs_disk_key tmp_disk_key;
+
+               tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
+               tmp_disk_key.offset = 0;
+               switch (pass) {
+               case 0:
+                       tmp_disk_key.objectid =
+                           cpu_to_le64(BTRFS_ROOT_TREE_OBJECTID);
+                       additional_string = "initial root ";
+                       next_bytenr = btrfs_super_root(super_tmp);
+                       break;
+               case 1:
+                       tmp_disk_key.objectid =
+                           cpu_to_le64(BTRFS_CHUNK_TREE_OBJECTID);
+                       additional_string = "initial chunk ";
+                       next_bytenr = btrfs_super_chunk_root(super_tmp);
+                       break;
+               case 2:
+                       tmp_disk_key.objectid =
+                           cpu_to_le64(BTRFS_TREE_LOG_OBJECTID);
+                       additional_string = "initial log ";
+                       next_bytenr = btrfs_super_log_root(super_tmp);
+                       if (0 == next_bytenr)
+                               continue;
+                       break;
+               }
+
+               num_copies =
+                   btrfs_num_copies(&state->root->fs_info->mapping_tree,
+                                    next_bytenr, PAGE_SIZE);
+               if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
+                       printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+                              (unsigned long long)next_bytenr, num_copies);
+               for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
+                       struct btrfsic_block *next_block;
+                       struct btrfsic_block_data_ctx tmp_next_block_ctx;
+                       struct btrfsic_block_link *l;
+
+                       if (btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+                                             &tmp_next_block_ctx,
+                                             mirror_num)) {
+                               printk(KERN_INFO "btrfsic: btrfsic_map_block("
+                                      "bytenr @%llu, mirror %d) failed!\n",
+                                      (unsigned long long)next_bytenr,
+                                      mirror_num);
+                               brelse(bh);
+                               return -1;
+                       }
+
+                       next_block = btrfsic_block_lookup_or_add(
+                                       state, &tmp_next_block_ctx,
+                                       additional_string, 1, 1, 0,
+                                       mirror_num, NULL);
+                       if (NULL == next_block) {
+                               btrfsic_release_block_ctx(&tmp_next_block_ctx);
+                               brelse(bh);
+                               return -1;
+                       }
+
+                       next_block->disk_key = tmp_disk_key;
+                       next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
+                       l = btrfsic_block_link_lookup_or_add(
+                                       state, &tmp_next_block_ctx,
+                                       next_block, superblock_tmp,
+                                       BTRFSIC_GENERATION_UNKNOWN);
+                       btrfsic_release_block_ctx(&tmp_next_block_ctx);
+                       if (NULL == l) {
+                               brelse(bh);
+                               return -1;
+                       }
+               }
+       }
+       if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES)
+               btrfsic_dump_tree_sub(state, superblock_tmp, 0);
+
+       brelse(bh);
+       return 0;
+}
+
+static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void)
+{
+       struct btrfsic_stack_frame *sf;
+
+       sf = kzalloc(sizeof(*sf), GFP_NOFS);
+       if (NULL == sf)
+               printk(KERN_INFO "btrfsic: alloc memory failed!\n");
+       else
+               sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
+       return sf;
+}
+
+static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf)
+{
+       BUG_ON(!(NULL == sf ||
+                BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER == sf->magic));
+       kfree(sf);
+}
+
+static int btrfsic_process_metablock(
+               struct btrfsic_state *state,
+               struct btrfsic_block *const first_block,
+               struct btrfsic_block_data_ctx *const first_block_ctx,
+               struct btrfs_header *const first_hdr,
+               int first_limit_nesting, int force_iodone_flag)
+{
+       struct btrfsic_stack_frame initial_stack_frame = { 0 };
+       struct btrfsic_stack_frame *sf;
+       struct btrfsic_stack_frame *next_stack;
+
+       sf = &initial_stack_frame;
+       sf->error = 0;
+       sf->i = -1;
+       sf->limit_nesting = first_limit_nesting;
+       sf->block = first_block;
+       sf->block_ctx = first_block_ctx;
+       sf->next_block = NULL;
+       sf->hdr = first_hdr;
+       sf->prev = NULL;
+
+continue_with_new_stack_frame:
+       sf->block->generation = le64_to_cpu(sf->hdr->generation);
+       if (0 == sf->hdr->level) {
+               struct btrfs_leaf *const leafhdr =
+                   (struct btrfs_leaf *)sf->hdr;
+
+               if (-1 == sf->i) {
+                       sf->nr = le32_to_cpu(leafhdr->header.nritems);
+
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO
+                                      "leaf %llu items %d generation %llu"
+                                      " owner %llu\n",
+                                      (unsigned long long)
+                                      sf->block_ctx->start,
+                                      sf->nr,
+                                      (unsigned long long)
+                                      le64_to_cpu(leafhdr->header.generation),
+                                      (unsigned long long)
+                                      le64_to_cpu(leafhdr->header.owner));
+               }
+
+continue_with_current_leaf_stack_frame:
+               if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
+                       sf->i++;
+                       sf->num_copies = 0;
+               }
+
+               if (sf->i < sf->nr) {
+                       struct btrfs_item *disk_item = leafhdr->items + sf->i;
+                       struct btrfs_disk_key *disk_key = &disk_item->key;
+                       u8 type;
+                       const u32 item_offset = le32_to_cpu(disk_item->offset);
+
+                       type = disk_key->type;
+
+                       if (BTRFS_ROOT_ITEM_KEY == type) {
+                               const struct btrfs_root_item *const root_item =
+                                   (struct btrfs_root_item *)
+                                   (sf->block_ctx->data +
+                                    offsetof(struct btrfs_leaf, items) +
+                                    item_offset);
+                               const u64 next_bytenr =
+                                   le64_to_cpu(root_item->bytenr);
+
+                               sf->error =
+                                   btrfsic_create_link_to_next_block(
+                                               state,
+                                               sf->block,
+                                               sf->block_ctx,
+                                               next_bytenr,
+                                               sf->limit_nesting,
+                                               &sf->next_block_ctx,
+                                               &sf->next_block,
+                                               force_iodone_flag,
+                                               &sf->num_copies,
+                                               &sf->mirror_num,
+                                               disk_key,
+                                               le64_to_cpu(root_item->
+                                               generation));
+                               if (sf->error)
+                                       goto one_stack_frame_backwards;
+
+                               if (NULL != sf->next_block) {
+                                       struct btrfs_header *const next_hdr =
+                                           (struct btrfs_header *)
+                                           sf->next_block_ctx.data;
+
+                                       next_stack =
+                                           btrfsic_stack_frame_alloc();
+                                       if (NULL == next_stack) {
+                                               btrfsic_release_block_ctx(
+                                                               &sf->
+                                                               next_block_ctx);
+                                               goto one_stack_frame_backwards;
+                                       }
+
+                                       next_stack->i = -1;
+                                       next_stack->block = sf->next_block;
+                                       next_stack->block_ctx =
+                                           &sf->next_block_ctx;
+                                       next_stack->next_block = NULL;
+                                       next_stack->hdr = next_hdr;
+                                       next_stack->limit_nesting =
+                                           sf->limit_nesting - 1;
+                                       next_stack->prev = sf;
+                                       sf = next_stack;
+                                       goto continue_with_new_stack_frame;
+                               }
+                       } else if (BTRFS_EXTENT_DATA_KEY == type &&
+                                  state->include_extent_data) {
+                               sf->error = btrfsic_handle_extent_data(
+                                               state,
+                                               sf->block,
+                                               sf->block_ctx,
+                                               item_offset,
+                                               force_iodone_flag);
+                               if (sf->error)
+                                       goto one_stack_frame_backwards;
+                       }
+
+                       goto continue_with_current_leaf_stack_frame;
+               }
+       } else {
+               struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr;
+
+               if (-1 == sf->i) {
+                       sf->nr = le32_to_cpu(nodehdr->header.nritems);
+
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO "node %llu level %d items %d"
+                                      " generation %llu owner %llu\n",
+                                      (unsigned long long)
+                                      sf->block_ctx->start,
+                                      nodehdr->header.level, sf->nr,
+                                      (unsigned long long)
+                                      le64_to_cpu(nodehdr->header.generation),
+                                      (unsigned long long)
+                                      le64_to_cpu(nodehdr->header.owner));
+               }
+
+continue_with_current_node_stack_frame:
+               if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
+                       sf->i++;
+                       sf->num_copies = 0;
+               }
+
+               if (sf->i < sf->nr) {
+                       struct btrfs_key_ptr *disk_key_ptr =
+                           nodehdr->ptrs + sf->i;
+                       const u64 next_bytenr =
+                           le64_to_cpu(disk_key_ptr->blockptr);
+
+                       sf->error = btrfsic_create_link_to_next_block(
+                                       state,
+                                       sf->block,
+                                       sf->block_ctx,
+                                       next_bytenr,
+                                       sf->limit_nesting,
+                                       &sf->next_block_ctx,
+                                       &sf->next_block,
+                                       force_iodone_flag,
+                                       &sf->num_copies,
+                                       &sf->mirror_num,
+                                       &disk_key_ptr->key,
+                                       le64_to_cpu(disk_key_ptr->generation));
+                       if (sf->error)
+                               goto one_stack_frame_backwards;
+
+                       if (NULL != sf->next_block) {
+                               struct btrfs_header *const next_hdr =
+                                   (struct btrfs_header *)
+                                   sf->next_block_ctx.data;
+
+                               next_stack = btrfsic_stack_frame_alloc();
+                               if (NULL == next_stack)
+                                       goto one_stack_frame_backwards;
+
+                               next_stack->i = -1;
+                               next_stack->block = sf->next_block;
+                               next_stack->block_ctx = &sf->next_block_ctx;
+                               next_stack->next_block = NULL;
+                               next_stack->hdr = next_hdr;
+                               next_stack->limit_nesting =
+                                   sf->limit_nesting - 1;
+                               next_stack->prev = sf;
+                               sf = next_stack;
+                               goto continue_with_new_stack_frame;
+                       }
+
+                       goto continue_with_current_node_stack_frame;
+               }
+       }
+
+one_stack_frame_backwards:
+       if (NULL != sf->prev) {
+               struct btrfsic_stack_frame *const prev = sf->prev;
+
+               /* the one for the initial block is freed in the caller */
+               btrfsic_release_block_ctx(sf->block_ctx);
+
+               if (sf->error) {
+                       prev->error = sf->error;
+                       btrfsic_stack_frame_free(sf);
+                       sf = prev;
+                       goto one_stack_frame_backwards;
+               }
+
+               btrfsic_stack_frame_free(sf);
+               sf = prev;
+               goto continue_with_new_stack_frame;
+       } else {
+               BUG_ON(&initial_stack_frame != sf);
+       }
+
+       return sf->error;
+}
+
+static int btrfsic_create_link_to_next_block(
+               struct btrfsic_state *state,
+               struct btrfsic_block *block,
+               struct btrfsic_block_data_ctx *block_ctx,
+               u64 next_bytenr,
+               int limit_nesting,
+               struct btrfsic_block_data_ctx *next_block_ctx,
+               struct btrfsic_block **next_blockp,
+               int force_iodone_flag,
+               int *num_copiesp, int *mirror_nump,
+               struct btrfs_disk_key *disk_key,
+               u64 parent_generation)
+{
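+       /*
+        * This function is called once per mirror; *num_copiesp and
+        * *mirror_nump hold the caller's iteration state.  On the first
+        * call (*num_copiesp == 0) the number of copies is looked up and
+        * *mirror_nump is reset to 1; *mirror_nump is incremented at the
+        * end of every successful call.
+        */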
+       struct btrfsic_block *next_block = NULL;
+       int ret;
+       struct btrfsic_block_link *l;
+       int did_alloc_block_link;
+       int block_was_created;
+
+       *next_blockp = NULL;
+       if (0 == *num_copiesp) {
+               *num_copiesp =
+                   btrfs_num_copies(&state->root->fs_info->mapping_tree,
+                                    next_bytenr, PAGE_SIZE);
+               if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
+                       printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+                              (unsigned long long)next_bytenr, *num_copiesp);
+               *mirror_nump = 1;
+       }
+
+       if (*mirror_nump > *num_copiesp)
+               return 0;
+
+       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+               printk(KERN_INFO
+                      "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
+                      *mirror_nump);
+       ret = btrfsic_map_block(state, next_bytenr,
+                               BTRFSIC_BLOCK_SIZE,
+                               next_block_ctx, *mirror_nump);
+       if (ret) {
+               printk(KERN_INFO
+                      "btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
+                      (unsigned long long)next_bytenr, *mirror_nump);
+               btrfsic_release_block_ctx(next_block_ctx);
+               *next_blockp = NULL;
+               return -1;
+       }
+
+       next_block = btrfsic_block_lookup_or_add(state,
+                                                next_block_ctx, "referenced ",
+                                                1, force_iodone_flag,
+                                                !force_iodone_flag,
+                                                *mirror_nump,
+                                                &block_was_created);
+       if (NULL == next_block) {
+               btrfsic_release_block_ctx(next_block_ctx);
+               *next_blockp = NULL;
+               return -1;
+       }
+       if (block_was_created) {
+               l = NULL;
+               next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
+       } else {
+               if (next_block->logical_bytenr != next_bytenr &&
+                   !(!next_block->is_metadata &&
+                     0 == next_block->logical_bytenr)) {
+                       printk(KERN_INFO
+                              "Referenced block @%llu (%s/%llu/%d)"
+                              " found in hash table, %c,"
+                              " bytenr mismatch (!= stored %llu).\n",
+                              (unsigned long long)next_bytenr,
+                              next_block_ctx->dev->name,
+                              (unsigned long long)next_block_ctx->dev_bytenr,
+                              *mirror_nump,
+                              btrfsic_get_block_type(state, next_block),
+                              (unsigned long long)next_block->logical_bytenr);
+               } else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "Referenced block @%llu (%s/%llu/%d)"
+                              " found in hash table, %c.\n",
+                              (unsigned long long)next_bytenr,
+                              next_block_ctx->dev->name,
+                              (unsigned long long)next_block_ctx->dev_bytenr,
+                              *mirror_nump,
+                              btrfsic_get_block_type(state, next_block));
+               next_block->logical_bytenr = next_bytenr;
+
+               next_block->mirror_num = *mirror_nump;
+               l = btrfsic_block_link_hashtable_lookup(
+                               next_block_ctx->dev->bdev,
+                               next_block_ctx->dev_bytenr,
+                               block_ctx->dev->bdev,
+                               block_ctx->dev_bytenr,
+                               &state->block_link_hashtable);
+       }
+
+       next_block->disk_key = *disk_key;
+       if (NULL == l) {
+               l = btrfsic_block_link_alloc();
+               if (NULL == l) {
+                       printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+                       btrfsic_release_block_ctx(next_block_ctx);
+                       *next_blockp = NULL;
+                       return -1;
+               }
+
+               did_alloc_block_link = 1;
+               l->block_ref_to = next_block;
+               l->block_ref_from = block;
+               l->ref_cnt = 1;
+               l->parent_generation = parent_generation;
+
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       btrfsic_print_add_link(state, l);
+
+               list_add(&l->node_ref_to, &block->ref_to_list);
+               list_add(&l->node_ref_from, &next_block->ref_from_list);
+
+               btrfsic_block_link_hashtable_add(l,
+                                                &state->block_link_hashtable);
+       } else {
+               did_alloc_block_link = 0;
+               if (0 == limit_nesting) {
+                       l->ref_cnt++;
+                       l->parent_generation = parent_generation;
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               btrfsic_print_add_link(state, l);
+               }
+       }
+
+       if (limit_nesting > 0 && did_alloc_block_link) {
+               ret = btrfsic_read_block(state, next_block_ctx);
+               if (ret < (int)BTRFSIC_BLOCK_SIZE) {
+                       printk(KERN_INFO
+                              "btrfsic: read block @logical %llu failed!\n",
+                              (unsigned long long)next_bytenr);
+                       btrfsic_release_block_ctx(next_block_ctx);
+                       *next_blockp = NULL;
+                       return -1;
+               }
+
+               *next_blockp = next_block;
+       } else {
+               *next_blockp = NULL;
+       }
+       (*mirror_nump)++;
+
+       return 0;
+}
+
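+/*
+ * Handle one EXTENT_DATA item found in a leaf: for every mirror of each
+ * BTRFSIC_BLOCK_SIZE sized chunk of the referenced data extent, look up
+ * or create the data block and record a link from the metadata block
+ * that contains the item to that data block.
+ */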
+static int btrfsic_handle_extent_data(
+               struct btrfsic_state *state,
+               struct btrfsic_block *block,
+               struct btrfsic_block_data_ctx *block_ctx,
+               u32 item_offset, int force_iodone_flag)
+{
+       int ret;
+       struct btrfs_file_extent_item *file_extent_item =
+           (struct btrfs_file_extent_item *)(block_ctx->data +
+                                             offsetof(struct btrfs_leaf,
+                                                      items) + item_offset);
+       u64 next_bytenr =
+           le64_to_cpu(file_extent_item->disk_bytenr) +
+           le64_to_cpu(file_extent_item->offset);
+       u64 num_bytes = le64_to_cpu(file_extent_item->num_bytes);
+       u64 generation = le64_to_cpu(file_extent_item->generation);
+       struct btrfsic_block_link *l;
+
+       if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
+               printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
+                      " offset = %llu, num_bytes = %llu\n",
+                      file_extent_item->type,
+                      (unsigned long long)
+                      le64_to_cpu(file_extent_item->disk_bytenr),
+                      (unsigned long long)
+                      le64_to_cpu(file_extent_item->offset),
+                      (unsigned long long)
+                      le64_to_cpu(file_extent_item->num_bytes));
+       if (BTRFS_FILE_EXTENT_REG != file_extent_item->type ||
+           ((u64)0) == le64_to_cpu(file_extent_item->disk_bytenr))
+               return 0;
+       while (num_bytes > 0) {
+               u32 chunk_len;
+               int num_copies;
+               int mirror_num;
+
+               if (num_bytes > BTRFSIC_BLOCK_SIZE)
+                       chunk_len = BTRFSIC_BLOCK_SIZE;
+               else
+                       chunk_len = num_bytes;
+
+               num_copies =
+                   btrfs_num_copies(&state->root->fs_info->mapping_tree,
+                                    next_bytenr, PAGE_SIZE);
+               if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
+                       printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+                              (unsigned long long)next_bytenr, num_copies);
+               for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
+                       struct btrfsic_block_data_ctx next_block_ctx;
+                       struct btrfsic_block *next_block;
+                       int block_was_created;
+
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO "btrfsic_handle_extent_data("
+                                      "mirror_num=%d)\n", mirror_num);
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
+                               printk(KERN_INFO
+                                      "\tdisk_bytenr = %llu, num_bytes %u\n",
+                                      (unsigned long long)next_bytenr,
+                                      chunk_len);
+                       ret = btrfsic_map_block(state, next_bytenr,
+                                               chunk_len, &next_block_ctx,
+                                               mirror_num);
+                       if (ret) {
+                               printk(KERN_INFO
+                                      "btrfsic: btrfsic_map_block(@%llu,"
+                                      " mirror=%d) failed!\n",
+                                      (unsigned long long)next_bytenr,
+                                      mirror_num);
+                               return -1;
+                       }
+
+                       next_block = btrfsic_block_lookup_or_add(
+                                       state,
+                                       &next_block_ctx,
+                                       "referenced ",
+                                       0,
+                                       force_iodone_flag,
+                                       !force_iodone_flag,
+                                       mirror_num,
+                                       &block_was_created);
+                       if (NULL == next_block) {
+                               printk(KERN_INFO
+                                      "btrfsic: error, kmalloc failed!\n");
+                               btrfsic_release_block_ctx(&next_block_ctx);
+                               return -1;
+                       }
+                       if (!block_was_created) {
+                               if (next_block->logical_bytenr != next_bytenr &&
+                                   !(!next_block->is_metadata &&
+                                     0 == next_block->logical_bytenr)) {
+                                       printk(KERN_INFO
+                                              "Referenced block"
+                                              " @%llu (%s/%llu/%d)"
+                                              " found in hash table, D,"
+                                              " bytenr mismatch"
+                                              " (!= stored %llu).\n",
+                                              (unsigned long long)next_bytenr,
+                                              next_block_ctx.dev->name,
+                                              (unsigned long long)
+                                              next_block_ctx.dev_bytenr,
+                                              mirror_num,
+                                              (unsigned long long)
+                                              next_block->logical_bytenr);
+                               }
+                               next_block->logical_bytenr = next_bytenr;
+                               next_block->mirror_num = mirror_num;
+                       }
+
+                       l = btrfsic_block_link_lookup_or_add(state,
+                                                            &next_block_ctx,
+                                                            next_block, block,
+                                                            generation);
+                       btrfsic_release_block_ctx(&next_block_ctx);
+                       if (NULL == l)
+                               return -1;
+               }
+
+               next_bytenr += chunk_len;
+               num_bytes -= chunk_len;
+       }
+
+       return 0;
+}
+
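+/*
+ * Map a logical bytenr to the device and physical offset of the given
+ * mirror and fill in the block data context (data/bh stay unset until
+ * btrfsic_read_block() is called).
+ */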
+static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
+                            struct btrfsic_block_data_ctx *block_ctx_out,
+                            int mirror_num)
+{
+       int ret;
+       u64 length;
+       struct btrfs_bio *multi = NULL;
+       struct btrfs_device *device;
+
+       length = len;
+       ret = btrfs_map_block(&state->root->fs_info->mapping_tree, READ,
+                             bytenr, &length, &multi, mirror_num);
+       if (ret) {
+               /* do not dereference multi when the mapping failed */
+               return ret;
+       }
+
+       device = multi->stripes[0].dev;
+       block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev);
+       block_ctx_out->dev_bytenr = multi->stripes[0].physical;
+       block_ctx_out->start = bytenr;
+       block_ctx_out->len = len;
+       block_ctx_out->data = NULL;
+       block_ctx_out->bh = NULL;
+
+       kfree(multi);
+       if (NULL == block_ctx_out->dev) {
+               ret = -ENXIO;
+               printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n");
+       }
+
+       return ret;
+}
+
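+/*
+ * Superblock mirrors live at fixed offsets on each device, so no chunk
+ * mapping is needed: the device bytenr is the logical bytenr.
+ */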
+static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
+                                 u32 len, struct block_device *bdev,
+                                 struct btrfsic_block_data_ctx *block_ctx_out)
+{
+       block_ctx_out->dev = btrfsic_dev_state_lookup(bdev);
+       block_ctx_out->dev_bytenr = bytenr;
+       block_ctx_out->start = bytenr;
+       block_ctx_out->len = len;
+       block_ctx_out->data = NULL;
+       block_ctx_out->bh = NULL;
+       if (NULL != block_ctx_out->dev) {
+               return 0;
+       } else {
+               printk(KERN_INFO "btrfsic: error, cannot lookup dev (#2)!\n");
+               return -ENXIO;
+       }
+}
+
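+/* Release the buffer head, if any, that backs a block data context. */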
+static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
+{
+       if (NULL != block_ctx->bh) {
+               brelse(block_ctx->bh);
+               block_ctx->bh = NULL;
+       }
+}
+
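+/*
+ * Read a single 4096 byte block via the buffer cache. The device bytenr
+ * must be 4K aligned and the length must not exceed 4096 bytes.
+ */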
+static int btrfsic_read_block(struct btrfsic_state *state,
+                             struct btrfsic_block_data_ctx *block_ctx)
+{
+       block_ctx->bh = NULL;
+       if (block_ctx->dev_bytenr & 4095) {
+               printk(KERN_INFO
+                      "btrfsic: read_block() with unaligned bytenr %llu\n",
+                      (unsigned long long)block_ctx->dev_bytenr);
+               return -1;
+       }
+       if (block_ctx->len > 4096) {
+               printk(KERN_INFO
+                      "btrfsic: read_block() with too large size %u\n",
+                      block_ctx->len);
+               return -1;
+       }
+
+       block_ctx->bh = __bread(block_ctx->dev->bdev,
+                               block_ctx->dev_bytenr >> 12, 4096);
+       if (NULL == block_ctx->bh)
+               return -1;
+       block_ctx->data = block_ctx->bh->b_data;
+
+       return block_ctx->len;
+}
+
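+/*
+ * Debugging aid: print every block in the all_blocks_list together with
+ * its "refers to" and "is referenced from" links.
+ */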
+static void btrfsic_dump_database(struct btrfsic_state *state)
+{
+       struct list_head *elem_all;
+
+       BUG_ON(NULL == state);
+
+       printk(KERN_INFO "all_blocks_list:\n");
+       list_for_each(elem_all, &state->all_blocks_list) {
+               const struct btrfsic_block *const b_all =
+                   list_entry(elem_all, struct btrfsic_block,
+                              all_blocks_node);
+               struct list_head *elem_ref_to;
+               struct list_head *elem_ref_from;
+
+               printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
+                      btrfsic_get_block_type(state, b_all),
+                      (unsigned long long)b_all->logical_bytenr,
+                      b_all->dev_state->name,
+                      (unsigned long long)b_all->dev_bytenr,
+                      b_all->mirror_num);
+
+               list_for_each(elem_ref_to, &b_all->ref_to_list) {
+                       const struct btrfsic_block_link *const l =
+                           list_entry(elem_ref_to,
+                                      struct btrfsic_block_link,
+                                      node_ref_to);
+
+                       printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
+                              " refers %u* to"
+                              " %c @%llu (%s/%llu/%d)\n",
+                              btrfsic_get_block_type(state, b_all),
+                              (unsigned long long)b_all->logical_bytenr,
+                              b_all->dev_state->name,
+                              (unsigned long long)b_all->dev_bytenr,
+                              b_all->mirror_num,
+                              l->ref_cnt,
+                              btrfsic_get_block_type(state, l->block_ref_to),
+                              (unsigned long long)
+                              l->block_ref_to->logical_bytenr,
+                              l->block_ref_to->dev_state->name,
+                              (unsigned long long)l->block_ref_to->dev_bytenr,
+                              l->block_ref_to->mirror_num);
+               }
+
+               list_for_each(elem_ref_from, &b_all->ref_from_list) {
+                       const struct btrfsic_block_link *const l =
+                           list_entry(elem_ref_from,
+                                      struct btrfsic_block_link,
+                                      node_ref_from);
+
+                       printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
+                              " is ref %u* from"
+                              " %c @%llu (%s/%llu/%d)\n",
+                              btrfsic_get_block_type(state, b_all),
+                              (unsigned long long)b_all->logical_bytenr,
+                              b_all->dev_state->name,
+                              (unsigned long long)b_all->dev_bytenr,
+                              b_all->mirror_num,
+                              l->ref_cnt,
+                              btrfsic_get_block_type(state, l->block_ref_from),
+                              (unsigned long long)
+                              l->block_ref_from->logical_bytenr,
+                              l->block_ref_from->dev_state->name,
+                              (unsigned long long)
+                              l->block_ref_from->dev_bytenr,
+                              l->block_ref_from->mirror_num);
+               }
+
+               printk(KERN_INFO "\n");
+       }
+}
+
+/*
+ * Test whether the disk block contains a tree block (leaf or node)
+ * (note that this test fails for the super block)
+ */
+static int btrfsic_test_for_metadata(struct btrfsic_state *state,
+                                    const u8 *data, unsigned int size)
+{
+       struct btrfs_header *h;
+       u8 csum[BTRFS_CSUM_SIZE];
+       u32 crc = ~(u32)0;
+       int fail = 0;
+       int crc_fail = 0;
+
+       h = (struct btrfs_header *)data;
+
+       if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
+               fail++;
+
+       crc = crc32c(crc, data + BTRFS_CSUM_SIZE, PAGE_SIZE - BTRFS_CSUM_SIZE);
+       btrfs_csum_final(crc, csum);
+       if (memcmp(csum, h->csum, state->csum_size))
+               crc_fail++;
+
+       return fail || crc_fail;
+}
+
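+/*
+ * Process a block that is about to be written: verify it against the
+ * recorded state, rebuild its linkage information and, for bio/bh based
+ * writes, redirect the completion callback to btrfsic_bio_end_io() or
+ * btrfsic_bh_end_io() so that io-done state can be tracked.
+ */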
+static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
+                                         u64 dev_bytenr,
+                                         u8 *mapped_data, unsigned int len,
+                                         struct bio *bio,
+                                         int *bio_is_patched,
+                                         struct buffer_head *bh,
+                                         int submit_bio_bh_rw)
+{
+       int is_metadata;
+       struct btrfsic_block *block;
+       struct btrfsic_block_data_ctx block_ctx;
+       int ret;
+       struct btrfsic_state *state = dev_state->state;
+       struct block_device *bdev = dev_state->bdev;
+
+       WARN_ON(len > PAGE_SIZE);
+       is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_data, len));
+       if (NULL != bio_is_patched)
+               *bio_is_patched = 0;
+
+       block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
+                                              &state->block_hashtable);
+       if (NULL != block) {
+               u64 bytenr;
+               struct list_head *elem_ref_to;
+               struct list_head *tmp_ref_to;
+
+               if (block->is_superblock) {
+                       bytenr = le64_to_cpu(((struct btrfs_super_block *)
+                                             mapped_data)->bytenr);
+                       is_metadata = 1;
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
+                               printk(KERN_INFO
+                                      "[before new superblock is written]:\n");
+                               btrfsic_dump_tree_sub(state, block, 0);
+                       }
+               }
+               if (is_metadata) {
+                       if (!block->is_superblock) {
+                               bytenr = le64_to_cpu(((struct btrfs_header *)
+                                                     mapped_data)->bytenr);
+                               btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
+                                                              dev_state,
+                                                              dev_bytenr,
+                                                              mapped_data);
+                       }
+                       if (block->logical_bytenr != bytenr) {
+                               printk(KERN_INFO
+                                      "Written block @%llu (%s/%llu/%d)"
+                                      " found in hash table, %c,"
+                                      " bytenr mismatch"
+                                      " (!= stored %llu).\n",
+                                      (unsigned long long)bytenr,
+                                      dev_state->name,
+                                      (unsigned long long)dev_bytenr,
+                                      block->mirror_num,
+                                      btrfsic_get_block_type(state, block),
+                                      (unsigned long long)
+                                      block->logical_bytenr);
+                               block->logical_bytenr = bytenr;
+                       } else if (state->print_mask &
+                                  BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO
+                                      "Written block @%llu (%s/%llu/%d)"
+                                      " found in hash table, %c.\n",
+                                      (unsigned long long)bytenr,
+                                      dev_state->name,
+                                      (unsigned long long)dev_bytenr,
+                                      block->mirror_num,
+                                      btrfsic_get_block_type(state, block));
+               } else {
+                       bytenr = block->logical_bytenr;
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO
+                                      "Written block @%llu (%s/%llu/%d)"
+                                      " found in hash table, %c.\n",
+                                      (unsigned long long)bytenr,
+                                      dev_state->name,
+                                      (unsigned long long)dev_bytenr,
+                                      block->mirror_num,
+                                      btrfsic_get_block_type(state, block));
+               }
+
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "ref_to_list: %cE, ref_from_list: %cE\n",
+                              list_empty(&block->ref_to_list) ? ' ' : '!',
+                              list_empty(&block->ref_from_list) ? ' ' : '!');
+               if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
+                       printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
+                              " @%llu (%s/%llu/%d), old(gen=%llu,"
+                              " objectid=%llu, type=%d, offset=%llu),"
+                              " new(gen=%llu),"
+                              " which is referenced by most recent superblock"
+                              " (superblockgen=%llu)!\n",
+                              btrfsic_get_block_type(state, block),
+                              (unsigned long long)bytenr,
+                              dev_state->name,
+                              (unsigned long long)dev_bytenr,
+                              block->mirror_num,
+                              (unsigned long long)block->generation,
+                              (unsigned long long)
+                              le64_to_cpu(block->disk_key.objectid),
+                              block->disk_key.type,
+                              (unsigned long long)
+                              le64_to_cpu(block->disk_key.offset),
+                              (unsigned long long)
+                              le64_to_cpu(((struct btrfs_header *)
+                                           mapped_data)->generation),
+                              (unsigned long long)
+                              state->max_superblock_generation);
+                       btrfsic_dump_tree(state);
+               }
+
+               if (!block->is_iodone && !block->never_written) {
+                       printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
+                              " @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu,"
+                              " which is not yet iodone!\n",
+                              btrfsic_get_block_type(state, block),
+                              (unsigned long long)bytenr,
+                              dev_state->name,
+                              (unsigned long long)dev_bytenr,
+                              block->mirror_num,
+                              (unsigned long long)block->generation,
+                              (unsigned long long)
+                              le64_to_cpu(((struct btrfs_header *)
+                                           mapped_data)->generation));
+                       /* it would not be safe to go on */
+                       btrfsic_dump_tree(state);
+                       return;
+               }
+
+               /*
+                * Clear all references of this block. Do not free
+                * the block itself even if it is not referenced anymore
+                * because it still carries valuable information
+                * like whether it was ever written and the IO completed.
+                */
+               list_for_each_safe(elem_ref_to, tmp_ref_to,
+                                  &block->ref_to_list) {
+                       struct btrfsic_block_link *const l =
+                           list_entry(elem_ref_to,
+                                      struct btrfsic_block_link,
+                                      node_ref_to);
+
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               btrfsic_print_rem_link(state, l);
+                       l->ref_cnt--;
+                       if (0 == l->ref_cnt) {
+                               list_del(&l->node_ref_to);
+                               list_del(&l->node_ref_from);
+                               btrfsic_block_link_hashtable_remove(l);
+                               btrfsic_block_link_free(l);
+                       }
+               }
+
+               if (block->is_superblock)
+                       ret = btrfsic_map_superblock(state, bytenr, len,
+                                                    bdev, &block_ctx);
+               else
+                       ret = btrfsic_map_block(state, bytenr, len,
+                                               &block_ctx, 0);
+               if (ret) {
+                       printk(KERN_INFO
+                              "btrfsic: btrfsic_map_block(root @%llu)"
+                              " failed!\n", (unsigned long long)bytenr);
+                       return;
+               }
+               block_ctx.data = mapped_data;
+               /* the following is required in case of writes to mirrors:
+                * use the same device and offset that were used for the
+                * lookup */
+               block_ctx.dev = dev_state;
+               block_ctx.dev_bytenr = dev_bytenr;
+
+               if (is_metadata || state->include_extent_data) {
+                       block->never_written = 0;
+                       block->iodone_w_error = 0;
+                       if (NULL != bio) {
+                               block->is_iodone = 0;
+                               BUG_ON(NULL == bio_is_patched);
+                               if (!*bio_is_patched) {
+                                       block->orig_bio_bh_private =
+                                           bio->bi_private;
+                                       block->orig_bio_bh_end_io.bio =
+                                           bio->bi_end_io;
+                                       block->next_in_same_bio = NULL;
+                                       bio->bi_private = block;
+                                       bio->bi_end_io = btrfsic_bio_end_io;
+                                       *bio_is_patched = 1;
+                               } else {
+                                       struct btrfsic_block *chained_block =
+                                           (struct btrfsic_block *)
+                                           bio->bi_private;
+
+                                       BUG_ON(NULL == chained_block);
+                                       block->orig_bio_bh_private =
+                                           chained_block->orig_bio_bh_private;
+                                       block->orig_bio_bh_end_io.bio =
+                                           chained_block->orig_bio_bh_end_io.
+                                           bio;
+                                       block->next_in_same_bio = chained_block;
+                                       bio->bi_private = block;
+                               }
+                       } else if (NULL != bh) {
+                               block->is_iodone = 0;
+                               block->orig_bio_bh_private = bh->b_private;
+                               block->orig_bio_bh_end_io.bh = bh->b_end_io;
+                               block->next_in_same_bio = NULL;
+                               bh->b_private = block;
+                               bh->b_end_io = btrfsic_bh_end_io;
+                       } else {
+                               block->is_iodone = 1;
+                               block->orig_bio_bh_private = NULL;
+                               block->orig_bio_bh_end_io.bio = NULL;
+                               block->next_in_same_bio = NULL;
+                       }
+               }
+
+               block->flush_gen = dev_state->last_flush_gen + 1;
+               block->submit_bio_bh_rw = submit_bio_bh_rw;
+               if (is_metadata) {
+                       block->logical_bytenr = bytenr;
+                       block->is_metadata = 1;
+                       if (block->is_superblock) {
+                               ret = btrfsic_process_written_superblock(
+                                               state,
+                                               block,
+                                               (struct btrfs_super_block *)
+                                               mapped_data);
+                               if (state->print_mask &
+                                   BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
+                                       printk(KERN_INFO
+                                       "[after new superblock is written]:\n");
+                                       btrfsic_dump_tree_sub(state, block, 0);
+                               }
+                       } else {
+                               block->mirror_num = 0;  /* unknown */
+                               ret = btrfsic_process_metablock(
+                                               state,
+                                               block,
+                                               &block_ctx,
+                                               (struct btrfs_header *)
+                                               block_ctx.data,
+                                               0, 0);
+                       }
+                       if (ret)
+                               printk(KERN_INFO
+                                      "btrfsic: btrfsic_process_metablock"
+                                      "(root @%llu) failed!\n",
+                                      (unsigned long long)dev_bytenr);
+               } else {
+                       block->is_metadata = 0;
+                       block->mirror_num = 0;  /* unknown */
+                       block->generation = BTRFSIC_GENERATION_UNKNOWN;
+                       if (!state->include_extent_data
+                           && list_empty(&block->ref_from_list)) {
+                               /*
+                                * disk block is overwritten with extent
+                                * data (not meta data) and we are configured
+                                * to not include extent data: take the
+                                * chance and free the block's memory
+                                */
+                               btrfsic_block_hashtable_remove(block);
+                               list_del(&block->all_blocks_node);
+                               btrfsic_block_free(block);
+                       }
+               }
+               btrfsic_release_block_ctx(&block_ctx);
+       } else {
+               /* block has not been found in hash table */
+               u64 bytenr;
+
+               if (!is_metadata) {
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO "Written block (%s/%llu/?)"
+                                      " !found in hash table, D.\n",
+                                      dev_state->name,
+                                      (unsigned long long)dev_bytenr);
+                       if (!state->include_extent_data)
+                               return; /* ignore that written D block */
+
+                       /* this is getting ugly for the
+                        * include_extent_data case... */
+                       bytenr = 0;     /* unknown */
+                       block_ctx.start = bytenr;
+                       block_ctx.len = len;
+                       block_ctx.bh = NULL;
+               } else {
+                       bytenr = le64_to_cpu(((struct btrfs_header *)
+                                             mapped_data)->bytenr);
+                       btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
+                                                      dev_bytenr,
+                                                      mapped_data);
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO
+                                      "Written block @%llu (%s/%llu/?)"
+                                      " !found in hash table, M.\n",
+                                      (unsigned long long)bytenr,
+                                      dev_state->name,
+                                      (unsigned long long)dev_bytenr);
+
+                       ret = btrfsic_map_block(state, bytenr, len, &block_ctx,
+                                               0);
+                       if (ret) {
+                               printk(KERN_INFO
+                                      "btrfsic: btrfsic_map_block(root @%llu)"
+                                      " failed!\n",
+                                      (unsigned long long)dev_bytenr);
+                               return;
+                       }
+               }
+               block_ctx.data = mapped_data;
+               /* the following is required in case of writes to mirrors:
+                * use the same device and offset that were used for the
+                * lookup */
+               block_ctx.dev = dev_state;
+               block_ctx.dev_bytenr = dev_bytenr;
+
+               block = btrfsic_block_alloc();
+               if (NULL == block) {
+                       printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+                       btrfsic_release_block_ctx(&block_ctx);
+                       return;
+               }
+               block->dev_state = dev_state;
+               block->dev_bytenr = dev_bytenr;
+               block->logical_bytenr = bytenr;
+               block->is_metadata = is_metadata;
+               block->never_written = 0;
+               block->iodone_w_error = 0;
+               block->mirror_num = 0;  /* unknown */
+               block->flush_gen = dev_state->last_flush_gen + 1;
+               block->submit_bio_bh_rw = submit_bio_bh_rw;
+               if (NULL != bio) {
+                       block->is_iodone = 0;
+                       BUG_ON(NULL == bio_is_patched);
+                       if (!*bio_is_patched) {
+                               block->orig_bio_bh_private = bio->bi_private;
+                               block->orig_bio_bh_end_io.bio = bio->bi_end_io;
+                               block->next_in_same_bio = NULL;
+                               bio->bi_private = block;
+                               bio->bi_end_io = btrfsic_bio_end_io;
+                               *bio_is_patched = 1;
+                       } else {
+                               struct btrfsic_block *chained_block =
+                                   (struct btrfsic_block *)
+                                   bio->bi_private;
+
+                               BUG_ON(NULL == chained_block);
+                               block->orig_bio_bh_private =
+                                   chained_block->orig_bio_bh_private;
+                               block->orig_bio_bh_end_io.bio =
+                                   chained_block->orig_bio_bh_end_io.bio;
+                               block->next_in_same_bio = chained_block;
+                               bio->bi_private = block;
+                       }
+               } else if (NULL != bh) {
+                       block->is_iodone = 0;
+                       block->orig_bio_bh_private = bh->b_private;
+                       block->orig_bio_bh_end_io.bh = bh->b_end_io;
+                       block->next_in_same_bio = NULL;
+                       bh->b_private = block;
+                       bh->b_end_io = btrfsic_bh_end_io;
+               } else {
+                       block->is_iodone = 1;
+                       block->orig_bio_bh_private = NULL;
+                       block->orig_bio_bh_end_io.bio = NULL;
+                       block->next_in_same_bio = NULL;
+               }
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "New written %c-block @%llu (%s/%llu/%d)\n",
+                              is_metadata ? 'M' : 'D',
+                              (unsigned long long)block->logical_bytenr,
+                              block->dev_state->name,
+                              (unsigned long long)block->dev_bytenr,
+                              block->mirror_num);
+               list_add(&block->all_blocks_node, &state->all_blocks_list);
+               btrfsic_block_hashtable_add(block, &state->block_hashtable);
+
+               if (is_metadata) {
+                       ret = btrfsic_process_metablock(state, block,
+                                                       &block_ctx,
+                                                       (struct btrfs_header *)
+                                                       block_ctx.data, 0, 0);
+                       if (ret)
+                               printk(KERN_INFO
+                                      "btrfsic: process_metablock(root @%llu)"
+                                      " failed!\n",
+                                      (unsigned long long)dev_bytenr);
+               }
+               btrfsic_release_block_ctx(&block_ctx);
+       }
+}
+
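+/*
+ * Replacement bio completion callback: walk the chain of blocks attached
+ * via next_in_same_bio, record the error state, bump the flush
+ * generation on REQ_FLUSH, mark each block iodone, then call the
+ * original bi_end_io.
+ */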
+static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
+{
+       struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private;
+       int iodone_w_error;
+
+       /* mutex is not held! This is not safe if IO is not yet completed
+        * on umount */
+       iodone_w_error = 0;
+       if (bio_error_status)
+               iodone_w_error = 1;
+
+       BUG_ON(NULL == block);
+       bp->bi_private = block->orig_bio_bh_private;
+       bp->bi_end_io = block->orig_bio_bh_end_io.bio;
+
+       do {
+               struct btrfsic_block *next_block;
+               struct btrfsic_dev_state *const dev_state = block->dev_state;
+
+               if ((dev_state->state->print_mask &
+                    BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
+                       printk(KERN_INFO
+                              "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
+                              bio_error_status,
+                              btrfsic_get_block_type(dev_state->state, block),
+                              (unsigned long long)block->logical_bytenr,
+                              dev_state->name,
+                              (unsigned long long)block->dev_bytenr,
+                              block->mirror_num);
+               next_block = block->next_in_same_bio;
+               block->iodone_w_error = iodone_w_error;
+               if (block->submit_bio_bh_rw & REQ_FLUSH) {
+                       dev_state->last_flush_gen++;
+                       if ((dev_state->state->print_mask &
+                            BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
+                               printk(KERN_INFO
+                                      "bio_end_io() new %s flush_gen=%llu\n",
+                                      dev_state->name,
+                                      (unsigned long long)
+                                      dev_state->last_flush_gen);
+               }
+               if (block->submit_bio_bh_rw & REQ_FUA)
+                       block->flush_gen = 0; /* FUA completed means block is
+                                              * on disk */
+               block->is_iodone = 1; /* for FLUSH, this releases the block */
+               block = next_block;
+       } while (NULL != block);
+
+       bp->bi_end_io(bp, bio_error_status);
+}
+
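+/*
+ * Replacement buffer head completion callback, the single-block analog
+ * of btrfsic_bio_end_io().
+ */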
+static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
+{
+       struct btrfsic_block *block = (struct btrfsic_block *)bh->b_private;
+       int iodone_w_error = !uptodate;
+       struct btrfsic_dev_state *dev_state;
+
+       BUG_ON(NULL == block);
+       dev_state = block->dev_state;
+       if ((dev_state->state->print_mask & BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
+               printk(KERN_INFO
+                      "bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
+                      iodone_w_error,
+                      btrfsic_get_block_type(dev_state->state, block),
+                      (unsigned long long)block->logical_bytenr,
+                      block->dev_state->name,
+                      (unsigned long long)block->dev_bytenr,
+                      block->mirror_num);
+
+       block->iodone_w_error = iodone_w_error;
+       if (block->submit_bio_bh_rw & REQ_FLUSH) {
+               dev_state->last_flush_gen++;
+               if ((dev_state->state->print_mask &
+                    BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
+                       printk(KERN_INFO
+                              "bh_end_io() new %s flush_gen=%llu\n",
+                              dev_state->name,
+                              (unsigned long long)dev_state->last_flush_gen);
+       }
+       if (block->submit_bio_bh_rw & REQ_FUA)
+               block->flush_gen = 0; /* FUA completed means block is on disk */
+
+       bh->b_private = block->orig_bio_bh_private;
+       bh->b_end_io = block->orig_bio_bh_end_io.bh;
+       block->is_iodone = 1; /* for FLUSH, this releases the block */
+       bh->b_end_io(bh, uptodate);
+}
+
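+/*
+ * A superblock has been written: track the highest generation seen,
+ * link the superblock to the root, chunk and log tree roots it points
+ * to, and verify that every block it references is written and flushed.
+ */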
+static int btrfsic_process_written_superblock(
+               struct btrfsic_state *state,
+               struct btrfsic_block *const superblock,
+               struct btrfs_super_block *const super_hdr)
+{
+       int pass;
+
+       superblock->generation = btrfs_super_generation(super_hdr);
+       if (!(superblock->generation > state->max_superblock_generation ||
+             0 == state->max_superblock_generation)) {
+               if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
+                       printk(KERN_INFO
+                              "btrfsic: superblock @%llu (%s/%llu/%d)"
+                              " with old gen %llu <= %llu\n",
+                              (unsigned long long)superblock->logical_bytenr,
+                              superblock->dev_state->name,
+                              (unsigned long long)superblock->dev_bytenr,
+                              superblock->mirror_num,
+                              (unsigned long long)
+                              btrfs_super_generation(super_hdr),
+                              (unsigned long long)
+                              state->max_superblock_generation);
+       } else {
+               if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
+                       printk(KERN_INFO
+                              "btrfsic: got new superblock @%llu (%s/%llu/%d)"
+                              " with new gen %llu > %llu\n",
+                              (unsigned long long)superblock->logical_bytenr,
+                              superblock->dev_state->name,
+                              (unsigned long long)superblock->dev_bytenr,
+                              superblock->mirror_num,
+                              (unsigned long long)
+                              btrfs_super_generation(super_hdr),
+                              (unsigned long long)
+                              state->max_superblock_generation);
+
+               state->max_superblock_generation =
+                   btrfs_super_generation(super_hdr);
+               state->latest_superblock = superblock;
+       }
+
+       for (pass = 0; pass < 3; pass++) {
+               int ret;
+               u64 next_bytenr;
+               struct btrfsic_block *next_block;
+               struct btrfsic_block_data_ctx tmp_next_block_ctx;
+               struct btrfsic_block_link *l;
+               int num_copies;
+               int mirror_num;
+               const char *additional_string = NULL;
+               struct btrfs_disk_key tmp_disk_key;
+
+               tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
+               tmp_disk_key.offset = 0;
+
+               switch (pass) {
+               case 0:
+                       tmp_disk_key.objectid =
+                           cpu_to_le64(BTRFS_ROOT_TREE_OBJECTID);
+                       additional_string = "root ";
+                       next_bytenr = btrfs_super_root(super_hdr);
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
+                               printk(KERN_INFO "root@%llu\n",
+                                      (unsigned long long)next_bytenr);
+                       break;
+               case 1:
+                       tmp_disk_key.objectid =
+                           cpu_to_le64(BTRFS_CHUNK_TREE_OBJECTID);
+                       additional_string = "chunk ";
+                       next_bytenr = btrfs_super_chunk_root(super_hdr);
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
+                               printk(KERN_INFO "chunk@%llu\n",
+                                      (unsigned long long)next_bytenr);
+                       break;
+               case 2:
+                       tmp_disk_key.objectid =
+                           cpu_to_le64(BTRFS_TREE_LOG_OBJECTID);
+                       additional_string = "log ";
+                       next_bytenr = btrfs_super_log_root(super_hdr);
+                       if (0 == next_bytenr)
+                               continue;
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
+                               printk(KERN_INFO "log@%llu\n",
+                                      (unsigned long long)next_bytenr);
+                       break;
+               }
+
+               num_copies =
+                   btrfs_num_copies(&state->root->fs_info->mapping_tree,
+                                    next_bytenr, PAGE_SIZE);
+               if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
+                       printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+                              (unsigned long long)next_bytenr, num_copies);
+               for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
+                       int was_created;
+
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO
+                                      "btrfsic_process_written_superblock("
+                                      "mirror_num=%d)\n", mirror_num);
+                       ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+                                               &tmp_next_block_ctx,
+                                               mirror_num);
+                       if (ret) {
+                               printk(KERN_INFO
+                                      "btrfsic: btrfsic_map_block(@%llu,"
+                                      " mirror=%d) failed!\n",
+                                      (unsigned long long)next_bytenr,
+                                      mirror_num);
+                               return -1;
+                       }
+
+                       next_block = btrfsic_block_lookup_or_add(
+                                       state,
+                                       &tmp_next_block_ctx,
+                                       additional_string,
+                                       1, 0, 1,
+                                       mirror_num,
+                                       &was_created);
+                       if (NULL == next_block) {
+                               printk(KERN_INFO
+                                      "btrfsic: error, kmalloc failed!\n");
+                               btrfsic_release_block_ctx(&tmp_next_block_ctx);
+                               return -1;
+                       }
+
+                       next_block->disk_key = tmp_disk_key;
+                       if (was_created)
+                               next_block->generation =
+                                   BTRFSIC_GENERATION_UNKNOWN;
+                       l = btrfsic_block_link_lookup_or_add(
+                                       state,
+                                       &tmp_next_block_ctx,
+                                       next_block,
+                                       superblock,
+                                       BTRFSIC_GENERATION_UNKNOWN);
+                       btrfsic_release_block_ctx(&tmp_next_block_ctx);
+                       if (NULL == l)
+                               return -1;
+               }
+       }
+
+       if (-1 == btrfsic_check_all_ref_blocks(state, superblock, 0)) {
+               WARN_ON(1);
+               btrfsic_dump_tree(state);
+       }
+
+       return 0;
+}
+
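+/*
+ * Recursively check that every block reachable from the given block has
+ * been written, is iodone, carries the expected generation and is
+ * flushed out of the disk's write cache. Returns -1 if any rule is
+ * violated, 0 otherwise.
+ */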
+static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
+                                       struct btrfsic_block *const block,
+                                       int recursion_level)
+{
+       struct list_head *elem_ref_to;
+       int ret = 0;
+
+       if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
+               /*
+                * Note that this situation can happen and does not
+                * indicate an error in regular cases. It happens
+                * when disk blocks are freed and later reused.
+                * The check-integrity module is not aware of any
+                * block free operations, it just recognizes block
+                * write operations. Therefore it keeps the linkage
+                * information for a block until a block is
+                * rewritten. This can temporarily cause incorrect
+                * and even circular linkage information. This
+                * causes no harm unless such blocks are referenced
+                * by the most recent super block.
+                */
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "btrfsic: abort cyclic linkage (case 1).\n");
+
+               return ret;
+       }
+
+       /*
+        * Recursion is acceptable here because the amount of stack space
+        * used per level is very small and the max recursion depth is limited.
+        */
+       list_for_each(elem_ref_to, &block->ref_to_list) {
+               const struct btrfsic_block_link *const l =
+                   list_entry(elem_ref_to, struct btrfsic_block_link,
+                              node_ref_to);
+
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "rl=%d, %c @%llu (%s/%llu/%d)"
+                              " %u* refers to %c @%llu (%s/%llu/%d)\n",
+                              recursion_level,
+                              btrfsic_get_block_type(state, block),
+                              (unsigned long long)block->logical_bytenr,
+                              block->dev_state->name,
+                              (unsigned long long)block->dev_bytenr,
+                              block->mirror_num,
+                              l->ref_cnt,
+                              btrfsic_get_block_type(state, l->block_ref_to),
+                              (unsigned long long)
+                              l->block_ref_to->logical_bytenr,
+                              l->block_ref_to->dev_state->name,
+                              (unsigned long long)l->block_ref_to->dev_bytenr,
+                              l->block_ref_to->mirror_num);
+               if (l->block_ref_to->never_written) {
+                       printk(KERN_INFO "btrfs: attempt to write superblock"
+                              " which references block %c @%llu (%s/%llu/%d)"
+                              " which is never written!\n",
+                              btrfsic_get_block_type(state, l->block_ref_to),
+                              (unsigned long long)
+                              l->block_ref_to->logical_bytenr,
+                              l->block_ref_to->dev_state->name,
+                              (unsigned long long)l->block_ref_to->dev_bytenr,
+                              l->block_ref_to->mirror_num);
+                       ret = -1;
+               } else if (!l->block_ref_to->is_iodone) {
+                       printk(KERN_INFO "btrfs: attempt to write superblock"
+                              " which references block %c @%llu (%s/%llu/%d)"
+                              " which is not yet iodone!\n",
+                              btrfsic_get_block_type(state, l->block_ref_to),
+                              (unsigned long long)
+                              l->block_ref_to->logical_bytenr,
+                              l->block_ref_to->dev_state->name,
+                              (unsigned long long)l->block_ref_to->dev_bytenr,
+                              l->block_ref_to->mirror_num);
+                       ret = -1;
+               } else if (l->parent_generation !=
+                          l->block_ref_to->generation &&
+                          BTRFSIC_GENERATION_UNKNOWN !=
+                          l->parent_generation &&
+                          BTRFSIC_GENERATION_UNKNOWN !=
+                          l->block_ref_to->generation) {
+                       printk(KERN_INFO "btrfs: attempt to write superblock"
+                              " which references block %c @%llu (%s/%llu/%d)"
+                              " with generation %llu !="
+                              " parent generation %llu!\n",
+                              btrfsic_get_block_type(state, l->block_ref_to),
+                              (unsigned long long)
+                              l->block_ref_to->logical_bytenr,
+                              l->block_ref_to->dev_state->name,
+                              (unsigned long long)l->block_ref_to->dev_bytenr,
+                              l->block_ref_to->mirror_num,
+                              (unsigned long long)l->block_ref_to->generation,
+                              (unsigned long long)l->parent_generation);
+                       ret = -1;
+               } else if (l->block_ref_to->flush_gen >
+                          l->block_ref_to->dev_state->last_flush_gen) {
+                       printk(KERN_INFO "btrfs: attempt to write superblock"
+                              " which references block %c @%llu (%s/%llu/%d)"
+                              " which is not flushed out of disk's write cache"
+                              " (block flush_gen=%llu,"
+                              " dev->flush_gen=%llu)!\n",
+                              btrfsic_get_block_type(state, l->block_ref_to),
+                              (unsigned long long)
+                              l->block_ref_to->logical_bytenr,
+                              l->block_ref_to->dev_state->name,
+                              (unsigned long long)l->block_ref_to->dev_bytenr,
+                              l->block_ref_to->mirror_num,
+                              (unsigned long long)l->block_ref_to->flush_gen,
+                              (unsigned long long)
+                              l->block_ref_to->dev_state->last_flush_gen);
+                       ret = -1;
+               } else if (-1 == btrfsic_check_all_ref_blocks(state,
+                                                             l->block_ref_to,
+                                                             recursion_level +
+                                                             1)) {
+                       ret = -1;
+               }
+       }
+
+       return ret;
+}
+
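+/*
+ * Recursively follow the "referenced from" links to determine whether
+ * the block is reachable from the most recent superblock.
+ */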
+static int btrfsic_is_block_ref_by_superblock(
+               const struct btrfsic_state *state,
+               const struct btrfsic_block *block,
+               int recursion_level)
+{
+       struct list_head *elem_ref_from;
+
+       if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
+               /* refer to comment at "abort cyclic linkage (case 1)" */
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "btrfsic: abort cyclic linkage (case 2).\n");
+
+               return 0;
+       }
+
+       /*
+        * Recursion is acceptable here because the amount of stack space used
+        * per level is very small and the max recursion depth is limited.
+        */
+       list_for_each(elem_ref_from, &block->ref_from_list) {
+               const struct btrfsic_block_link *const l =
+                   list_entry(elem_ref_from, struct btrfsic_block_link,
+                              node_ref_from);
+
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "rl=%d, %c @%llu (%s/%llu/%d)"
+                              " is ref %u* from %c @%llu (%s/%llu/%d)\n",
+                              recursion_level,
+                              btrfsic_get_block_type(state, block),
+                              (unsigned long long)block->logical_bytenr,
+                              block->dev_state->name,
+                              (unsigned long long)block->dev_bytenr,
+                              block->mirror_num,
+                              l->ref_cnt,
+                              btrfsic_get_block_type(state, l->block_ref_from),
+                              (unsigned long long)
+                              l->block_ref_from->logical_bytenr,
+                              l->block_ref_from->dev_state->name,
+                              (unsigned long long)
+                              l->block_ref_from->dev_bytenr,
+                              l->block_ref_from->mirror_num);
+               if (l->block_ref_from->is_superblock &&
+                   state->latest_superblock->dev_bytenr ==
+                   l->block_ref_from->dev_bytenr &&
+                   state->latest_superblock->dev_state->bdev ==
+                   l->block_ref_from->dev_state->bdev)
+                       return 1;
+               else if (btrfsic_is_block_ref_by_superblock(state,
+                                                           l->block_ref_from,
+                                                           recursion_level +
+                                                           1))
+                       return 1;
+       }
+
+       return 0;
+}
+
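
A note on the traversal above: both recursive walkers, btrfsic_check_all_ref_blocks() and btrfsic_is_block_ref_by_superblock(), guard against reference cycles not by tracking visited blocks but by capping the recursion depth at 3 + BTRFS_MAX_LEVEL (BTRFS_MAX_LEVEL is 8). A minimal userspace sketch of that depth-bounded walk, with a hypothetical node type standing in for btrfsic_block:

    #define MAX_DEPTH 11    /* stands in for 3 + BTRFS_MAX_LEVEL */

    struct node {
            struct node *ref_from[4];   /* hypothetical fixed fan-out */
            int nr_refs;
    };

    /* returns 1 if 'target' is reachable, 0 on miss or aborted cycle */
    static int reachable(const struct node *n, const struct node *target,
                         int depth)
    {
            int i;

            if (depth >= MAX_DEPTH)
                    return 0;       /* abort cyclic linkage, as above */
            if (n == target)
                    return 1;
            for (i = 0; i < n->nr_refs; i++)
                    if (reachable(n->ref_from[i], target, depth + 1))
                            return 1;
            return 0;
    }
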
+static void btrfsic_print_add_link(const struct btrfsic_state *state,
+                                  const struct btrfsic_block_link *l)
+{
+       printk(KERN_INFO
+              "Add %u* link from %c @%llu (%s/%llu/%d)"
+              " to %c @%llu (%s/%llu/%d).\n",
+              l->ref_cnt,
+              btrfsic_get_block_type(state, l->block_ref_from),
+              (unsigned long long)l->block_ref_from->logical_bytenr,
+              l->block_ref_from->dev_state->name,
+              (unsigned long long)l->block_ref_from->dev_bytenr,
+              l->block_ref_from->mirror_num,
+              btrfsic_get_block_type(state, l->block_ref_to),
+              (unsigned long long)l->block_ref_to->logical_bytenr,
+              l->block_ref_to->dev_state->name,
+              (unsigned long long)l->block_ref_to->dev_bytenr,
+              l->block_ref_to->mirror_num);
+}
+
+static void btrfsic_print_rem_link(const struct btrfsic_state *state,
+                                  const struct btrfsic_block_link *l)
+{
+       printk(KERN_INFO
+              "Rem %u* link from %c @%llu (%s/%llu/%d)"
+              " to %c @%llu (%s/%llu/%d).\n",
+              l->ref_cnt,
+              btrfsic_get_block_type(state, l->block_ref_from),
+              (unsigned long long)l->block_ref_from->logical_bytenr,
+              l->block_ref_from->dev_state->name,
+              (unsigned long long)l->block_ref_from->dev_bytenr,
+              l->block_ref_from->mirror_num,
+              btrfsic_get_block_type(state, l->block_ref_to),
+              (unsigned long long)l->block_ref_to->logical_bytenr,
+              l->block_ref_to->dev_state->name,
+              (unsigned long long)l->block_ref_to->dev_bytenr,
+              l->block_ref_to->mirror_num);
+}
+
+static char btrfsic_get_block_type(const struct btrfsic_state *state,
+                                  const struct btrfsic_block *block)
+{
+       if (block->is_superblock &&
+           state->latest_superblock->dev_bytenr == block->dev_bytenr &&
+           state->latest_superblock->dev_state->bdev == block->dev_state->bdev)
+               return 'S';
+       else if (block->is_superblock)
+               return 's';
+       else if (block->is_metadata)
+               return 'M';
+       else
+               return 'D';
+}
+
+static void btrfsic_dump_tree(const struct btrfsic_state *state)
+{
+       btrfsic_dump_tree_sub(state, state->latest_superblock, 0);
+}
+
+static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
+                                 const struct btrfsic_block *block,
+                                 int indent_level)
+{
+       struct list_head *elem_ref_to;
+       int indent_add;
+       static char buf[80];
+       int cursor_position;
+
+       /*
+        * TODO: it would be better to fill an on-stack buffer with a
+        * complete line and dump it at once when it is time to print a
+        * newline character.
+        */
+
+       /*
+        * This algorithm is implemented recursively; that is acceptable
+        * here because the amount of stack space used per level is very
+        * small and the maximum recursion depth is limited.
+        */
+       indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)",
+                            btrfsic_get_block_type(state, block),
+                            (unsigned long long)block->logical_bytenr,
+                            block->dev_state->name,
+                            (unsigned long long)block->dev_bytenr,
+                            block->mirror_num);
+       if (indent_level + indent_add > BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
+               printk("[...]\n");
+               return;
+       }
+       printk(buf);
+       indent_level += indent_add;
+       if (list_empty(&block->ref_to_list)) {
+               printk("\n");
+               return;
+       }
+       if (block->mirror_num > 1 &&
+           !(state->print_mask & BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS)) {
+               printk(" [...]\n");
+               return;
+       }
+
+       cursor_position = indent_level;
+       list_for_each(elem_ref_to, &block->ref_to_list) {
+               const struct btrfsic_block_link *const l =
+                   list_entry(elem_ref_to, struct btrfsic_block_link,
+                              node_ref_to);
+
+               while (cursor_position < indent_level) {
+                       printk(" ");
+                       cursor_position++;
+               }
+               if (l->ref_cnt > 1)
+                       indent_add = sprintf(buf, " %d*--> ", l->ref_cnt);
+               else
+                       indent_add = sprintf(buf, " --> ");
+               if (indent_level + indent_add >
+                   BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
+                       printk("[...]\n");
+                       cursor_position = 0;
+                       continue;
+               }
+
+               printk(buf);
+
+               btrfsic_dump_tree_sub(state, l->block_ref_to,
+                                     indent_level + indent_add);
+               cursor_position = 0;
+       }
+}
+
+static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
+               struct btrfsic_state *state,
+               struct btrfsic_block_data_ctx *next_block_ctx,
+               struct btrfsic_block *next_block,
+               struct btrfsic_block *from_block,
+               u64 parent_generation)
+{
+       struct btrfsic_block_link *l;
+
+       l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev,
+                                               next_block_ctx->dev_bytenr,
+                                               from_block->dev_state->bdev,
+                                               from_block->dev_bytenr,
+                                               &state->block_link_hashtable);
+       if (NULL == l) {
+               l = btrfsic_block_link_alloc();
+               if (NULL == l) {
+                       printk(KERN_INFO
+                              "btrfsic: error, kmalloc failed!\n");
+                       return NULL;
+               }
+
+               l->block_ref_to = next_block;
+               l->block_ref_from = from_block;
+               l->ref_cnt = 1;
+               l->parent_generation = parent_generation;
+
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       btrfsic_print_add_link(state, l);
+
+               list_add(&l->node_ref_to, &from_block->ref_to_list);
+               list_add(&l->node_ref_from, &next_block->ref_from_list);
+
+               btrfsic_block_link_hashtable_add(l,
+                                                &state->block_link_hashtable);
+       } else {
+               l->ref_cnt++;
+               l->parent_generation = parent_generation;
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       btrfsic_print_add_link(state, l);
+       }
+
+       return l;
+}
+
+static struct btrfsic_block *btrfsic_block_lookup_or_add(
+               struct btrfsic_state *state,
+               struct btrfsic_block_data_ctx *block_ctx,
+               const char *additional_string,
+               int is_metadata,
+               int is_iodone,
+               int never_written,
+               int mirror_num,
+               int *was_created)
+{
+       struct btrfsic_block *block;
+
+       block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev,
+                                              block_ctx->dev_bytenr,
+                                              &state->block_hashtable);
+       if (NULL == block) {
+               struct btrfsic_dev_state *dev_state;
+
+               block = btrfsic_block_alloc();
+               if (NULL == block) {
+                       printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+                       return NULL;
+               }
+               dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev);
+               if (NULL == dev_state) {
+                       printk(KERN_INFO
+                              "btrfsic: error, lookup dev_state failed!\n");
+                       btrfsic_block_free(block);
+                       return NULL;
+               }
+               block->dev_state = dev_state;
+               block->dev_bytenr = block_ctx->dev_bytenr;
+               block->logical_bytenr = block_ctx->start;
+               block->is_metadata = is_metadata;
+               block->is_iodone = is_iodone;
+               block->never_written = never_written;
+               block->mirror_num = mirror_num;
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "New %s%c-block @%llu (%s/%llu/%d)\n",
+                              additional_string,
+                              btrfsic_get_block_type(state, block),
+                              (unsigned long long)block->logical_bytenr,
+                              dev_state->name,
+                              (unsigned long long)block->dev_bytenr,
+                              mirror_num);
+               list_add(&block->all_blocks_node, &state->all_blocks_list);
+               btrfsic_block_hashtable_add(block, &state->block_hashtable);
+               if (NULL != was_created)
+                       *was_created = 1;
+       } else {
+               if (NULL != was_created)
+                       *was_created = 0;
+       }
+
+       return block;
+}
+
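
btrfsic_block_link_lookup_or_add() and btrfsic_block_lookup_or_add() are both instances of the usual lookup-or-create hashtable idiom: probe first, allocate and insert only on a miss, and optionally report through an out-parameter whether a new entry was made. The same idiom in a self-contained sketch (bucket count and entry type invented for illustration):

    #include <stdlib.h>

    struct entry {
            unsigned long long key;
            struct entry *next;
    };

    #define NBUCKETS 64

    static struct entry *table[NBUCKETS];

    /* look the key up; allocate and insert on a miss (NULL on ENOMEM) */
    static struct entry *lookup_or_add(unsigned long long key,
                                       int *was_created)
    {
            struct entry *e;

            for (e = table[key % NBUCKETS]; e; e = e->next)
                    if (e->key == key) {
                            if (was_created)
                                    *was_created = 0;
                            return e;
                    }
            e = calloc(1, sizeof(*e));
            if (!e)
                    return NULL;
            e->key = key;
            e->next = table[key % NBUCKETS];
            table[key % NBUCKETS] = e;
            if (was_created)
                    *was_created = 1;
            return e;
    }
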
+static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
+                                          u64 bytenr,
+                                          struct btrfsic_dev_state *dev_state,
+                                          u64 dev_bytenr, char *data)
+{
+       int num_copies;
+       int mirror_num;
+       int ret;
+       struct btrfsic_block_data_ctx block_ctx;
+       int match = 0;
+
+       num_copies = btrfs_num_copies(&state->root->fs_info->mapping_tree,
+                                     bytenr, PAGE_SIZE);
+
+       for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
+               ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
+                                       &block_ctx, mirror_num);
+               if (ret) {
+                       printk(KERN_INFO "btrfsic:"
+                              " btrfsic_map_block(logical @%llu,"
+                              " mirror %d) failed!\n",
+                              (unsigned long long)bytenr, mirror_num);
+                       continue;
+               }
+
+               if (dev_state->bdev == block_ctx.dev->bdev &&
+                   dev_bytenr == block_ctx.dev_bytenr) {
+                       match++;
+                       btrfsic_release_block_ctx(&block_ctx);
+                       break;
+               }
+               btrfsic_release_block_ctx(&block_ctx);
+       }
+
+       if (!match) {
+               printk(KERN_INFO "btrfs: attempt to write M-block which"
+                      " contains logical bytenr that doesn't map to"
+                      " dev+physical bytenr of submit_bio,"
+                      " buffer->log_bytenr=%llu, submit_bio(bdev=%s,"
+                      " phys_bytenr=%llu)!\n",
+                      (unsigned long long)bytenr, dev_state->name,
+                      (unsigned long long)dev_bytenr);
+               for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
+                       ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
+                                               &block_ctx, mirror_num);
+                       if (ret)
+                               continue;
+
+                       printk(KERN_INFO "Read logical bytenr @%llu maps to"
+                              " (%s/%llu/%d)\n",
+                              (unsigned long long)bytenr,
+                              block_ctx.dev->name,
+                              (unsigned long long)block_ctx.dev_bytenr,
+                              mirror_num);
+               }
+               WARN_ON(1);
+       }
+}
+
+static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
+               struct block_device *bdev)
+{
+       struct btrfsic_dev_state *ds;
+
+       ds = btrfsic_dev_state_hashtable_lookup(bdev,
+                                               &btrfsic_dev_state_hashtable);
+       return ds;
+}
+
+int btrfsic_submit_bh(int rw, struct buffer_head *bh)
+{
+       struct btrfsic_dev_state *dev_state;
+
+       if (!btrfsic_is_initialized)
+               return submit_bh(rw, bh);
+
+       mutex_lock(&btrfsic_mutex);
+       /* since btrfsic_submit_bh() may also be called before
+        * btrfsic_mount(), this lookup can return NULL */
+       dev_state = btrfsic_dev_state_lookup(bh->b_bdev);
+
+       /* Only called to write the superblock (incl. FLUSH/FUA) */
+       if (NULL != dev_state &&
+           (rw & WRITE) && bh->b_size > 0) {
+               u64 dev_bytenr;
+
+               dev_bytenr = 4096 * bh->b_blocknr;
+               if (dev_state->state->print_mask &
+                   BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
+                       printk(KERN_INFO
+                              "submit_bh(rw=0x%x, blocknr=%lu (bytenr %llu),"
+                              " size=%lu, data=%p, bdev=%p)\n",
+                              rw, bh->b_blocknr,
+                              (unsigned long long)dev_bytenr, bh->b_size,
+                              bh->b_data, bh->b_bdev);
+               btrfsic_process_written_block(dev_state, dev_bytenr,
+                                             bh->b_data, bh->b_size, NULL,
+                                             NULL, bh, rw);
+       } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
+               if (dev_state->state->print_mask &
+                   BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
+                       printk(KERN_INFO
+                              "submit_bh(rw=0x%x) FLUSH, bdev=%p)\n",
+                              rw, bh->b_bdev);
+               if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
+                       if ((dev_state->state->print_mask &
+                            (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
+                             BTRFSIC_PRINT_MASK_VERBOSE)))
+                               printk(KERN_INFO
+                                      "btrfsic_submit_bh(%s) with FLUSH"
+                                      " but dummy block already in use"
+                                      " (ignored)!\n",
+                                      dev_state->name);
+               } else {
+                       struct btrfsic_block *const block =
+                               &dev_state->dummy_block_for_bio_bh_flush;
+
+                       block->is_iodone = 0;
+                       block->never_written = 0;
+                       block->iodone_w_error = 0;
+                       block->flush_gen = dev_state->last_flush_gen + 1;
+                       block->submit_bio_bh_rw = rw;
+                       block->orig_bio_bh_private = bh->b_private;
+                       block->orig_bio_bh_end_io.bh = bh->b_end_io;
+                       block->next_in_same_bio = NULL;
+                       bh->b_private = block;
+                       bh->b_end_io = btrfsic_bh_end_io;
+               }
+       }
+       mutex_unlock(&btrfsic_mutex);
+       return submit_bh(rw, bh);
+}
+
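
The FLUSH branch above intercepts I/O completion by saving the caller's b_end_io/b_private in the dummy block and substituting btrfsic_bh_end_io; presumably that handler performs the checker's accounting and then chains to the saved callback (the handler itself is not in this hunk). The save-and-substitute pattern in isolation, with hypothetical types and names:

    typedef void (*end_io_fn)(void *ctx, int err);

    struct request {
            end_io_fn end_io;
            void *private;
    };

    struct hook {
            end_io_fn orig_end_io;   /* saved caller callback */
            void *orig_private;      /* saved caller context */
    };

    static void hooked_end_io(void *ctx, int err)
    {
            struct hook *h = ctx;

            /* do the checker's bookkeeping, then chain to the caller */
            if (h->orig_end_io)
                    h->orig_end_io(h->orig_private, err);
    }

    static void hook_request(struct request *rq, struct hook *h)
    {
            h->orig_end_io = rq->end_io;
            h->orig_private = rq->private;
            rq->private = h;
            rq->end_io = hooked_end_io;
    }
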
+void btrfsic_submit_bio(int rw, struct bio *bio)
+{
+       struct btrfsic_dev_state *dev_state;
+
+       if (!btrfsic_is_initialized) {
+               submit_bio(rw, bio);
+               return;
+       }
+
+       mutex_lock(&btrfsic_mutex);
+       /* since btrfsic_submit_bio() may also be called before
+        * btrfsic_mount(), this lookup can return NULL */
+       dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
+       if (NULL != dev_state &&
+           (rw & WRITE) && NULL != bio->bi_io_vec) {
+               unsigned int i;
+               u64 dev_bytenr;
+               int bio_is_patched;
+
+               dev_bytenr = 512 * bio->bi_sector;
+               bio_is_patched = 0;
+               if (dev_state->state->print_mask &
+                   BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
+                       printk(KERN_INFO
+                              "submit_bio(rw=0x%x, bi_vcnt=%u,"
+                              " bi_sector=%lu (bytenr %llu), bi_bdev=%p)\n",
+                              rw, bio->bi_vcnt, bio->bi_sector,
+                              (unsigned long long)dev_bytenr,
+                              bio->bi_bdev);
+
+               for (i = 0; i < bio->bi_vcnt; i++) {
+                       u8 *mapped_data;
+
+                       mapped_data = kmap(bio->bi_io_vec[i].bv_page);
+                       if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
+                            BTRFSIC_PRINT_MASK_VERBOSE) ==
+                           (dev_state->state->print_mask &
+                            (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
+                             BTRFSIC_PRINT_MASK_VERBOSE)))
+                               printk(KERN_INFO
+                                      "#%u: page=%p, mapped=%p, len=%u,"
+                                      " offset=%u\n",
+                                      i, bio->bi_io_vec[i].bv_page,
+                                      mapped_data,
+                                      bio->bi_io_vec[i].bv_len,
+                                      bio->bi_io_vec[i].bv_offset);
+                       btrfsic_process_written_block(dev_state, dev_bytenr,
+                                                     mapped_data,
+                                                     bio->bi_io_vec[i].bv_len,
+                                                     bio, &bio_is_patched,
+                                                     NULL, rw);
+                       kunmap(bio->bi_io_vec[i].bv_page);
+                       dev_bytenr += bio->bi_io_vec[i].bv_len;
+               }
+       } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
+               if (dev_state->state->print_mask &
+                   BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
+                       printk(KERN_INFO
+                              "submit_bio(rw=0x%x) FLUSH, bdev=%p)\n",
+                              rw, bio->bi_bdev);
+               if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
+                       if ((dev_state->state->print_mask &
+                            (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
+                             BTRFSIC_PRINT_MASK_VERBOSE)))
+                               printk(KERN_INFO
+                                      "btrfsic_submit_bio(%s) with FLUSH"
+                                      " but dummy block already in use"
+                                      " (ignored)!\n",
+                                      dev_state->name);
+               } else {
+                       struct btrfsic_block *const block =
+                               &dev_state->dummy_block_for_bio_bh_flush;
+
+                       block->is_iodone = 0;
+                       block->never_written = 0;
+                       block->iodone_w_error = 0;
+                       block->flush_gen = dev_state->last_flush_gen + 1;
+                       block->submit_bio_bh_rw = rw;
+                       block->orig_bio_bh_private = bio->bi_private;
+                       block->orig_bio_bh_end_io.bio = bio->bi_end_io;
+                       block->next_in_same_bio = NULL;
+                       bio->bi_private = block;
+                       bio->bi_end_io = btrfsic_bio_end_io;
+               }
+       }
+       mutex_unlock(&btrfsic_mutex);
+
+       submit_bio(rw, bio);
+}
+
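
Note the unit difference between the two entry points: btrfsic_submit_bh() computes the device byte offset as 4096 * b_blocknr (the buffer heads here are 4 KiB blocks), while btrfsic_submit_bio() uses 512 * bi_sector (bios are addressed in 512-byte sectors) and then advances dev_bytenr by bv_len per bio_vec. A two-line sanity check of the arithmetic:

    #include <assert.h>

    int main(void)
    {
            /* b_blocknr = 256 and bi_sector = 2048 name the same byte */
            assert(4096ULL * 256 == 512ULL * 2048);   /* 1 MiB */
            return 0;
    }
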
+int btrfsic_mount(struct btrfs_root *root,
+                 struct btrfs_fs_devices *fs_devices,
+                 int including_extent_data, u32 print_mask)
+{
+       int ret;
+       struct btrfsic_state *state;
+       struct list_head *dev_head = &fs_devices->devices;
+       struct btrfs_device *device;
+
+       state = kzalloc(sizeof(*state), GFP_NOFS);
+       if (NULL == state) {
+               printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n");
+               return -1;
+       }
+
+       if (!btrfsic_is_initialized) {
+               mutex_init(&btrfsic_mutex);
+               btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable);
+               btrfsic_is_initialized = 1;
+       }
+       mutex_lock(&btrfsic_mutex);
+       state->root = root;
+       state->print_mask = print_mask;
+       state->include_extent_data = including_extent_data;
+       state->csum_size = 0;
+       INIT_LIST_HEAD(&state->all_blocks_list);
+       btrfsic_block_hashtable_init(&state->block_hashtable);
+       btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
+       state->max_superblock_generation = 0;
+       state->latest_superblock = NULL;
+
+       list_for_each_entry(device, dev_head, dev_list) {
+               struct btrfsic_dev_state *ds;
+               char *p;
+
+               if (!device->bdev || !device->name)
+                       continue;
+
+               ds = btrfsic_dev_state_alloc();
+               if (NULL == ds) {
+                       printk(KERN_INFO
+                              "btrfs check-integrity: kmalloc() failed!\n");
+                       mutex_unlock(&btrfsic_mutex);
+                       return -1;
+               }
+               ds->bdev = device->bdev;
+               ds->state = state;
+               bdevname(ds->bdev, ds->name);
+               ds->name[BDEVNAME_SIZE - 1] = '\0';
+               for (p = ds->name; *p != '\0'; p++);
+               while (p > ds->name && *p != '/')
+                       p--;
+               if (*p == '/')
+                       p++;
+               strlcpy(ds->name, p, sizeof(ds->name));
+               btrfsic_dev_state_hashtable_add(ds,
+                                               &btrfsic_dev_state_hashtable);
+       }
+
+       ret = btrfsic_process_superblock(state, fs_devices);
+       if (0 != ret) {
+               mutex_unlock(&btrfsic_mutex);
+               btrfsic_unmount(root, fs_devices);
+               return ret;
+       }
+
+       if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_DATABASE)
+               btrfsic_dump_database(state);
+       if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_TREE)
+               btrfsic_dump_tree(state);
+
+       mutex_unlock(&btrfsic_mutex);
+       return 0;
+}
+
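
The device loop in btrfsic_mount() reduces each bdevname() result to its basename: scan to the terminating NUL, walk back to the last '/', and keep the tail. A standalone rendering of that logic (memmove replaces the kernel's strlcpy(), since source and destination overlap here):

    #include <string.h>

    /* strip everything up to and including the last '/' in place */
    static void basename_in_place(char *name)
    {
            char *p = name;

            while (*p != '\0')
                    p++;                    /* find the end of the string */
            while (p > name && *p != '/')
                    p--;                    /* walk back to the last '/' */
            if (*p == '/')
                    p++;
            memmove(name, p, strlen(p) + 1);  /* keep the tail, incl. NUL */
    }
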
+void btrfsic_unmount(struct btrfs_root *root,
+                    struct btrfs_fs_devices *fs_devices)
+{
+       struct list_head *elem_all;
+       struct list_head *tmp_all;
+       struct btrfsic_state *state;
+       struct list_head *dev_head = &fs_devices->devices;
+       struct btrfs_device *device;
+
+       if (!btrfsic_is_initialized)
+               return;
+
+       mutex_lock(&btrfsic_mutex);
+
+       state = NULL;
+       list_for_each_entry(device, dev_head, dev_list) {
+               struct btrfsic_dev_state *ds;
+
+               if (!device->bdev || !device->name)
+                       continue;
+
+               ds = btrfsic_dev_state_hashtable_lookup(
+                               device->bdev,
+                               &btrfsic_dev_state_hashtable);
+               if (NULL != ds) {
+                       state = ds->state;
+                       btrfsic_dev_state_hashtable_remove(ds);
+                       btrfsic_dev_state_free(ds);
+               }
+       }
+
+       if (NULL == state) {
+               printk(KERN_INFO
+                      "btrfsic: error, cannot find state information"
+                      " on umount!\n");
+               mutex_unlock(&btrfsic_mutex);
+               return;
+       }
+
+       /*
+        * There is no need to keep the lists consistent during teardown;
+        * just free all memory that was allocated dynamically, i.e. the
+        * blocks and the block_links.
+        */
+       list_for_each_safe(elem_all, tmp_all, &state->all_blocks_list) {
+               struct btrfsic_block *const b_all =
+                   list_entry(elem_all, struct btrfsic_block,
+                              all_blocks_node);
+               struct list_head *elem_ref_to;
+               struct list_head *tmp_ref_to;
+
+               list_for_each_safe(elem_ref_to, tmp_ref_to,
+                                  &b_all->ref_to_list) {
+                       struct btrfsic_block_link *const l =
+                           list_entry(elem_ref_to,
+                                      struct btrfsic_block_link,
+                                      node_ref_to);
+
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               btrfsic_print_rem_link(state, l);
+
+                       l->ref_cnt--;
+                       if (0 == l->ref_cnt)
+                               btrfsic_block_link_free(l);
+               }
+
+               if (b_all->is_iodone)
+                       btrfsic_block_free(b_all);
+               else
+                       printk(KERN_INFO "btrfs: attempt to free %c-block"
+                              " @%llu (%s/%llu/%d) on umount which is"
+                              " not yet iodone!\n",
+                              btrfsic_get_block_type(state, b_all),
+                              (unsigned long long)b_all->logical_bytenr,
+                              b_all->dev_state->name,
+                              (unsigned long long)b_all->dev_bytenr,
+                              b_all->mirror_num);
+       }
+
+       mutex_unlock(&btrfsic_mutex);
+
+       kfree(state);
+}
diff --git a/fs/btrfs/check-integrity.h b/fs/btrfs/check-integrity.h
new file mode 100644 (file)
index 0000000..8b59175
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) STRATO AG 2011.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#if !defined(__BTRFS_CHECK_INTEGRITY__)
+#define __BTRFS_CHECK_INTEGRITY__
+
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+int btrfsic_submit_bh(int rw, struct buffer_head *bh);
+void btrfsic_submit_bio(int rw, struct bio *bio);
+#else
+#define btrfsic_submit_bh submit_bh
+#define btrfsic_submit_bio submit_bio
+#endif
+
+int btrfsic_mount(struct btrfs_root *root,
+                 struct btrfs_fs_devices *fs_devices,
+                 int including_extent_data, u32 print_mask);
+void btrfsic_unmount(struct btrfs_root *root,
+                    struct btrfs_fs_devices *fs_devices);
+
+#endif
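
The #else branch keeps every call site unconditional: when CONFIG_BTRFS_FS_CHECK_INTEGRITY is off, btrfsic_submit_bh/btrfsic_submit_bio compile straight to submit_bh/submit_bio and the checker costs nothing. The same compile-time stub pattern in miniature, with a hypothetical feature flag and function names:

    #ifdef CONFIG_MYFS_CHECKER
    int checked_submit(int rw, struct my_buffer *b);   /* real wrapper */
    #else
    #define checked_submit plain_submit    /* zero-cost pass-through */
    #endif
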
index dede441bdeee2678225187bece170710abe1a9b4..0639a555e16ed1975702ed5509dc9bc1c4dbf490 100644 (file)
@@ -240,7 +240,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 
        cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
                                     new_root_objectid, &disk_key, level,
-                                    buf->start, 0);
+                                    buf->start, 0, 1);
        if (IS_ERR(cow))
                return PTR_ERR(cow);
 
@@ -261,9 +261,9 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 
        WARN_ON(btrfs_header_generation(buf) > trans->transid);
        if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
-               ret = btrfs_inc_ref(trans, root, cow, 1);
+               ret = btrfs_inc_ref(trans, root, cow, 1, 1);
        else
-               ret = btrfs_inc_ref(trans, root, cow, 0);
+               ret = btrfs_inc_ref(trans, root, cow, 0, 1);
 
        if (ret)
                return ret;
@@ -350,14 +350,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                if ((owner == root->root_key.objectid ||
                     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
                    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
-                       ret = btrfs_inc_ref(trans, root, buf, 1);
+                       ret = btrfs_inc_ref(trans, root, buf, 1, 1);
                        BUG_ON(ret);
 
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID) {
-                               ret = btrfs_dec_ref(trans, root, buf, 0);
+                               ret = btrfs_dec_ref(trans, root, buf, 0, 1);
                                BUG_ON(ret);
-                               ret = btrfs_inc_ref(trans, root, cow, 1);
+                               ret = btrfs_inc_ref(trans, root, cow, 1, 1);
                                BUG_ON(ret);
                        }
                        new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
@@ -365,9 +365,9 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID)
-                               ret = btrfs_inc_ref(trans, root, cow, 1);
+                               ret = btrfs_inc_ref(trans, root, cow, 1, 1);
                        else
-                               ret = btrfs_inc_ref(trans, root, cow, 0);
+                               ret = btrfs_inc_ref(trans, root, cow, 0, 1);
                        BUG_ON(ret);
                }
                if (new_flags != 0) {
@@ -381,11 +381,11 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID)
-                               ret = btrfs_inc_ref(trans, root, cow, 1);
+                               ret = btrfs_inc_ref(trans, root, cow, 1, 1);
                        else
-                               ret = btrfs_inc_ref(trans, root, cow, 0);
+                               ret = btrfs_inc_ref(trans, root, cow, 0, 1);
                        BUG_ON(ret);
-                       ret = btrfs_dec_ref(trans, root, buf, 1);
+                       ret = btrfs_dec_ref(trans, root, buf, 1, 1);
                        BUG_ON(ret);
                }
                clean_tree_block(trans, root, buf);
@@ -446,7 +446,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 
        cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
                                     root->root_key.objectid, &disk_key,
-                                    level, search_start, empty_size);
+                                    level, search_start, empty_size, 1);
        if (IS_ERR(cow))
                return PTR_ERR(cow);
 
@@ -484,7 +484,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                rcu_assign_pointer(root->node, cow);
 
                btrfs_free_tree_block(trans, root, buf, parent_start,
-                                     last_ref);
+                                     last_ref, 1);
                free_extent_buffer(buf);
                add_root_to_dirty_list(root);
        } else {
@@ -500,7 +500,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                                              trans->transid);
                btrfs_mark_buffer_dirty(parent);
                btrfs_free_tree_block(trans, root, buf, parent_start,
-                                     last_ref);
+                                     last_ref, 1);
        }
        if (unlock_orig)
                btrfs_tree_unlock(buf);
@@ -957,7 +957,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                free_extent_buffer(mid);
 
                root_sub_used(root, mid->len);
-               btrfs_free_tree_block(trans, root, mid, 0, 1);
+               btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
                /* once for the root ptr */
                free_extent_buffer(mid);
                return 0;
@@ -1015,7 +1015,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                        if (wret)
                                ret = wret;
                        root_sub_used(root, right->len);
-                       btrfs_free_tree_block(trans, root, right, 0, 1);
+                       btrfs_free_tree_block(trans, root, right, 0, 1, 0);
                        free_extent_buffer(right);
                        right = NULL;
                } else {
@@ -1055,7 +1055,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                if (wret)
                        ret = wret;
                root_sub_used(root, mid->len);
-               btrfs_free_tree_block(trans, root, mid, 0, 1);
+               btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
                free_extent_buffer(mid);
                mid = NULL;
        } else {
@@ -2089,7 +2089,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 
        c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
                                   root->root_key.objectid, &lower_key,
-                                  level, root->node->start, 0);
+                                  level, root->node->start, 0, 0);
        if (IS_ERR(c))
                return PTR_ERR(c);
 
@@ -2216,7 +2216,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 
        split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
                                        root->root_key.objectid,
-                                       &disk_key, level, c->start, 0);
+                                       &disk_key, level, c->start, 0, 0);
        if (IS_ERR(split))
                return PTR_ERR(split);
 
@@ -2970,7 +2970,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 
        right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                        root->root_key.objectid,
-                                       &disk_key, 0, l->start, 0);
+                                       &disk_key, 0, l->start, 0, 0);
        if (IS_ERR(right))
                return PTR_ERR(right);
 
@@ -3781,7 +3781,7 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
 
        root_sub_used(root, leaf->len);
 
-       btrfs_free_tree_block(trans, root, leaf, 0, 1);
+       btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
        return 0;
 }
 /*
index 67385033323d6e49817398a1df9b1596df07e839..27ebe61d3cccd0233518d743216dce4b7b8c933f 100644 (file)
@@ -86,6 +86,9 @@ struct btrfs_ordered_sum;
 /* holds checksums of all the data extents */
 #define BTRFS_CSUM_TREE_OBJECTID 7ULL
 
+/* for storing balance parameters in the root tree */
+#define BTRFS_BALANCE_OBJECTID -4ULL
+
 /* orphan objectid for tracking unlinked/truncated files */
 #define BTRFS_ORPHAN_OBJECTID -5ULL
 
@@ -692,6 +695,54 @@ struct btrfs_root_ref {
        __le16 name_len;
 } __attribute__ ((__packed__));
 
+struct btrfs_disk_balance_args {
+       /*
+        * profiles to operate on, single is denoted by
+        * BTRFS_AVAIL_ALLOC_BIT_SINGLE
+        */
+       __le64 profiles;
+
+       /* usage filter */
+       __le64 usage;
+
+       /* devid filter */
+       __le64 devid;
+
+       /* devid subset filter [pstart..pend) */
+       __le64 pstart;
+       __le64 pend;
+
+       /* btrfs virtual address space subset filter [vstart..vend) */
+       __le64 vstart;
+       __le64 vend;
+
+       /*
+        * profile to convert to, single is denoted by
+        * BTRFS_AVAIL_ALLOC_BIT_SINGLE
+        */
+       __le64 target;
+
+       /* BTRFS_BALANCE_ARGS_* */
+       __le64 flags;
+
+       __le64 unused[8];
+} __attribute__ ((__packed__));
+
+/*
+ * store balance parameters to disk so that balance can be properly
+ * resumed after crash or unmount
+ */
+struct btrfs_balance_item {
+       /* BTRFS_BALANCE_* */
+       __le64 flags;
+
+       struct btrfs_disk_balance_args data;
+       struct btrfs_disk_balance_args meta;
+       struct btrfs_disk_balance_args sys;
+
+       __le64 unused[4];
+} __attribute__ ((__packed__));
+
 #define BTRFS_FILE_EXTENT_INLINE 0
 #define BTRFS_FILE_EXTENT_REG 1
 #define BTRFS_FILE_EXTENT_PREALLOC 2
@@ -751,14 +802,32 @@ struct btrfs_csum_item {
 } __attribute__ ((__packed__));
 
 /* different types of block groups (and chunks) */
-#define BTRFS_BLOCK_GROUP_DATA     (1 << 0)
-#define BTRFS_BLOCK_GROUP_SYSTEM   (1 << 1)
-#define BTRFS_BLOCK_GROUP_METADATA (1 << 2)
-#define BTRFS_BLOCK_GROUP_RAID0    (1 << 3)
-#define BTRFS_BLOCK_GROUP_RAID1    (1 << 4)
-#define BTRFS_BLOCK_GROUP_DUP     (1 << 5)
-#define BTRFS_BLOCK_GROUP_RAID10   (1 << 6)
-#define BTRFS_NR_RAID_TYPES       5
+#define BTRFS_BLOCK_GROUP_DATA         (1ULL << 0)
+#define BTRFS_BLOCK_GROUP_SYSTEM       (1ULL << 1)
+#define BTRFS_BLOCK_GROUP_METADATA     (1ULL << 2)
+#define BTRFS_BLOCK_GROUP_RAID0                (1ULL << 3)
+#define BTRFS_BLOCK_GROUP_RAID1                (1ULL << 4)
+#define BTRFS_BLOCK_GROUP_DUP          (1ULL << 5)
+#define BTRFS_BLOCK_GROUP_RAID10       (1ULL << 6)
+#define BTRFS_BLOCK_GROUP_RESERVED     BTRFS_AVAIL_ALLOC_BIT_SINGLE
+#define BTRFS_NR_RAID_TYPES            5
+
+#define BTRFS_BLOCK_GROUP_TYPE_MASK    (BTRFS_BLOCK_GROUP_DATA |    \
+                                        BTRFS_BLOCK_GROUP_SYSTEM |  \
+                                        BTRFS_BLOCK_GROUP_METADATA)
+
+#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 |   \
+                                        BTRFS_BLOCK_GROUP_RAID1 |   \
+                                        BTRFS_BLOCK_GROUP_DUP |     \
+                                        BTRFS_BLOCK_GROUP_RAID10)
+/*
+ * We need a bit for the restriper to be able to tell when chunks of
+ * type SINGLE are available.  This "extended" profile format is used
+ * in fs_info->avail_*_alloc_bits (in-memory) and in the balance item
+ * fields (on-disk).  The corresponding on-disk bit in chunk.type is
+ * reserved to avoid remapping between the two formats in the future.
+ */
+#define BTRFS_AVAIL_ALLOC_BIT_SINGLE   (1ULL << 48)
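
The switch from (1 << n) to (1ULL << n) matters once bit 48 is in play: shifting a plain int by 48 is undefined behavior in C, so every BTRFS_BLOCK_GROUP_* bit is widened even though the low ones would happen to work. A two-line illustration:

    #include <stdint.h>

    uint64_t ok = 1ULL << 48;      /* 0x1000000000000, well defined */
    /* uint64_t bad = 1 << 48;        undefined: shift >= width of int */
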
 
 struct btrfs_block_group_item {
        __le64 used;
@@ -916,6 +985,7 @@ struct btrfs_block_group_cache {
 struct reloc_control;
 struct btrfs_device;
 struct btrfs_fs_devices;
+struct btrfs_balance_control;
 struct btrfs_delayed_root;
 struct btrfs_fs_info {
        u8 fsid[BTRFS_FSID_SIZE];
@@ -971,7 +1041,7 @@ struct btrfs_fs_info {
         * is required instead of the faster short fsync log commits
         */
        u64 last_trans_log_full_commit;
-       unsigned long mount_opt:20;
+       unsigned long mount_opt:21;
        unsigned long compress_type:4;
        u64 max_inline;
        u64 alloc_start;
@@ -1132,12 +1202,23 @@ struct btrfs_fs_info {
        spinlock_t ref_cache_lock;
        u64 total_ref_cache_size;
 
+       /*
+        * these three are in extended format (availability of single
+        * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
+        * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits)
+        */
        u64 avail_data_alloc_bits;
        u64 avail_metadata_alloc_bits;
        u64 avail_system_alloc_bits;
-       u64 data_alloc_profile;
-       u64 metadata_alloc_profile;
-       u64 system_alloc_profile;
+
+       /* restriper state */
+       spinlock_t balance_lock;
+       struct mutex balance_mutex;
+       atomic_t balance_running;
+       atomic_t balance_pause_req;
+       atomic_t balance_cancel_req;
+       struct btrfs_balance_control *balance_ctl;
+       wait_queue_head_t balance_wait_q;
 
        unsigned data_chunk_allocations;
        unsigned metadata_ratio;
@@ -1155,6 +1236,10 @@ struct btrfs_fs_info {
        int scrub_workers_refcnt;
        struct btrfs_workers scrub_workers;
 
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+       u32 check_integrity_print_mask;
+#endif
+
        /* filesystem state */
        u64 fs_state;
 
@@ -1383,6 +1468,8 @@ struct btrfs_ioctl_defrag_range_args {
 #define BTRFS_DEV_ITEM_KEY     216
 #define BTRFS_CHUNK_ITEM_KEY   228
 
+#define BTRFS_BALANCE_ITEM_KEY 248
+
 /*
  * string items are for debugging.  They just store a short string of
  * data in the FS
@@ -1413,6 +1500,9 @@ struct btrfs_ioctl_defrag_range_args {
 #define BTRFS_MOUNT_AUTO_DEFRAG                (1 << 16)
 #define BTRFS_MOUNT_INODE_MAP_CACHE    (1 << 17)
 #define BTRFS_MOUNT_RECOVERY           (1 << 18)
+#define BTRFS_MOUNT_SKIP_BALANCE       (1 << 19)
+#define BTRFS_MOUNT_CHECK_INTEGRITY    (1 << 20)
+#define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
 
 #define btrfs_clear_opt(o, opt)                ((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)          ((o) |= BTRFS_MOUNT_##opt)
@@ -2077,8 +2167,86 @@ BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup,
 BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup,
                   num_devices, 64);
 
-/* struct btrfs_super_block */
+/* struct btrfs_balance_item */
+BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64);
 
+static inline void btrfs_balance_data(struct extent_buffer *eb,
+                                     struct btrfs_balance_item *bi,
+                                     struct btrfs_disk_balance_args *ba)
+{
+       read_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
+}
+
+static inline void btrfs_set_balance_data(struct extent_buffer *eb,
+                                         struct btrfs_balance_item *bi,
+                                         struct btrfs_disk_balance_args *ba)
+{
+       write_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
+}
+
+static inline void btrfs_balance_meta(struct extent_buffer *eb,
+                                     struct btrfs_balance_item *bi,
+                                     struct btrfs_disk_balance_args *ba)
+{
+       read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
+}
+
+static inline void btrfs_set_balance_meta(struct extent_buffer *eb,
+                                         struct btrfs_balance_item *bi,
+                                         struct btrfs_disk_balance_args *ba)
+{
+       write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
+}
+
+static inline void btrfs_balance_sys(struct extent_buffer *eb,
+                                    struct btrfs_balance_item *bi,
+                                    struct btrfs_disk_balance_args *ba)
+{
+       read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
+}
+
+static inline void btrfs_set_balance_sys(struct extent_buffer *eb,
+                                        struct btrfs_balance_item *bi,
+                                        struct btrfs_disk_balance_args *ba)
+{
+       write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
+}
+
+static inline void
+btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
+                              struct btrfs_disk_balance_args *disk)
+{
+       memset(cpu, 0, sizeof(*cpu));
+
+       cpu->profiles = le64_to_cpu(disk->profiles);
+       cpu->usage = le64_to_cpu(disk->usage);
+       cpu->devid = le64_to_cpu(disk->devid);
+       cpu->pstart = le64_to_cpu(disk->pstart);
+       cpu->pend = le64_to_cpu(disk->pend);
+       cpu->vstart = le64_to_cpu(disk->vstart);
+       cpu->vend = le64_to_cpu(disk->vend);
+       cpu->target = le64_to_cpu(disk->target);
+       cpu->flags = le64_to_cpu(disk->flags);
+}
+
+static inline void
+btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
+                              struct btrfs_balance_args *cpu)
+{
+       memset(disk, 0, sizeof(*disk));
+
+       disk->profiles = cpu_to_le64(cpu->profiles);
+       disk->usage = cpu_to_le64(cpu->usage);
+       disk->devid = cpu_to_le64(cpu->devid);
+       disk->pstart = cpu_to_le64(cpu->pstart);
+       disk->pend = cpu_to_le64(cpu->pend);
+       disk->vstart = cpu_to_le64(cpu->vstart);
+       disk->vend = cpu_to_le64(cpu->vend);
+       disk->target = cpu_to_le64(cpu->target);
+       disk->flags = cpu_to_le64(cpu->flags);
+}
+
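
The converter pair above memset()s the destination first, so the unused[] padding always reaches disk as zeroes, and routes every field through cpu_to_le64()/le64_to_cpu() so the on-disk format is little-endian regardless of host byte order. A userspace sketch of the same round-trip (htole64()/le64toh() from glibc's <endian.h> stand in for the kernel helpers; the two-field structs are invented for illustration):

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    struct disk_args { uint64_t profiles_le, flags_le, unused[8]; };
    struct cpu_args  { uint64_t profiles, flags; };

    static void args_to_cpu(struct cpu_args *cpu,
                            const struct disk_args *disk)
    {
            memset(cpu, 0, sizeof(*cpu));
            cpu->profiles = le64toh(disk->profiles_le);
            cpu->flags    = le64toh(disk->flags_le);
    }

    static void args_to_disk(struct disk_args *disk,
                             const struct cpu_args *cpu)
    {
            memset(disk, 0, sizeof(*disk));   /* zeroes unused[] too */
            disk->profiles_le = htole64(cpu->profiles);
            disk->flags_le    = htole64(cpu->flags);
    }
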
+/* struct btrfs_super_block */
 BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64);
 BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64);
 BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block,
@@ -2196,7 +2364,7 @@ static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
        return btrfs_item_size(eb, e) - offset;
 }
 
-static inline struct btrfs_root *btrfs_sb(struct super_block *sb)
+static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
 {
        return sb->s_fs_info;
 }
@@ -2277,11 +2445,11 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root, u32 blocksize,
                                        u64 parent, u64 root_objectid,
                                        struct btrfs_disk_key *key, int level,
-                                       u64 hint, u64 empty_size);
+                                       u64 hint, u64 empty_size, int for_cow);
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
-                          u64 parent, int last_ref);
+                          u64 parent, int last_ref, int for_cow);
 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
                                            struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize,
@@ -2301,17 +2469,17 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
                                  u64 search_end, struct btrfs_key *ins,
                                  u64 data);
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                 struct extent_buffer *buf, int full_backref);
+                 struct extent_buffer *buf, int full_backref, int for_cow);
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                 struct extent_buffer *buf, int full_backref);
+                 struct extent_buffer *buf, int full_backref, int for_cow);
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 flags,
                                int is_data);
 int btrfs_free_extent(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root,
-                     u64 bytenr, u64 num_bytes, u64 parent,
-                     u64 root_objectid, u64 owner, u64 offset);
+                     u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+                     u64 owner, u64 offset, int for_cow);
 
 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
@@ -2323,7 +2491,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes, u64 parent,
-                        u64 root_objectid, u64 owner, u64 offset);
+                        u64 root_objectid, u64 owner, u64 offset, int for_cow);
 
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root);
@@ -2482,10 +2650,18 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
+static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+{
+       ++p->slots[0];
+       if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
+               return btrfs_next_leaf(root, p);
+       return 0;
+}
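
btrfs_next_item() gives callers a flat cursor: bump slot 0 and fall through to btrfs_next_leaf() only when the current leaf is exhausted. A self-contained userspace analogue of that two-level cursor, where hopping to the next leaf in an array plays the role of btrfs_next_leaf() (non-empty leaves assumed):

    struct leaf { int nritems; long long items[16]; };
    struct path {
            struct leaf **leaves;
            int nr_leaves, leaf_idx, slot;
    };

    /* bump the slot, hop leaves when the current one is exhausted;
     * returns 0 on success, 1 when there are no more items */
    static int next_item(struct path *p)
    {
            if (++p->slot < p->leaves[p->leaf_idx]->nritems)
                    return 0;
            if (++p->leaf_idx >= p->nr_leaves)
                    return 1;
            p->slot = 0;
            return 0;
    }
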
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
 void btrfs_drop_snapshot(struct btrfs_root *root,
-                        struct btrfs_block_rsv *block_rsv, int update_ref);
+                        struct btrfs_block_rsv *block_rsv, int update_ref,
+                        int for_reloc);
 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root,
                        struct extent_buffer *node,
@@ -2500,6 +2676,7 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
 }
 static inline void free_fs_info(struct btrfs_fs_info *fs_info)
 {
+       kfree(fs_info->balance_ctl);
        kfree(fs_info->delayed_root);
        kfree(fs_info->extent_root);
        kfree(fs_info->tree_root);
@@ -2510,6 +2687,24 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
        kfree(fs_info->super_for_commit);
        kfree(fs_info);
 }
+/**
+ * profile_is_valid - tests whether a given profile is valid and reduced
+ * @flags: profile to validate
+ * @extended: if true @flags is treated as an extended profile
+ */
+static inline int profile_is_valid(u64 flags, int extended)
+{
+       u64 mask = ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+       flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
+       if (extended)
+               mask &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+       if (flags & mask)
+               return 0;
+       /* true if zero or exactly one bit set */
+       return (flags & (~flags + 1)) == flags;
+}
 
 /* root-item.c */
 int btrfs_find_root_ref(struct btrfs_root *tree_root,
index 9c1eccc2c503e5eec8bd20d3dfc057a417eef89a..fe4cd0f1cef188b8cf584c67c8ab0f58ffb62cbb 100644 (file)
@@ -595,8 +595,12 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 
        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
-       if (!ret)
+       if (!ret) {
+               trace_btrfs_space_reservation(root->fs_info, "delayed_item",
+                                             item->key.objectid,
+                                             num_bytes, 1);
                item->bytes_reserved = num_bytes;
+       }
 
        return ret;
 }
@@ -610,6 +614,9 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
                return;
 
        rsv = &root->fs_info->delayed_block_rsv;
+       trace_btrfs_space_reservation(root->fs_info, "delayed_item",
+                                     item->key.objectid, item->bytes_reserved,
+                                     0);
        btrfs_block_rsv_release(root, rsv,
                                item->bytes_reserved);
 }
@@ -624,7 +631,7 @@ static int btrfs_delayed_inode_reserve_metadata(
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;
-       int release = false;
+       bool release = false;
 
        src_rsv = trans->block_rsv;
        dst_rsv = &root->fs_info->delayed_block_rsv;
@@ -651,8 +658,13 @@ static int btrfs_delayed_inode_reserve_metadata(
                 */
                if (ret == -EAGAIN)
                        ret = -ENOSPC;
-               if (!ret)
+               if (!ret) {
                        node->bytes_reserved = num_bytes;
+                       trace_btrfs_space_reservation(root->fs_info,
+                                                     "delayed_inode",
+                                                     btrfs_ino(inode),
+                                                     num_bytes, 1);
+               }
                return ret;
        } else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
                spin_lock(&BTRFS_I(inode)->lock);
@@ -707,11 +719,17 @@ static int btrfs_delayed_inode_reserve_metadata(
         * reservation here.  I think it may be time for a documentation page on
          * how block rsvs work.
         */
-       if (!ret)
+       if (!ret) {
+               trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
+                                             btrfs_ino(inode), num_bytes, 1);
                node->bytes_reserved = num_bytes;
+       }
 
-       if (release)
+       if (release) {
+               trace_btrfs_space_reservation(root->fs_info, "delalloc",
+                                             btrfs_ino(inode), num_bytes, 0);
                btrfs_block_rsv_release(root, src_rsv, num_bytes);
+       }
 
        return ret;
 }
@@ -725,6 +743,8 @@ static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
                return;
 
        rsv = &root->fs_info->delayed_block_rsv;
+       trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
+                                     node->inode_id, node->bytes_reserved, 0);
        btrfs_block_rsv_release(root, rsv,
                                node->bytes_reserved);
        node->bytes_reserved = 0;
@@ -1372,13 +1392,6 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
                goto release_node;
        }
 
-       ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
-       /*
-        * we have reserved enough space when we start a new transaction,
-        * so reserving metadata failure is impossible
-        */
-       BUG_ON(ret);
-
        delayed_item->key.objectid = btrfs_ino(dir);
        btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
        delayed_item->key.offset = index;
@@ -1391,6 +1404,14 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
        dir_item->type = type;
        memcpy((char *)(dir_item + 1), name, name_len);
 
+       ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
+       /*
+        * we have reserved enough space when we start a new transaction,
+        * so reserving metadata failure is impossible
+        */
+       BUG_ON(ret);
+
        mutex_lock(&delayed_node->mutex);
        ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
        if (unlikely(ret)) {
index 125cf76fcd086803d35bd257bdb54168486e0e0b..66e4f29505a33dbecd45b5d6a80e878c87818bc0 100644 (file)
@@ -101,6 +101,11 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
                return -1;
        if (ref1->type > ref2->type)
                return 1;
+       /* merging of sequenced refs is not allowed */
+       if (ref1->seq < ref2->seq)
+               return -1;
+       if (ref1->seq > ref2->seq)
+               return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
                return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
@@ -150,16 +155,22 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
 
 /*
  * find a head entry based on bytenr. This returns the delayed ref
- * head if it was able to find one, or NULL if nothing was in that spot
+ * head if it was able to find one, or NULL if nothing was in that spot.
+ * If return_bigger is given, the next bigger entry is returned if no exact
+ * match is found.
  */
 static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
                                  u64 bytenr,
-                                 struct btrfs_delayed_ref_node **last)
+                                 struct btrfs_delayed_ref_node **last,
+                                 int return_bigger)
 {
-       struct rb_node *n = root->rb_node;
+       struct rb_node *n;
        struct btrfs_delayed_ref_node *entry;
-       int cmp;
+       int cmp = 0;
 
+again:
+       n = root->rb_node;
+       entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
                WARN_ON(!entry->in_tree);
@@ -182,6 +193,19 @@ static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
                else
                        return entry;
        }
+       if (entry && return_bigger) {
+               if (cmp > 0) {
+                       n = rb_next(&entry->rb_node);
+                       if (!n)
+                               n = rb_first(root);
+                       entry = rb_entry(n, struct btrfs_delayed_ref_node,
+                                        rb_node);
+                       bytenr = entry->bytenr;
+                       return_bigger = 0;
+                       goto again;
+               }
+               return entry;
+       }
        return NULL;
 }
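The wraparound in the return_bigger path is easiest to see with concrete numbers; a hypothetical illustration (bytenr values invented):

    /*
     * With heads at bytenr 4096 and 8192:
     *   find_ref_head(root, 5000, NULL, 1) returns the 8192 head: the
     *     search lands below 8192 with cmp < 0, so the last entry
     *     visited is already the next bigger one;
     *   find_ref_head(root, 9000, NULL, 1) finds rb_next() == NULL, so
     *     it wraps via rb_first() and returns the 4096 head.
     * btrfs_find_ref_cluster (below) relies on this by searching start + 1.
     */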
 
@@ -209,6 +233,24 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+                           u64 seq)
+{
+       struct seq_list *elem;
+
+       assert_spin_locked(&delayed_refs->lock);
+       if (list_empty(&delayed_refs->seq_head))
+               return 0;
+
+       elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
+       if (seq >= elem->seq) {
+               pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
+                        seq, elem->seq, delayed_refs);
+               return 1;
+       }
+       return 0;
+}
+
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 start)
 {
@@ -223,20 +265,8 @@ int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                node = rb_first(&delayed_refs->root);
        } else {
                ref = NULL;
-               find_ref_head(&delayed_refs->root, start, &ref);
+               find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
                if (ref) {
-                       struct btrfs_delayed_ref_node *tmp;
-
-                       node = rb_prev(&ref->rb_node);
-                       while (node) {
-                               tmp = rb_entry(node,
-                                              struct btrfs_delayed_ref_node,
-                                              rb_node);
-                               if (tmp->bytenr < start)
-                                       break;
-                               ref = tmp;
-                               node = rb_prev(&ref->rb_node);
-                       }
                        node = &ref->rb_node;
                } else
                        node = rb_first(&delayed_refs->root);
@@ -390,7 +420,8 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
  * this does all the dirty work in terms of maintaining the correct
  * overall modification count.
  */
-static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
+static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+                                       struct btrfs_trans_handle *trans,
                                        struct btrfs_delayed_ref_node *ref,
                                        u64 bytenr, u64 num_bytes,
                                        int action, int is_data)
@@ -437,6 +468,7 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
        ref->action  = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
+       ref->seq = 0;
 
        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
@@ -468,14 +500,17 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
 /*
  * helper to insert a delayed tree ref into the rbtree.
  */
-static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+                                        struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
-                                        u64 ref_root, int level, int action)
+                                        u64 ref_root, int level, int action,
+                                        int for_cow)
 {
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
+       u64 seq = 0;
 
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;
@@ -491,14 +526,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
        ref->is_head = 0;
        ref->in_tree = 1;
 
+       if (need_ref_seq(for_cow, ref_root))
+               seq = inc_delayed_seq(delayed_refs);
+       ref->seq = seq;
+
        full_ref = btrfs_delayed_node_to_tree_ref(ref);
-       if (parent) {
-               full_ref->parent = parent;
+       full_ref->parent = parent;
+       full_ref->root = ref_root;
+       if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
-       } else {
-               full_ref->root = ref_root;
+       else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
-       }
        full_ref->level = level;
 
        trace_btrfs_delayed_tree_ref(ref, full_ref, action);
@@ -522,15 +560,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 /*
  * helper to insert a delayed data ref into the rbtree.
  */
-static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
+static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+                                        struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
                                         u64 ref_root, u64 owner, u64 offset,
-                                        int action)
+                                        int action, int for_cow)
 {
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
+       u64 seq = 0;
 
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;
@@ -546,14 +586,18 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
        ref->is_head = 0;
        ref->in_tree = 1;
 
+       if (need_ref_seq(for_cow, ref_root))
+               seq = inc_delayed_seq(delayed_refs);
+       ref->seq = seq;
+
        full_ref = btrfs_delayed_node_to_data_ref(ref);
-       if (parent) {
-               full_ref->parent = parent;
+       full_ref->parent = parent;
+       full_ref->root = ref_root;
+       if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
-       } else {
-               full_ref->root = ref_root;
+       else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;
-       }
+
        full_ref->objectid = owner;
        full_ref->offset = offset;
 
@@ -580,10 +624,12 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
  * to make sure the delayed ref is eventually processed before this
  * transaction commits.
  */
-int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+                              struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root,  int level, int action,
-                              struct btrfs_delayed_extent_op *extent_op)
+                              struct btrfs_delayed_extent_op *extent_op,
+                              int for_cow)
 {
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
@@ -610,13 +656,17 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
-       ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
-                                  action, 0);
+       ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+                                  num_bytes, action, 0);
        BUG_ON(ret);
 
-       ret = add_delayed_tree_ref(trans, &ref->node, bytenr, num_bytes,
-                                  parent, ref_root, level, action);
+       ret = add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
+                                  num_bytes, parent, ref_root, level, action,
+                                  for_cow);
        BUG_ON(ret);
+       if (!need_ref_seq(for_cow, ref_root) &&
+           waitqueue_active(&delayed_refs->seq_wait))
+               wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
        return 0;
 }
@@ -624,11 +674,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 /*
  * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
  */
-int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+                              struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
-                              struct btrfs_delayed_extent_op *extent_op)
+                              struct btrfs_delayed_extent_op *extent_op,
+                              int for_cow)
 {
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
@@ -655,18 +707,23 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
-       ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
-                                  action, 1);
+       ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+                                  num_bytes, action, 1);
        BUG_ON(ret);
 
-       ret = add_delayed_data_ref(trans, &ref->node, bytenr, num_bytes,
-                                  parent, ref_root, owner, offset, action);
+       ret = add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
+                                  num_bytes, parent, ref_root, owner, offset,
+                                  action, for_cow);
        BUG_ON(ret);
+       if (!need_ref_seq(for_cow, ref_root) &&
+           waitqueue_active(&delayed_refs->seq_wait))
+               wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
        return 0;
 }
 
-int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+                               struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
 {
@@ -683,11 +740,13 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
 
-       ret = add_delayed_ref_head(trans, &head_ref->node, bytenr,
+       ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                                   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                                   extent_op->is_data);
        BUG_ON(ret);
 
+       if (waitqueue_active(&delayed_refs->seq_wait))
+               wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
        return 0;
 }
@@ -704,7 +763,7 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
        struct btrfs_delayed_ref_root *delayed_refs;
 
        delayed_refs = &trans->transaction->delayed_refs;
-       ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
+       ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
        if (ref)
                return btrfs_delayed_node_to_head(ref);
        return NULL;
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index e287e3b0eab0d970d37f0f4c70fd688b22f276ac..d8f244d9492511e3b108b26bcf4da1bc9fbf6826 100644 (file)
@@ -33,6 +33,9 @@ struct btrfs_delayed_ref_node {
        /* the size of the extent */
        u64 num_bytes;
 
+       /* seq number to keep track of insertion order */
+       u64 seq;
+
        /* ref count on this data structure */
        atomic_t refs;
 
@@ -98,19 +101,15 @@ struct btrfs_delayed_ref_head {
 
 struct btrfs_delayed_tree_ref {
        struct btrfs_delayed_ref_node node;
-       union {
-               u64 root;
-               u64 parent;
-       };
+       u64 root;
+       u64 parent;
        int level;
 };
 
 struct btrfs_delayed_data_ref {
        struct btrfs_delayed_ref_node node;
-       union {
-               u64 root;
-               u64 parent;
-       };
+       u64 root;
+       u64 parent;
        u64 objectid;
        u64 offset;
 };
@@ -140,6 +139,26 @@ struct btrfs_delayed_ref_root {
        int flushing;
 
        u64 run_delayed_start;
+
+       /*
+        * seq number of delayed refs. We need to know if a backref was being
+        * added before the currently processed ref or afterwards.
+        */
+       u64 seq;
+
+       /*
+        * seq_list holds a list of all seq numbers that are currently being
+        * added to the list. While walking backrefs (btrfs_find_all_roots,
+        * qgroups), which might take some time, no newer ref must be processed,
+        * as it might influence the outcome of the walk.
+        */
+       struct list_head seq_head;
+
+       /*
+        * when the only refs we have in the list must not be processed, we want
+        * to wait for more refs to show up or for the end of backref walking.
+        */
+       wait_queue_head_t seq_wait;
 };
 
 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
@@ -151,16 +170,21 @@ static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
        }
 }
 
-int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+                              struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
-                              struct btrfs_delayed_extent_op *extent_op);
-int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+                              struct btrfs_delayed_extent_op *extent_op,
+                              int for_cow);
+int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+                              struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
-                              struct btrfs_delayed_extent_op *extent_op);
-int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
+                              struct btrfs_delayed_extent_op *extent_op,
+                              int for_cow);
+int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+                               struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op);
 
@@ -170,6 +194,60 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head);
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 search_start);
+
+struct seq_list {
+       struct list_head list;
+       u64 seq;
+};
+
+static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
+{
+       assert_spin_locked(&delayed_refs->lock);
+       ++delayed_refs->seq;
+       return delayed_refs->seq;
+}
+
+static inline void
+btrfs_get_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+                     struct seq_list *elem)
+{
+       assert_spin_locked(&delayed_refs->lock);
+       elem->seq = delayed_refs->seq;
+       list_add_tail(&elem->list, &delayed_refs->seq_head);
+}
+
+static inline void
+btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+                     struct seq_list *elem)
+{
+       spin_lock(&delayed_refs->lock);
+       list_del(&elem->list);
+       wake_up(&delayed_refs->seq_wait);
+       spin_unlock(&delayed_refs->lock);
+}
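Taken together, the helpers suggest the following usage pattern for a backref walker; a minimal sketch under that assumption (the walker itself is hypothetical):

    struct seq_list elem;

    spin_lock(&delayed_refs->lock);
    btrfs_get_delayed_seq(delayed_refs, &elem);   /* caller must hold the lock */
    spin_unlock(&delayed_refs->lock);

    /* ... walk backrefs; btrfs_check_delayed_seq() now holds back any
     * delayed ref whose seq is >= elem.seq until the walk finishes ... */

    btrfs_put_delayed_seq(delayed_refs, &elem);   /* takes the lock itself */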
+
+int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+                           u64 seq);
+
+/*
+ * delayed refs with a ref_seq > 0 must be held back during backref walking.
+ * This only applies to items in one of the fs-trees; for_cow items never need
+ * to be held back, so they won't get a ref_seq number.
+ */
+static inline int need_ref_seq(int for_cow, u64 rootid)
+{
+       if (for_cow)
+               return 0;
+
+       if (rootid == BTRFS_FS_TREE_OBJECTID)
+               return 1;
+
+       if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
+               return 1;
+
+       return 0;
+}
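Reading the checks, a few illustrative outcomes (objectid constants taken from ctree.h):

    /*
     *   need_ref_seq(0, BTRFS_FS_TREE_OBJECTID)        -> 1  (the fs tree)
     *   need_ref_seq(0, BTRFS_FIRST_FREE_OBJECTID + 1) -> 1  (a subvolume)
     *   need_ref_seq(1, any_rootid)                    -> 0  (for_cow ref)
     *   need_ref_seq(0, BTRFS_EXTENT_TREE_OBJECTID)    -> 0  (internal tree)
     *
     * The (s64) cast keeps special objectids stored as negative u64 values
     * (e.g. BTRFS_TREE_RELOC_OBJECTID == (u64)-8) below FIRST_FREE.
     */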
+
 /*
  * a node might live in a head or a regular ref, this lets you
  * test for the proper type to use.
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index d8525662ca7a721b18e197191816ea16b095eb19..7aa9cd36bf1b496efd2a02e7cc86c1a08a29c663 100644 (file)
@@ -43,6 +43,7 @@
 #include "tree-log.h"
 #include "free-space-cache.h"
 #include "inode-map.h"
+#include "check-integrity.h"
 
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
@@ -1143,7 +1144,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        root->orphan_item_inserted = 0;
        root->orphan_cleanup_state = 0;
 
-       root->fs_info = fs_info;
        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_objectid = 0;
@@ -1217,6 +1217,14 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
        return 0;
 }
 
+static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
+       if (root)
+               root->fs_info = fs_info;
+       return root;
+}
+
 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
 {
@@ -1224,7 +1232,7 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct extent_buffer *leaf;
 
-       root = kzalloc(sizeof(*root), GFP_NOFS);
+       root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);
 
@@ -1244,7 +1252,8 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
        root->ref_cows = 0;
 
        leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
-                                     BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0);
+                                     BTRFS_TREE_LOG_OBJECTID, NULL,
+                                     0, 0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
@@ -1318,7 +1327,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
        u32 blocksize;
        int ret = 0;
 
-       root = kzalloc(sizeof(*root), GFP_NOFS);
+       root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
@@ -1874,9 +1883,9 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
 }
 
 
-struct btrfs_root *open_ctree(struct super_block *sb,
-                             struct btrfs_fs_devices *fs_devices,
-                             char *options)
+int open_ctree(struct super_block *sb,
+              struct btrfs_fs_devices *fs_devices,
+              char *options)
 {
        u32 sectorsize;
        u32 nodesize;
@@ -1888,8 +1897,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        struct btrfs_key location;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
-       struct btrfs_root *tree_root = btrfs_sb(sb);
-       struct btrfs_fs_info *fs_info = tree_root->fs_info;
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       struct btrfs_root *tree_root;
        struct btrfs_root *extent_root;
        struct btrfs_root *csum_root;
        struct btrfs_root *chunk_root;
@@ -1900,16 +1909,14 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        int num_backups_tried = 0;
        int backup_index = 0;
 
-       extent_root = fs_info->extent_root =
-               kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
-       csum_root = fs_info->csum_root =
-               kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
-       chunk_root = fs_info->chunk_root =
-               kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
-       dev_root = fs_info->dev_root =
-               kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+       tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
+       extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
+       csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
+       chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
+       dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
 
-       if (!extent_root || !csum_root || !chunk_root || !dev_root) {
+       if (!tree_root || !extent_root || !csum_root ||
+           !chunk_root || !dev_root) {
                err = -ENOMEM;
                goto fail;
        }
@@ -1998,6 +2005,17 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        init_waitqueue_head(&fs_info->scrub_pause_wait);
        init_rwsem(&fs_info->scrub_super_lock);
        fs_info->scrub_workers_refcnt = 0;
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+       fs_info->check_integrity_print_mask = 0;
+#endif
+
+       spin_lock_init(&fs_info->balance_lock);
+       mutex_init(&fs_info->balance_mutex);
+       atomic_set(&fs_info->balance_running, 0);
+       atomic_set(&fs_info->balance_pause_req, 0);
+       atomic_set(&fs_info->balance_cancel_req, 0);
+       fs_info->balance_ctl = NULL;
+       init_waitqueue_head(&fs_info->balance_wait_q);
 
        sb->s_blocksize = 4096;
        sb->s_blocksize_bits = blksize_bits(4096);
@@ -2267,9 +2285,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
           (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
           BTRFS_UUID_SIZE);
 
-       mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_chunk_tree(chunk_root);
-       mutex_unlock(&fs_info->chunk_mutex);
        if (ret) {
                printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
                       sb->s_id);
@@ -2318,9 +2334,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
        fs_info->generation = generation;
        fs_info->last_trans_committed = generation;
-       fs_info->data_alloc_profile = (u64)-1;
-       fs_info->metadata_alloc_profile = (u64)-1;
-       fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
 
        ret = btrfs_init_space_info(fs_info);
        if (ret) {
@@ -2353,6 +2366,19 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                btrfs_set_opt(fs_info->mount_opt, SSD);
        }
 
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+       if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
+               ret = btrfsic_mount(tree_root, fs_devices,
+                                   btrfs_test_opt(tree_root,
+                                       CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
+                                   1 : 0,
+                                   fs_info->check_integrity_print_mask);
+               if (ret)
+                       printk(KERN_WARNING "btrfs: failed to initialize"
+                              " integrity check module %s\n", sb->s_id);
+       }
+#endif
+
        /* do not make disk changes in broken FS */
        if (btrfs_super_log_root(disk_super) != 0 &&
            !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
@@ -2368,7 +2394,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                     btrfs_level_size(tree_root,
                                      btrfs_super_log_root_level(disk_super));
 
-               log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+               log_tree_root = btrfs_alloc_root(fs_info);
                if (!log_tree_root) {
                        err = -ENOMEM;
                        goto fail_trans_kthread;
@@ -2423,13 +2449,17 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                if (!err)
                        err = btrfs_orphan_cleanup(fs_info->tree_root);
                up_read(&fs_info->cleanup_work_sem);
+
+               if (!err)
+                       err = btrfs_recover_balance(fs_info->tree_root);
+
                if (err) {
                        close_ctree(tree_root);
-                       return ERR_PTR(err);
+                       return err;
                }
        }
 
-       return tree_root;
+       return 0;
 
 fail_trans_kthread:
        kthread_stop(fs_info->transaction_kthread);
@@ -2475,8 +2505,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        cleanup_srcu_struct(&fs_info->subvol_srcu);
 fail:
        btrfs_close_devices(fs_info->fs_devices);
-       free_fs_info(fs_info);
-       return ERR_PTR(err);
+       return err;
 
 recovery_tree_root:
        if (!btrfs_test_opt(tree_root, RECOVERY))
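With open_ctree() returning an int and btrfs_sb() now yielding the fs_info directly, the mount path in super.c presumably shrinks to something like this sketch (error handling abbreviated; fs_info teardown is assumed to stay with the caller, since the free_fs_info() calls are removed from open_ctree/close_ctree):

    struct btrfs_fs_info *fs_info = btrfs_sb(sb);
    int err;

    err = open_ctree(sb, fs_devices, options);
    if (err)
            return err;     /* freeing fs_info is the caller's job now */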
@@ -2631,7 +2660,7 @@ static int write_dev_supers(struct btrfs_device *device,
                 * we fua the first super.  The others we allow
                 * to go down lazy.
                 */
-               ret = submit_bh(WRITE_FUA, bh);
+               ret = btrfsic_submit_bh(WRITE_FUA, bh);
                if (ret)
                        errors++;
        }
@@ -2708,7 +2737,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
        device->flush_bio = bio;
 
        bio_get(bio);
-       submit_bio(WRITE_FLUSH, bio);
+       btrfsic_submit_bio(WRITE_FLUSH, bio);
 
        return 0;
 }
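write_dev_supers() and write_dev_flush() now route superblock and flush I/O through btrfsic_* wrappers from the new check-integrity code. A minimal sketch of the assumed wrapper shape (the real implementation lives in check-integrity.c, not in these hunks):

    int btrfsic_submit_bh(int rw, struct buffer_head *bh)
    {
            /* when CHECK_INTEGRITY is active, record/verify the block
             * against the integrity checker's state first ... */
            return submit_bh(rw, bh);
    }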
@@ -2972,6 +3001,9 @@ int close_ctree(struct btrfs_root *root)
        fs_info->closing = 1;
        smp_mb();
 
+       /* pause restriper - we want to resume on mount */
+       btrfs_pause_balance(root->fs_info);
+
        btrfs_scrub_cancel(root);
 
        /* wait for any defraggers to finish */
@@ -2979,7 +3011,7 @@ int close_ctree(struct btrfs_root *root)
                   (atomic_read(&fs_info->defrag_running) == 0));
 
        /* clear out the rbtree of defraggable inodes */
-       btrfs_run_defrag_inodes(root->fs_info);
+       btrfs_run_defrag_inodes(fs_info);
 
        /*
         * Here come 2 situations when btrfs is broken to flip readonly:
@@ -3008,8 +3040,8 @@ int close_ctree(struct btrfs_root *root)
 
        btrfs_put_block_group_cache(fs_info);
 
-       kthread_stop(root->fs_info->transaction_kthread);
-       kthread_stop(root->fs_info->cleaner_kthread);
+       kthread_stop(fs_info->transaction_kthread);
+       kthread_stop(fs_info->cleaner_kthread);
 
        fs_info->closing = 2;
        smp_mb();
@@ -3027,14 +3059,14 @@ int close_ctree(struct btrfs_root *root)
        free_extent_buffer(fs_info->extent_root->commit_root);
        free_extent_buffer(fs_info->tree_root->node);
        free_extent_buffer(fs_info->tree_root->commit_root);
-       free_extent_buffer(root->fs_info->chunk_root->node);
-       free_extent_buffer(root->fs_info->chunk_root->commit_root);
-       free_extent_buffer(root->fs_info->dev_root->node);
-       free_extent_buffer(root->fs_info->dev_root->commit_root);
-       free_extent_buffer(root->fs_info->csum_root->node);
-       free_extent_buffer(root->fs_info->csum_root->commit_root);
+       free_extent_buffer(fs_info->chunk_root->node);
+       free_extent_buffer(fs_info->chunk_root->commit_root);
+       free_extent_buffer(fs_info->dev_root->node);
+       free_extent_buffer(fs_info->dev_root->commit_root);
+       free_extent_buffer(fs_info->csum_root->node);
+       free_extent_buffer(fs_info->csum_root->commit_root);
 
-       btrfs_free_block_groups(root->fs_info);
+       btrfs_free_block_groups(fs_info);
 
        del_fs_roots(fs_info);
 
@@ -3054,14 +3086,17 @@ int close_ctree(struct btrfs_root *root)
        btrfs_stop_workers(&fs_info->caching_workers);
        btrfs_stop_workers(&fs_info->readahead_workers);
 
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+       if (btrfs_test_opt(root, CHECK_INTEGRITY))
+               btrfsic_unmount(root, fs_info->fs_devices);
+#endif
+
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
 
        bdi_destroy(&fs_info->bdi);
        cleanup_srcu_struct(&fs_info->subvol_srcu);
 
-       free_fs_info(fs_info);
-
        return 0;
 }
 
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index c99d0a8f13fa2f38642f7826cb00c45070ac95ae..e4bc4741319bd3a1e094de03566feeb360d35e4d 100644 (file)
@@ -46,9 +46,9 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                   u64 bytenr, u32 blocksize);
 int clean_tree_block(struct btrfs_trans_handle *trans,
                     struct btrfs_root *root, struct extent_buffer *buf);
-struct btrfs_root *open_ctree(struct super_block *sb,
-                             struct btrfs_fs_devices *fs_devices,
-                             char *options);
+int open_ctree(struct super_block *sb,
+              struct btrfs_fs_devices *fs_devices,
+              char *options);
 int close_ctree(struct btrfs_root *root);
 int write_ctree_super(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, int max_mirrors);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 1b8dc33778f9c411206cc6ad9467546412431947..5f77166fd01c7eb3d33cb78666e31822fc0a1083 100644 (file)
@@ -67,7 +67,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
                                       u64 root_objectid, u32 generation,
                                       int check_generation)
 {
-       struct btrfs_fs_info *fs_info = btrfs_sb(sb)->fs_info;
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
        struct btrfs_root *root;
        struct inode *inode;
        struct btrfs_key key;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f5fbe576d2baf48519a01bd449344b49edffa070..700879ed64cfcc73f43f6e03819e579578db1e57 100644 (file)
@@ -618,8 +618,7 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;
 
-       flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
-                BTRFS_BLOCK_GROUP_METADATA;
+       flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
 
        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
@@ -1872,20 +1871,24 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes, u64 parent,
-                        u64 root_objectid, u64 owner, u64 offset)
+                        u64 root_objectid, u64 owner, u64 offset, int for_cow)
 {
        int ret;
+       struct btrfs_fs_info *fs_info = root->fs_info;
+
        BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
               root_objectid == BTRFS_TREE_LOG_OBJECTID);
 
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
-               ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
+               ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+                                       num_bytes,
                                        parent, root_objectid, (int)owner,
-                                       BTRFS_ADD_DELAYED_REF, NULL);
+                                       BTRFS_ADD_DELAYED_REF, NULL, for_cow);
        } else {
-               ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
+               ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+                                       num_bytes,
                                        parent, root_objectid, owner, offset,
-                                       BTRFS_ADD_DELAYED_REF, NULL);
+                                       BTRFS_ADD_DELAYED_REF, NULL, for_cow);
        }
        return ret;
 }
@@ -2232,6 +2235,28 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                        }
                }
 
+               /*
+                * locked_ref is the head node, so we have to go one
+                * node back for any delayed ref updates
+                */
+               ref = select_delayed_ref(locked_ref);
+
+               if (ref && ref->seq &&
+                   btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
+                       /*
+                        * there are still refs with lower seq numbers in the
+                        * process of being added. Don't run this ref yet.
+                        */
+                       list_del_init(&locked_ref->cluster);
+                       mutex_unlock(&locked_ref->mutex);
+                       locked_ref = NULL;
+                       delayed_refs->num_heads_ready++;
+                       spin_unlock(&delayed_refs->lock);
+                       cond_resched();
+                       spin_lock(&delayed_refs->lock);
+                       continue;
+               }
+
                /*
                 * record the must insert reserved flag before we
                 * drop the spin lock.
@@ -2242,11 +2267,6 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                extent_op = locked_ref->extent_op;
                locked_ref->extent_op = NULL;
 
-               /*
-                * locked_ref is the head node, so we have to go one
-                * node back for any delayed ref updates
-                */
-               ref = select_delayed_ref(locked_ref);
                if (!ref) {
                        /* All delayed refs have been processed, go ahead
                         * and send the head node to run_one_delayed_ref,
@@ -2267,9 +2287,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                                BUG_ON(ret);
                                kfree(extent_op);
 
-                               cond_resched();
-                               spin_lock(&delayed_refs->lock);
-                               continue;
+                               goto next;
                        }
 
                        list_del_init(&locked_ref->cluster);
@@ -2279,7 +2297,12 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                ref->in_tree = 0;
                rb_erase(&ref->rb_node, &delayed_refs->root);
                delayed_refs->num_entries--;
-
+               /*
+                * we modified num_entries, but as we're currently running
+                * delayed refs, skip
+                *     wake_up(&delayed_refs->seq_wait);
+                * here.
+                */
                spin_unlock(&delayed_refs->lock);
 
                ret = run_one_delayed_ref(trans, root, ref, extent_op,
@@ -2289,13 +2312,34 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                btrfs_put_delayed_ref(ref);
                kfree(extent_op);
                count++;
-
+next:
+               do_chunk_alloc(trans, root->fs_info->extent_root,
+                              2 * 1024 * 1024,
+                              btrfs_get_alloc_profile(root, 0),
+                              CHUNK_ALLOC_NO_FORCE);
                cond_resched();
                spin_lock(&delayed_refs->lock);
        }
        return count;
 }
 
+
+static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
+                       unsigned long num_refs)
+{
+       struct list_head *first_seq = delayed_refs->seq_head.next;
+
+       spin_unlock(&delayed_refs->lock);
+       pr_debug("waiting for more refs (num %ld, first %p)\n",
+                num_refs, first_seq);
+       wait_event(delayed_refs->seq_wait,
+                  num_refs != delayed_refs->num_entries ||
+                  delayed_refs->seq_head.next != first_seq);
+       pr_debug("done waiting for more refs (num %ld, first %p)\n",
+                delayed_refs->num_entries, delayed_refs->seq_head.next);
+       spin_lock(&delayed_refs->lock);
+}
+
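wait_for_more_refs() is driven by a small two-strike state machine in btrfs_run_delayed_refs(); condensed from the hunks below (a paraphrase, not patch text):

    /*
     * if (btrfs_find_ref_cluster wrapped past run_delayed_start) {
     *         if (!consider_waiting) {
     *                 consider_waiting = 1;    first wrap: remember count
     *                 num_refs = delayed_refs->num_entries;
     *         } else {
     *                 wait_for_more_refs(delayed_refs, num_refs);
     *                 consider_waiting = 0;    second wrap: block, not spin
     *         }
     * }
     * Running any refs (or run_delayed_start reaching 0) resets
     * consider_waiting, so the loop sleeps instead of busy-waiting.
     */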
 /*
  * this starts processing the delayed reference count updates and
  * extent insertions we have queued up so far.  count can be
@@ -2311,15 +2355,23 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_node *ref;
        struct list_head cluster;
        int ret;
+       u64 delayed_start;
        int run_all = count == (unsigned long)-1;
        int run_most = 0;
+       unsigned long num_refs = 0;
+       int consider_waiting;
 
        if (root == root->fs_info->extent_root)
                root = root->fs_info->tree_root;
 
+       do_chunk_alloc(trans, root->fs_info->extent_root,
+                      2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
+                      CHUNK_ALLOC_NO_FORCE);
+
        delayed_refs = &trans->transaction->delayed_refs;
        INIT_LIST_HEAD(&cluster);
 again:
+       consider_waiting = 0;
        spin_lock(&delayed_refs->lock);
        if (count == 0) {
                count = delayed_refs->num_entries * 2;
@@ -2336,11 +2388,35 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                 * of refs to process starting at the first one we are able to
                 * lock
                 */
+               delayed_start = delayed_refs->run_delayed_start;
                ret = btrfs_find_ref_cluster(trans, &cluster,
                                             delayed_refs->run_delayed_start);
                if (ret)
                        break;
 
+               if (delayed_start >= delayed_refs->run_delayed_start) {
+                       if (consider_waiting == 0) {
+                               /*
+                                * btrfs_find_ref_cluster looped. Let's do one
+                                * more cycle. If we don't run any delayed ref
+                                * during that cycle (because all of them are
+                                * blocked) and the number of refs doesn't
+                                * change, we avoid busy waiting.
+                                */
+                               consider_waiting = 1;
+                               num_refs = delayed_refs->num_entries;
+                       } else {
+                               wait_for_more_refs(delayed_refs, num_refs);
+                               /*
+                                * After waiting, things have changed: we
+                                * dropped the lock and someone else might have
+                                * run some refs, built new clusters and so on.
+                                * Therefore, we restart staleness detection.
+                                */
+                               consider_waiting = 0;
+                       }
+               }
+
                ret = run_clustered_refs(trans, root, &cluster);
                BUG_ON(ret < 0);
 
@@ -2348,6 +2424,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 
                if (count == 0)
                        break;
+
+               if (ret || delayed_refs->run_delayed_start == 0) {
+                       /* refs were run, let's reset staleness detection */
+                       consider_waiting = 0;
+               }
        }
 
        if (run_all) {
@@ -2405,7 +2486,8 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
        extent_op->update_key = 0;
        extent_op->is_data = is_data ? 1 : 0;
 
-       ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
+       ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
+                                         num_bytes, extent_op);
        if (ret)
                kfree(extent_op);
        return ret;
@@ -2590,7 +2672,7 @@ int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
-                          int full_backref, int inc)
+                          int full_backref, int inc, int for_cow)
 {
        u64 bytenr;
        u64 num_bytes;
@@ -2603,7 +2685,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
        int level;
        int ret = 0;
        int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
-                           u64, u64, u64, u64, u64, u64);
+                           u64, u64, u64, u64, u64, u64, int);
 
        ref_root = btrfs_header_owner(buf);
        nritems = btrfs_header_nritems(buf);
@@ -2640,14 +2722,15 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
                        key.offset -= btrfs_file_extent_offset(buf, fi);
                        ret = process_func(trans, root, bytenr, num_bytes,
                                           parent, ref_root, key.objectid,
-                                          key.offset);
+                                          key.offset, for_cow);
                        if (ret)
                                goto fail;
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        num_bytes = btrfs_level_size(root, level - 1);
                        ret = process_func(trans, root, bytenr, num_bytes,
-                                          parent, ref_root, level - 1, 0);
+                                          parent, ref_root, level - 1, 0,
+                                          for_cow);
                        if (ret)
                                goto fail;
                }
@@ -2659,15 +2742,15 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                 struct extent_buffer *buf, int full_backref)
+                 struct extent_buffer *buf, int full_backref, int for_cow)
 {
-       return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
+       return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
 }
 
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                 struct extent_buffer *buf, int full_backref)
+                 struct extent_buffer *buf, int full_backref, int for_cow)
 {
-       return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
+       return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
 }
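A hypothetical call site, to make the new last argument concrete (buffer name invented): when COWing a block in a fs-tree, the caller would pass for_cow = 1 so the resulting delayed refs skip seq tracking per need_ref_seq():

    ret = btrfs_inc_ref(trans, root, cow_buf, 1 /* full_backref */,
                        1 /* for_cow */);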
 
 static int write_one_cache_group(struct btrfs_trans_handle *trans,
@@ -2993,9 +3076,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                INIT_LIST_HEAD(&found->block_groups[i]);
        init_rwsem(&found->groups_sem);
        spin_lock_init(&found->lock);
-       found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
-                               BTRFS_BLOCK_GROUP_SYSTEM |
-                               BTRFS_BLOCK_GROUP_METADATA);
+       found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
        found->total_bytes = total_bytes;
        found->disk_total = total_bytes * factor;
        found->bytes_used = bytes_used;
@@ -3016,20 +3097,27 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 
 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
-       u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
-                                  BTRFS_BLOCK_GROUP_RAID1 |
-                                  BTRFS_BLOCK_GROUP_RAID10 |
-                                  BTRFS_BLOCK_GROUP_DUP);
-       if (extra_flags) {
-               if (flags & BTRFS_BLOCK_GROUP_DATA)
-                       fs_info->avail_data_alloc_bits |= extra_flags;
-               if (flags & BTRFS_BLOCK_GROUP_METADATA)
-                       fs_info->avail_metadata_alloc_bits |= extra_flags;
-               if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
-                       fs_info->avail_system_alloc_bits |= extra_flags;
-       }
+       u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+       /* chunk -> extended profile */
+       if (extra_flags == 0)
+               extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+       if (flags & BTRFS_BLOCK_GROUP_DATA)
+               fs_info->avail_data_alloc_bits |= extra_flags;
+       if (flags & BTRFS_BLOCK_GROUP_METADATA)
+               fs_info->avail_metadata_alloc_bits |= extra_flags;
+       if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+               fs_info->avail_system_alloc_bits |= extra_flags;
 }
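A "single" allocation has no profile bit in the on-disk chunk format; that is the gap BTRFS_AVAIL_ALLOC_BIT_SINGLE fills in the extended format. An illustrative round trip:

    u64 extended = BTRFS_BLOCK_GROUP_DATA | BTRFS_AVAIL_ALLOC_BIT_SINGLE;
    u64 chunk    = extended & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
                   /* == BTRFS_BLOCK_GROUP_DATA, as stored on disk */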
 
+/*
+ * @flags: available profiles in extended format (see ctree.h)
+ *
+ * Returns the reduced profile in chunk format.  If a profile change is in
+ * progress (either running or paused), this picks the target profile if it's
+ * already available, otherwise it falls back to plain reducing.
+ */
 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 {
        /*
@@ -3040,6 +3128,34 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
        u64 num_devices = root->fs_info->fs_devices->rw_devices +
                root->fs_info->fs_devices->missing_devices;
 
+       /* pick restriper's target profile if it's available */
+       spin_lock(&root->fs_info->balance_lock);
+       if (root->fs_info->balance_ctl) {
+               struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
+               u64 tgt = 0;
+
+               if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
+                   (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+                   (flags & bctl->data.target)) {
+                       tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
+               } else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
+                          (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+                          (flags & bctl->sys.target)) {
+                       tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
+               } else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
+                          (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+                          (flags & bctl->meta.target)) {
+                       tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
+               }
+
+               if (tgt) {
+                       spin_unlock(&root->fs_info->balance_lock);
+                       flags = tgt;
+                       goto out;
+               }
+       }
+       spin_unlock(&root->fs_info->balance_lock);
+
        if (num_devices == 1)
                flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
        if (num_devices < 4)
@@ -3059,22 +3175,25 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
        if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
            ((flags & BTRFS_BLOCK_GROUP_RAID1) |
             (flags & BTRFS_BLOCK_GROUP_RAID10) |
-            (flags & BTRFS_BLOCK_GROUP_DUP)))
+            (flags & BTRFS_BLOCK_GROUP_DUP))) {
                flags &= ~BTRFS_BLOCK_GROUP_RAID0;
+       }
+
+out:
+       /* extended -> chunk profile */
+       flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
        return flags;
 }
 
 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
 {
        if (flags & BTRFS_BLOCK_GROUP_DATA)
-               flags |= root->fs_info->avail_data_alloc_bits &
-                        root->fs_info->data_alloc_profile;
+               flags |= root->fs_info->avail_data_alloc_bits;
        else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
-               flags |= root->fs_info->avail_system_alloc_bits &
-                        root->fs_info->system_alloc_profile;
+               flags |= root->fs_info->avail_system_alloc_bits;
        else if (flags & BTRFS_BLOCK_GROUP_METADATA)
-               flags |= root->fs_info->avail_metadata_alloc_bits &
-                        root->fs_info->metadata_alloc_profile;
+               flags |= root->fs_info->avail_metadata_alloc_bits;
+
        return btrfs_reduce_alloc_profile(root, flags);
 }
 
@@ -3191,6 +3310,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
                return -ENOSPC;
        }
        data_sinfo->bytes_may_use += bytes;
+       trace_btrfs_space_reservation(root->fs_info, "space_info",
+                                     (u64)data_sinfo, bytes, 1);
        spin_unlock(&data_sinfo->lock);
 
        return 0;
@@ -3210,6 +3331,8 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
        data_sinfo = BTRFS_I(inode)->space_info;
        spin_lock(&data_sinfo->lock);
        data_sinfo->bytes_may_use -= bytes;
+       trace_btrfs_space_reservation(root->fs_info, "space_info",
+                                     (u64)data_sinfo, bytes, 0);
        spin_unlock(&data_sinfo->lock);
 }
 
@@ -3257,27 +3380,15 @@ static int should_alloc_chunk(struct btrfs_root *root,
                if (num_bytes - num_allocated < thresh)
                        return 1;
        }
-
-       /*
-        * we have two similar checks here, one based on percentage
-        * and once based on a hard number of 256MB.  The idea
-        * is that if we have a good amount of free
-        * room, don't allocate a chunk.  A good mount is
-        * less than 80% utilized of the chunks we have allocated,
-        * or more than 256MB free
-        */
-       if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
-               return 0;
-
-       if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
-               return 0;
-
        thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
 
-       /* 256MB or 5% of the FS */
-       thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
+       /* 256MB or 2% of the FS */
+       thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));
+       /* system chunks need a much smaller threshold */
+       if (sinfo->flags & BTRFS_BLOCK_GROUP_SYSTEM)
+               thresh = 32 * 1024 * 1024;
 
-       if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
+       if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
                return 0;
        return 1;
 }
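Worked numbers for the new thresholds, assuming div_factor_fine(x, 2) computes 2% and div_factor(x, 8) computes 80%:

    /*
     *   1 TiB filesystem:  thresh = max(256 MiB, ~20.5 GiB) = ~20.5 GiB
     *   system space_info: thresh forced down to 32 MiB
     * A chunk allocation is skipped once the space_info is larger than
     * thresh while less than 80% of it is actually used.
     */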
@@ -3291,7 +3402,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
        int wait_for_alloc = 0;
        int ret = 0;
 
-       flags = btrfs_reduce_alloc_profile(extent_root, flags);
+       BUG_ON(!profile_is_valid(flags, 0));
 
        space_info = __find_space_info(extent_root->fs_info, flags);
        if (!space_info) {
@@ -3582,6 +3693,10 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
        if (used <= space_info->total_bytes) {
                if (used + orig_bytes <= space_info->total_bytes) {
                        space_info->bytes_may_use += orig_bytes;
+                       trace_btrfs_space_reservation(root->fs_info,
+                                                     "space_info",
+                                                     (u64)space_info,
+                                                     orig_bytes, 1);
                        ret = 0;
                } else {
                        /*
@@ -3649,6 +3764,10 @@ static int reserve_metadata_bytes(struct btrfs_root *root,
 
                if (used + num_bytes < space_info->total_bytes + avail) {
                        space_info->bytes_may_use += orig_bytes;
+                       trace_btrfs_space_reservation(root->fs_info,
+                                                     "space_info",
+                                                     (u64)space_info,
+                                                     orig_bytes, 1);
                        ret = 0;
                } else {
                        wait_ordered = true;
@@ -3755,7 +3874,8 @@ static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
        spin_unlock(&block_rsv->lock);
 }
 
-static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
+static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
+                                   struct btrfs_block_rsv *block_rsv,
                                    struct btrfs_block_rsv *dest, u64 num_bytes)
 {
        struct btrfs_space_info *space_info = block_rsv->space_info;
@@ -3791,6 +3911,9 @@ static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
                if (num_bytes) {
                        spin_lock(&space_info->lock);
                        space_info->bytes_may_use -= num_bytes;
+                       trace_btrfs_space_reservation(fs_info, "space_info",
+                                                     (u64)space_info,
+                                                     num_bytes, 0);
                        space_info->reservation_progress++;
                        spin_unlock(&space_info->lock);
                }
@@ -3947,7 +4070,8 @@ void btrfs_block_rsv_release(struct btrfs_root *root,
        if (global_rsv->full || global_rsv == block_rsv ||
            block_rsv->space_info != global_rsv->space_info)
                global_rsv = NULL;
-       block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
+       block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
+                               num_bytes);
 }
 
 /*
@@ -4006,11 +4130,15 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
                num_bytes = sinfo->total_bytes - num_bytes;
                block_rsv->reserved += num_bytes;
                sinfo->bytes_may_use += num_bytes;
+               trace_btrfs_space_reservation(fs_info, "space_info",
+                                             (u64)sinfo, num_bytes, 1);
        }
 
        if (block_rsv->reserved >= block_rsv->size) {
                num_bytes = block_rsv->reserved - block_rsv->size;
                sinfo->bytes_may_use -= num_bytes;
+               trace_btrfs_space_reservation(fs_info, "space_info",
+                                             (u64)sinfo, num_bytes, 0);
                sinfo->reservation_progress++;
                block_rsv->reserved = block_rsv->size;
                block_rsv->full = 1;
@@ -4045,7 +4173,8 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
 
 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
 {
-       block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
+       block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
+                               (u64)-1);
        WARN_ON(fs_info->delalloc_block_rsv.size > 0);
        WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
        WARN_ON(fs_info->trans_block_rsv.size > 0);
@@ -4062,6 +4191,8 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
        if (!trans->bytes_reserved)
                return;
 
+       trace_btrfs_space_reservation(root->fs_info, "transaction", (u64)trans,
+                                     trans->bytes_reserved, 0);
        btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
        trans->bytes_reserved = 0;
 }
@@ -4079,6 +4210,8 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
         * when we are truly done with the orphan item.
         */
        u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+       trace_btrfs_space_reservation(root->fs_info, "orphan",
+                                     btrfs_ino(inode), num_bytes, 1);
        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
 }
 
@@ -4086,6 +4219,8 @@ void btrfs_orphan_release_metadata(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+       trace_btrfs_space_reservation(root->fs_info, "orphan",
+                                     btrfs_ino(inode), num_bytes, 0);
        btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
 }
 
@@ -4213,12 +4348,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        /* Need to be holding the i_mutex here if we aren't free space cache */
        if (btrfs_is_free_space_inode(root, inode))
                flush = 0;
-       else
-               WARN_ON(!mutex_is_locked(&inode->i_mutex));
 
        if (flush && btrfs_transaction_in_commit(root->fs_info))
                schedule_timeout(1);
 
+       mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
        num_bytes = ALIGN(num_bytes, root->sectorsize);
 
        spin_lock(&BTRFS_I(inode)->lock);
@@ -4266,8 +4400,14 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
                if (dropped)
                        to_free += btrfs_calc_trans_metadata_size(root, dropped);
 
-               if (to_free)
+               if (to_free) {
                        btrfs_block_rsv_release(root, block_rsv, to_free);
+                       trace_btrfs_space_reservation(root->fs_info,
+                                                     "delalloc",
+                                                     btrfs_ino(inode),
+                                                     to_free, 0);
+               }
+               mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
                return ret;
        }
 
@@ -4278,7 +4418,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        }
        BTRFS_I(inode)->reserved_extents += nr_extents;
        spin_unlock(&BTRFS_I(inode)->lock);
+       mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
 
+       if (to_reserve)
+               trace_btrfs_space_reservation(root->fs_info,"delalloc",
+                                             btrfs_ino(inode), to_reserve, 1);
        block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
        return 0;
@@ -4308,6 +4452,8 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
        if (dropped > 0)
                to_free += btrfs_calc_trans_metadata_size(root, dropped);
 
+       trace_btrfs_space_reservation(root->fs_info, "delalloc",
+                                     btrfs_ino(inode), to_free, 0);
        btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
                                to_free);
 }
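btrfs_delalloc_reserve_metadata() now serializes per-inode metadata reservations with a dedicated delalloc_mutex rather than asserting that the caller holds i_mutex. The mutex must be dropped on every exit, including the error path that hands back a partial reservation; reduced to its locking skeleton (a sketch, with try_reserve() standing in for the real outstanding-extent accounting):

	int sketch_delalloc_reserve(struct inode *inode, u64 num_bytes)
	{
		int ret;

		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
		ret = try_reserve(inode, num_bytes);	/* hypothetical helper */
		if (ret) {
			/* give back anything partially accounted, then unlock */
			mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
			return ret;
		}
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
		return 0;
	}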
@@ -4562,7 +4708,10 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                        cache->reserved += num_bytes;
                        space_info->bytes_reserved += num_bytes;
                        if (reserve == RESERVE_ALLOC) {
-                               BUG_ON(space_info->bytes_may_use < num_bytes);
+                               trace_btrfs_space_reservation(cache->fs_info,
+                                                             "space_info",
+                                                             (u64)space_info,
+                                                             num_bytes, 0);
                                space_info->bytes_may_use -= num_bytes;
                        }
                }
@@ -4928,6 +5077,8 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
        rb_erase(&head->node.rb_node, &delayed_refs->root);
 
        delayed_refs->num_entries--;
+       if (waitqueue_active(&delayed_refs->seq_wait))
+               wake_up(&delayed_refs->seq_wait);
 
        /*
         * we don't take a ref on the node because we're removing it from the
@@ -4955,16 +5106,17 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
-                          u64 parent, int last_ref)
+                          u64 parent, int last_ref, int for_cow)
 {
        struct btrfs_block_group_cache *cache = NULL;
        int ret;
 
        if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
-               ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
-                                               parent, root->root_key.objectid,
-                                               btrfs_header_level(buf),
-                                               BTRFS_DROP_DELAYED_REF, NULL);
+               ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
+                                       buf->start, buf->len,
+                                       parent, root->root_key.objectid,
+                                       btrfs_header_level(buf),
+                                       BTRFS_DROP_DELAYED_REF, NULL, for_cow);
                BUG_ON(ret);
        }
 
@@ -4999,12 +5151,12 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
        btrfs_put_block_group(cache);
 }
 
-int btrfs_free_extent(struct btrfs_trans_handle *trans,
-                     struct btrfs_root *root,
-                     u64 bytenr, u64 num_bytes, u64 parent,
-                     u64 root_objectid, u64 owner, u64 offset)
+int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+                     u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+                     u64 owner, u64 offset, int for_cow)
 {
        int ret;
+       struct btrfs_fs_info *fs_info = root->fs_info;
 
        /*
         * tree log blocks never actually go into the extent allocation
@@ -5016,14 +5168,17 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
                btrfs_pin_extent(root, bytenr, num_bytes, 1);
                ret = 0;
        } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
-               ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
+               ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+                                       num_bytes,
                                        parent, root_objectid, (int)owner,
-                                       BTRFS_DROP_DELAYED_REF, NULL);
+                                       BTRFS_DROP_DELAYED_REF, NULL, for_cow);
                BUG_ON(ret);
        } else {
-               ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
-                                       parent, root_objectid, owner,
-                                       offset, BTRFS_DROP_DELAYED_REF, NULL);
+               ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+                                               num_bytes,
+                                               parent, root_objectid, owner,
+                                               offset, BTRFS_DROP_DELAYED_REF,
+                                               NULL, for_cow);
                BUG_ON(ret);
        }
        return ret;
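Every delayed-ref producer in this file gains two arguments: the fs_info up front (the delayed-ref code no longer derives it from the transaction) and a trailing for_cow flag marking references generated by copy-on-write on behalf of relocation. All call sites collapse to one of two shapes, sketched here with the names used in the hunks above:

	/* metadata (tree block) reference: */
	ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
					 parent, root_objectid, level,
					 BTRFS_DROP_DELAYED_REF, NULL, for_cow);

	/* data extent reference: */
	ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr, num_bytes,
					 parent, root_objectid, owner, offset,
					 BTRFS_DROP_DELAYED_REF, NULL, for_cow);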
@@ -5146,6 +5301,8 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        ins->objectid = 0;
        ins->offset = 0;
 
+       trace_find_free_extent(orig_root, num_bytes, empty_size, data);
+
        space_info = __find_space_info(root->fs_info, data);
        if (!space_info) {
                printk(KERN_ERR "No space info for %llu\n", data);
@@ -5295,15 +5452,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                if (unlikely(block_group->ro))
                        goto loop;
 
-               spin_lock(&block_group->free_space_ctl->tree_lock);
-               if (cached &&
-                   block_group->free_space_ctl->free_space <
-                   num_bytes + empty_cluster + empty_size) {
-                       spin_unlock(&block_group->free_space_ctl->tree_lock);
-                       goto loop;
-               }
-               spin_unlock(&block_group->free_space_ctl->tree_lock);
-
                /*
                 * Ok we want to try and use the cluster allocator, so
                 * lets look there
@@ -5331,6 +5479,8 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                        if (offset) {
                                /* we have a block, we're done */
                                spin_unlock(&last_ptr->refill_lock);
+                               trace_btrfs_reserve_extent_cluster(root,
+                                       block_group, search_start, num_bytes);
                                goto checks;
                        }
 
@@ -5349,8 +5499,15 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                         * plenty of times and not have found
                         * anything, so we are likely way too
                         * fragmented for the clustering stuff to find
-                        * anything.  */
-                       if (loop >= LOOP_NO_EMPTY_SIZE) {
+                        * anything.
+                        *
+                        * However, if the cluster is taken from the
+                        * current block group, release the cluster
+                        * first, so that we stand a better chance of
+                        * succeeding in the unclustered
+                        * allocation.  */
+                       if (loop >= LOOP_NO_EMPTY_SIZE &&
+                           last_ptr->block_group != block_group) {
                                spin_unlock(&last_ptr->refill_lock);
                                goto unclustered_alloc;
                        }
@@ -5361,6 +5518,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
+                       if (loop >= LOOP_NO_EMPTY_SIZE) {
+                               spin_unlock(&last_ptr->refill_lock);
+                               goto unclustered_alloc;
+                       }
+
                        /* allocate a cluster in this block group */
                        ret = btrfs_find_space_cluster(trans, root,
                                               block_group, last_ptr,
@@ -5377,6 +5539,9 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                                if (offset) {
                                        /* we found one, proceed */
                                        spin_unlock(&last_ptr->refill_lock);
+                                       trace_btrfs_reserve_extent_cluster(root,
+                                               block_group, search_start,
+                                               num_bytes);
                                        goto checks;
                                }
                        } else if (!cached && loop > LOOP_CACHING_NOWAIT
@@ -5401,6 +5566,15 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                }
 
 unclustered_alloc:
+               spin_lock(&block_group->free_space_ctl->tree_lock);
+               if (cached &&
+                   block_group->free_space_ctl->free_space <
+                   num_bytes + empty_cluster + empty_size) {
+                       spin_unlock(&block_group->free_space_ctl->tree_lock);
+                       goto loop;
+               }
+               spin_unlock(&block_group->free_space_ctl->tree_lock);
+
                offset = btrfs_find_space_for_alloc(block_group, search_start,
                                                    num_bytes, empty_size);
                /*
@@ -5438,9 +5612,6 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                        goto loop;
                }
 
-               ins->objectid = search_start;
-               ins->offset = num_bytes;
-
                if (offset < search_start)
                        btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
@@ -5457,6 +5628,8 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
                ins->objectid = search_start;
                ins->offset = num_bytes;
 
+               trace_btrfs_reserve_extent(orig_root, block_group,
+                                          search_start, num_bytes);
                if (offset < search_start)
                        btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
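Two behavioural changes hide in the find_free_extent() hunks: the cheap "is there enough free space at all" test moves so it only guards the unclustered fallback, and a cluster borrowed from the current block group is now returned before falling back, so the unclustered retry can actually use those bytes. A control-flow sketch of the new ordering, using the labels from the function above:

	if (loop >= LOOP_NO_EMPTY_SIZE &&
	    last_ptr->block_group != block_group) {
		spin_unlock(&last_ptr->refill_lock);
		goto unclustered_alloc;		/* keep the foreign cluster */
	}
	btrfs_return_cluster_to_free_space(NULL, last_ptr);
	if (loop >= LOOP_NO_EMPTY_SIZE) {
		spin_unlock(&last_ptr->refill_lock);
		goto unclustered_alloc;		/* retry with the freed space */
	}

Only unclustered_alloc now performs the free_space < num_bytes + empty_cluster + empty_size check before searching.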
@@ -5842,9 +6015,10 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 
        BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
 
-       ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
-                                        0, root_objectid, owner, offset,
-                                        BTRFS_ADD_DELAYED_EXTENT, NULL);
+       ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
+                                        ins->offset, 0,
+                                        root_objectid, owner, offset,
+                                        BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
        return ret;
 }
 
@@ -5997,10 +6171,11 @@ use_block_rsv(struct btrfs_trans_handle *trans,
        return ERR_PTR(-ENOSPC);
 }
 
-static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
+static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
+                           struct btrfs_block_rsv *block_rsv, u32 blocksize)
 {
        block_rsv_add_bytes(block_rsv, blocksize, 0);
-       block_rsv_release_bytes(block_rsv, NULL, 0);
+       block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
 }
 
 /*
@@ -6014,7 +6189,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root, u32 blocksize,
                                        u64 parent, u64 root_objectid,
                                        struct btrfs_disk_key *key, int level,
-                                       u64 hint, u64 empty_size)
+                                       u64 hint, u64 empty_size, int for_cow)
 {
        struct btrfs_key ins;
        struct btrfs_block_rsv *block_rsv;
@@ -6030,7 +6205,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
        ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
                                   empty_size, hint, (u64)-1, &ins, 0);
        if (ret) {
-               unuse_block_rsv(block_rsv, blocksize);
+               unuse_block_rsv(root->fs_info, block_rsv, blocksize);
                return ERR_PTR(ret);
        }
 
@@ -6058,10 +6233,11 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                extent_op->update_flags = 1;
                extent_op->is_data = 0;
 
-               ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
+               ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
+                                       ins.objectid,
                                        ins.offset, parent, root_objectid,
                                        level, BTRFS_ADD_DELAYED_EXTENT,
-                                       extent_op);
+                                       extent_op, for_cow);
                BUG_ON(ret);
        }
        return buf;
@@ -6078,6 +6254,7 @@ struct walk_control {
        int keep_locks;
        int reada_slot;
        int reada_count;
+       int for_reloc;
 };
 
 #define DROP_REFERENCE 1
@@ -6216,9 +6393,9 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
        /* wc->stage == UPDATE_BACKREF */
        if (!(wc->flags[level] & flag)) {
                BUG_ON(!path->locks[level]);
-               ret = btrfs_inc_ref(trans, root, eb, 1);
+               ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
                BUG_ON(ret);
-               ret = btrfs_dec_ref(trans, root, eb, 0);
+               ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
                BUG_ON(ret);
                ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
                                                  eb->len, flag, 0);
@@ -6362,7 +6539,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
                }
 
                ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
-                                       root->root_key.objectid, level - 1, 0);
+                               root->root_key.objectid, level - 1, 0, 0);
                BUG_ON(ret);
        }
        btrfs_tree_unlock(next);
@@ -6436,9 +6613,11 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
        if (wc->refs[level] == 1) {
                if (level == 0) {
                        if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
-                               ret = btrfs_dec_ref(trans, root, eb, 1);
+                               ret = btrfs_dec_ref(trans, root, eb, 1,
+                                                   wc->for_reloc);
                        else
-                               ret = btrfs_dec_ref(trans, root, eb, 0);
+                               ret = btrfs_dec_ref(trans, root, eb, 0,
+                                                   wc->for_reloc);
                        BUG_ON(ret);
                }
                /* make block locked assertion in clean_tree_block happy */
@@ -6465,7 +6644,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                               btrfs_header_owner(path->nodes[level + 1]));
        }
 
-       btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
+       btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0);
 out:
        wc->refs[level] = 0;
        wc->flags[level] = 0;
@@ -6549,7 +6728,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
  * blocks are properly updated.
  */
 void btrfs_drop_snapshot(struct btrfs_root *root,
-                        struct btrfs_block_rsv *block_rsv, int update_ref)
+                        struct btrfs_block_rsv *block_rsv, int update_ref,
+                        int for_reloc)
 {
        struct btrfs_path *path;
        struct btrfs_trans_handle *trans;
@@ -6637,6 +6817,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
        wc->stage = DROP_REFERENCE;
        wc->update_ref = update_ref;
        wc->keep_locks = 0;
+       wc->for_reloc = for_reloc;
        wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
 
        while (1) {
@@ -6721,6 +6902,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
  * drop subtree rooted at tree block 'node'.
  *
  * NOTE: this function will unlock and release tree block 'node'
+ * Only used by relocation code.
  */
 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root,
@@ -6765,6 +6947,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
        wc->stage = DROP_REFERENCE;
        wc->update_ref = 0;
        wc->keep_locks = 1;
+       wc->for_reloc = 1;
        wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
 
        while (1) {
@@ -6792,6 +6975,29 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
        u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
                BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
 
+       if (root->fs_info->balance_ctl) {
+               struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
+               u64 tgt = 0;
+
+               /* pick restriper's target profile and return */
+               if (flags & BTRFS_BLOCK_GROUP_DATA &&
+                   bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+                       tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
+               } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
+                          bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+                       tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
+               } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
+                          bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+                       tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
+               }
+
+               if (tgt) {
+                       /* extended -> chunk profile */
+                       tgt &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+                       return tgt;
+               }
+       }
+
        /*
         * we add in the count of missing devices because we want
         * to make sure that any RAID levels on a degraded FS
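When a restripe (a balance with a convert filter) is in flight, update_block_group_flags() must return the restriper's target profile instead of second-guessing it from device counts. The bctl targets are stored in the extended profile encoding, so the single-profile marker is masked off before the value is used as an on-disk chunk profile; the conversion amounts to (sketch, helper name hypothetical):

	/* extended profile -> chunk profile: drop the "single" marker bit */
	static u64 extended_to_chunk(u64 flags)
	{
		return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
	}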
@@ -7085,7 +7291,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
                 * space to fit our block group in.
                 */
                if (device->total_bytes > device->bytes_used + min_free) {
-                       ret = find_free_dev_extent(NULL, device, min_free,
+                       ret = find_free_dev_extent(device, min_free,
                                                   &dev_offset, NULL);
                        if (!ret)
                                dev_nr++;
@@ -7447,6 +7653,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
                                &cache->space_info);
        BUG_ON(ret);
+       update_global_block_rsv(root->fs_info);
 
        spin_lock(&cache->space_info->lock);
        cache->space_info->bytes_readonly += cache->bytes_super;
@@ -7466,6 +7673,22 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
+{
+       u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+       /* chunk -> extended profile */
+       if (extra_flags == 0)
+               extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+       if (flags & BTRFS_BLOCK_GROUP_DATA)
+               fs_info->avail_data_alloc_bits &= ~extra_flags;
+       if (flags & BTRFS_BLOCK_GROUP_METADATA)
+               fs_info->avail_metadata_alloc_bits &= ~extra_flags;
+       if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+               fs_info->avail_system_alloc_bits &= ~extra_flags;
+}
+
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 group_start)
 {
@@ -7476,6 +7699,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        struct btrfs_key key;
        struct inode *inode;
        int ret;
+       int index;
        int factor;
 
        root = root->fs_info->extent_root;
@@ -7491,6 +7715,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        free_excluded_extents(root, block_group);
 
        memcpy(&key, &block_group->key, sizeof(key));
+       index = get_block_group_index(block_group);
        if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
                                  BTRFS_BLOCK_GROUP_RAID1 |
                                  BTRFS_BLOCK_GROUP_RAID10))
@@ -7565,6 +7790,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
         * are still on the list after taking the semaphore
         */
        list_del_init(&block_group->list);
+       if (list_empty(&block_group->space_info->block_groups[index]))
+               clear_avail_alloc_bits(root->fs_info, block_group->flags);
        up_write(&block_group->space_info->groups_sem);
 
        if (block_group->cached == BTRFS_CACHE_STARTED)
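Block-group removal becomes the mirror image of allocation for profile bookkeeping: the per-profile list index is captured before the group is unlinked, and once the last group of that profile disappears from the space_info, clear_avail_alloc_bits() clears the matching avail_*_alloc_bits. The "extra_flags == 0" branch above implements the inverse mapping from a plain chunk profile to the extended encoding, roughly (sketch, helper name hypothetical):

	/* chunk -> extended profile: plain chunks map to the SINGLE bit */
	static u64 chunk_to_extended(u64 flags)
	{
		if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0)
			flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE;
		return flags;
	}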
index 49f3c9dc09f4c81902299fd81c62da1ed8423250..9d09a4f81875817ebc45a7c5b80cbe6008061b22 100644 (file)
@@ -18,6 +18,7 @@
 #include "ctree.h"
 #include "btrfs_inode.h"
 #include "volumes.h"
+#include "check-integrity.h"
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
@@ -1895,7 +1896,7 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
        }
        bio->bi_bdev = dev->bdev;
        bio_add_page(bio, page, length, start-page_offset(page));
-       submit_bio(WRITE_SYNC, bio);
+       btrfsic_submit_bio(WRITE_SYNC, bio);
        wait_for_completion(&compl);
 
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
@@ -2393,7 +2394,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
                ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
                                           mirror_num, bio_flags, start);
        else
-               submit_bio(rw, bio);
+               btrfsic_submit_bio(rw, bio);
 
        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
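Both bio submission sites in extent_io.c are rerouted through btrfsic_submit_bio() so the new integrity checker (check-integrity.c, whose header is included at the top of this file's diff) can inspect every block before it reaches the device. Presumably, when the checker is not configured in, the wrapper costs nothing, along the lines of:

	/* Sketch of the disabled-config fallback in check-integrity.h. */
	#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	#define btrfsic_submit_bio submit_bio
	#endif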
@@ -3579,6 +3580,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
        atomic_set(&eb->blocking_writers, 0);
        atomic_set(&eb->spinning_readers, 0);
        atomic_set(&eb->spinning_writers, 0);
+       eb->lock_nested = 0;
        init_waitqueue_head(&eb->write_lock_wq);
        init_waitqueue_head(&eb->read_lock_wq);
 
index 7604c30013227fd823b1523503f8faaeccf283c4..bc6a042cb6fc496e6910d21fb3cdaec000f43b8e 100644 (file)
@@ -129,6 +129,7 @@ struct extent_buffer {
        struct list_head leak_list;
        struct rcu_head rcu_head;
        atomic_t refs;
+       pid_t lock_owner;
 
        /* count of read lock holders on the extent buffer */
        atomic_t write_locks;
@@ -137,6 +138,7 @@ struct extent_buffer {
        atomic_t blocking_readers;
        atomic_t spinning_readers;
        atomic_t spinning_writers;
+       int lock_nested;
 
        /* protects write locks */
        rwlock_t lock;
index 034d985032296cd4dfbc80e4d6717ac8b4ea77c4..859ba2dd88903ba207c7b0e448e0f5ce7f99e46e 100644 (file)
@@ -678,7 +678,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
-                                               start - extent_offset);
+                                               start - extent_offset, 0);
                                BUG_ON(ret);
                                *hint_byte = disk_bytenr;
                        }
@@ -753,7 +753,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
-                                               extent_offset);
+                                               extent_offset, 0);
                                BUG_ON(ret);
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
@@ -962,7 +962,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 
                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
-                                          ino, orig_offset);
+                                          ino, orig_offset, 0);
                BUG_ON(ret);
 
                if (split == start) {
@@ -989,7 +989,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
-                                       ino, orig_offset);
+                                       ino, orig_offset, 0);
                BUG_ON(ret);
        }
        other_start = 0;
@@ -1006,7 +1006,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
-                                       ino, orig_offset);
+                                       ino, orig_offset, 0);
                BUG_ON(ret);
        }
        if (del_nr == 0) {
@@ -1274,7 +1274,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                                                   dirty_pages);
                if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
                        btrfs_btree_balance_dirty(root, 1);
-               btrfs_throttle(root);
 
                pos += copied;
                num_written += copied;
index 9a897bf795380808e728c8d074cb58c5bcd9cf89..d20ff87ca603bba8255b04a95f34d11bb385ab33 100644 (file)
@@ -319,9 +319,11 @@ static void io_ctl_drop_pages(struct io_ctl *io_ctl)
        io_ctl_unmap_page(io_ctl);
 
        for (i = 0; i < io_ctl->num_pages; i++) {
-               ClearPageChecked(io_ctl->pages[i]);
-               unlock_page(io_ctl->pages[i]);
-               page_cache_release(io_ctl->pages[i]);
+               if (io_ctl->pages[i]) {
+                       ClearPageChecked(io_ctl->pages[i]);
+                       unlock_page(io_ctl->pages[i]);
+                       page_cache_release(io_ctl->pages[i]);
+               }
        }
 }
 
@@ -635,7 +637,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        if (!num_entries)
                return 0;
 
-       io_ctl_init(&io_ctl, inode, root);
+       ret = io_ctl_init(&io_ctl, inode, root);
+       if (ret)
+               return ret;
+
        ret = readahead_cache(inode);
        if (ret)
                goto out;
@@ -838,7 +843,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        struct io_ctl io_ctl;
        struct list_head bitmap_list;
        struct btrfs_key key;
-       u64 start, end, len;
+       u64 start, extent_start, extent_end, len;
        int entries = 0;
        int bitmaps = 0;
        int ret;
@@ -849,7 +854,9 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        if (!i_size_read(inode))
                return -1;
 
-       io_ctl_init(&io_ctl, inode, root);
+       ret = io_ctl_init(&io_ctl, inode, root);
+       if (ret)
+               return -1;
 
        /* Get the cluster for this block_group if it exists */
        if (block_group && !list_empty(&block_group->cluster_list))
@@ -857,25 +864,12 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                                     struct btrfs_free_cluster,
                                     block_group_list);
 
-       /*
-        * We shouldn't have switched the pinned extents yet so this is the
-        * right one
-        */
-       unpin = root->fs_info->pinned_extents;
-
        /* Lock all pages first so we can lock the extent safely. */
        io_ctl_prepare_pages(&io_ctl, inode, 0);
 
        lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
                         0, &cached_state, GFP_NOFS);
 
-       /*
-        * When searching for pinned extents, we need to start at our start
-        * offset.
-        */
-       if (block_group)
-               start = block_group->key.objectid;
-
        node = rb_first(&ctl->free_space_offset);
        if (!node && cluster) {
                node = rb_first(&cluster->root);
@@ -918,9 +912,20 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
         * We want to add any pinned extents to our free space cache
         * so we don't leak the space
         */
+
+       /*
+        * We shouldn't have switched the pinned extents yet so this is the
+        * right one
+        */
+       unpin = root->fs_info->pinned_extents;
+
+       if (block_group)
+               start = block_group->key.objectid;
+
        while (block_group && (start < block_group->key.objectid +
                               block_group->key.offset)) {
-               ret = find_first_extent_bit(unpin, start, &start, &end,
+               ret = find_first_extent_bit(unpin, start,
+                                           &extent_start, &extent_end,
                                            EXTENT_DIRTY);
                if (ret) {
                        ret = 0;
@@ -928,20 +933,21 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                }
 
                /* This pinned extent is out of our range */
-               if (start >= block_group->key.objectid +
+               if (extent_start >= block_group->key.objectid +
                    block_group->key.offset)
                        break;
 
-               len = block_group->key.objectid +
-                       block_group->key.offset - start;
-               len = min(len, end + 1 - start);
+               extent_start = max(extent_start, start);
+               extent_end = min(block_group->key.objectid +
+                                block_group->key.offset, extent_end + 1);
+               len = extent_end - extent_start;
 
                entries++;
-               ret = io_ctl_add_entry(&io_ctl, start, len, NULL);
+               ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
                if (ret)
                        goto out_nospc;
 
-               start = end + 1;
+               start = extent_end;
        }
 
        /* Write out the bitmaps */
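The pinned-extent walk now keeps its loop cursor (start) separate from what find_first_extent_bit() reports, then intersects the hit with the remaining block-group window, so a pinned range straddling the group boundary can no longer corrupt the cursor. The intersection, written out with bg_end standing for block_group->key.objectid + block_group->key.offset (a sketch):

	extent_start = max(extent_start, start);	/* clip the front */
	extent_end = min(bg_end, extent_end + 1);	/* clip, make half-open */
	len = extent_end - extent_start;		/* bytes to record */
	start = extent_end;				/* advance the cursor */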
@@ -2283,23 +2289,23 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
                                struct btrfs_free_space *entry,
                                struct btrfs_free_cluster *cluster,
-                               u64 offset, u64 bytes, u64 min_bytes)
+                               u64 offset, u64 bytes,
+                               u64 cont1_bytes, u64 min_bytes)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        unsigned long next_zero;
        unsigned long i;
-       unsigned long search_bits;
-       unsigned long total_bits;
+       unsigned long want_bits;
+       unsigned long min_bits;
        unsigned long found_bits;
        unsigned long start = 0;
        unsigned long total_found = 0;
        int ret;
-       bool found = false;
 
        i = offset_to_bit(entry->offset, block_group->sectorsize,
                          max_t(u64, offset, entry->offset));
-       search_bits = bytes_to_bits(bytes, block_group->sectorsize);
-       total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
+       want_bits = bytes_to_bits(bytes, block_group->sectorsize);
+       min_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
 
 again:
        found_bits = 0;
@@ -2308,7 +2314,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
             i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
                next_zero = find_next_zero_bit(entry->bitmap,
                                               BITS_PER_BITMAP, i);
-               if (next_zero - i >= search_bits) {
+               if (next_zero - i >= min_bits) {
                        found_bits = next_zero - i;
                        break;
                }
@@ -2318,10 +2324,9 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
        if (!found_bits)
                return -ENOSPC;
 
-       if (!found) {
+       if (!total_found) {
                start = i;
                cluster->max_size = 0;
-               found = true;
        }
 
        total_found += found_bits;
@@ -2329,13 +2334,8 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
        if (cluster->max_size < found_bits * block_group->sectorsize)
                cluster->max_size = found_bits * block_group->sectorsize;
 
-       if (total_found < total_bits) {
-               i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
-               if (i - start > total_bits * 2) {
-                       total_found = 0;
-                       cluster->max_size = 0;
-                       found = false;
-               }
+       if (total_found < want_bits || cluster->max_size < cont1_bytes) {
+               i = next_zero + 1;
                goto again;
        }
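The bitmap scan no longer resets the window when it crosses a gap; it keeps accumulating runs of at least min_bits until both targets hold, which is exactly the loop condition above: total_found must reach want_bits, and the single largest run must cover cont1_bytes. The acceptance test, isolated:

	/* keep extending the window until both cluster targets are met */
	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
		i = next_zero + 1;	/* resume after the gap, don't reset */
		goto again;
	}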
 
@@ -2346,28 +2346,31 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
                                 &entry->offset_index, 1);
        BUG_ON(ret);
 
+       trace_btrfs_setup_cluster(block_group, cluster,
+                                 total_found * block_group->sectorsize, 1);
        return 0;
 }
 
 /*
  * This searches the block group for just extents to fill the cluster with.
+ * Try to find a cluster with at least bytes total bytes, at least one
+ * extent of cont1_bytes, and other extents of at least min_bytes.
  */
 static noinline int
 setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
                        struct btrfs_free_cluster *cluster,
                        struct list_head *bitmaps, u64 offset, u64 bytes,
-                       u64 min_bytes)
+                       u64 cont1_bytes, u64 min_bytes)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *first = NULL;
        struct btrfs_free_space *entry = NULL;
-       struct btrfs_free_space *prev = NULL;
        struct btrfs_free_space *last;
        struct rb_node *node;
        u64 window_start;
        u64 window_free;
        u64 max_extent;
-       u64 max_gap = 128 * 1024;
+       u64 total_size = 0;
 
        entry = tree_search_offset(ctl, offset, 0, 1);
        if (!entry)
@@ -2377,8 +2380,8 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
         * We don't want bitmaps, so just move along until we find a normal
         * extent entry.
         */
-       while (entry->bitmap) {
-               if (list_empty(&entry->list))
+       while (entry->bitmap || entry->bytes < min_bytes) {
+               if (entry->bitmap && list_empty(&entry->list))
                        list_add_tail(&entry->list, bitmaps);
                node = rb_next(&entry->offset_index);
                if (!node)
@@ -2391,12 +2394,9 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
        max_extent = entry->bytes;
        first = entry;
        last = entry;
-       prev = entry;
 
-       while (window_free <= min_bytes) {
-               node = rb_next(&entry->offset_index);
-               if (!node)
-                       return -ENOSPC;
+       for (node = rb_next(&entry->offset_index); node;
+            node = rb_next(&entry->offset_index)) {
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
 
                if (entry->bitmap) {
@@ -2405,26 +2405,18 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
                        continue;
                }
 
-               /*
-                * we haven't filled the empty size and the window is
-                * very large.  reset and try again
-                */
-               if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
-                   entry->offset - window_start > (min_bytes * 2)) {
-                       first = entry;
-                       window_start = entry->offset;
-                       window_free = entry->bytes;
-                       last = entry;
+               if (entry->bytes < min_bytes)
+                       continue;
+
+               last = entry;
+               window_free += entry->bytes;
+               if (entry->bytes > max_extent)
                        max_extent = entry->bytes;
-               } else {
-                       last = entry;
-                       window_free += entry->bytes;
-                       if (entry->bytes > max_extent)
-                               max_extent = entry->bytes;
-               }
-               prev = entry;
        }
 
+       if (window_free < bytes || max_extent < cont1_bytes)
+               return -ENOSPC;
+
        cluster->window_start = first->offset;
 
        node = &first->offset_index;
@@ -2438,17 +2430,18 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
                node = rb_next(&entry->offset_index);
-               if (entry->bitmap)
+               if (entry->bitmap || entry->bytes < min_bytes)
                        continue;
 
                rb_erase(&entry->offset_index, &ctl->free_space_offset);
                ret = tree_insert_offset(&cluster->root, entry->offset,
                                         &entry->offset_index, 0);
+               total_size += entry->bytes;
                BUG_ON(ret);
        } while (node && entry != last);
 
        cluster->max_size = max_extent;
-
+       trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
        return 0;
 }
 
@@ -2460,7 +2453,7 @@ static noinline int
 setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
                     struct btrfs_free_cluster *cluster,
                     struct list_head *bitmaps, u64 offset, u64 bytes,
-                    u64 min_bytes)
+                    u64 cont1_bytes, u64 min_bytes)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry;
@@ -2485,7 +2478,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
                if (entry->bytes < min_bytes)
                        continue;
                ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
-                                          bytes, min_bytes);
+                                          bytes, cont1_bytes, min_bytes);
                if (!ret)
                        return 0;
        }
@@ -2499,7 +2492,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 
 /*
  * here we try to find a cluster of blocks in a block group.  The goal
- * is to find at least bytes free and up to empty_size + bytes free.
+ * is to find at least bytes+empty_size.
  * We might not find them all in one contiguous area.
  *
  * returns zero and sets up cluster if things worked out, otherwise
@@ -2515,23 +2508,24 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
        struct btrfs_free_space *entry, *tmp;
        LIST_HEAD(bitmaps);
        u64 min_bytes;
+       u64 cont1_bytes;
        int ret;
 
-       /* for metadata, allow allocates with more holes */
+       /*
+        * Choose the minimum extent size we'll require for this
+        * cluster.  For SSD_SPREAD, don't allow any fragmentation.
+        * For metadata, allow allocations with smaller extents.  For
+        * data, keep it dense.
+        */
        if (btrfs_test_opt(root, SSD_SPREAD)) {
-               min_bytes = bytes + empty_size;
+               cont1_bytes = min_bytes = bytes + empty_size;
        } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
-               /*
-                * we want to do larger allocations when we are
-                * flushing out the delayed refs, it helps prevent
-                * making more work as we go along.
-                */
-               if (trans->transaction->delayed_refs.flushing)
-                       min_bytes = max(bytes, (bytes + empty_size) >> 1);
-               else
-                       min_bytes = max(bytes, (bytes + empty_size) >> 4);
-       } else
-               min_bytes = max(bytes, (bytes + empty_size) >> 2);
+               cont1_bytes = bytes;
+               min_bytes = block_group->sectorsize;
+       } else {
+               cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
+               min_bytes = block_group->sectorsize;
+       }
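The old single min_bytes is split into a contiguity target (cont1_bytes, how big the largest extent must be) and a filter (min_bytes, the smallest piece worth adding): SSD_SPREAD still demands one fully contiguous run, metadata wants one extent covering the request but pads with sector-sized pieces, and data relaxes contiguity to a quarter of the request. For example, a 256 KiB data request with 1 MiB of empty_size on 4 KiB sectors works out to:

	u64 bytes = 256 << 10;		/* 256 KiB data request */
	u64 empty_size = 1 << 20;	/* 1 MiB of requested slack */
	u64 cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
					/* = max(256 KiB, 320 KiB) = 320 KiB */
	u64 min_bytes = 4096;		/* sectorsize: smallest usable piece */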
 
        spin_lock(&ctl->tree_lock);
 
@@ -2539,7 +2533,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
         * If we know we don't have enough space to make a cluster don't even
         * bother doing all the work to try and find one.
         */
-       if (ctl->free_space < min_bytes) {
+       if (ctl->free_space < bytes) {
                spin_unlock(&ctl->tree_lock);
                return -ENOSPC;
        }
@@ -2552,11 +2546,17 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                goto out;
        }
 
+       trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
+                                min_bytes);
+
+       INIT_LIST_HEAD(&bitmaps);
        ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
-                                     bytes, min_bytes);
+                                     bytes + empty_size,
+                                     cont1_bytes, min_bytes);
        if (ret)
                ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
-                                          offset, bytes, min_bytes);
+                                          offset, bytes + empty_size,
+                                          cont1_bytes, min_bytes);
 
        /* Clear our temporary list */
        list_for_each_entry_safe(entry, tmp, &bitmaps, list)
@@ -2567,6 +2567,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                list_add_tail(&cluster->block_group_list,
                              &block_group->cluster_list);
                cluster->block_group = block_group;
+       } else {
+               trace_btrfs_failed_cluster_setup(block_group);
        }
 out:
        spin_unlock(&cluster->lock);
@@ -2588,17 +2590,57 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
        cluster->block_group = NULL;
 }
 
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
-                          u64 *trimmed, u64 start, u64 end, u64 minlen)
+static int do_trimming(struct btrfs_block_group_cache *block_group,
+                      u64 *total_trimmed, u64 start, u64 bytes,
+                      u64 reserved_start, u64 reserved_bytes)
 {
-       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-       struct btrfs_free_space *entry = NULL;
+       struct btrfs_space_info *space_info = block_group->space_info;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
-       u64 bytes = 0;
-       u64 actually_trimmed;
-       int ret = 0;
+       int ret;
+       int update = 0;
+       u64 trimmed = 0;
 
-       *trimmed = 0;
+       spin_lock(&space_info->lock);
+       spin_lock(&block_group->lock);
+       if (!block_group->ro) {
+               block_group->reserved += reserved_bytes;
+               space_info->bytes_reserved += reserved_bytes;
+               update = 1;
+       }
+       spin_unlock(&block_group->lock);
+       spin_unlock(&space_info->lock);
+
+       ret = btrfs_error_discard_extent(fs_info->extent_root,
+                                        start, bytes, &trimmed);
+       if (!ret)
+               *total_trimmed += trimmed;
+
+       btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
+
+       if (update) {
+               spin_lock(&space_info->lock);
+               spin_lock(&block_group->lock);
+               if (block_group->ro)
+                       space_info->bytes_readonly += reserved_bytes;
+               block_group->reserved -= reserved_bytes;
+               space_info->bytes_reserved -= reserved_bytes;
+               spin_unlock(&space_info->lock);
+               spin_unlock(&block_group->lock);
+       }
+
+       return ret;
+}
+
+static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
+                         u64 *total_trimmed, u64 start, u64 end, u64 minlen)
+{
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+       struct btrfs_free_space *entry;
+       struct rb_node *node;
+       int ret = 0;
+       u64 extent_start;
+       u64 extent_bytes;
+       u64 bytes;
 
        while (start < end) {
                spin_lock(&ctl->tree_lock);
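do_trimming() factors out the bookkeeping both trim paths share. The caller has already unlinked the range from the free-space tree, so the helper temporarily accounts it as reserved (keeping concurrent allocators and the read-only accounting honest) while the discard runs, then re-adds the entire originally reserved range whether or not the device trimmed all of it. In step order (descriptive sketch):

	/*
	 * 1. caller: unlink [reserved_start, +reserved_bytes) from the ctl tree
	 * 2. here:   block_group->reserved += reserved_bytes (under both locks)
	 * 3. here:   btrfs_error_discard_extent() on [start, +bytes),
	 *            accumulating into *total_trimmed
	 * 4. here:   btrfs_add_free_space() gives the whole range back
	 * 5. here:   undo step 2, crediting bytes_readonly if the group went ro
	 */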
@@ -2609,81 +2651,118 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
                }
 
                entry = tree_search_offset(ctl, start, 0, 1);
-               if (!entry)
-                       entry = tree_search_offset(ctl,
-                                                  offset_to_bitmap(ctl, start),
-                                                  1, 1);
-
-               if (!entry || entry->offset >= end) {
+               if (!entry) {
                        spin_unlock(&ctl->tree_lock);
                        break;
                }
 
-               if (entry->bitmap) {
-                       ret = search_bitmap(ctl, entry, &start, &bytes);
-                       if (!ret) {
-                               if (start >= end) {
-                                       spin_unlock(&ctl->tree_lock);
-                                       break;
-                               }
-                               bytes = min(bytes, end - start);
-                               bitmap_clear_bits(ctl, entry, start, bytes);
-                               if (entry->bytes == 0)
-                                       free_bitmap(ctl, entry);
-                       } else {
-                               start = entry->offset + BITS_PER_BITMAP *
-                                       block_group->sectorsize;
+               /* skip bitmaps */
+               while (entry->bitmap) {
+                       node = rb_next(&entry->offset_index);
+                       if (!node) {
                                spin_unlock(&ctl->tree_lock);
-                               ret = 0;
-                               continue;
+                               goto out;
                        }
-               } else {
-                       start = entry->offset;
-                       bytes = min(entry->bytes, end - start);
-                       unlink_free_space(ctl, entry);
-                       kmem_cache_free(btrfs_free_space_cachep, entry);
+                       entry = rb_entry(node, struct btrfs_free_space,
+                                        offset_index);
                }
 
+               if (entry->offset >= end) {
+                       spin_unlock(&ctl->tree_lock);
+                       break;
+               }
+
+               extent_start = entry->offset;
+               extent_bytes = entry->bytes;
+               start = max(start, extent_start);
+               bytes = min(extent_start + extent_bytes, end) - start;
+               if (bytes < minlen) {
+                       spin_unlock(&ctl->tree_lock);
+                       goto next;
+               }
+
+               unlink_free_space(ctl, entry);
+               kmem_cache_free(btrfs_free_space_cachep, entry);
+
                spin_unlock(&ctl->tree_lock);
 
-               if (bytes >= minlen) {
-                       struct btrfs_space_info *space_info;
-                       int update = 0;
-
-                       space_info = block_group->space_info;
-                       spin_lock(&space_info->lock);
-                       spin_lock(&block_group->lock);
-                       if (!block_group->ro) {
-                               block_group->reserved += bytes;
-                               space_info->bytes_reserved += bytes;
-                               update = 1;
-                       }
-                       spin_unlock(&block_group->lock);
-                       spin_unlock(&space_info->lock);
-
-                       ret = btrfs_error_discard_extent(fs_info->extent_root,
-                                                        start,
-                                                        bytes,
-                                                        &actually_trimmed);
-
-                       btrfs_add_free_space(block_group, start, bytes);
-                       if (update) {
-                               spin_lock(&space_info->lock);
-                               spin_lock(&block_group->lock);
-                               if (block_group->ro)
-                                       space_info->bytes_readonly += bytes;
-                               block_group->reserved -= bytes;
-                               space_info->bytes_reserved -= bytes;
-                               spin_unlock(&space_info->lock);
-                               spin_unlock(&block_group->lock);
-                       }
+               ret = do_trimming(block_group, total_trimmed, start, bytes,
+                                 extent_start, extent_bytes);
+               if (ret)
+                       break;
+next:
+               start += bytes;
 
-                       if (ret)
-                               break;
-                       *trimmed += actually_trimmed;
+               if (fatal_signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+
+               cond_resched();
+       }
+out:
+       return ret;
+}
+
+static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
+                       u64 *total_trimmed, u64 start, u64 end, u64 minlen)
+{
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+       struct btrfs_free_space *entry;
+       int ret = 0;
+       int ret2;
+       u64 bytes;
+       u64 offset = offset_to_bitmap(ctl, start);
+
+       while (offset < end) {
+               bool next_bitmap = false;
+
+               spin_lock(&ctl->tree_lock);
+
+               if (ctl->free_space < minlen) {
+                       spin_unlock(&ctl->tree_lock);
+                       break;
+               }
+
+               entry = tree_search_offset(ctl, offset, 1, 0);
+               if (!entry) {
+                       spin_unlock(&ctl->tree_lock);
+                       next_bitmap = true;
+                       goto next;
+               }
+
+               bytes = minlen;
+               ret2 = search_bitmap(ctl, entry, &start, &bytes);
+               if (ret2 || start >= end) {
+                       spin_unlock(&ctl->tree_lock);
+                       next_bitmap = true;
+                       goto next;
+               }
+
+               bytes = min(bytes, end - start);
+               if (bytes < minlen) {
+                       spin_unlock(&ctl->tree_lock);
+                       goto next;
+               }
+
+               bitmap_clear_bits(ctl, entry, start, bytes);
+               if (entry->bytes == 0)
+                       free_bitmap(ctl, entry);
+
+               spin_unlock(&ctl->tree_lock);
+
+               ret = do_trimming(block_group, total_trimmed, start, bytes,
+                                 start, bytes);
+               if (ret)
+                       break;
+next:
+               if (next_bitmap) {
+                       offset += BITS_PER_BITMAP * ctl->unit;
+               } else {
+                       start += bytes;
+                       if (start >= offset + BITS_PER_BITMAP * ctl->unit)
+                               offset += BITS_PER_BITMAP * ctl->unit;
                }
-               start += bytes;
-               bytes = 0;
 
                if (fatal_signal_pending(current)) {
                        ret = -ERESTARTSYS;
@@ -2696,6 +2775,22 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
        return ret;
 }
 
+int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+                          u64 *trimmed, u64 start, u64 end, u64 minlen)
+{
+       int ret;
+
+       *trimmed = 0;
+
+       ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
+       if (ret)
+               return ret;
+
+       ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
+
+       return ret;
+}
+
 /*
  * Find the left-most item in the cache tree, and then return the
  * smallest inode number in the item.
index f8962a957d656b385d0f99d65f300598e419db4d..213ffa86ce1b81f30a5f4ad50753c6d2ac6435aa 100644 (file)
@@ -438,6 +438,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
                                          trans->bytes_reserved);
        if (ret)
                goto out;
+       trace_btrfs_space_reservation(root->fs_info, "ino_cache", (u64)trans,
+                                     trans->bytes_reserved, 1);
 again:
        inode = lookup_free_ino_inode(root, path);
        if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
@@ -498,6 +500,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
 out_put:
        iput(inode);
 out_release:
+       trace_btrfs_space_reservation(root->fs_info, "ino_cache", (u64)trans,
+                                     trans->bytes_reserved, 0);
        btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
 out:
        trans->block_rsv = rsv;
index 81b235a61f8c4149dd880d1d7d1238ab690a4e09..0da19a0ea00d5ac1e854cfe1a861ee828ccca15f 100644 (file)
@@ -1951,12 +1951,28 @@ enum btrfs_orphan_cleanup_state {
 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root)
 {
+       struct btrfs_block_rsv *block_rsv;
        int ret;
 
        if (!list_empty(&root->orphan_list) ||
            root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
                return;
 
+       spin_lock(&root->orphan_lock);
+       if (!list_empty(&root->orphan_list)) {
+               spin_unlock(&root->orphan_lock);
+               return;
+       }
+
+       if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
+               spin_unlock(&root->orphan_lock);
+               return;
+       }
+
+       block_rsv = root->orphan_block_rsv;
+       root->orphan_block_rsv = NULL;
+       spin_unlock(&root->orphan_lock);
+
        if (root->orphan_item_inserted &&
            btrfs_root_refs(&root->root_item) > 0) {
                ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
@@ -1965,10 +1981,9 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
                root->orphan_item_inserted = 0;
        }
 
-       if (root->orphan_block_rsv) {
-               WARN_ON(root->orphan_block_rsv->size > 0);
-               btrfs_free_block_rsv(root, root->orphan_block_rsv);
-               root->orphan_block_rsv = NULL;
+       if (block_rsv) {
+               WARN_ON(block_rsv->size > 0);
+               btrfs_free_block_rsv(root, block_rsv);
        }
 }
 
@@ -2224,14 +2239,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                                continue;
                        }
                        nr_truncate++;
-                       /*
-                        * Need to hold the imutex for reservation purposes, not
-                        * a huge deal here but I have a WARN_ON in
-                        * btrfs_delalloc_reserve_space to catch offenders.
-                        */
-                       mutex_lock(&inode->i_mutex);
                        ret = btrfs_truncate(inode);
-                       mutex_unlock(&inode->i_mutex);
                } else {
                        nr_unlink++;
                }
@@ -2845,7 +2853,7 @@ static void __unlink_end_trans(struct btrfs_trans_handle *trans,
                BUG_ON(!root->fs_info->enospc_unlink);
                root->fs_info->enospc_unlink = 0;
        }
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
 }
 
 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
@@ -3009,7 +3017,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
        int pending_del_nr = 0;
        int pending_del_slot = 0;
        int extent_type = -1;
-       int encoding;
        int ret;
        int err = 0;
        u64 ino = btrfs_ino(inode);
@@ -3059,7 +3066,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                found_type = btrfs_key_type(&found_key);
-               encoding = 0;
 
                if (found_key.objectid != ino)
                        break;
@@ -3072,10 +3078,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        extent_type = btrfs_file_extent_type(leaf, fi);
-                       encoding = btrfs_file_extent_compression(leaf, fi);
-                       encoding |= btrfs_file_extent_encryption(leaf, fi);
-                       encoding |= btrfs_file_extent_other_encoding(leaf, fi);
-
                        if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
                                item_end +=
                                    btrfs_file_extent_num_bytes(leaf, fi);
@@ -3103,7 +3105,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
                        u64 num_dec;
                        extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
-                       if (!del_item && !encoding) {
+                       if (!del_item) {
                                u64 orig_num_bytes =
                                        btrfs_file_extent_num_bytes(leaf, fi);
                                extent_num_bytes = new_size -
@@ -3179,7 +3181,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                        ret = btrfs_free_extent(trans, root, extent_start,
                                                extent_num_bytes, 0,
                                                btrfs_header_owner(leaf),
-                                               ino, extent_offset);
+                                               ino, extent_offset, 0);
                        BUG_ON(ret);
                }
 
@@ -3434,7 +3436,7 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
                i_size_write(inode, newsize);
                btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
                ret = btrfs_update_inode(trans, root, inode);
-               btrfs_end_transaction_throttle(trans, root);
+               btrfs_end_transaction(trans, root);
        } else {
 
                /*
@@ -4655,7 +4657,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
        }
 out_unlock:
        nr = trans->blocks_used;
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root, nr);
        if (drop_inode) {
                inode_dec_link_count(inode);
@@ -4723,7 +4725,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        }
 out_unlock:
        nr = trans->blocks_used;
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
@@ -4782,7 +4784,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        }
 
        nr = trans->blocks_used;
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
 fail:
        if (drop_inode) {
                inode_dec_link_count(inode);
@@ -4848,7 +4850,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
 out_fail:
        nr = trans->blocks_used;
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
        if (drop_on_err)
                iput(inode);
        btrfs_btree_balance_dirty(root, nr);
@@ -5121,7 +5123,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
                        }
                        flush_dcache_page(page);
                } else if (create && PageUptodate(page)) {
-                       WARN_ON(1);
+                       BUG();
                        if (!trans) {
                                kunmap(page);
                                free_extent_map(em);
@@ -6402,10 +6404,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        u64 page_start;
        u64 page_end;
 
-       /* Need this to keep space reservations serialized */
-       mutex_lock(&inode->i_mutex);
        ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
-       mutex_unlock(&inode->i_mutex);
        if (!ret)
                ret = btrfs_update_time(vma->vm_file);
        if (ret) {
@@ -6494,8 +6493,8 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (!ret)
                return VM_FAULT_LOCKED;
        unlock_page(page);
-       btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
 out:
+       btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
        return ret;
 }
 
@@ -6668,7 +6667,7 @@ static int btrfs_truncate(struct inode *inode)
                        err = ret;
 
                nr = trans->blocks_used;
-               ret = btrfs_end_transaction_throttle(trans, root);
+               ret = btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(root, nr);
        }
 
@@ -6749,6 +6748,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        extent_io_tree_init(&ei->io_tree, &inode->i_data);
        extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
        mutex_init(&ei->log_mutex);
+       mutex_init(&ei->delalloc_mutex);
        btrfs_ordered_inode_tree_init(&ei->ordered_tree);
        INIT_LIST_HEAD(&ei->i_orphan);
        INIT_LIST_HEAD(&ei->delalloc_inodes);
@@ -7074,7 +7074,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                btrfs_end_log_trans(root);
        }
 out_fail:
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
 out_notrans:
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&root->fs_info->subvol_sem);
@@ -7246,7 +7246,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        if (!err)
                d_instantiate(dentry, inode);
        nr = trans->blocks_used;
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
index 5441ff1480fdbbd9ce9fe4c9caa6fc0c62392a3c..ab620014bcc3a5bd4782f948251bb25a166b6a78 100644 (file)
@@ -176,6 +176,8 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
        struct btrfs_trans_handle *trans;
        unsigned int flags, oldflags;
        int ret;
+       u64 ip_oldflags;
+       unsigned int i_oldflags;
 
        if (btrfs_root_readonly(root))
                return -EROFS;
@@ -192,6 +194,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
 
        mutex_lock(&inode->i_mutex);
 
+       ip_oldflags = ip->flags;
+       i_oldflags = inode->i_flags;
+
        flags = btrfs_mask_flags(inode->i_mode, flags);
        oldflags = btrfs_flags_to_ioctl(ip->flags);
        if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
@@ -249,19 +254,24 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
                ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
        }
 
-       trans = btrfs_join_transaction(root);
-       BUG_ON(IS_ERR(trans));
+       trans = btrfs_start_transaction(root, 1);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
+               goto out_drop;
+       }
 
        btrfs_update_iflags(inode);
        inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, inode);
-       BUG_ON(ret);
 
        btrfs_end_transaction(trans, root);
+ out_drop:
+       if (ret) {
+               ip->flags = ip_oldflags;
+               inode->i_flags = i_oldflags;
+       }
 
        mnt_drop_write_file(file);
-
-       ret = 0;
  out_unlock:
        mutex_unlock(&inode->i_mutex);
        return ret;
@@ -276,14 +286,13 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
 
 static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
 {
-       struct btrfs_root *root = fdentry(file)->d_sb->s_fs_info;
-       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_fs_info *fs_info = btrfs_sb(fdentry(file)->d_sb);
        struct btrfs_device *device;
        struct request_queue *q;
        struct fstrim_range range;
        u64 minlen = ULLONG_MAX;
        u64 num_devices = 0;
-       u64 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
+       u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
        int ret;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -312,7 +321,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
 
        range.len = min(range.len, total_bytes - range.start);
        range.minlen = max(range.minlen, minlen);
-       ret = btrfs_trim_fs(root, &range);
+       ret = btrfs_trim_fs(fs_info->tree_root, &range);
        if (ret < 0)
                return ret;
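
Userspace reaches btrfs_ioctl_fitrim() through the generic FITRIM ioctl from <linux/fs.h>; as the hunks above show, the handler clamps range.len against the filesystem size and raises range.minlen to the largest discard granularity of the underlying queues before calling btrfs_trim_fs(). A minimal sketch of a caller (the mount-point path is a placeholder):

#include <fcntl.h>
#include <limits.h>
#include <linux/fs.h>           /* FITRIM, struct fstrim_range */
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
        struct fstrim_range range = {
                .start  = 0,
                .len    = ULLONG_MAX,   /* whole filesystem */
                .minlen = 0,            /* handler raises this as needed */
        };
        int fd = open("/mnt/btrfs", O_RDONLY);  /* placeholder path */

        if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
                perror("fitrim");
                return 1;
        }
        /* on return, range.len holds the number of bytes trimmed */
        printf("trimmed %llu bytes\n", (unsigned long long)range.len);
        return 0;
}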
 
@@ -358,7 +367,7 @@ static noinline int create_subvol(struct btrfs_root *root,
                return PTR_ERR(trans);
 
        leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
-                                     0, objectid, NULL, 0, 0, 0);
+                                     0, objectid, NULL, 0, 0, 0, 0);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
                goto fail;
@@ -858,10 +867,8 @@ static int cluster_pages_for_defrag(struct inode *inode,
                return 0;
        file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
 
-       mutex_lock(&inode->i_mutex);
        ret = btrfs_delalloc_reserve_space(inode,
                                           num_pages << PAGE_CACHE_SHIFT);
-       mutex_unlock(&inode->i_mutex);
        if (ret)
                return ret;
 again:
@@ -1203,13 +1210,21 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       mutex_lock(&root->fs_info->volume_mutex);
+       if (root->fs_info->balance_ctl) {
+               printk(KERN_INFO "btrfs: balance in progress\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
-       if (IS_ERR(vol_args))
-               return PTR_ERR(vol_args);
+       if (IS_ERR(vol_args)) {
+               ret = PTR_ERR(vol_args);
+               goto out;
+       }
 
        vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
 
-       mutex_lock(&root->fs_info->volume_mutex);
        sizestr = vol_args->name;
        devstr = strchr(sizestr, ':');
        if (devstr) {
@@ -1226,7 +1241,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
                       (unsigned long long)devid);
                ret = -EINVAL;
-               goto out_unlock;
+               goto out_free;
        }
        if (!strcmp(sizestr, "max"))
                new_size = device->bdev->bd_inode->i_size;
@@ -1241,7 +1256,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                new_size = memparse(sizestr, NULL);
                if (new_size == 0) {
                        ret = -EINVAL;
-                       goto out_unlock;
+                       goto out_free;
                }
        }
 
@@ -1250,7 +1265,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
        if (mod < 0) {
                if (new_size > old_size) {
                        ret = -EINVAL;
-                       goto out_unlock;
+                       goto out_free;
                }
                new_size = old_size - new_size;
        } else if (mod > 0) {
@@ -1259,11 +1274,11 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
 
        if (new_size < 256 * 1024 * 1024) {
                ret = -EINVAL;
-               goto out_unlock;
+               goto out_free;
        }
        if (new_size > device->bdev->bd_inode->i_size) {
                ret = -EFBIG;
-               goto out_unlock;
+               goto out_free;
        }
 
        do_div(new_size, root->sectorsize);
@@ -1276,7 +1291,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
-                       goto out_unlock;
+                       goto out_free;
                }
                ret = btrfs_grow_device(trans, device, new_size);
                btrfs_commit_transaction(trans, root);
@@ -1284,9 +1299,10 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                ret = btrfs_shrink_device(device, new_size);
        }
 
-out_unlock:
-       mutex_unlock(&root->fs_info->volume_mutex);
+out_free:
        kfree(vol_args);
+out:
+       mutex_unlock(&root->fs_info->volume_mutex);
        return ret;
 }
 
@@ -2052,14 +2068,25 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       mutex_lock(&root->fs_info->volume_mutex);
+       if (root->fs_info->balance_ctl) {
+               printk(KERN_INFO "btrfs: balance in progress\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
-       if (IS_ERR(vol_args))
-               return PTR_ERR(vol_args);
+       if (IS_ERR(vol_args)) {
+               ret = PTR_ERR(vol_args);
+               goto out;
+       }
 
        vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
        ret = btrfs_init_new_device(root, vol_args->name);
 
        kfree(vol_args);
+out:
+       mutex_unlock(&root->fs_info->volume_mutex);
        return ret;
 }
 
@@ -2074,14 +2101,25 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
        if (root->fs_info->sb->s_flags & MS_RDONLY)
                return -EROFS;
 
+       mutex_lock(&root->fs_info->volume_mutex);
+       if (root->fs_info->balance_ctl) {
+               printk(KERN_INFO "btrfs: balance in progress\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
-       if (IS_ERR(vol_args))
-               return PTR_ERR(vol_args);
+       if (IS_ERR(vol_args)) {
+               ret = PTR_ERR(vol_args);
+               goto out;
+       }
 
        vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
        ret = btrfs_rm_device(root, vol_args->name);
 
        kfree(vol_args);
+out:
+       mutex_unlock(&root->fs_info->volume_mutex);
        return ret;
 }
 
@@ -2427,7 +2465,8 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                                                        disko, diskl, 0,
                                                        root->root_key.objectid,
                                                        btrfs_ino(inode),
-                                                       new_key.offset - datao);
+                                                       new_key.offset - datao,
+                                                       0);
                                        BUG_ON(ret);
                                }
                        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
@@ -2977,7 +3016,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
 {
        int ret = 0;
        int size;
-       u64 extent_offset;
+       u64 extent_item_pos;
        struct btrfs_ioctl_logical_ino_args *loi;
        struct btrfs_data_container *inodes = NULL;
        struct btrfs_path *path = NULL;
@@ -3008,15 +3047,17 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
        }
 
        ret = extent_from_logical(root->fs_info, loi->logical, path, &key);
+       btrfs_release_path(path);
 
        if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
                ret = -ENOENT;
        if (ret < 0)
                goto out;
 
-       extent_offset = loi->logical - key.objectid;
+       extent_item_pos = loi->logical - key.objectid;
        ret = iterate_extent_inodes(root->fs_info, path, key.objectid,
-                                       extent_offset, build_ino_list, inodes);
+                                       extent_item_pos, build_ino_list,
+                                       inodes);
 
        if (ret < 0)
                goto out;
@@ -3034,6 +3075,163 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
        return ret;
 }
 
+void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
+                              struct btrfs_ioctl_balance_args *bargs)
+{
+       struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+
+       bargs->flags = bctl->flags;
+
+       if (atomic_read(&fs_info->balance_running))
+               bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
+       if (atomic_read(&fs_info->balance_pause_req))
+               bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
+       if (atomic_read(&fs_info->balance_cancel_req))
+               bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
+
+       memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
+       memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
+       memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
+
+       if (lock) {
+               spin_lock(&fs_info->balance_lock);
+               memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
+               spin_unlock(&fs_info->balance_lock);
+       } else {
+               memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
+       }
+}
+
+static long btrfs_ioctl_balance(struct btrfs_root *root, void __user *arg)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_ioctl_balance_args *bargs;
+       struct btrfs_balance_control *bctl;
+       int ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (fs_info->sb->s_flags & MS_RDONLY)
+               return -EROFS;
+
+       mutex_lock(&fs_info->volume_mutex);
+       mutex_lock(&fs_info->balance_mutex);
+
+       if (arg) {
+               bargs = memdup_user(arg, sizeof(*bargs));
+               if (IS_ERR(bargs)) {
+                       ret = PTR_ERR(bargs);
+                       goto out;
+               }
+
+               if (bargs->flags & BTRFS_BALANCE_RESUME) {
+                       if (!fs_info->balance_ctl) {
+                               ret = -ENOTCONN;
+                               goto out_bargs;
+                       }
+
+                       bctl = fs_info->balance_ctl;
+                       spin_lock(&fs_info->balance_lock);
+                       bctl->flags |= BTRFS_BALANCE_RESUME;
+                       spin_unlock(&fs_info->balance_lock);
+
+                       goto do_balance;
+               }
+       } else {
+               bargs = NULL;
+       }
+
+       if (fs_info->balance_ctl) {
+               ret = -EINPROGRESS;
+               goto out_bargs;
+       }
+
+       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
+       if (!bctl) {
+               ret = -ENOMEM;
+               goto out_bargs;
+       }
+
+       bctl->fs_info = fs_info;
+       if (arg) {
+               memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
+               memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
+               memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
+
+               bctl->flags = bargs->flags;
+       } else {
+               /* balance everything - no filters */
+               bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
+       }
+
+do_balance:
+       ret = btrfs_balance(bctl, bargs);
+       /*
+        * bctl is freed in __cancel_balance or in free_fs_info if
+        * restriper was paused all the way until unmount
+        */
+       if (arg) {
+               if (copy_to_user(arg, bargs, sizeof(*bargs)))
+                       ret = -EFAULT;
+       }
+
+out_bargs:
+       kfree(bargs);
+out:
+       mutex_unlock(&fs_info->balance_mutex);
+       mutex_unlock(&fs_info->volume_mutex);
+       return ret;
+}
+
+static long btrfs_ioctl_balance_ctl(struct btrfs_root *root, int cmd)
+{
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       switch (cmd) {
+       case BTRFS_BALANCE_CTL_PAUSE:
+               return btrfs_pause_balance(root->fs_info);
+       case BTRFS_BALANCE_CTL_CANCEL:
+               return btrfs_cancel_balance(root->fs_info);
+       }
+
+       return -EINVAL;
+}
+
+static long btrfs_ioctl_balance_progress(struct btrfs_root *root,
+                                        void __user *arg)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_ioctl_balance_args *bargs;
+       int ret = 0;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       mutex_lock(&fs_info->balance_mutex);
+       if (!fs_info->balance_ctl) {
+               ret = -ENOTCONN;
+               goto out;
+       }
+
+       bargs = kzalloc(sizeof(*bargs), GFP_NOFS);
+       if (!bargs) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       update_ioctl_balance_args(fs_info, 1, bargs);
+
+       if (copy_to_user(arg, bargs, sizeof(*bargs)))
+               ret = -EFAULT;
+
+       kfree(bargs);
+out:
+       mutex_unlock(&fs_info->balance_mutex);
+       return ret;
+}
+
 long btrfs_ioctl(struct file *file, unsigned int
                cmd, unsigned long arg)
 {
@@ -3078,7 +3276,7 @@ long btrfs_ioctl(struct file *file, unsigned int
        case BTRFS_IOC_DEV_INFO:
                return btrfs_ioctl_dev_info(root, argp);
        case BTRFS_IOC_BALANCE:
-               return btrfs_balance(root->fs_info->dev_root);
+               return btrfs_ioctl_balance(root, NULL);
        case BTRFS_IOC_CLONE:
                return btrfs_ioctl_clone(file, arg, 0, 0, 0);
        case BTRFS_IOC_CLONE_RANGE:
@@ -3110,6 +3308,12 @@ long btrfs_ioctl(struct file *file, unsigned int
                return btrfs_ioctl_scrub_cancel(root, argp);
        case BTRFS_IOC_SCRUB_PROGRESS:
                return btrfs_ioctl_scrub_progress(root, argp);
+       case BTRFS_IOC_BALANCE_V2:
+               return btrfs_ioctl_balance(root, argp);
+       case BTRFS_IOC_BALANCE_CTL:
+               return btrfs_ioctl_balance_ctl(root, arg);
+       case BTRFS_IOC_BALANCE_PROGRESS:
+               return btrfs_ioctl_balance_progress(root, argp);
        }
 
        return -ENOTTY;
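
Together the three new cases give userspace a complete start/resume, pause/cancel and progress-query interface. A minimal sketch of a caller, where "btrfs_ioctl.h" stands for a hypothetical local copy of the header changed below, the BTRFS_BALANCE_DATA/METADATA/SYSTEM flag bits are assumed from elsewhere in this series, and fd is an open descriptor on the mounted filesystem:

#include <string.h>
#include <sys/ioctl.h>
#include "btrfs_ioctl.h"        /* hypothetical local copy of the header below */

static int start_full_balance(int fd)
{
        struct btrfs_ioctl_balance_args args;

        memset(&args, 0, sizeof(args));
        /* flag bits assumed from elsewhere in the series: no filters,
         * balance data, metadata and system chunks */
        args.flags = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA |
                     BTRFS_BALANCE_SYSTEM;
        return ioctl(fd, BTRFS_IOC_BALANCE_V2, &args);
}

static int pause_balance(int fd)
{
        return ioctl(fd, BTRFS_IOC_BALANCE_CTL, BTRFS_BALANCE_CTL_PAUSE);
}

static int query_balance(int fd, struct btrfs_ioctl_balance_args *args)
{
        /* fills args->state and args->stat via update_ioctl_balance_args() */
        return ioctl(fd, BTRFS_IOC_BALANCE_PROGRESS, args);
}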
index 252ae9915de8fcfa4b6b7a3a502735c28d1819f6..4f69028a68c486268bf5bcfc097a5412dbdd2ad0 100644 (file)
@@ -109,6 +109,55 @@ struct btrfs_ioctl_fs_info_args {
        __u64 reserved[124];                    /* pad to 1k */
 };
 
+/* balance control ioctl modes */
+#define BTRFS_BALANCE_CTL_PAUSE                1
+#define BTRFS_BALANCE_CTL_CANCEL       2
+
+/*
+ * this is packed, because it should be exactly the same as its disk
+ * byte order counterpart (struct btrfs_disk_balance_args)
+ */
+struct btrfs_balance_args {
+       __u64 profiles;
+       __u64 usage;
+       __u64 devid;
+       __u64 pstart;
+       __u64 pend;
+       __u64 vstart;
+       __u64 vend;
+
+       __u64 target;
+
+       __u64 flags;
+
+       __u64 unused[8];
+} __attribute__ ((__packed__));
+
+/* report balance progress to userspace */
+struct btrfs_balance_progress {
+       __u64 expected;         /* estimated # of chunks that will be
+                                * relocated to fulfill the request */
+       __u64 considered;       /* # of chunks we have considered so far */
+       __u64 completed;        /* # of chunks relocated so far */
+};
+
+#define BTRFS_BALANCE_STATE_RUNNING    (1ULL << 0)
+#define BTRFS_BALANCE_STATE_PAUSE_REQ  (1ULL << 1)
+#define BTRFS_BALANCE_STATE_CANCEL_REQ (1ULL << 2)
+
+struct btrfs_ioctl_balance_args {
+       __u64 flags;                            /* in/out */
+       __u64 state;                            /* out */
+
+       struct btrfs_balance_args data;         /* in/out */
+       struct btrfs_balance_args meta;         /* in/out */
+       struct btrfs_balance_args sys;          /* in/out */
+
+       struct btrfs_balance_progress stat;     /* out */
+
+       __u64 unused[72];                       /* pad to 1k */
+};
+
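
btrfs_balance_args is packed so that it matches struct btrfs_disk_balance_args byte for byte, and btrfs_ioctl_balance_args pads itself to 1k; both invariants can be pinned at compile time. A sketch using the kernel's BUILD_BUG_ON, placed in any function that sees these definitions (136 = 17 __u64s and 1024 = 128 __u64s, counted from the field lists above):

        /* Sketch: compile-time layout checks for the ABI structs above. */
        BUILD_BUG_ON(sizeof(struct btrfs_balance_args) != 136);
        BUILD_BUG_ON(sizeof(struct btrfs_ioctl_balance_args) != 1024);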
 #define BTRFS_INO_LOOKUP_PATH_MAX 4080
 struct btrfs_ioctl_ino_lookup_args {
        __u64 treeid;
@@ -272,6 +321,11 @@ struct btrfs_ioctl_logical_ino_args {
                                 struct btrfs_ioctl_dev_info_args)
 #define BTRFS_IOC_FS_INFO _IOR(BTRFS_IOCTL_MAGIC, 31, \
                               struct btrfs_ioctl_fs_info_args)
+#define BTRFS_IOC_BALANCE_V2 _IOWR(BTRFS_IOCTL_MAGIC, 32, \
+                                  struct btrfs_ioctl_balance_args)
+#define BTRFS_IOC_BALANCE_CTL _IOW(BTRFS_IOCTL_MAGIC, 33, int)
+#define BTRFS_IOC_BALANCE_PROGRESS _IOR(BTRFS_IOCTL_MAGIC, 34, \
+                                       struct btrfs_ioctl_balance_args)
 #define BTRFS_IOC_INO_PATHS _IOWR(BTRFS_IOCTL_MAGIC, 35, \
                                        struct btrfs_ioctl_ino_path_args)
 #define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
index d77b67c4b275731417c11e04ad38b3dc9c4d456b..5e178d8f7167f496e928613b6c1f0000c2ea242e 100644 (file)
@@ -33,6 +33,14 @@ void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
  */
 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
+       if (eb->lock_nested) {
+               read_lock(&eb->lock);
+               if (eb->lock_nested && current->pid == eb->lock_owner) {
+                       read_unlock(&eb->lock);
+                       return;
+               }
+               read_unlock(&eb->lock);
+       }
        if (rw == BTRFS_WRITE_LOCK) {
                if (atomic_read(&eb->blocking_writers) == 0) {
                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
@@ -57,6 +65,14 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
  */
 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
+       if (eb->lock_nested) {
+               read_lock(&eb->lock);
+               if (eb->lock_nested && current->pid == eb->lock_owner) {
+                       read_unlock(&eb->lock);
+                       return;
+               }
+               read_unlock(&eb->lock);
+       }
        if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                write_lock(&eb->lock);
@@ -81,12 +97,25 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
 again:
+       read_lock(&eb->lock);
+       if (atomic_read(&eb->blocking_writers) &&
+           current->pid == eb->lock_owner) {
+               /*
+                * This extent is already write-locked by our thread. We allow
+                * an additional read lock to be added because it's for the same
+                * thread. btrfs_find_all_roots() depends on this as it may be
+                * called on a partly (write-)locked tree.
+                */
+               BUG_ON(eb->lock_nested);
+               eb->lock_nested = 1;
+               read_unlock(&eb->lock);
+               return;
+       }
+       read_unlock(&eb->lock);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
-               wait_event(eb->write_lock_wq,
-                          atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        atomic_inc(&eb->read_locks);
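
The effect of the lock_nested path is easiest to read as a call sequence. A sketch of the recursion it permits (kernel context, not compilable standalone), matching the btrfs_find_all_roots() case named in the comment above:

        /* Sketch only: same thread, one extent buffer. */
        btrfs_tree_lock(eb);            /* write lock; lock_owner = our pid */
        btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);

        btrfs_tree_read_lock(eb);       /* sees our pid in lock_owner, sets
                                           lock_nested and returns without
                                           touching eb->lock counters */
        /* ... read-only traversal ... */
        btrfs_tree_read_unlock(eb);     /* clears lock_nested, releases
                                           nothing */

        btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
        btrfs_tree_unlock(eb);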
@@ -129,6 +158,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
        }
        atomic_inc(&eb->write_locks);
        atomic_inc(&eb->spinning_writers);
+       eb->lock_owner = current->pid;
        return 1;
 }
 
@@ -137,6 +167,15 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
+       if (eb->lock_nested) {
+               read_lock(&eb->lock);
+               if (eb->lock_nested && current->pid == eb->lock_owner) {
+                       eb->lock_nested = 0;
+                       read_unlock(&eb->lock);
+                       return;
+               }
+               read_unlock(&eb->lock);
+       }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
@@ -149,6 +188,15 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
+       if (eb->lock_nested) {
+               read_lock(&eb->lock);
+               if (eb->lock_nested && current->pid == eb->lock_owner) {
+                       eb->lock_nested = 0;
+                       read_unlock(&eb->lock);
+                       return;
+               }
+               read_unlock(&eb->lock);
+       }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        if (atomic_dec_and_test(&eb->blocking_readers))
@@ -181,6 +229,7 @@ int btrfs_tree_lock(struct extent_buffer *eb)
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
        atomic_inc(&eb->write_locks);
+       eb->lock_owner = current->pid;
        return 0;
 }
 
index cfb55434a46981fa64416e68fa3fd29cf58238f5..8c1aae2c845d49960fe352c809033f1bdf5ffb74 100644 (file)
@@ -1604,12 +1604,12 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
                ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
                                           num_bytes, parent,
                                           btrfs_header_owner(leaf),
-                                          key.objectid, key.offset);
+                                          key.objectid, key.offset, 1);
                BUG_ON(ret);
 
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        parent, btrfs_header_owner(leaf),
-                                       key.objectid, key.offset);
+                                       key.objectid, key.offset, 1);
                BUG_ON(ret);
        }
        if (dirty)
@@ -1778,21 +1778,23 @@ int replace_path(struct btrfs_trans_handle *trans,
 
                ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
                                        path->nodes[level]->start,
-                                       src->root_key.objectid, level - 1, 0);
+                                       src->root_key.objectid, level - 1, 0,
+                                       1);
                BUG_ON(ret);
                ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
                                        0, dest->root_key.objectid, level - 1,
-                                       0);
+                                       0, 1);
                BUG_ON(ret);
 
                ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
                                        path->nodes[level]->start,
-                                       src->root_key.objectid, level - 1, 0);
+                                       src->root_key.objectid, level - 1, 0,
+                                       1);
                BUG_ON(ret);
 
                ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
                                        0, dest->root_key.objectid, level - 1,
-                                       0);
+                                       0, 1);
                BUG_ON(ret);
 
                btrfs_unlock_up_safe(path, 0);
@@ -2244,7 +2246,7 @@ int merge_reloc_roots(struct reloc_control *rc)
                } else {
                        list_del_init(&reloc_root->root_list);
                }
-               btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0);
+               btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
        }
 
        if (found) {
@@ -2558,7 +2560,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
                                                node->eb->start, blocksize,
                                                upper->eb->start,
                                                btrfs_header_owner(upper->eb),
-                                               node->level, 0);
+                                               node->level, 0, 1);
                        BUG_ON(ret);
 
                        ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
@@ -2947,9 +2949,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
        index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
        last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
        while (index <= last_index) {
-               mutex_lock(&inode->i_mutex);
                ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
-               mutex_unlock(&inode->i_mutex);
                if (ret)
                        goto out;
 
index ddf2c90d3fc0c475cbfabf6397c84f734abcc5e8..9770cc5bfb76c6829f96924bb82f9b3b564ca646 100644 (file)
@@ -25,6 +25,7 @@
 #include "transaction.h"
 #include "backref.h"
 #include "extent_io.h"
+#include "check-integrity.h"
 
 /*
  * This is only the first step towards a full-featured scrub. It reads all
@@ -309,7 +310,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
        u8 ref_level;
        unsigned long ptr = 0;
        const int bufsize = 4096;
-       u64 extent_offset;
+       u64 extent_item_pos;
 
        path = btrfs_alloc_path();
 
@@ -329,12 +330,13 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
        if (ret < 0)
                goto out;
 
-       extent_offset = swarn.logical - found_key.objectid;
+       extent_item_pos = swarn.logical - found_key.objectid;
        swarn.extent_item_size = found_key.offset;
 
        eb = path->nodes[0];
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        item_size = btrfs_item_size_nr(eb, path->slots[0]);
+       btrfs_release_path(path);
 
        if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                do {
@@ -351,7 +353,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
        } else {
                swarn.path = path;
                iterate_extent_inodes(fs_info, path, found_key.objectid,
-                                       extent_offset,
+                                       extent_item_pos,
                                        scrub_print_warning_inode, &swarn);
        }
 
@@ -732,7 +734,7 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
        bio_add_page(bio, page, PAGE_SIZE, 0);
        bio->bi_end_io = scrub_fixup_end_io;
        bio->bi_private = &complete;
-       submit_bio(rw, bio);
+       btrfsic_submit_bio(rw, bio);
 
        /* this will also unplug the queue */
        wait_for_completion(&complete);
@@ -958,7 +960,7 @@ static int scrub_submit(struct scrub_dev *sdev)
        sdev->curr = -1;
        atomic_inc(&sdev->in_flight);
 
-       submit_bio(READ, sbio->bio);
+       btrfsic_submit_bio(READ, sbio->bio);
 
        return 0;
 }
index ae488aa1966a1b32d6a3c4438424eb690740d4c9..3ce97b217cbeae21ef23e57fe2a605ed11572af1 100644 (file)
@@ -147,13 +147,13 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
 
 static void btrfs_put_super(struct super_block *sb)
 {
-       struct btrfs_root *root = btrfs_sb(sb);
-       int ret;
-
-       ret = close_ctree(root);
-       sb->s_fs_info = NULL;
-
-       (void)ret; /* FIXME: need to fix VFS to return error? */
+       (void)close_ctree(btrfs_sb(sb)->tree_root);
+       /* FIXME: need to fix VFS to return error? */
+       /* AV: return it _where_?  ->put_super() can be triggered by any number
+        * of async events, up to and including delivery of SIGKILL to the
+        * last process that kept it busy.  Or segfault in the aforementioned
+        * process...  Whom would you report that to?
+        */
 }
 
 enum {
@@ -163,8 +163,11 @@ enum {
        Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
        Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
        Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
-       Opt_enospc_debug, Opt_subvolrootid, Opt_defrag,
-       Opt_inode_cache, Opt_no_space_cache, Opt_recovery, Opt_err,
+       Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache,
+       Opt_no_space_cache, Opt_recovery, Opt_skip_balance,
+       Opt_check_integrity, Opt_check_integrity_including_extent_data,
+       Opt_check_integrity_print_mask,
+       Opt_err,
 };
 
 static match_table_t tokens = {
@@ -199,6 +202,10 @@ static match_table_t tokens = {
        {Opt_inode_cache, "inode_cache"},
        {Opt_no_space_cache, "nospace_cache"},
        {Opt_recovery, "recovery"},
+       {Opt_skip_balance, "skip_balance"},
+       {Opt_check_integrity, "check_int"},
+       {Opt_check_integrity_including_extent_data, "check_int_data"},
+       {Opt_check_integrity_print_mask, "check_int_print_mask=%d"},
        {Opt_err, NULL},
 };
 
@@ -397,6 +404,40 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                        printk(KERN_INFO "btrfs: enabling auto recovery");
                        btrfs_set_opt(info->mount_opt, RECOVERY);
                        break;
+               case Opt_skip_balance:
+                       btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
+                       break;
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+               case Opt_check_integrity_including_extent_data:
+                       printk(KERN_INFO "btrfs: enabling check integrity"
+                              " including extent data\n");
+                       btrfs_set_opt(info->mount_opt,
+                                     CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
+                       btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
+                       break;
+               case Opt_check_integrity:
+                       printk(KERN_INFO "btrfs: enabling check integrity\n");
+                       btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
+                       break;
+               case Opt_check_integrity_print_mask:
+                       intarg = 0;
+                       match_int(&args[0], &intarg);
+                       if (intarg) {
+                               info->check_integrity_print_mask = intarg;
+                               printk(KERN_INFO "btrfs:"
+                                      " check_integrity_print_mask 0x%x\n",
+                                      info->check_integrity_print_mask);
+                       }
+                       break;
+#else
+               case Opt_check_integrity_including_extent_data:
+               case Opt_check_integrity:
+               case Opt_check_integrity_print_mask:
+                       printk(KERN_ERR "btrfs: support for check_integrity*"
+                              " not compiled in!\n");
+                       ret = -EINVAL;
+                       goto out;
+#endif
                case Opt_err:
                        printk(KERN_INFO "btrfs: unrecognized mount option "
                               "'%s'\n", p);
@@ -500,7 +541,8 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 static struct dentry *get_default_root(struct super_block *sb,
                                       u64 subvol_objectid)
 {
-       struct btrfs_root *root = sb->s_fs_info;
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       struct btrfs_root *root = fs_info->tree_root;
        struct btrfs_root *new_root;
        struct btrfs_dir_item *di;
        struct btrfs_path *path;
@@ -530,7 +572,7 @@ static struct dentry *get_default_root(struct super_block *sb,
         * will mount by default if we haven't been given a specific subvolume
         * to mount.
         */
-       dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
+       dir_id = btrfs_super_root_dir(fs_info->super_copy);
        di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
        if (IS_ERR(di)) {
                btrfs_free_path(path);
@@ -544,7 +586,7 @@ static struct dentry *get_default_root(struct super_block *sb,
                 */
                btrfs_free_path(path);
                dir_id = BTRFS_FIRST_FREE_OBJECTID;
-               new_root = root->fs_info->fs_root;
+               new_root = fs_info->fs_root;
                goto setup_root;
        }
 
@@ -552,7 +594,7 @@ static struct dentry *get_default_root(struct super_block *sb,
        btrfs_free_path(path);
 
 find_root:
-       new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
+       new_root = btrfs_read_fs_root_no_name(fs_info, &location);
        if (IS_ERR(new_root))
                return ERR_CAST(new_root);
 
@@ -588,7 +630,7 @@ static int btrfs_fill_super(struct super_block *sb,
 {
        struct inode *inode;
        struct dentry *root_dentry;
-       struct btrfs_root *tree_root;
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
        struct btrfs_key key;
        int err;
 
@@ -603,18 +645,16 @@ static int btrfs_fill_super(struct super_block *sb,
        sb->s_flags |= MS_POSIXACL;
 #endif
 
-       tree_root = open_ctree(sb, fs_devices, (char *)data);
-
-       if (IS_ERR(tree_root)) {
+       err = open_ctree(sb, fs_devices, (char *)data);
+       if (err) {
                printk("btrfs: open_ctree failed\n");
-               return PTR_ERR(tree_root);
+               return err;
        }
-       sb->s_fs_info = tree_root;
 
        key.objectid = BTRFS_FIRST_FREE_OBJECTID;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
-       inode = btrfs_iget(sb, &key, tree_root->fs_info->fs_root, NULL);
+       inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                goto fail_close;
@@ -631,23 +671,25 @@ static int btrfs_fill_super(struct super_block *sb,
 
        save_mount_options(sb, data);
        cleancache_init_fs(sb);
+       sb->s_flags |= MS_ACTIVE;
        return 0;
 
 fail_close:
-       close_ctree(tree_root);
+       close_ctree(fs_info->tree_root);
        return err;
 }
 
 int btrfs_sync_fs(struct super_block *sb, int wait)
 {
        struct btrfs_trans_handle *trans;
-       struct btrfs_root *root = btrfs_sb(sb);
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       struct btrfs_root *root = fs_info->tree_root;
        int ret;
 
        trace_btrfs_sync_fs(wait);
 
        if (!wait) {
-               filemap_flush(root->fs_info->btree_inode->i_mapping);
+               filemap_flush(fs_info->btree_inode->i_mapping);
                return 0;
        }
 
@@ -663,8 +705,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 
 static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
 {
-       struct btrfs_root *root = btrfs_sb(dentry->d_sb);
-       struct btrfs_fs_info *info = root->fs_info;
+       struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
+       struct btrfs_root *root = info->tree_root;
        char *compress_type;
 
        if (btrfs_test_opt(root, DEGRADED))
@@ -722,28 +764,25 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
                seq_puts(seq, ",autodefrag");
        if (btrfs_test_opt(root, INODE_MAP_CACHE))
                seq_puts(seq, ",inode_cache");
+       if (btrfs_test_opt(root, SKIP_BALANCE))
+               seq_puts(seq, ",skip_balance");
        return 0;
 }
 
 static int btrfs_test_super(struct super_block *s, void *data)
 {
-       struct btrfs_root *test_root = data;
-       struct btrfs_root *root = btrfs_sb(s);
+       struct btrfs_fs_info *p = data;
+       struct btrfs_fs_info *fs_info = btrfs_sb(s);
 
-       /*
-        * If this super block is going away, return false as it
-        * can't match as an existing super block.
-        */
-       if (!atomic_read(&s->s_active))
-               return 0;
-       return root->fs_info->fs_devices == test_root->fs_info->fs_devices;
+       return fs_info->fs_devices == p->fs_devices;
 }
 
 static int btrfs_set_super(struct super_block *s, void *data)
 {
-       s->s_fs_info = data;
-
-       return set_anon_super(s, data);
+       int err = set_anon_super(s, data);
+       if (!err)
+               s->s_fs_info = data;
+       return err;
 }
 
 /*
@@ -903,12 +942,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        if (!fs_info)
                return ERR_PTR(-ENOMEM);
 
-       fs_info->tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
-       if (!fs_info->tree_root) {
-               error = -ENOMEM;
-               goto error_fs_info;
-       }
-       fs_info->tree_root->fs_info = fs_info;
        fs_info->fs_devices = fs_devices;
 
        fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
@@ -928,43 +961,30 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        }
 
        bdev = fs_devices->latest_bdev;
-       s = sget(fs_type, btrfs_test_super, btrfs_set_super,
-                fs_info->tree_root);
+       s = sget(fs_type, btrfs_test_super, btrfs_set_super, fs_info);
        if (IS_ERR(s)) {
                error = PTR_ERR(s);
                goto error_close_devices;
        }
 
        if (s->s_root) {
-               if ((flags ^ s->s_flags) & MS_RDONLY) {
-                       deactivate_locked_super(s);
-                       error = -EBUSY;
-                       goto error_close_devices;
-               }
-
                btrfs_close_devices(fs_devices);
                free_fs_info(fs_info);
+               if ((flags ^ s->s_flags) & MS_RDONLY)
+                       error = -EBUSY;
        } else {
                char b[BDEVNAME_SIZE];
 
                s->s_flags = flags | MS_NOSEC;
                strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
-               btrfs_sb(s)->fs_info->bdev_holder = fs_type;
+               btrfs_sb(s)->bdev_holder = fs_type;
                error = btrfs_fill_super(s, fs_devices, data,
                                         flags & MS_SILENT ? 1 : 0);
-               if (error) {
-                       deactivate_locked_super(s);
-                       return ERR_PTR(error);
-               }
-
-               s->s_flags |= MS_ACTIVE;
        }
 
-       root = get_default_root(s, subvol_objectid);
-       if (IS_ERR(root)) {
+       root = !error ? get_default_root(s, subvol_objectid) : ERR_PTR(error);
+       if (IS_ERR(root))
                deactivate_locked_super(s);
-               return root;
-       }
 
        return root;
 
@@ -977,7 +997,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 
 static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 {
-       struct btrfs_root *root = btrfs_sb(sb);
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       struct btrfs_root *root = fs_info->tree_root;
        int ret;
 
        ret = btrfs_parse_options(root, data);
@@ -993,13 +1014,13 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                ret =  btrfs_commit_super(root);
                WARN_ON(ret);
        } else {
-               if (root->fs_info->fs_devices->rw_devices == 0)
+               if (fs_info->fs_devices->rw_devices == 0)
                        return -EACCES;
 
-               if (btrfs_super_log_root(root->fs_info->super_copy) != 0)
+               if (btrfs_super_log_root(fs_info->super_copy) != 0)
                        return -EINVAL;
 
-               ret = btrfs_cleanup_fs_roots(root->fs_info);
+               ret = btrfs_cleanup_fs_roots(fs_info);
                WARN_ON(ret);
 
                /* recover relocation */
@@ -1168,18 +1189,18 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 
 static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
-       struct btrfs_root *root = btrfs_sb(dentry->d_sb);
-       struct btrfs_super_block *disk_super = root->fs_info->super_copy;
-       struct list_head *head = &root->fs_info->space_info;
+       struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
+       struct btrfs_super_block *disk_super = fs_info->super_copy;
+       struct list_head *head = &fs_info->space_info;
        struct btrfs_space_info *found;
        u64 total_used = 0;
        u64 total_free_data = 0;
        int bits = dentry->d_sb->s_blocksize_bits;
-       __be32 *fsid = (__be32 *)root->fs_info->fsid;
+       __be32 *fsid = (__be32 *)fs_info->fsid;
        int ret;
 
        /* holding chunk_mutex to avoid allocating new chunks */
-       mutex_lock(&root->fs_info->chunk_mutex);
+       mutex_lock(&fs_info->chunk_mutex);
        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
@@ -1198,14 +1219,14 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_bsize = dentry->d_sb->s_blocksize;
        buf->f_type = BTRFS_SUPER_MAGIC;
        buf->f_bavail = total_free_data;
-       ret = btrfs_calc_avail_data_space(root, &total_free_data);
+       ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
        if (ret) {
-               mutex_unlock(&root->fs_info->chunk_mutex);
+               mutex_unlock(&fs_info->chunk_mutex);
                return ret;
        }
        buf->f_bavail += total_free_data;
        buf->f_bavail = buf->f_bavail >> bits;
-       mutex_unlock(&root->fs_info->chunk_mutex);
+       mutex_unlock(&fs_info->chunk_mutex);
 
        /* We treat it as constant endianness (it doesn't matter _which_)
           because we want the fsid to come out the same whether mounted
@@ -1219,11 +1240,18 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
        return 0;
 }
 
+static void btrfs_kill_super(struct super_block *sb)
+{
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       kill_anon_super(sb);
+       free_fs_info(fs_info);
+}
+
 static struct file_system_type btrfs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "btrfs",
        .mount          = btrfs_mount,
-       .kill_sb        = kill_anon_super,
+       .kill_sb        = btrfs_kill_super,
        .fs_flags       = FS_REQUIRES_DEV,
 };
 
@@ -1257,17 +1285,17 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
 
 static int btrfs_freeze(struct super_block *sb)
 {
-       struct btrfs_root *root = btrfs_sb(sb);
-       mutex_lock(&root->fs_info->transaction_kthread_mutex);
-       mutex_lock(&root->fs_info->cleaner_mutex);
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       mutex_lock(&fs_info->transaction_kthread_mutex);
+       mutex_lock(&fs_info->cleaner_mutex);
        return 0;
 }
 
 static int btrfs_unfreeze(struct super_block *sb)
 {
-       struct btrfs_root *root = btrfs_sb(sb);
-       mutex_unlock(&root->fs_info->cleaner_mutex);
-       mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       mutex_unlock(&fs_info->cleaner_mutex);
+       mutex_unlock(&fs_info->transaction_kthread_mutex);
        return 0;
 }
 
index 81376d94cd3c6a4639ebef35df501dbefbfb2435..287a6728b1ad6ddc726b64122223bfc71e7e165d 100644 (file)
@@ -36,6 +36,8 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
+               WARN_ON(transaction->delayed_refs.root.rb_node);
+               WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
@@ -108,8 +110,11 @@ static noinline int join_transaction(struct btrfs_root *root, int nofail)
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;
+       cur_trans->delayed_refs.seq = 1;
+       init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
        spin_lock_init(&cur_trans->commit_lock);
        spin_lock_init(&cur_trans->delayed_refs.lock);
+       INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
 
        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
@@ -321,6 +326,8 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
        }
 
        if (num_bytes) {
+               trace_btrfs_space_reservation(root->fs_info, "transaction",
+                                             (u64)h, num_bytes, 1);
                h->block_rsv = &root->fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
        }
@@ -467,19 +474,12 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
-       while (count < 4) {
+       while (count < 2) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
                if (cur &&
                    trans->transaction->delayed_refs.num_heads_ready > 64) {
                        trans->delayed_ref_updates = 0;
-
-                       /*
-                        * do a full flush if the transaction is trying
-                        * to close
-                        */
-                       if (trans->transaction->delayed_refs.flushing)
-                               cur = 0;
                        btrfs_run_delayed_refs(trans, root, cur);
                } else {
                        break;
@@ -1393,9 +1393,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
 
                if (btrfs_header_backref_rev(root->node) <
                    BTRFS_MIXED_BACKREF_REV)
-                       btrfs_drop_snapshot(root, NULL, 0);
+                       btrfs_drop_snapshot(root, NULL, 0, 0);
                else
-                       btrfs_drop_snapshot(root, NULL, 1);
+                       btrfs_drop_snapshot(root, NULL, 1, 0);
        }
        return 0;
 }
index 3568374d419da8ee50eb17f4af5319964750614d..cb877e0886a71b80e0f44f60e0d884f57203a597 100644 (file)
@@ -589,7 +589,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                                ret = btrfs_inc_extent_ref(trans, root,
                                                ins.objectid, ins.offset,
                                                0, root->root_key.objectid,
-                                               key->objectid, offset);
+                                               key->objectid, offset, 0);
                                BUG_ON(ret);
                        } else {
                                /*
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
new file mode 100644 (file)
index 0000000..12f5147
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2011 STRATO AG
+ * written by Arne Jansen <sensille@gmx.net>
+ * Distributed under the GNU GPL license version 2.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "ulist.h"
+
+/*
+ * ulist is a generic data structure to hold a collection of unique u64
+ * values. The only operations it supports are adding to the list and
+ * enumerating it.
+ * It is possible to store an auxiliary value along with the key.
+ *
+ * The implementation is preliminary and can probably be sped up
+ * significantly. A first step would be to store the values in an rbtree
+ * as soon as ULIST_SIZE is exceeded.
+ *
+ * A sample usage for ulists is the enumeration of directed graphs without
+ * visiting a node twice. The pseudo-code could look like this:
+ *
+ * ulist = ulist_alloc();
+ * ulist_add(ulist, root);
+ * elem = NULL;
+ *
+ * while ((elem = ulist_next(ulist, elem))) {
+ *     for (all child nodes n in elem)
+ *             ulist_add(ulist, n);
+ *     do something useful with the node;
+ * }
+ * ulist_free(ulist);
+ *
+ * This assumes the graph nodes are addressable by u64. This stems from the
+ * usage for tree enumeration in btrfs, where the logical addresses are
+ * 64 bit.
+ *
+ * It is also useful for tree enumeration, which could otherwise be done
+ * elegantly with recursion but is not possible due to kernel stack
+ * limitations. The loop would be similar to the above.
+ */
+
+/**
+ * ulist_init - freshly initialize a ulist
+ * @ulist:     the ulist to initialize
+ *
+ * Note: don't use this function to init an already used ulist, use
+ * ulist_reinit instead.
+ */
+void ulist_init(struct ulist *ulist)
+{
+       ulist->nnodes = 0;
+       ulist->nodes = ulist->int_nodes;
+       ulist->nodes_alloced = ULIST_SIZE;
+}
+EXPORT_SYMBOL(ulist_init);
+
+/**
+ * ulist_fini - free up additionally allocated memory for the ulist
+ * @ulist:     the ulist from which to free the additional memory
+ *
+ * This is useful in cases where the base 'struct ulist' has been statically
+ * allocated.
+ */
+void ulist_fini(struct ulist *ulist)
+{
+       /*
+        * The first ULIST_SIZE elements are stored inline in struct ulist.
+        * Only if more elements were allocated do they need to be freed.
+        */
+       if (ulist->nodes_alloced > ULIST_SIZE)
+               kfree(ulist->nodes);
+       ulist->nodes_alloced = 0;       /* in case ulist_fini is called twice */
+}
+EXPORT_SYMBOL(ulist_fini);
+
+/**
+ * ulist_reinit - prepare a ulist for reuse
+ * @ulist:     ulist to be reused
+ *
+ * Free up all additional memory allocated for the list elements and reinit
+ * the ulist.
+ */
+void ulist_reinit(struct ulist *ulist)
+{
+       ulist_fini(ulist);
+       ulist_init(ulist);
+}
+EXPORT_SYMBOL(ulist_reinit);
+
+/**
+ * ulist_alloc - dynamically allocate a ulist
+ * @gfp_mask:  allocation flags to use for the base allocation
+ *
+ * The allocated ulist will be returned in an initialized state.
+ */
+struct ulist *ulist_alloc(unsigned long gfp_mask)
+{
+       struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
+
+       if (!ulist)
+               return NULL;
+
+       ulist_init(ulist);
+
+       return ulist;
+}
+EXPORT_SYMBOL(ulist_alloc);
+
+/**
+ * ulist_free - free dynamically allocated ulist
+ * @ulist:     ulist to free
+ *
+ * It is not necessary to call ulist_fini first.
+ */
+void ulist_free(struct ulist *ulist)
+{
+       if (!ulist)
+               return;
+       ulist_fini(ulist);
+       kfree(ulist);
+}
+EXPORT_SYMBOL(ulist_free);
+
+/**
+ * ulist_add - add an element to the ulist
+ * @ulist:     ulist to add the element to
+ * @val:       value to add to ulist
+ * @aux:       auxiliary value to store along with val
+ * @gfp_mask:  flags to use for allocation
+ *
+ * Note: locking must be provided by the caller. In case of rwlocks, write
+ *       locking is needed.
+ *
+ * Add an element to a ulist. The @val will only be added if it doesn't
+ * already exist. If it is added, the auxiliary value @aux is stored along with
+ * it. In case @val already exists in the ulist, @aux is ignored, even if
+ * it differs from the already stored value.
+ *
+ * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
+ * inserted.
+ * In case of allocation failure -ENOMEM is returned and the ulist stays
+ * unaltered.
+ */
+int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
+             unsigned long gfp_mask)
+{
+       int i;
+
+       for (i = 0; i < ulist->nnodes; ++i) {
+               if (ulist->nodes[i].val == val)
+                       return 0;
+       }
+
+       if (ulist->nnodes >= ulist->nodes_alloced) {
+               u64 new_alloced = ulist->nodes_alloced + 128;
+               struct ulist_node *new_nodes;
+               void *old = NULL;
+
+               /*
+                * if nodes_alloced == ULIST_SIZE no memory has been allocated
+                * yet, so pass NULL to krealloc
+                */
+               if (ulist->nodes_alloced > ULIST_SIZE)
+                       old = ulist->nodes;
+
+               new_nodes = krealloc(old, sizeof(*new_nodes) * new_alloced,
+                                    gfp_mask);
+               if (!new_nodes)
+                       return -ENOMEM;
+
+               if (!old)
+                       memcpy(new_nodes, ulist->int_nodes,
+                              sizeof(ulist->int_nodes));
+
+               ulist->nodes = new_nodes;
+               ulist->nodes_alloced = new_alloced;
+       }
+       ulist->nodes[ulist->nnodes].val = val;
+       ulist->nodes[ulist->nnodes].aux = aux;
+       ++ulist->nnodes;
+
+       return 1;
+}
+EXPORT_SYMBOL(ulist_add);
+
+/**
+ * ulist_next - iterate ulist
+ * @ulist:     ulist to iterate
+ * @prev:      previously returned element or %NULL to start iteration
+ *
+ * Note: locking must be provided by the caller. In case of rwlocks, only
+ *       read locking is needed.
+ *
+ * This function is used to iterate a ulist. The iteration is started with
+ * @prev = %NULL. It returns the next element from the ulist or %NULL when the
+ * end is reached. No guarantee is made with respect to the order in which
+ * the elements are returned. They are returned neither in order of
+ * addition nor in ascending order.
+ * It is allowed to call ulist_add during an enumeration. Newly added items
+ * are guaranteed to show up in the running enumeration.
+ */
+struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev)
+{
+       int next;
+
+       if (ulist->nnodes == 0)
+               return NULL;
+
+       if (!prev)
+               return &ulist->nodes[0];
+
+       next = (prev - ulist->nodes) + 1;
+       if (next < 0 || next >= ulist->nnodes)
+               return NULL;
+
+       return &ulist->nodes[next];
+}
+EXPORT_SYMBOL(ulist_next);
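
The overview comment at the top of ulist.c gives the enumeration pattern only as pseudo-code. A concrete rendering of that loop, using nothing but the API added by this file, could look like the sketch below; visit() and for_each_child() are hypothetical stand-ins for the caller's per-node work and child enumeration, not part of the patch.

	/* Sketch only: walk a graph without visiting any node twice. */
	static int walk_graph_example(u64 root_node)
	{
		struct ulist *seen;
		struct ulist_node *elem = NULL;
		int ret;

		seen = ulist_alloc(GFP_NOFS);
		if (!seen)
			return -ENOMEM;

		ret = ulist_add(seen, root_node, 0, GFP_NOFS);
		if (ret < 0)
			goto out;

		while ((elem = ulist_next(seen, elem))) {
			u64 child;

			visit(elem->val, elem->aux);	/* hypothetical */

			/* hypothetical iterator; ulist_add returns 0 for dups */
			for_each_child(elem->val, child) {
				ret = ulist_add(seen, child, 0, GFP_NOFS);
				if (ret < 0)
					goto out;
			}
		}
		ret = 0;
	out:
		ulist_free(seen);
		return ret;
	}

Because new entries are appended, nodes added mid-iteration are picked up by the same loop, exactly as the ulist_next() comment guarantees.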
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
new file mode 100644 (file)
index 0000000..2e25dec
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011 STRATO AG
+ * written by Arne Jansen <sensille@gmx.net>
+ * Distributed under the GNU GPL license version 2.
+ *
+ */
+
+#ifndef __ULIST__
+#define __ULIST__
+
+/*
+ * ulist is a generic data structure to hold a collection of unique u64
+ * values. The only operations it supports are adding to the list and
+ * enumerating it.
+ * It is possible to store an auxiliary value along with the key.
+ *
+ * The implementation is preliminary and can probably be sped up
+ * significantly. A first step would be to store the values in an rbtree
+ * as soon as ULIST_SIZE is exceeded.
+ */
+
+/*
+ * number of elements statically allocated inside struct ulist
+ */
+#define ULIST_SIZE 16
+
+/*
+ * element of the list
+ */
+struct ulist_node {
+       u64 val;                /* value to store */
+       unsigned long aux;      /* auxiliary value saved along with the val */
+};
+
+struct ulist {
+       /*
+        * number of elements stored in list
+        */
+       unsigned long nnodes;
+
+       /*
+        * number of nodes we already have room for
+        */
+       unsigned long nodes_alloced;
+
+       /*
+        * pointer to the array storing the elements. The first ULIST_SIZE
+        * elements are stored inline. In this case it points to int_nodes.
+        * After exceeding ULIST_SIZE, dynamic memory is allocated.
+        */
+       struct ulist_node *nodes;
+
+       /*
+        * inline storage space for the first ULIST_SIZE entries
+        */
+       struct ulist_node int_nodes[ULIST_SIZE];
+};
+
+void ulist_init(struct ulist *ulist);
+void ulist_fini(struct ulist *ulist);
+void ulist_reinit(struct ulist *ulist);
+struct ulist *ulist_alloc(unsigned long gfp_mask);
+void ulist_free(struct ulist *ulist);
+int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
+             unsigned long gfp_mask);
+struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev);
+
+#endif
index f4b839fd3c9dd5cd854cb7bada4e3831d8ea1713..0b4e2af7954d3c209d8f1e581d4ee26c0cb60c2f 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/random.h>
 #include <linux/iocontext.h>
 #include <linux/capability.h>
+#include <linux/kthread.h>
 #include <asm/div64.h>
 #include "compat.h"
 #include "ctree.h"
@@ -32,6 +33,7 @@
 #include "print-tree.h"
 #include "volumes.h"
 #include "async-thread.h"
+#include "check-integrity.h"
 
 static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
@@ -246,7 +248,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
                        sync_pending = 0;
                }
 
-               submit_bio(cur->bi_rw, cur);
+               btrfsic_submit_bio(cur->bi_rw, cur);
                num_run++;
                batch_run++;
                if (need_resched())
@@ -706,8 +708,6 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
        u64 devid;
        u64 transid;
 
-       mutex_lock(&uuid_mutex);
-
        flags |= FMODE_EXCL;
        bdev = blkdev_get_by_path(path, flags, holder);
 
@@ -716,6 +716,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                goto error;
        }
 
+       mutex_lock(&uuid_mutex);
        ret = set_blocksize(bdev, 4096);
        if (ret)
                goto error_close;
@@ -737,9 +738,9 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 
        brelse(bh);
 error_close:
+       mutex_unlock(&uuid_mutex);
        blkdev_put(bdev, flags);
 error:
-       mutex_unlock(&uuid_mutex);
        return ret;
 }
 
@@ -829,7 +830,6 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 
 /*
  * find_free_dev_extent - find free space in the specified device
- * @trans:     transaction handler
  * @device:    the device which we search the free space in
  * @num_bytes: the size of the free space that we need
  * @start:     store the start of the free space.
@@ -848,8 +848,7 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
  * But if we don't find suitable free space, it is used to store the size of
  * the max free space.
  */
-int find_free_dev_extent(struct btrfs_trans_handle *trans,
-                        struct btrfs_device *device, u64 num_bytes,
+int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *len)
 {
        struct btrfs_key key;
@@ -893,7 +892,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;
 
-       ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
@@ -1282,7 +1281,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        bool clear_super = false;
 
        mutex_lock(&uuid_mutex);
-       mutex_lock(&root->fs_info->volume_mutex);
 
        all_avail = root->fs_info->avail_data_alloc_bits |
                root->fs_info->avail_system_alloc_bits |
@@ -1452,7 +1450,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        if (bdev)
                blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 out:
-       mutex_unlock(&root->fs_info->volume_mutex);
        mutex_unlock(&uuid_mutex);
        return ret;
 error_undo:
@@ -1469,8 +1466,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 /*
  * does all the dirty work required for changing file system's UUID.
  */
-static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
-                               struct btrfs_root *root)
+static int btrfs_prepare_sprout(struct btrfs_root *root)
 {
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        struct btrfs_fs_devices *old_devices;
@@ -1629,7 +1625,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        }
 
        filemap_write_and_wait(bdev->bd_inode->i_mapping);
-       mutex_lock(&root->fs_info->volume_mutex);
 
        devices = &root->fs_info->fs_devices->devices;
        /*
@@ -1695,7 +1690,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 
        if (seeding_dev) {
                sb->s_flags &= ~MS_RDONLY;
-               ret = btrfs_prepare_sprout(trans, root);
+               ret = btrfs_prepare_sprout(root);
                BUG_ON(ret);
        }
 
@@ -1757,8 +1752,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                ret = btrfs_relocate_sys_chunks(root);
                BUG_ON(ret);
        }
-out:
-       mutex_unlock(&root->fs_info->volume_mutex);
+
        return ret;
 error:
        blkdev_put(bdev, FMODE_EXCL);
@@ -1766,7 +1760,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                mutex_unlock(&uuid_mutex);
                up_write(&sb->s_umount);
        }
-       goto out;
+       return ret;
 }
 
 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
@@ -2077,6 +2071,362 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
        return ret;
 }
 
+static int insert_balance_item(struct btrfs_root *root,
+                              struct btrfs_balance_control *bctl)
+{
+       struct btrfs_trans_handle *trans;
+       struct btrfs_balance_item *item;
+       struct btrfs_disk_balance_args disk_bargs;
+       struct btrfs_path *path;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+       int ret, err;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans)) {
+               btrfs_free_path(path);
+               return PTR_ERR(trans);
+       }
+
+       key.objectid = BTRFS_BALANCE_OBJECTID;
+       key.type = BTRFS_BALANCE_ITEM_KEY;
+       key.offset = 0;
+
+       ret = btrfs_insert_empty_item(trans, root, path, &key,
+                                     sizeof(*item));
+       if (ret)
+               goto out;
+
+       leaf = path->nodes[0];
+       item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
+
+       memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
+
+       btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
+       btrfs_set_balance_data(leaf, item, &disk_bargs);
+       btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
+       btrfs_set_balance_meta(leaf, item, &disk_bargs);
+       btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
+       btrfs_set_balance_sys(leaf, item, &disk_bargs);
+
+       btrfs_set_balance_flags(leaf, item, bctl->flags);
+
+       btrfs_mark_buffer_dirty(leaf);
+out:
+       btrfs_free_path(path);
+       err = btrfs_commit_transaction(trans, root);
+       if (err && !ret)
+               ret = err;
+       return ret;
+}
+
+static int del_balance_item(struct btrfs_root *root)
+{
+       struct btrfs_trans_handle *trans;
+       struct btrfs_path *path;
+       struct btrfs_key key;
+       int ret, err;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans)) {
+               btrfs_free_path(path);
+               return PTR_ERR(trans);
+       }
+
+       key.objectid = BTRFS_BALANCE_OBJECTID;
+       key.type = BTRFS_BALANCE_ITEM_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+       if (ret < 0)
+               goto out;
+       if (ret > 0) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       ret = btrfs_del_item(trans, root, path);
+out:
+       btrfs_free_path(path);
+       err = btrfs_commit_transaction(trans, root);
+       if (err && !ret)
+               ret = err;
+       return ret;
+}
+
+/*
+ * This is a heuristic used to reduce the number of chunks balanced on
+ * resume after balance was interrupted.
+ */
+static void update_balance_args(struct btrfs_balance_control *bctl)
+{
+       /*
+        * Turn on soft mode for chunk types that were being converted.
+        */
+       if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
+               bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
+       if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
+               bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
+       if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
+               bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
+
+       /*
+        * Turn on the usage filter if it is not already in use.  The idea is
+        * that chunks that we have already balanced should be
+        * reasonably full.  Don't do it for chunks that are being
+        * converted - that will keep us from relocating unconverted
+        * (albeit full) chunks.
+        */
+       if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
+           !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
+               bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
+               bctl->data.usage = 90;
+       }
+       if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
+           !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
+               bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
+               bctl->sys.usage = 90;
+       }
+       if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
+           !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
+               bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
+               bctl->meta.usage = 90;
+       }
+}
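
To make the resume heuristic concrete, here is a hedged sketch (not part of the patch) of what update_balance_args() does to the data args of an interrupted convert-to-RAID1 balance:

	/* Sketch: effect of update_balance_args() on resume. */
	static void resume_args_example(struct btrfs_balance_control *bctl)
	{
		/* state saved on disk when the convert was interrupted */
		bctl->data.flags = BTRFS_BALANCE_ARGS_CONVERT;
		bctl->data.target = BTRFS_BLOCK_GROUP_RAID1;

		update_balance_args(bctl);
		/*
		 * Now data.flags == CONVERT | SOFT: chunks that already
		 * carry the RAID1 profile are skipped.  The usage filter
		 * is deliberately left off for converting chunks, so
		 * full-but-unconverted chunks still get relocated.
		 */
	}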
+
+/*
+ * Should be called with both balance and volume mutexes held to
+ * serialize other volume operations (add_dev/rm_dev/resize) with
+ * restriper.  Same goes for unset_balance_control.
+ */
+static void set_balance_control(struct btrfs_balance_control *bctl)
+{
+       struct btrfs_fs_info *fs_info = bctl->fs_info;
+
+       BUG_ON(fs_info->balance_ctl);
+
+       spin_lock(&fs_info->balance_lock);
+       fs_info->balance_ctl = bctl;
+       spin_unlock(&fs_info->balance_lock);
+}
+
+static void unset_balance_control(struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+
+       BUG_ON(!fs_info->balance_ctl);
+
+       spin_lock(&fs_info->balance_lock);
+       fs_info->balance_ctl = NULL;
+       spin_unlock(&fs_info->balance_lock);
+
+       kfree(bctl);
+}
+
+/*
+ * Balance filters.  Return 1 if chunk should be filtered out
+ * (should not be balanced).
+ */
+static int chunk_profiles_filter(u64 chunk_profile,
+                                struct btrfs_balance_args *bargs)
+{
+       chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+       if (chunk_profile == 0)
+               chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+       if (bargs->profiles & chunk_profile)
+               return 0;
+
+       return 1;
+}
+
+static u64 div_factor_fine(u64 num, int factor)
+{
+       if (factor <= 0)
+               return 0;
+       if (factor >= 100)
+               return num;
+
+       num *= factor;
+       do_div(num, 100);
+       return num;
+}
+
+static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+                             struct btrfs_balance_args *bargs)
+{
+       struct btrfs_block_group_cache *cache;
+       u64 chunk_used, user_thresh;
+       int ret = 1;
+
+       cache = btrfs_lookup_block_group(fs_info, chunk_offset);
+       chunk_used = btrfs_block_group_used(&cache->item);
+
+       user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
+       if (chunk_used < user_thresh)
+               ret = 0;
+
+       btrfs_put_block_group(cache);
+       return ret;
+}
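
A quick worked example of the threshold arithmetic (illustrative numbers, not from the patch): for a 1 GiB chunk with bargs->usage = 90, div_factor_fine() computes 1073741824 * 90 / 100 = 966367641 bytes, roughly 921.6 MiB.

	/* Sketch: only chunks used below the threshold get balanced. */
	static bool usage_example(u64 chunk_used)
	{
		u64 chunk_len = 1024ULL * 1024 * 1024;	/* cache->key.offset */
		u64 thresh = div_factor_fine(chunk_len, 90);

		return chunk_used < thresh;	/* true => relocate chunk */
	}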
+
+static int chunk_devid_filter(struct extent_buffer *leaf,
+                             struct btrfs_chunk *chunk,
+                             struct btrfs_balance_args *bargs)
+{
+       struct btrfs_stripe *stripe;
+       int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+       int i;
+
+       for (i = 0; i < num_stripes; i++) {
+               stripe = btrfs_stripe_nr(chunk, i);
+               if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
+                       return 0;
+       }
+
+       return 1;
+}
+
+/* [pstart, pend) */
+static int chunk_drange_filter(struct extent_buffer *leaf,
+                              struct btrfs_chunk *chunk,
+                              u64 chunk_offset,
+                              struct btrfs_balance_args *bargs)
+{
+       struct btrfs_stripe *stripe;
+       int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+       u64 stripe_offset;
+       u64 stripe_length;
+       int factor;
+       int i;
+
+       if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
+               return 0;
+
+       if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
+            BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
+               factor = 2;
+       else
+               factor = 1;
+       factor = num_stripes / factor;
+
+       for (i = 0; i < num_stripes; i++) {
+               stripe = btrfs_stripe_nr(chunk, i);
+               if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
+                       continue;
+
+               stripe_offset = btrfs_stripe_offset(leaf, stripe);
+               stripe_length = btrfs_chunk_length(leaf, chunk);
+               do_div(stripe_length, factor);
+
+               if (stripe_offset < bargs->pend &&
+                   stripe_offset + stripe_length > bargs->pstart)
+                       return 0;
+       }
+
+       return 1;
+}
+
+/* [vstart, vend) */
+static int chunk_vrange_filter(struct extent_buffer *leaf,
+                              struct btrfs_chunk *chunk,
+                              u64 chunk_offset,
+                              struct btrfs_balance_args *bargs)
+{
+       if (chunk_offset < bargs->vend &&
+           chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
+               /* at least part of the chunk is inside this vrange */
+               return 0;
+
+       return 1;
+}
+
+static int chunk_soft_convert_filter(u64 chunk_profile,
+                                    struct btrfs_balance_args *bargs)
+{
+       if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
+               return 0;
+
+       chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+       if (chunk_profile == 0)
+               chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+       if (bargs->target & chunk_profile)
+               return 1;
+
+       return 0;
+}
+
+static int should_balance_chunk(struct btrfs_root *root,
+                               struct extent_buffer *leaf,
+                               struct btrfs_chunk *chunk, u64 chunk_offset)
+{
+       struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
+       struct btrfs_balance_args *bargs = NULL;
+       u64 chunk_type = btrfs_chunk_type(leaf, chunk);
+
+       /* type filter */
+       if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
+             (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
+               return 0;
+       }
+
+       if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
+               bargs = &bctl->data;
+       else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
+               bargs = &bctl->sys;
+       else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
+               bargs = &bctl->meta;
+
+       /* profiles filter */
+       if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
+           chunk_profiles_filter(chunk_type, bargs)) {
+               return 0;
+       }
+
+       /* usage filter */
+       if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
+           chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
+               return 0;
+       }
+
+       /* devid filter */
+       if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
+           chunk_devid_filter(leaf, chunk, bargs)) {
+               return 0;
+       }
+
+       /* drange filter, makes sense only with devid filter */
+       if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
+           chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
+               return 0;
+       }
+
+       /* vrange filter */
+       if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
+           chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
+               return 0;
+       }
+
+       /* soft profile changing mode */
+       if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
+           chunk_soft_convert_filter(chunk_type, bargs)) {
+               return 0;
+       }
+
+       return 1;
+}
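
should_balance_chunk() effectively ANDs the filters: a chunk is balanced only if every enabled filter passes. As the inline comment notes, drange is meaningful only together with devid; without BTRFS_BALANCE_ARGS_DEVID set, chunk_drange_filter() is a no-op. A hedged sketch of args selecting data chunks that touch the first GiB of device 1 (field names from this patch, values purely illustrative):

	/* Sketch: balance data chunks overlapping [0, 1 GiB) of devid 1. */
	static void drange_example(struct btrfs_balance_control *bctl)
	{
		bctl->flags = BTRFS_BALANCE_DATA;
		bctl->data.flags = BTRFS_BALANCE_ARGS_DEVID |
				   BTRFS_BALANCE_ARGS_DRANGE;
		bctl->data.devid = 1;
		bctl->data.pstart = 0;		/* [pstart, pend) */
		bctl->data.pend = 1024ULL * 1024 * 1024;
	}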
+
 static u64 div_factor(u64 num, int factor)
 {
        if (factor == 10)
@@ -2086,29 +2436,28 @@ static u64 div_factor(u64 num, int factor)
        return num;
 }
 
-int btrfs_balance(struct btrfs_root *dev_root)
+static int __btrfs_balance(struct btrfs_fs_info *fs_info)
 {
-       int ret;
-       struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
+       struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+       struct btrfs_root *chunk_root = fs_info->chunk_root;
+       struct btrfs_root *dev_root = fs_info->dev_root;
+       struct list_head *devices;
        struct btrfs_device *device;
        u64 old_size;
        u64 size_to_free;
+       struct btrfs_chunk *chunk;
        struct btrfs_path *path;
        struct btrfs_key key;
-       struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
-       struct btrfs_trans_handle *trans;
        struct btrfs_key found_key;
-
-       if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
-               return -EROFS;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
-       mutex_lock(&dev_root->fs_info->volume_mutex);
-       dev_root = dev_root->fs_info->dev_root;
+       struct btrfs_trans_handle *trans;
+       struct extent_buffer *leaf;
+       int slot;
+       int ret;
+       int enospc_errors = 0;
+       bool counting = true;
 
        /* step one: make some room on all the devices */
+       devices = &fs_info->fs_devices->devices;
        list_for_each_entry(device, devices, dev_list) {
                old_size = device->total_bytes;
                size_to_free = div_factor(old_size, 1);
@@ -2137,11 +2486,23 @@ int btrfs_balance(struct btrfs_root *dev_root)
                ret = -ENOMEM;
                goto error;
        }
+
+       /* zero out stat counters */
+       spin_lock(&fs_info->balance_lock);
+       memset(&bctl->stat, 0, sizeof(bctl->stat));
+       spin_unlock(&fs_info->balance_lock);
+again:
        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;
 
        while (1) {
+               if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
+                   atomic_read(&fs_info->balance_cancel_req)) {
+                       ret = -ECANCELED;
+                       goto error;
+               }
+
                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
                if (ret < 0)
                        goto error;
@@ -2151,15 +2512,19 @@ int btrfs_balance(struct btrfs_root *dev_root)
                 * failed
                 */
                if (ret == 0)
-                       break;
+                       BUG(); /* FIXME break ? */
 
                ret = btrfs_previous_item(chunk_root, path, 0,
                                          BTRFS_CHUNK_ITEM_KEY);
-               if (ret)
+               if (ret) {
+                       ret = 0;
                        break;
+               }
+
+               leaf = path->nodes[0];
+               slot = path->slots[0];
+               btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
-               btrfs_item_key_to_cpu(path->nodes[0], &found_key,
-                                     path->slots[0]);
                if (found_key.objectid != key.objectid)
                        break;
 
@@ -2167,22 +2532,375 @@ int btrfs_balance(struct btrfs_root *dev_root)
                if (found_key.offset == 0)
                        break;
 
+               chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
+
+               if (!counting) {
+                       spin_lock(&fs_info->balance_lock);
+                       bctl->stat.considered++;
+                       spin_unlock(&fs_info->balance_lock);
+               }
+
+               ret = should_balance_chunk(chunk_root, leaf, chunk,
+                                          found_key.offset);
                btrfs_release_path(path);
+               if (!ret)
+                       goto loop;
+
+               if (counting) {
+                       spin_lock(&fs_info->balance_lock);
+                       bctl->stat.expected++;
+                       spin_unlock(&fs_info->balance_lock);
+                       goto loop;
+               }
+
                ret = btrfs_relocate_chunk(chunk_root,
                                           chunk_root->root_key.objectid,
                                           found_key.objectid,
                                           found_key.offset);
                if (ret && ret != -ENOSPC)
                        goto error;
+               if (ret == -ENOSPC) {
+                       enospc_errors++;
+               } else {
+                       spin_lock(&fs_info->balance_lock);
+                       bctl->stat.completed++;
+                       spin_unlock(&fs_info->balance_lock);
+               }
+loop:
                key.offset = found_key.offset - 1;
        }
-       ret = 0;
+
+       if (counting) {
+               btrfs_release_path(path);
+               counting = false;
+               goto again;
+       }
 error:
        btrfs_free_path(path);
-       mutex_unlock(&dev_root->fs_info->volume_mutex);
+       if (enospc_errors) {
+               printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
+                      enospc_errors);
+               if (!ret)
+                       ret = -ENOSPC;
+       }
+
        return ret;
 }
 
+static inline int balance_need_close(struct btrfs_fs_info *fs_info)
+{
+       /* cancel requested || normal exit path */
+       return atomic_read(&fs_info->balance_cancel_req) ||
+               (atomic_read(&fs_info->balance_pause_req) == 0 &&
+                atomic_read(&fs_info->balance_cancel_req) == 0);
+}
+
+static void __cancel_balance(struct btrfs_fs_info *fs_info)
+{
+       int ret;
+
+       unset_balance_control(fs_info);
+       ret = del_balance_item(fs_info->tree_root);
+       BUG_ON(ret);
+}
+
+void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
+                              struct btrfs_ioctl_balance_args *bargs);
+
+/*
+ * Should be called with both balance and volume mutexes held
+ */
+int btrfs_balance(struct btrfs_balance_control *bctl,
+                 struct btrfs_ioctl_balance_args *bargs)
+{
+       struct btrfs_fs_info *fs_info = bctl->fs_info;
+       u64 allowed;
+       int ret;
+
+       if (btrfs_fs_closing(fs_info) ||
+           atomic_read(&fs_info->balance_pause_req) ||
+           atomic_read(&fs_info->balance_cancel_req)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /*
+        * In case of mixed groups both data and meta should be picked,
+        * and identical options should be given for both of them.
+        */
+       allowed = btrfs_super_incompat_flags(fs_info->super_copy);
+       if ((allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
+           (bctl->flags & (BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA))) {
+               if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
+                   !(bctl->flags & BTRFS_BALANCE_METADATA) ||
+                   memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
+                       printk(KERN_ERR "btrfs: with mixed groups data and "
+                              "metadata balance options must be the same\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       /*
+        * Profile changing sanity checks.  Skip them if a simple
+        * balance is requested.
+        */
+       if (!((bctl->data.flags | bctl->sys.flags | bctl->meta.flags) &
+             BTRFS_BALANCE_ARGS_CONVERT))
+               goto do_balance;
+
+       allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+       if (fs_info->fs_devices->num_devices == 1)
+               allowed |= BTRFS_BLOCK_GROUP_DUP;
+       else if (fs_info->fs_devices->num_devices < 4)
+               allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
+       else
+               allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
+                               BTRFS_BLOCK_GROUP_RAID10);
+
+       if (!profile_is_valid(bctl->data.target, 1) ||
+           bctl->data.target & ~allowed) {
+               printk(KERN_ERR "btrfs: unable to start balance with target "
+                      "data profile %llu\n",
+                      (unsigned long long)bctl->data.target);
+               ret = -EINVAL;
+               goto out;
+       }
+       if (!profile_is_valid(bctl->meta.target, 1) ||
+           bctl->meta.target & ~allowed) {
+               printk(KERN_ERR "btrfs: unable to start balance with target "
+                      "metadata profile %llu\n",
+                      (unsigned long long)bctl->meta.target);
+               ret = -EINVAL;
+               goto out;
+       }
+       if (!profile_is_valid(bctl->sys.target, 1) ||
+           bctl->sys.target & ~allowed) {
+               printk(KERN_ERR "btrfs: unable to start balance with target "
+                      "system profile %llu\n",
+                      (unsigned long long)bctl->sys.target);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (bctl->data.target & BTRFS_BLOCK_GROUP_DUP) {
+               printk(KERN_ERR "btrfs: dup for data is not allowed\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* allow reducing meta or sys integrity only if force is set */
+       allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
+                       BTRFS_BLOCK_GROUP_RAID10;
+       if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+            (fs_info->avail_system_alloc_bits & allowed) &&
+            !(bctl->sys.target & allowed)) ||
+           ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+            (fs_info->avail_metadata_alloc_bits & allowed) &&
+            !(bctl->meta.target & allowed))) {
+               if (bctl->flags & BTRFS_BALANCE_FORCE) {
+                       printk(KERN_INFO "btrfs: force reducing metadata "
+                              "integrity\n");
+               } else {
+                       printk(KERN_ERR "btrfs: balance will reduce metadata "
+                              "integrity, use force if you want this\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+do_balance:
+       ret = insert_balance_item(fs_info->tree_root, bctl);
+       if (ret && ret != -EEXIST)
+               goto out;
+
+       if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
+               BUG_ON(ret == -EEXIST);
+               set_balance_control(bctl);
+       } else {
+               BUG_ON(ret != -EEXIST);
+               spin_lock(&fs_info->balance_lock);
+               update_balance_args(bctl);
+               spin_unlock(&fs_info->balance_lock);
+       }
+
+       atomic_inc(&fs_info->balance_running);
+       mutex_unlock(&fs_info->balance_mutex);
+
+       ret = __btrfs_balance(fs_info);
+
+       mutex_lock(&fs_info->balance_mutex);
+       atomic_dec(&fs_info->balance_running);
+
+       if (bargs) {
+               memset(bargs, 0, sizeof(*bargs));
+               update_ioctl_balance_args(fs_info, 0, bargs);
+       }
+
+       if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
+           balance_need_close(fs_info)) {
+               __cancel_balance(fs_info);
+       }
+
+       wake_up(&fs_info->balance_wait_q);
+
+       return ret;
+out:
+       if (bctl->flags & BTRFS_BALANCE_RESUME)
+               __cancel_balance(fs_info);
+       else
+               kfree(bctl);
+       return ret;
+}
+
+static int balance_kthread(void *data)
+{
+       struct btrfs_balance_control *bctl =
+                       (struct btrfs_balance_control *)data;
+       struct btrfs_fs_info *fs_info = bctl->fs_info;
+       int ret = 0;
+
+       mutex_lock(&fs_info->volume_mutex);
+       mutex_lock(&fs_info->balance_mutex);
+
+       set_balance_control(bctl);
+
+       if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
+               printk(KERN_INFO "btrfs: force skipping balance\n");
+       } else {
+               printk(KERN_INFO "btrfs: continuing balance\n");
+               ret = btrfs_balance(bctl, NULL);
+       }
+
+       mutex_unlock(&fs_info->balance_mutex);
+       mutex_unlock(&fs_info->volume_mutex);
+       return ret;
+}
+
+int btrfs_recover_balance(struct btrfs_root *tree_root)
+{
+       struct task_struct *tsk;
+       struct btrfs_balance_control *bctl;
+       struct btrfs_balance_item *item;
+       struct btrfs_disk_balance_args disk_bargs;
+       struct btrfs_path *path;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+       int ret;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
+       if (!bctl) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       key.objectid = BTRFS_BALANCE_OBJECTID;
+       key.type = BTRFS_BALANCE_ITEM_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+       if (ret < 0)
+               goto out_bctl;
+       if (ret > 0) { /* ret = -ENOENT; */
+               ret = 0;
+               goto out_bctl;
+       }
+
+       leaf = path->nodes[0];
+       item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
+
+       bctl->fs_info = tree_root->fs_info;
+       bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
+
+       btrfs_balance_data(leaf, item, &disk_bargs);
+       btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
+       btrfs_balance_meta(leaf, item, &disk_bargs);
+       btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
+       btrfs_balance_sys(leaf, item, &disk_bargs);
+       btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
+
+       tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
+       if (IS_ERR(tsk))
+               ret = PTR_ERR(tsk);
+       else
+               goto out;
+
+out_bctl:
+       kfree(bctl);
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
+int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
+{
+       int ret = 0;
+
+       mutex_lock(&fs_info->balance_mutex);
+       if (!fs_info->balance_ctl) {
+               mutex_unlock(&fs_info->balance_mutex);
+               return -ENOTCONN;
+       }
+
+       if (atomic_read(&fs_info->balance_running)) {
+               atomic_inc(&fs_info->balance_pause_req);
+               mutex_unlock(&fs_info->balance_mutex);
+
+               wait_event(fs_info->balance_wait_q,
+                          atomic_read(&fs_info->balance_running) == 0);
+
+               mutex_lock(&fs_info->balance_mutex);
+               /* we are good with balance_ctl ripped off from under us */
+               BUG_ON(atomic_read(&fs_info->balance_running));
+               atomic_dec(&fs_info->balance_pause_req);
+       } else {
+               ret = -ENOTCONN;
+       }
+
+       mutex_unlock(&fs_info->balance_mutex);
+       return ret;
+}
+
+int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
+{
+       mutex_lock(&fs_info->balance_mutex);
+       if (!fs_info->balance_ctl) {
+               mutex_unlock(&fs_info->balance_mutex);
+               return -ENOTCONN;
+       }
+
+       atomic_inc(&fs_info->balance_cancel_req);
+       /*
+        * if we are running, just wait and return; the balance item is
+        * deleted in btrfs_balance in this case
+        */
+       if (atomic_read(&fs_info->balance_running)) {
+               mutex_unlock(&fs_info->balance_mutex);
+               wait_event(fs_info->balance_wait_q,
+                          atomic_read(&fs_info->balance_running) == 0);
+               mutex_lock(&fs_info->balance_mutex);
+       } else {
+               /* __cancel_balance needs volume_mutex */
+               mutex_unlock(&fs_info->balance_mutex);
+               mutex_lock(&fs_info->volume_mutex);
+               mutex_lock(&fs_info->balance_mutex);
+
+               if (fs_info->balance_ctl)
+                       __cancel_balance(fs_info);
+
+               mutex_unlock(&fs_info->volume_mutex);
+       }
+
+       BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
+       atomic_dec(&fs_info->balance_cancel_req);
+       mutex_unlock(&fs_info->balance_mutex);
+       return 0;
+}
+
 /*
  * shrinking a device means finding all of the device extents past
  * the new size, and then following the back refs to the chunks.
@@ -2323,8 +3041,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
        return ret;
 }
 
-static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root,
+static int btrfs_add_system_chunk(struct btrfs_root *root,
                           struct btrfs_key *key,
                           struct btrfs_chunk *chunk, int item_size)
 {
@@ -2441,10 +3158,14 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                max_stripe_size = 1024 * 1024 * 1024;
                max_chunk_size = 10 * max_stripe_size;
        } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
-               max_stripe_size = 256 * 1024 * 1024;
+               /* for larger filesystems, use larger metadata chunks */
+               if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
+                       max_stripe_size = 1024 * 1024 * 1024;
+               else
+                       max_stripe_size = 256 * 1024 * 1024;
                max_chunk_size = max_stripe_size;
        } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
-               max_stripe_size = 8 * 1024 * 1024;
+               max_stripe_size = 32 * 1024 * 1024;
                max_chunk_size = 2 * max_stripe_size;
        } else {
                printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
@@ -2496,7 +3217,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                if (total_avail == 0)
                        continue;
 
-               ret = find_free_dev_extent(trans, device,
+               ret = find_free_dev_extent(device,
                                           max_stripe_size * dev_stripes,
                                           &dev_offset, &max_avail);
                if (ret && ret != -ENOSPC)
@@ -2687,7 +3408,7 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
        BUG_ON(ret);
 
        if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
-               ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
+               ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
                                             item_size);
                BUG_ON(ret);
        }
@@ -2752,8 +3473,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
                return ret;
 
        alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
-                       (fs_info->metadata_alloc_profile &
-                        fs_info->avail_metadata_alloc_bits);
+                               fs_info->avail_metadata_alloc_bits;
        alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
 
        ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
@@ -2763,8 +3483,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
        sys_chunk_offset = chunk_offset + chunk_size;
 
        alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
-                       (fs_info->system_alloc_profile &
-                        fs_info->avail_system_alloc_bits);
+                               fs_info->avail_system_alloc_bits;
        alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
 
        ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
@@ -2901,26 +3620,13 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
        u64 stripe_nr;
        u64 stripe_nr_orig;
        u64 stripe_nr_end;
-       int stripes_allocated = 8;
-       int stripes_required = 1;
        int stripe_index;
        int i;
+       int ret = 0;
        int num_stripes;
        int max_errors = 0;
        struct btrfs_bio *bbio = NULL;
 
-       if (bbio_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
-               stripes_allocated = 1;
-again:
-       if (bbio_ret) {
-               bbio = kzalloc(btrfs_bio_size(stripes_allocated),
-                               GFP_NOFS);
-               if (!bbio)
-                       return -ENOMEM;
-
-               atomic_set(&bbio->error, 0);
-       }
-
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, *length);
        read_unlock(&em_tree->lock);
@@ -2939,32 +3645,6 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
        if (mirror_num > map->num_stripes)
                mirror_num = 0;
 
-       /* if our btrfs_bio struct is too small, back off and try again */
-       if (rw & REQ_WRITE) {
-               if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
-                                BTRFS_BLOCK_GROUP_DUP)) {
-                       stripes_required = map->num_stripes;
-                       max_errors = 1;
-               } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
-                       stripes_required = map->sub_stripes;
-                       max_errors = 1;
-               }
-       }
-       if (rw & REQ_DISCARD) {
-               if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
-                                BTRFS_BLOCK_GROUP_RAID1 |
-                                BTRFS_BLOCK_GROUP_DUP |
-                                BTRFS_BLOCK_GROUP_RAID10)) {
-                       stripes_required = map->num_stripes;
-               }
-       }
-       if (bbio_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
-           stripes_allocated < stripes_required) {
-               stripes_allocated = map->num_stripes;
-               free_extent_map(em);
-               kfree(bbio);
-               goto again;
-       }
        stripe_nr = offset;
        /*
         * stripe_nr counts the total number of stripes we have to stride
@@ -2980,10 +3660,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 
        if (rw & REQ_DISCARD)
                *length = min_t(u64, em->len - offset, *length);
-       else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
-                             BTRFS_BLOCK_GROUP_RAID1 |
-                             BTRFS_BLOCK_GROUP_RAID10 |
-                             BTRFS_BLOCK_GROUP_DUP)) {
+       else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
                /* we limit the length of each bio to what fits in a stripe */
                *length = min_t(u64, em->len - offset,
                                map->stripe_len - stripe_offset);
@@ -3059,81 +3736,55 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
        }
        BUG_ON(stripe_index >= map->num_stripes);
 
+       bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
+       if (!bbio) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       atomic_set(&bbio->error, 0);
+
        if (rw & REQ_DISCARD) {
+               int factor = 0;
+               int sub_stripes = 0;
+               u64 stripes_per_dev = 0;
+               u32 remaining_stripes = 0;
+
+               if (map->type &
+                   (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
+                       if (map->type & BTRFS_BLOCK_GROUP_RAID0)
+                               sub_stripes = 1;
+                       else
+                               sub_stripes = map->sub_stripes;
+
+                       factor = map->num_stripes / sub_stripes;
+                       stripes_per_dev = div_u64_rem(stripe_nr_end -
+                                                     stripe_nr_orig,
+                                                     factor,
+                                                     &remaining_stripes);
+               }
+
                for (i = 0; i < num_stripes; i++) {
                        bbio->stripes[i].physical =
                                map->stripes[stripe_index].physical +
                                stripe_offset + stripe_nr * map->stripe_len;
                        bbio->stripes[i].dev = map->stripes[stripe_index].dev;
 
-                       if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
-                               u64 stripes;
-                               u32 last_stripe = 0;
-                               int j;
-
-                               div_u64_rem(stripe_nr_end - 1,
-                                           map->num_stripes,
-                                           &last_stripe);
-
-                               for (j = 0; j < map->num_stripes; j++) {
-                                       u32 test;
-
-                                       div_u64_rem(stripe_nr_end - 1 - j,
-                                                   map->num_stripes, &test);
-                                       if (test == stripe_index)
-                                               break;
-                               }
-                               stripes = stripe_nr_end - 1 - j;
-                               do_div(stripes, map->num_stripes);
-                               bbio->stripes[i].length = map->stripe_len *
-                                       (stripes - stripe_nr + 1);
-
-                               if (i == 0) {
-                                       bbio->stripes[i].length -=
-                                               stripe_offset;
-                                       stripe_offset = 0;
-                               }
-                               if (stripe_index == last_stripe)
-                                       bbio->stripes[i].length -=
-                                               stripe_end_offset;
-                       } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
-                               u64 stripes;
-                               int j;
-                               int factor = map->num_stripes /
-                                            map->sub_stripes;
-                               u32 last_stripe = 0;
-
-                               div_u64_rem(stripe_nr_end - 1,
-                                           factor, &last_stripe);
-                               last_stripe *= map->sub_stripes;
-
-                               for (j = 0; j < factor; j++) {
-                                       u32 test;
-
-                                       div_u64_rem(stripe_nr_end - 1 - j,
-                                                   factor, &test);
-
-                                       if (test ==
-                                           stripe_index / map->sub_stripes)
-                                               break;
-                               }
-                               stripes = stripe_nr_end - 1 - j;
-                               do_div(stripes, factor);
-                               bbio->stripes[i].length = map->stripe_len *
-                                       (stripes - stripe_nr + 1);
-
-                               if (i < map->sub_stripes) {
+                       if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+                                        BTRFS_BLOCK_GROUP_RAID10)) {
+                               bbio->stripes[i].length = stripes_per_dev *
+                                                         map->stripe_len;
+                               if (i / sub_stripes < remaining_stripes)
+                                       bbio->stripes[i].length +=
+                                               map->stripe_len;
+                               if (i < sub_stripes)
                                        bbio->stripes[i].length -=
                                                stripe_offset;
-                                       if (i == map->sub_stripes - 1)
-                                               stripe_offset = 0;
-                               }
-                               if (stripe_index >= last_stripe &&
-                                   stripe_index <= (last_stripe +
-                                                    map->sub_stripes - 1)) {
+                               if ((i / sub_stripes + 1) %
+                                   sub_stripes == remaining_stripes)
                                        bbio->stripes[i].length -=
                                                stripe_end_offset;
-                               }
+                               if (i == sub_stripes - 1)
+                                       stripe_offset = 0;
                        } else
                                bbio->stripes[i].length = *length;
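
The rewritten discard splitting above replaces the old per-stripe search loops with a single div_u64_rem(), the u64-by-u32 division helper from linux/math64.h that returns the quotient and stores the remainder. With illustrative numbers, the arithmetic works out as in this sketch:

	/* Sketch: RAID10, num_stripes = 4, sub_stripes = 2 => factor = 2. */
	static void discard_split_example(void)
	{
		u32 remaining_stripes;
		u64 stripes_per_dev;

		/* discard covers stripe numbers [10, 17), i.e. 7 stripes */
		stripes_per_dev = div_u64_rem(17 - 10, 2, &remaining_stripes);

		/*
		 * stripes_per_dev == 3, remaining_stripes == 1: each mirror
		 * pair discards 3 * stripe_len, and pairs with
		 * i / sub_stripes < remaining_stripes get one extra
		 * stripe_len.
		 */
	}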
 
@@ -3155,15 +3806,22 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                        stripe_index++;
                }
        }
-       if (bbio_ret) {
-               *bbio_ret = bbio;
-               bbio->num_stripes = num_stripes;
-               bbio->max_errors = max_errors;
-               bbio->mirror_num = mirror_num;
+
+       if (rw & REQ_WRITE) {
+               if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
+                                BTRFS_BLOCK_GROUP_RAID10 |
+                                BTRFS_BLOCK_GROUP_DUP)) {
+                       max_errors = 1;
+               }
        }
+
+       *bbio_ret = bbio;
+       bbio->num_stripes = num_stripes;
+       bbio->max_errors = max_errors;
+       bbio->mirror_num = mirror_num;
 out:
        free_extent_map(em);
-       return 0;
+       return ret;
 }
 
 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
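Aside on the arithmetic in the rewritten RAID0/RAID10 branch above: every striped device receives stripes_per_dev whole stripes, the first remaining_stripes devices carry one extra stripe, and the new max_errors logic tolerates one failed copy when writing to mirrored profiles. A minimal user-space sketch of the length distribution, with made-up geometry (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long long stripe_len = 64 * 1024;  /* bytes per stripe, made up */
	unsigned long long nr_stripes = 10;         /* stripes in the discard range */
	unsigned int nr_devs = 4;                   /* striped devices */

	/* mirrors the div_u64()/div_u64_rem() split in the patch */
	unsigned long long stripes_per_dev = nr_stripes / nr_devs;
	unsigned int remaining_stripes = nr_stripes % nr_devs;

	for (unsigned int i = 0; i < nr_devs; i++) {
		unsigned long long len = stripes_per_dev * stripe_len;

		/* the first "remainder" devices carry one extra stripe */
		if (i < remaining_stripes)
			len += stripe_len;
		printf("dev %u: %llu bytes\n", i, len);
	}
	return 0;
}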
@@ -3304,7 +3962,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
        /* don't bother with additional async steps for reads, right now */
        if (!(rw & REQ_WRITE)) {
                bio_get(bio);
-               submit_bio(rw, bio);
+               btrfsic_submit_bio(rw, bio);
                bio_put(bio);
                return 0;
        }
@@ -3399,7 +4057,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                        if (async_submit)
                                schedule_bio(root, dev, rw, bio);
                        else
-                               submit_bio(rw, bio);
+                               btrfsic_submit_bio(rw, bio);
                } else {
                        bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
                        bio->bi_sector = logical >> 9;
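Both submission paths now go through btrfsic_submit_bio(), the integrity checker's entry point, so each bio can be verified before it reaches the block layer; with the checker compiled out the call is expected to collapse to a plain submit_bio(). A compilable user-space sketch of that compile-time wrapper pattern (names and behavior illustrative only, not the btrfsic implementation):

#include <stdio.h>

struct bio { long sector; };

static void submit_bio(int rw, struct bio *bio)
{
	printf("submit rw=%d sector=%ld\n", rw, bio->sector);
}

#ifdef CHECK_INTEGRITY
/* checker enabled: validate, then hand off unchanged */
static void checked_submit_bio(int rw, struct bio *bio)
{
	printf("verify sector=%ld\n", bio->sector);
	submit_bio(rw, bio);
}
#else
/* checker compiled out: the wrapper costs nothing */
#define checked_submit_bio submit_bio
#endif

int main(void)
{
	struct bio b = { .sector = 42 };
	checked_submit_bio(1, &b);   /* build with -DCHECK_INTEGRITY to verify */
	return 0;
}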
@@ -3568,7 +4226,7 @@ static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
        struct btrfs_fs_devices *fs_devices;
        int ret;
 
-       mutex_lock(&uuid_mutex);
+       BUG_ON(!mutex_is_locked(&uuid_mutex));
 
        fs_devices = root->fs_info->fs_devices->seed;
        while (fs_devices) {
@@ -3606,7 +4264,6 @@ static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
        fs_devices->seed = root->fs_info->fs_devices->seed;
        root->fs_info->fs_devices->seed = fs_devices;
 out:
-       mutex_unlock(&uuid_mutex);
        return ret;
 }
 
@@ -3749,6 +4406,9 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
        if (!path)
                return -ENOMEM;
 
+       mutex_lock(&uuid_mutex);
+       lock_chunks(root);
+
        /* first we search for all of the device items, and then we
         * read in all of the chunk items.  This way we can create chunk
        * mappings that reference all of the devices that are found
@@ -3799,6 +4459,9 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
        }
        ret = 0;
 error:
+       unlock_chunks(root);
+       mutex_unlock(&uuid_mutex);
+
        btrfs_free_path(path);
        return ret;
 }
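The locking change in these hunks moves ownership of uuid_mutex (and the chunk lock) out of open_seed_devices() and into btrfs_read_chunk_tree(); the callee now merely asserts that the lock is held. A small pthread analogue of that caller-locks/callee-asserts convention (illustrative only; the kernel asserts with BUG_ON and mutex_is_locked()):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t uuid_mutex = PTHREAD_MUTEX_INITIALIZER;

/* callee: requires the caller to hold uuid_mutex for the whole call */
static void open_seed_devices_locked(void)
{
	printf("scanning seed devices under the caller's lock\n");
}

/* caller: one lock/unlock pair brackets the entire chunk-tree read */
static void read_chunk_tree(void)
{
	pthread_mutex_lock(&uuid_mutex);
	open_seed_devices_locked();
	pthread_mutex_unlock(&uuid_mutex);
}

int main(void)
{
	read_chunk_tree();
	return 0;
}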
index 78f2d4d4f37fe81317395688a8b090b71e53a612..19ac95048b88596e44b6dd667b050fb796ab20e7 100644 (file)
@@ -186,6 +186,51 @@ struct map_lookup {
 #define map_lookup_size(n) (sizeof(struct map_lookup) + \
                            (sizeof(struct btrfs_bio_stripe) * (n)))
 
+/*
+ * Restriper's general type filter
+ */
+#define BTRFS_BALANCE_DATA             (1ULL << 0)
+#define BTRFS_BALANCE_SYSTEM           (1ULL << 1)
+#define BTRFS_BALANCE_METADATA         (1ULL << 2)
+
+#define BTRFS_BALANCE_TYPE_MASK                (BTRFS_BALANCE_DATA |       \
+                                        BTRFS_BALANCE_SYSTEM |     \
+                                        BTRFS_BALANCE_METADATA)
+
+#define BTRFS_BALANCE_FORCE            (1ULL << 3)
+#define BTRFS_BALANCE_RESUME           (1ULL << 4)
+
+/*
+ * Balance filters
+ */
+#define BTRFS_BALANCE_ARGS_PROFILES    (1ULL << 0)
+#define BTRFS_BALANCE_ARGS_USAGE       (1ULL << 1)
+#define BTRFS_BALANCE_ARGS_DEVID       (1ULL << 2)
+#define BTRFS_BALANCE_ARGS_DRANGE      (1ULL << 3)
+#define BTRFS_BALANCE_ARGS_VRANGE      (1ULL << 4)
+
+/*
+ * Profile changing flags.  When SOFT is set we won't relocate a chunk if
+ * it already has the target profile (even though it may be
+ * half-filled).
+ */
+#define BTRFS_BALANCE_ARGS_CONVERT     (1ULL << 8)
+#define BTRFS_BALANCE_ARGS_SOFT                (1ULL << 9)
+
+struct btrfs_balance_args;
+struct btrfs_balance_progress;
+struct btrfs_balance_control {
+       struct btrfs_fs_info *fs_info;
+
+       struct btrfs_balance_args data;
+       struct btrfs_balance_args meta;
+       struct btrfs_balance_args sys;
+
+       u64 flags;
+
+       struct btrfs_balance_progress stat;
+};
+
 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
                                   u64 end, u64 *length);
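Since these are plain u64 bitmasks, a balance request is just an OR of the type filters and modifier bits, tested with &. A toy user-space illustration (constant names shortened here; the real ones are the BTRFS_BALANCE_* macros above):

#include <stdint.h>
#include <stdio.h>

#define BAL_DATA     (1ULL << 0)
#define BAL_SYSTEM   (1ULL << 1)
#define BAL_METADATA (1ULL << 2)
#define BAL_FORCE    (1ULL << 3)

int main(void)
{
	/* relocate data and metadata chunks, forcing profile conversion */
	uint64_t flags = BAL_DATA | BAL_METADATA | BAL_FORCE;

	if (flags & BAL_DATA)
		printf("data chunks selected\n");
	if (!(flags & BAL_SYSTEM))
		printf("system chunks left alone\n");
	return 0;
}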
 
@@ -228,9 +273,12 @@ struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
                                       u8 *uuid, u8 *fsid);
 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
 int btrfs_init_new_device(struct btrfs_root *root, char *path);
-int btrfs_balance(struct btrfs_root *dev_root);
+int btrfs_balance(struct btrfs_balance_control *bctl,
+                 struct btrfs_ioctl_balance_args *bargs);
+int btrfs_recover_balance(struct btrfs_root *tree_root);
+int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
+int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
-int find_free_dev_extent(struct btrfs_trans_handle *trans,
-                        struct btrfs_device *device, u64 num_bytes,
+int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *max_avail);
 #endif
index 3848b04e310e4800f6768160c6ef5111734ad1d5..e7a5659087e66f93769bc750562d21294c9bd2b6 100644 (file)
@@ -200,7 +200,7 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
        ret = btrfs_update_inode(trans, root, inode);
        BUG_ON(ret);
 out:
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
        return ret;
 }
 
index c283a1ec008ee3cd40bb7a6bb106ac5df9f52f41..208c6aa4a989dade864a0ca6fcd3bd4b21ba8252 100644 (file)
@@ -140,21 +140,19 @@ static int do_getname(const char __user *filename, char *page)
 
 static char *getname_flags(const char __user *filename, int flags, int *empty)
 {
-       char *tmp, *result;
-
-       result = ERR_PTR(-ENOMEM);
-       tmp = __getname();
-       if (tmp)  {
-               int retval = do_getname(filename, tmp);
-
-               result = tmp;
-               if (retval < 0) {
-                       if (retval == -ENOENT && empty)
-                               *empty = 1;
-                       if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) {
-                               __putname(tmp);
-                               result = ERR_PTR(retval);
-                       }
+       char *result = __getname();
+       int retval;
+
+       if (!result)
+               return ERR_PTR(-ENOMEM);
+
+       retval = do_getname(filename, result);
+       if (retval < 0) {
+               if (retval == -ENOENT && empty)
+                       *empty = 1;
+               if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) {
+                       __putname(result);
+                       return ERR_PTR(retval);
                }
        }
        audit_getname(result);
index 5485a5388ecb2919ecb5033cbf99d1d22916a72b..9cde9edf9c4d40a4a3c20e3fb1c3758518092f98 100644 (file)
@@ -198,65 +198,7 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
        return result;
 }
 
-static struct mm_struct *__check_mem_permission(struct task_struct *task)
-{
-       struct mm_struct *mm;
-
-       mm = get_task_mm(task);
-       if (!mm)
-               return ERR_PTR(-EINVAL);
-
-       /*
-        * A task can always look at itself, in case it chooses
-        * to use system calls instead of load instructions.
-        */
-       if (task == current)
-               return mm;
-
-       /*
-        * If current is actively ptrace'ing, and would also be
-        * permitted to freshly attach with ptrace now, permit it.
-        */
-       if (task_is_stopped_or_traced(task)) {
-               int match;
-               rcu_read_lock();
-               match = (ptrace_parent(task) == current);
-               rcu_read_unlock();
-               if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
-                       return mm;
-       }
-
-       /*
-        * No one else is allowed.
-        */
-       mmput(mm);
-       return ERR_PTR(-EPERM);
-}
-
-/*
- * If current may access user memory in @task return a reference to the
- * corresponding mm, otherwise ERR_PTR.
- */
-static struct mm_struct *check_mem_permission(struct task_struct *task)
-{
-       struct mm_struct *mm;
-       int err;
-
-       /*
-        * Avoid racing if task exec's as we might get a new mm but validate
-        * against old credentials.
-        */
-       err = mutex_lock_killable(&task->signal->cred_guard_mutex);
-       if (err)
-               return ERR_PTR(err);
-
-       mm = __check_mem_permission(task);
-       mutex_unlock(&task->signal->cred_guard_mutex);
-
-       return mm;
-}
-
-struct mm_struct *mm_for_maps(struct task_struct *task)
+static struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
 {
        struct mm_struct *mm;
        int err;
@@ -267,7 +209,7 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
 
        mm = get_task_mm(task);
        if (mm && mm != current->mm &&
-                       !ptrace_may_access(task, PTRACE_MODE_READ)) {
+                       !ptrace_may_access(task, mode)) {
                mmput(mm);
                mm = ERR_PTR(-EACCES);
        }
@@ -276,6 +218,11 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
        return mm;
 }
 
+struct mm_struct *mm_for_maps(struct task_struct *task)
+{
+       return mm_access(task, PTRACE_MODE_READ);
+}
+
 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
 {
        int res = 0;
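mm_access() folds the old check_mem_permission()/mm_for_maps() pair into one helper whose required ptrace mode is a parameter, so /proc map readers ask for PTRACE_MODE_READ while /proc/pid/mem asks for PTRACE_MODE_ATTACH. A schematic user-space sketch of that parameterized-permission shape (types and checks are stand-ins, not the kernel's):

#include <errno.h>
#include <stdio.h>

enum access_mode { MODE_READ = 1, MODE_ATTACH = 2 };

/* stand-in for mm_access(): one code path, the privilege needed varies */
static int mm_access(int caller_level, enum access_mode mode)
{
	if (caller_level < (int)mode)
		return -EACCES;
	return 0;   /* the kernel returns the pinned mm here */
}

/* the old entry point survives as a thin wrapper */
static int mm_for_maps(int caller_level)
{
	return mm_access(caller_level, MODE_READ);
}

int main(void)
{
	printf("maps as reader: %d\n", mm_for_maps(MODE_READ));          /* 0 */
	printf("mem as reader:  %d\n", mm_access(MODE_READ, MODE_ATTACH)); /* -EACCES */
	return 0;
}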
@@ -752,38 +699,39 @@ static const struct file_operations proc_single_file_operations = {
 
 static int mem_open(struct inode* inode, struct file* file)
 {
-       file->private_data = (void*)((long)current->self_exec_id);
+       struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+       struct mm_struct *mm;
+
+       if (!task)
+               return -ESRCH;
+
+       mm = mm_access(task, PTRACE_MODE_ATTACH);
+       put_task_struct(task);
+
+       if (IS_ERR(mm))
+               return PTR_ERR(mm);
+
        /* OK to pass negative loff_t, we can catch out-of-range */
        file->f_mode |= FMODE_UNSIGNED_OFFSET;
+       file->private_data = mm;
+
        return 0;
 }
 
 static ssize_t mem_read(struct file * file, char __user * buf,
                        size_t count, loff_t *ppos)
 {
-       struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+       int ret;
        char *page;
        unsigned long src = *ppos;
-       int ret = -ESRCH;
-       struct mm_struct *mm;
+       struct mm_struct *mm = file->private_data;
 
-       if (!task)
-               goto out_no_task;
+       if (!mm)
+               return 0;
 
-       ret = -ENOMEM;
        page = (char *)__get_free_page(GFP_TEMPORARY);
        if (!page)
-               goto out;
-
-       mm = check_mem_permission(task);
-       ret = PTR_ERR(mm);
-       if (IS_ERR(mm))
-               goto out_free;
-
-       ret = -EIO;
-       if (file->private_data != (void*)((long)current->self_exec_id))
-               goto out_put;
+               return -ENOMEM;
 
        ret = 0;
  
@@ -810,13 +758,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
        }
        *ppos = src;
 
-out_put:
-       mmput(mm);
-out_free:
        free_page((unsigned long) page);
-out:
-       put_task_struct(task);
-out_no_task:
        return ret;
 }
 
@@ -825,27 +767,15 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
 {
        int copied;
        char *page;
-       struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
        unsigned long dst = *ppos;
-       struct mm_struct *mm;
+       struct mm_struct *mm = file->private_data;
 
-       copied = -ESRCH;
-       if (!task)
-               goto out_no_task;
+       if (!mm)
+               return 0;
 
-       copied = -ENOMEM;
        page = (char *)__get_free_page(GFP_TEMPORARY);
        if (!page)
-               goto out_task;
-
-       mm = check_mem_permission(task);
-       copied = PTR_ERR(mm);
-       if (IS_ERR(mm))
-               goto out_free;
-
-       copied = -EIO;
-       if (file->private_data != (void *)((long)current->self_exec_id))
-               goto out_mm;
+               return -ENOMEM;
 
        copied = 0;
        while (count > 0) {
@@ -869,13 +799,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
        }
        *ppos = dst;
 
-out_mm:
-       mmput(mm);
-out_free:
        free_page((unsigned long) page);
-out_task:
-       put_task_struct(task);
-out_no_task:
        return copied;
 }
 
@@ -895,11 +819,20 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
        return file->f_pos;
 }
 
+static int mem_release(struct inode *inode, struct file *file)
+{
+       struct mm_struct *mm = file->private_data;
+
+       mmput(mm);
+       return 0;
+}
+
 static const struct file_operations proc_mem_operations = {
        .llseek         = mem_lseek,
        .read           = mem_read,
        .write          = mem_write,
        .open           = mem_open,
+       .release        = mem_release,
 };
 
 static ssize_t environ_read(struct file *file, char __user *buf,
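The net effect of the /proc/pid/mem rework is a classic open-pins/release-drops lifetime: the permission check and the mm reference are taken once in mem_open(), the mm rides in file->private_data across reads and writes, and mem_release() does the final mmput. A compilable toy model of that reference flow (counts and names invented for illustration):

#include <stdio.h>

struct mm { int users; };

static struct mm *mmget(struct mm *mm) { mm->users++; return mm; }
static void mmput(struct mm *mm) { mm->users--; }

static struct mm target = { .users = 1 };  /* owner's own reference */

int main(void)
{
	/* mem_open: check permission once, pin the mm in private_data */
	struct mm *priv = mmget(&target);
	printf("users while file is open: %d\n", priv->users);   /* 2 */

	/* mem_read/mem_write: use priv directly, no per-call rechecks */

	/* mem_release: drop the pin taken at open */
	mmput(priv);
	printf("users after release: %d\n", target.users);       /* 1 */
	return 0;
}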
@@ -1199,9 +1132,6 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
        ssize_t length;
        uid_t loginuid;
 
-       if (!capable(CAP_AUDIT_CONTROL))
-               return -EPERM;
-
        rcu_read_lock();
        if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
                rcu_read_unlock();
@@ -1230,7 +1160,7 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
                goto out_free_page;
 
        }
-       length = audit_set_loginuid(current, loginuid);
+       length = audit_set_loginuid(loginuid);
        if (likely(length == 0))
                length = count;
 
index 574d4ee9b6253ea3d589f23288aa7385907642bb..74b9baf36ac39038f827c8e262ea62aa33d81de8 100644 (file)
@@ -111,8 +111,7 @@ xfs_ioend_new_eof(
        xfs_fsize_t             bsize;
 
        bsize = ioend->io_offset + ioend->io_size;
-       isize = MAX(ip->i_size, ip->i_new_size);
-       isize = MIN(isize, bsize);
+       isize = MIN(i_size_read(VFS_I(ip)), bsize);
        return isize > ip->i_d.di_size ? isize : 0;
 }
 
@@ -126,11 +125,7 @@ static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
 }
 
 /*
- * Update on-disk file size now that data has been written to disk.  The
- * current in-memory file size is i_size.  If a write is beyond eof i_new_size
- * will be the intended file size until i_size is updated.  If this write does
- * not extend all the way to the valid file size then restrict this update to
- * the end of the write.
+ * Update on-disk file size now that data has been written to disk.
  *
  * This function does not block as blocking on the inode lock in IO completion
  * can lead to IO completion order dependency deadlocks. If it can't get the
@@ -1278,6 +1273,15 @@ xfs_end_io_direct_write(
 {
        struct xfs_ioend        *ioend = iocb->private;
 
+       /*
+        * While the generic direct I/O code updates the inode size, it does
+        * so only after the end_io handler is called, which means our
+        * end_io handler thinks the on-disk size is outside the in-core
+        * size.  To prevent this just update it a little bit earlier here.
+        */
+       if (offset + size > i_size_read(ioend->io_inode))
+               i_size_write(ioend->io_inode, offset + size);
+
        /*
         * blockdev_direct_IO can return an error even after the I/O
         * completion handler was called.  Thus we need to protect
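The comment added here is the whole story: generic direct I/O updates i_size only after the end_io handler has run, so the handler bumps the in-core size itself before comparing against the on-disk size. A two-line model of that ordering fix (sizes invented):

#include <stdio.h>

static long long in_core_size = 4096;   /* i_size before the write */

static void end_io_direct_write(long long offset, long long size)
{
	/* mirror the patch: grow the in-core size first ... */
	if (offset + size > in_core_size)
		in_core_size = offset + size;

	/* ... so the on-disk size logic sees the extended file */
	printf("end_io sees in-core size %lld\n", in_core_size);
}

int main(void)
{
	end_io_direct_write(4096, 8192);   /* an extending direct write */
	return 0;
}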
@@ -1340,12 +1344,11 @@ xfs_vm_write_failed(
 
        if (to > inode->i_size) {
                /*
-                * punch out the delalloc blocks we have already allocated. We
-                * don't call xfs_setattr() to do this as we may be in the
-                * middle of a multi-iovec write and so the vfs inode->i_size
-                * will not match the xfs ip->i_size and so it will zero too
-                * much. Hence we jus truncate the page cache to zero what is
-                * necessary and punch the delalloc blocks directly.
+                * Punch out the delalloc blocks we have already allocated.
+                *
+                * Don't bother with xfs_setattr given that nothing can have
+                * made it to disk yet as the page is still locked at this
+                * point.
                 */
                struct xfs_inode        *ip = XFS_I(inode);
                xfs_fileoff_t           start_fsb;
index 1e5d97f86ea819493b9acfd1584696fc7ce5d733..08b9ac644c3140f6f6cc007f6f3eab05331ff35b 100644 (file)
@@ -827,10 +827,6 @@ xfs_attr_inactive(xfs_inode_t *dp)
        if (error)
                goto out;
 
-       /*
-        * Commit the last in the sequence of transactions.
-        */
-       xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
        error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
        xfs_iunlock(dp, XFS_ILOCK_EXCL);
 
index c1b55e5965517a9407f678610b62f29fdabf33b3..d25eafd4d28de31d326078dbfdc21140c69b5acb 100644 (file)
@@ -271,10 +271,6 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
        dp = args->dp;
        mp = dp->i_mount;
        dp->i_d.di_forkoff = forkoff;
-       dp->i_df.if_ext_max =
-               XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
-       dp->i_afp->if_ext_max =
-               XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
 
        ifp = dp->i_afp;
        ASSERT(ifp->if_flags & XFS_IFINLINE);
@@ -326,7 +322,6 @@ xfs_attr_fork_reset(
        ASSERT(ip->i_d.di_anextents == 0);
        ASSERT(ip->i_afp == NULL);
 
-       ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 }
 
@@ -389,10 +384,6 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
                                (args->op_flags & XFS_DA_OP_ADDNAME) ||
                                !(mp->m_flags & XFS_MOUNT_ATTR2) ||
                                dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
-               dp->i_afp->if_ext_max =
-                       XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
-               dp->i_df.if_ext_max =
-                       XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
                xfs_trans_log_inode(args->trans, dp,
                                        XFS_ILOG_CORE | XFS_ILOG_ADATA);
        }
index d0ab78837057815f17605150d31a633c2eeb2739..188ef2fbd62880614a29ea0432e20707d5cf45a2 100644 (file)
@@ -249,7 +249,27 @@ xfs_bmbt_lookup_ge(
 }
 
 /*
-* Update the record referred to by cur to the value given
+ * Check if the inode needs to be converted to btree format.
+ */
+static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
+{
+       return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
+               XFS_IFORK_NEXTENTS(ip, whichfork) >
+                       XFS_IFORK_MAXEXT(ip, whichfork);
+}
+
+/*
+ * Check if the inode should be converted to extent format.
+ */
+static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
+{
+       return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
+               XFS_IFORK_NEXTENTS(ip, whichfork) <=
+                       XFS_IFORK_MAXEXT(ip, whichfork);
+}
+
+/*
+ * Update the record referred to by cur to the value given
  * by [off, bno, len, state].
  * This either works (return 0) or gets an EFSCORRUPTED error.
  */
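These two predicates centralize the conversion policy the rest of the file previously open-coded: an extents-format fork converts to a btree only once it exceeds XFS_IFORK_MAXEXT, and a btree converts back only at or below that bound. A self-contained sketch of the same hysteresis (toy types, made-up limit):

#include <stdbool.h>
#include <stdio.h>

enum fmt { FMT_EXTENTS, FMT_BTREE };
struct fork { enum fmt format; int nextents; int maxext; };

static bool needs_btree(const struct fork *f)
{
	return f->format == FMT_EXTENTS && f->nextents > f->maxext;
}

static bool wants_extents(const struct fork *f)
{
	return f->format == FMT_BTREE && f->nextents <= f->maxext;
}

int main(void)
{
	struct fork f = { FMT_EXTENTS, 10, 9 };
	printf("convert up:   %d\n", needs_btree(&f));    /* 1: 10 > 9 */

	f.format = FMT_BTREE;
	f.nextents = 9;
	printf("convert down: %d\n", wants_extents(&f));  /* 1: 9 <= 9 */
	return 0;
}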
@@ -683,8 +703,8 @@ xfs_bmap_add_extent_delay_real(
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(i == 1, done);
                }
-               if (bma->ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-                   bma->ip->i_d.di_nextents > bma->ip->i_df.if_ext_max) {
+
+               if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
                        error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
                                        bma->firstblock, bma->flist,
                                        &bma->cur, 1, &tmp_rval, XFS_DATA_FORK);
@@ -767,8 +787,8 @@ xfs_bmap_add_extent_delay_real(
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(i == 1, done);
                }
-               if (bma->ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-                   bma->ip->i_d.di_nextents > bma->ip->i_df.if_ext_max) {
+
+               if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
                        error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
                                bma->firstblock, bma->flist, &bma->cur, 1,
                                &tmp_rval, XFS_DATA_FORK);
@@ -836,8 +856,8 @@ xfs_bmap_add_extent_delay_real(
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(i == 1, done);
                }
-               if (bma->ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-                   bma->ip->i_d.di_nextents > bma->ip->i_df.if_ext_max) {
+
+               if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
                        error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
                                        bma->firstblock, bma->flist, &bma->cur,
                                        1, &tmp_rval, XFS_DATA_FORK);
@@ -884,8 +904,7 @@ xfs_bmap_add_extent_delay_real(
        }
 
        /* convert to a btree if necessary */
-       if (XFS_IFORK_FORMAT(bma->ip, XFS_DATA_FORK) == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(bma->ip, XFS_DATA_FORK) > ifp->if_ext_max) {
+       if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
                int     tmp_logflags;   /* partial log flag return val */
 
                ASSERT(bma->cur == NULL);
@@ -1421,8 +1440,7 @@ xfs_bmap_add_extent_unwritten_real(
        }
 
        /* convert to a btree if necessary */
-       if (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > ifp->if_ext_max) {
+       if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
                int     tmp_logflags;   /* partial log flag return val */
 
                ASSERT(cur == NULL);
@@ -1812,8 +1830,7 @@ xfs_bmap_add_extent_hole_real(
        }
 
        /* convert to a btree if necessary */
-       if (XFS_IFORK_FORMAT(bma->ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(bma->ip, whichfork) > ifp->if_ext_max) {
+       if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
                int     tmp_logflags;   /* partial log flag return val */
 
                ASSERT(bma->cur == NULL);
@@ -3037,8 +3054,7 @@ xfs_bmap_extents_to_btree(
 
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+
        /*
         * Make space in the inode incore.
         */
@@ -3184,13 +3200,8 @@ xfs_bmap_forkoff_reset(
            ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
                uint    dfl_forkoff = xfs_default_attroffset(ip) >> 3;
 
-               if (dfl_forkoff > ip->i_d.di_forkoff) {
+               if (dfl_forkoff > ip->i_d.di_forkoff)
                        ip->i_d.di_forkoff = dfl_forkoff;
-                       ip->i_df.if_ext_max =
-                               XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
-                       ip->i_afp->if_ext_max =
-                               XFS_IFORK_ASIZE(ip) / sizeof(xfs_bmbt_rec_t);
-               }
        }
 }
 
@@ -3430,8 +3441,6 @@ xfs_bmap_add_attrfork(
        int                     error;          /* error return value */
 
        ASSERT(XFS_IFORK_Q(ip) == 0);
-       ASSERT(ip->i_df.if_ext_max ==
-              XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
 
        mp = ip->i_mount;
        ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
@@ -3486,12 +3495,9 @@ xfs_bmap_add_attrfork(
                error = XFS_ERROR(EINVAL);
                goto error1;
        }
-       ip->i_df.if_ext_max =
-               XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
+
        ASSERT(ip->i_afp == NULL);
        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
-       ip->i_afp->if_ext_max =
-               XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
        ip->i_afp->if_flags = XFS_IFEXTENTS;
        logflags = 0;
        xfs_bmap_init(&flist, &firstblock);
@@ -3535,20 +3541,17 @@ xfs_bmap_add_attrfork(
                } else
                        spin_unlock(&mp->m_sb_lock);
        }
-       if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
+
+       error = xfs_bmap_finish(&tp, &flist, &committed);
+       if (error)
                goto error2;
-       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-       ASSERT(ip->i_df.if_ext_max ==
-              XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
-       return error;
+       return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 error2:
        xfs_bmap_cancel(&flist);
 error1:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
 error0:
        xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
-       ASSERT(ip->i_df.if_ext_max ==
-              XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
        return error;
 }
 
@@ -3994,11 +3997,8 @@ xfs_bmap_one_block(
        xfs_bmbt_irec_t s;              /* internal version of extent */
 
 #ifndef DEBUG
-       if (whichfork == XFS_DATA_FORK) {
-               return S_ISREG(ip->i_d.di_mode) ?
-                       (ip->i_size == ip->i_mount->m_sb.sb_blocksize) :
-                       (ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize);
-       }
+       if (whichfork == XFS_DATA_FORK)
+               return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
 #endif /* !DEBUG */
        if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
                return 0;
@@ -4010,7 +4010,7 @@ xfs_bmap_one_block(
        xfs_bmbt_get_all(ep, &s);
        rval = s.br_startoff == 0 && s.br_blockcount == 1;
        if (rval && whichfork == XFS_DATA_FORK)
-               ASSERT(ip->i_size == ip->i_mount->m_sb.sb_blocksize);
+               ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
        return rval;
 }
 
@@ -4379,8 +4379,6 @@ xfs_bmapi_read(
        XFS_STATS_INC(xs_blk_mapr);
 
        ifp = XFS_IFORK_PTR(ip, whichfork);
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
 
        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(NULL, ip, whichfork);
@@ -4871,8 +4869,6 @@ xfs_bmapi_write(
                return XFS_ERROR(EIO);
 
        ifp = XFS_IFORK_PTR(ip, whichfork);
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
 
        XFS_STATS_INC(xs_blk_mapw);
 
@@ -4981,8 +4977,7 @@ xfs_bmapi_write(
        /*
         * Transform from btree to extents, give it cur.
         */
-       if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
-           XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
+       if (xfs_bmap_wants_extents(ip, whichfork)) {
                int             tmp_logflags = 0;
 
                ASSERT(bma.cur);
@@ -4992,10 +4987,10 @@ xfs_bmapi_write(
                if (error)
                        goto error0;
        }
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
-              XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
+              XFS_IFORK_NEXTENTS(ip, whichfork) >
+               XFS_IFORK_MAXEXT(ip, whichfork));
        error = 0;
 error0:
        /*
@@ -5095,8 +5090,7 @@ xfs_bunmapi(
 
        ASSERT(len > 0);
        ASSERT(nexts >= 0);
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+
        if (!(ifp->if_flags & XFS_IFEXTENTS) &&
            (error = xfs_iread_extents(tp, ip, whichfork)))
                return error;
@@ -5322,7 +5316,8 @@ xfs_bunmapi(
                 */
                if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
                    XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
-                   XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max &&
+                   XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
+                       XFS_IFORK_MAXEXT(ip, whichfork) &&
                    del.br_startoff > got.br_startoff &&
                    del.br_startoff + del.br_blockcount <
                    got.br_startoff + got.br_blockcount) {
@@ -5353,13 +5348,11 @@ xfs_bunmapi(
                }
        }
        *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+
        /*
         * Convert to a btree if necessary.
         */
-       if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
+       if (xfs_bmap_needs_btree(ip, whichfork)) {
                ASSERT(cur == NULL);
                error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
                        &cur, 0, &tmp_logflags, whichfork);
@@ -5370,8 +5363,7 @@ xfs_bunmapi(
        /*
         * transform from btree to extents, give it cur
         */
-       else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
-                XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
+       else if (xfs_bmap_wants_extents(ip, whichfork)) {
                ASSERT(cur != NULL);
                error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
                        whichfork);
@@ -5382,8 +5374,6 @@ xfs_bunmapi(
        /*
         * transform from extents to local?
         */
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
        error = 0;
 error0:
        /*
@@ -5434,7 +5424,7 @@ xfs_getbmapx_fix_eof_hole(
        if (startblock == HOLESTARTBLOCK) {
                mp = ip->i_mount;
                out->bmv_block = -1;
-               fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, ip->i_size));
+               fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
                fixlen -= out->bmv_offset;
                if (prealloced && out->bmv_offset + out->bmv_length == end) {
                        /* Came to hole at EOF. Trim it. */
@@ -5522,7 +5512,7 @@ xfs_getbmap(
                        fixlen = XFS_MAXIOFFSET(mp);
                } else {
                        prealloced = 0;
-                       fixlen = ip->i_size;
+                       fixlen = XFS_ISIZE(ip);
                }
        }
 
@@ -5551,7 +5541,7 @@ xfs_getbmap(
 
        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
-               if (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size) {
+               if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
                        error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF);
                        if (error)
                                goto out_unlock_iolock;
index 654dc6f05bac7781f288f63fbcb1a5a970b06faa..dd974a55c77daee6de56a44c527e871d7cfe7fca 100644 (file)
@@ -163,12 +163,14 @@ xfs_swap_extents_check_format(
 
        /* Check temp in extent form to max in target */
        if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) > ip->i_df.if_ext_max)
+           XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
+                       XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
                return EINVAL;
 
        /* Check target in extent form to max in temp */
        if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > tip->i_df.if_ext_max)
+           XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
+                       XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
                return EINVAL;
 
        /*
@@ -180,18 +182,25 @@ xfs_swap_extents_check_format(
         * (a common defrag case) which will occur when the temp inode is in
         * extent format...
         */
-       if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
-           ((XFS_IFORK_BOFF(ip) &&
-             tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip)) ||
-            XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <= ip->i_df.if_ext_max))
-               return EINVAL;
+       if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+               if (XFS_IFORK_BOFF(ip) &&
+                   tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip))
+                       return EINVAL;
+               if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
+                   XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
+                       return EINVAL;
+       }
 
        /* Reciprocal target->temp btree format checks */
-       if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
-           ((XFS_IFORK_BOFF(tip) &&
-             ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip)) ||
-            XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <= tip->i_df.if_ext_max))
-               return EINVAL;
+       if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+               if (XFS_IFORK_BOFF(tip) &&
+                   ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip))
+                       return EINVAL;
+
+               if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
+                   XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
+                       return EINVAL;
+       }
 
        return 0;
 }
@@ -348,16 +357,6 @@ xfs_swap_extents(
        *ifp = *tifp;           /* struct copy */
        *tifp = *tempifp;       /* struct copy */
 
-       /*
-        * Fix the in-memory data fork values that are dependent on the fork
-        * offset in the inode. We can't assume they remain the same as attr2
-        * has dynamic fork offsets.
-        */
-       ifp->if_ext_max = XFS_IFORK_SIZE(ip, XFS_DATA_FORK) /
-                                       (uint)sizeof(xfs_bmbt_rec_t);
-       tifp->if_ext_max = XFS_IFORK_SIZE(tip, XFS_DATA_FORK) /
-                                       (uint)sizeof(xfs_bmbt_rec_t);
-
        /*
         * Fix the on-disk inode values
         */
index f675f3d9d7b3b3693cfabc0437d52ca31a4f598a..7e5bc872f2b4fb12d67f3da3796f3c5b86ac162c 100644 (file)
@@ -327,7 +327,7 @@ xfs_file_aio_read(
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((iocb->ki_pos & target->bt_smask) ||
                    (size & target->bt_smask)) {
-                       if (iocb->ki_pos == ip->i_size)
+                       if (iocb->ki_pos == i_size_read(inode))
                                return 0;
                        return -XFS_ERROR(EINVAL);
                }
@@ -412,51 +412,6 @@ xfs_file_splice_read(
        return ret;
 }
 
-STATIC void
-xfs_aio_write_isize_update(
-       struct inode    *inode,
-       loff_t          *ppos,
-       ssize_t         bytes_written)
-{
-       struct xfs_inode        *ip = XFS_I(inode);
-       xfs_fsize_t             isize = i_size_read(inode);
-
-       if (bytes_written > 0)
-               XFS_STATS_ADD(xs_write_bytes, bytes_written);
-
-       if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
-                                       *ppos > isize))
-               *ppos = isize;
-
-       if (*ppos > ip->i_size) {
-               xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
-               if (*ppos > ip->i_size)
-                       ip->i_size = *ppos;
-               xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
-       }
-}
-
-/*
- * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
- * part of the I/O may have been written to disk before the error occurred.  In
- * this case the on-disk file size may have been adjusted beyond the in-memory
- * file size and now needs to be truncated back.
- */
-STATIC void
-xfs_aio_write_newsize_update(
-       struct xfs_inode        *ip,
-       xfs_fsize_t             new_size)
-{
-       if (new_size == ip->i_new_size) {
-               xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
-               if (new_size == ip->i_new_size)
-                       ip->i_new_size = 0;
-               if (ip->i_d.di_size > ip->i_size)
-                       ip->i_d.di_size = ip->i_size;
-               xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
-       }
-}
-
 /*
  * xfs_file_splice_write() does not use xfs_rw_ilock() because
  * generic_file_splice_write() takes the i_mutex itself. This, in theory,
@@ -475,7 +430,6 @@ xfs_file_splice_write(
 {
        struct inode            *inode = outfilp->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
-       xfs_fsize_t             new_size;
        int                     ioflags = 0;
        ssize_t                 ret;
 
@@ -489,19 +443,12 @@ xfs_file_splice_write(
 
        xfs_ilock(ip, XFS_IOLOCK_EXCL);
 
-       new_size = *ppos + count;
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       if (new_size > ip->i_size)
-               ip->i_new_size = new_size;
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
        trace_xfs_file_splice_write(ip, count, *ppos, ioflags);
 
        ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
+       if (ret > 0)
+               XFS_STATS_ADD(xs_write_bytes, ret);
 
-       xfs_aio_write_isize_update(inode, ppos, ret);
-       xfs_aio_write_newsize_update(ip, new_size);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return ret;
 }
@@ -689,28 +636,26 @@ xfs_zero_eof(
 /*
  * Common pre-write limit and setup checks.
  *
- * Returns with iolock held according to @iolock.
+ * Called with the iolock held either shared or exclusive according to
+ * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
+ * if called for a direct write beyond i_size.
  */
 STATIC ssize_t
 xfs_file_aio_write_checks(
        struct file             *file,
        loff_t                  *pos,
        size_t                  *count,
-       xfs_fsize_t             *new_sizep,
        int                     *iolock)
 {
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
-       xfs_fsize_t             new_size;
        int                     error = 0;
 
        xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
-       *new_sizep = 0;
 restart:
        error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
        if (error) {
-               xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
-               *iolock = 0;
+               xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
                return error;
        }
 
@@ -720,36 +665,21 @@ xfs_file_aio_write_checks(
        /*
         * If the offset is beyond the size of the file, we need to zero any
         * blocks that fall between the existing EOF and the start of this
-        * write. There is no need to issue zeroing if another in-flght IO ends
-        * at or before this one If zeronig is needed and we are currently
-        * holding the iolock shared, we need to update it to exclusive which
-        * involves dropping all locks and relocking to maintain correct locking
-        * order. If we do this, restart the function to ensure all checks and
-        * values are still valid.
+        * write.  If zeroing is needed and we are currently holding the
+        * iolock shared, we need to update it to exclusive which involves
+        * dropping all locks and relocking to maintain correct locking order.
+        * If we do this, restart the function to ensure all checks and values
+        * are still valid.
         */
-       if ((ip->i_new_size && *pos > ip->i_new_size) ||
-           (!ip->i_new_size && *pos > ip->i_size)) {
+       if (*pos > i_size_read(inode)) {
                if (*iolock == XFS_IOLOCK_SHARED) {
                        xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
                        *iolock = XFS_IOLOCK_EXCL;
                        xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
                        goto restart;
                }
-               error = -xfs_zero_eof(ip, *pos, ip->i_size);
+               error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
        }
-
-       /*
-        * If this IO extends beyond EOF, we may need to update ip->i_new_size.
-        * We have already zeroed space beyond EOF (if necessary).  Only update
-        * ip->i_new_size if this IO ends beyond any other in-flight writes.
-        */
-       new_size = *pos + *count;
-       if (new_size > ip->i_size) {
-               if (new_size > ip->i_new_size)
-                       ip->i_new_size = new_size;
-               *new_sizep = new_size;
-       }
-
        xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
        if (error)
                return error;
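The restart loop above is the standard shared-to-exclusive upgrade dance: a holder of the shared iolock that discovers it must zero beyond EOF drops every lock, retakes them exclusively in the correct order, and then redoes all checks because the file may have changed in the window. A pthread rwlock analogue of that loop (simplified; the real code also juggles the ilock):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t iolock = PTHREAD_RWLOCK_INITIALIZER;

/* enters holding iolock per *shared; returns still holding it */
static void write_checks(int *shared, int need_zeroing)
{
restart:
	if (need_zeroing && *shared) {
		/* cannot upgrade in place: drop, retake exclusive, recheck */
		pthread_rwlock_unlock(&iolock);
		pthread_rwlock_wrlock(&iolock);
		*shared = 0;
		goto restart;   /* sizes may have moved while unlocked */
	}
	printf("checks done, lock %s\n", *shared ? "shared" : "exclusive");
}

int main(void)
{
	int shared = 1;
	pthread_rwlock_rdlock(&iolock);
	write_checks(&shared, 1);
	pthread_rwlock_unlock(&iolock);
	return 0;
}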
@@ -794,9 +724,7 @@ xfs_file_dio_aio_write(
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos,
-       size_t                  ocount,
-       xfs_fsize_t             *new_size,
-       int                     *iolock)
+       size_t                  ocount)
 {
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
@@ -806,10 +734,10 @@ xfs_file_dio_aio_write(
        ssize_t                 ret = 0;
        size_t                  count = ocount;
        int                     unaligned_io = 0;
+       int                     iolock;
        struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
                                        mp->m_rtdev_targp : mp->m_ddev_targp;
 
-       *iolock = 0;
        if ((pos & target->bt_smask) || (count & target->bt_smask))
                return -XFS_ERROR(EINVAL);
 
@@ -824,31 +752,31 @@ xfs_file_dio_aio_write(
         * EOF zeroing cases and fill out the new inode size as appropriate.
         */
        if (unaligned_io || mapping->nrpages)
-               *iolock = XFS_IOLOCK_EXCL;
+               iolock = XFS_IOLOCK_EXCL;
        else
-               *iolock = XFS_IOLOCK_SHARED;
-       xfs_rw_ilock(ip, *iolock);
+               iolock = XFS_IOLOCK_SHARED;
+       xfs_rw_ilock(ip, iolock);
 
        /*
         * Recheck if there are cached pages that need invalidate after we got
         * the iolock to protect against other threads adding new pages while
         * we were waiting for the iolock.
         */
-       if (mapping->nrpages && *iolock == XFS_IOLOCK_SHARED) {
-               xfs_rw_iunlock(ip, *iolock);
-               *iolock = XFS_IOLOCK_EXCL;
-               xfs_rw_ilock(ip, *iolock);
+       if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
+               xfs_rw_iunlock(ip, iolock);
+               iolock = XFS_IOLOCK_EXCL;
+               xfs_rw_ilock(ip, iolock);
        }
 
-       ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock);
+       ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
        if (ret)
-               return ret;
+               goto out;
 
        if (mapping->nrpages) {
                ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
                                                        FI_REMAPF_LOCKED);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        /*
@@ -857,15 +785,18 @@ xfs_file_dio_aio_write(
         */
        if (unaligned_io)
                inode_dio_wait(inode);
-       else if (*iolock == XFS_IOLOCK_EXCL) {
+       else if (iolock == XFS_IOLOCK_EXCL) {
                xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
-               *iolock = XFS_IOLOCK_SHARED;
+               iolock = XFS_IOLOCK_SHARED;
        }
 
        trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
        ret = generic_file_direct_write(iocb, iovp,
                        &nr_segs, pos, &iocb->ki_pos, count, ocount);
 
+out:
+       xfs_rw_iunlock(ip, iolock);
+
        /* No fallback to buffered IO on errors for XFS. */
        ASSERT(ret < 0 || ret == count);
        return ret;
@@ -877,9 +808,7 @@ xfs_file_buffered_aio_write(
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos,
-       size_t                  ocount,
-       xfs_fsize_t             *new_size,
-       int                     *iolock)
+       size_t                  ocount)
 {
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
@@ -887,14 +816,14 @@ xfs_file_buffered_aio_write(
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
        int                     enospc = 0;
+       int                     iolock = XFS_IOLOCK_EXCL;
        size_t                  count = ocount;
 
-       *iolock = XFS_IOLOCK_EXCL;
-       xfs_rw_ilock(ip, *iolock);
+       xfs_rw_ilock(ip, iolock);
 
-       ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock);
+       ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
        if (ret)
-               return ret;
+               goto out;
 
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;
@@ -908,13 +837,15 @@ xfs_file_buffered_aio_write(
         * page locks and retry *once*
         */
        if (ret == -ENOSPC && !enospc) {
-               ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
-               if (ret)
-                       return ret;
                enospc = 1;
-               goto write_retry;
+               ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
+               if (!ret)
+                       goto write_retry;
        }
+
        current->backing_dev_info = NULL;
+out:
+       xfs_rw_iunlock(ip, iolock);
        return ret;
 }
 
@@ -930,9 +861,7 @@ xfs_file_aio_write(
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
-       int                     iolock;
        size_t                  ocount = 0;
-       xfs_fsize_t             new_size = 0;
 
        XFS_STATS_INC(xs_write_calls);
 
@@ -951,33 +880,22 @@ xfs_file_aio_write(
                return -EIO;
 
        if (unlikely(file->f_flags & O_DIRECT))
-               ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
-                                               ocount, &new_size, &iolock);
+               ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
        else
                ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
-                                               ocount, &new_size, &iolock);
-
-       xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);
+                                                 ocount);
 
-       if (ret <= 0)
-               goto out_unlock;
+       if (ret > 0) {
+               ssize_t err;
 
-       /* Handle various SYNC-type writes */
-       if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
-               loff_t end = pos + ret - 1;
-               int error;
+               XFS_STATS_ADD(xs_write_bytes, ret);
 
-               xfs_rw_iunlock(ip, iolock);
-               error = xfs_file_fsync(file, pos, end,
-                                     (file->f_flags & __O_SYNC) ? 0 : 1);
-               xfs_rw_ilock(ip, iolock);
-               if (error)
-                       ret = error;
+               /* Handle various SYNC-type writes */
+               err = generic_write_sync(file, pos, ret);
+               if (err < 0)
+                       ret = err;
        }
 
-out_unlock:
-       xfs_aio_write_newsize_update(ip, new_size);
-       xfs_rw_iunlock(ip, iolock);
        return ret;
 }
 
index ed88ed16811c6ba33c1d211b3195bd1dcea18484..652b875a9d4c441bd3c759df185c153dfee0e8f5 100644 (file)
@@ -90,7 +90,7 @@ xfs_wait_on_pages(
 
        if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
                return -filemap_fdatawait_range(mapping, first,
-                                       last == -1 ? ip->i_size - 1 : last);
+                                       last == -1 ? XFS_ISIZE(ip) - 1 : last);
        }
        return 0;
 }
index 3960a066d7ffcb06a02aadaa34d874a92c0b643b..8c3e46394d484c3fbd798913c6862d90b3f820e7 100644 (file)
@@ -77,7 +77,7 @@ xfs_inode_alloc(
 
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!spin_is_locked(&ip->i_flags_lock));
-       ASSERT(completion_done(&ip->i_flush));
+       ASSERT(!xfs_isiflocked(ip));
        ASSERT(ip->i_ino == 0);
 
        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
@@ -94,8 +94,6 @@ xfs_inode_alloc(
        ip->i_update_core = 0;
        ip->i_delayed_blks = 0;
        memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
-       ip->i_size = 0;
-       ip->i_new_size = 0;
 
        return ip;
 }
@@ -150,7 +148,7 @@ xfs_inode_free(
        /* asserts to verify all state is correct here */
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!spin_is_locked(&ip->i_flags_lock));
-       ASSERT(completion_done(&ip->i_flush));
+       ASSERT(!xfs_isiflocked(ip));
 
        /*
         * Because we use RCU freeing we need to ensure the inode always
@@ -450,8 +448,6 @@ xfs_iget(
 
        *ipp = ip;
 
-       ASSERT(ip->i_df.if_ext_max ==
-              XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
        /*
         * If we have a real type for an on-disk inode, we can set ops(&unlock)
         * now.  If it's a new inode being created, xfs_ialloc will handle it.
@@ -715,3 +711,19 @@ xfs_isilocked(
        return 0;
 }
 #endif
+
+void
+__xfs_iflock(
+       struct xfs_inode        *ip)
+{
+       wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
+       DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
+
+       do {
+               prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+               if (xfs_isiflocked(ip))
+                       io_schedule();
+       } while (!xfs_iflock_nowait(ip));
+
+       finish_wait(wq, &wait.wait);
+}
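__xfs_iflock() is the open-coded sleeping half of a bit lock: waiters queue exclusively on the flag bit, sleep while someone else holds it, and loop on the trylock until they win. The closest portable analogue uses a mutex/condvar pair (sketch only; the kernel version sleeps on a bit waitqueue, not a condvar):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int iflocked;             /* stands in for __XFS_IFLOCK_BIT */

static void iflock(void)         /* analogue of __xfs_iflock() */
{
	pthread_mutex_lock(&lock);
	while (iflocked)         /* kernel: io_schedule(), then retry */
		pthread_cond_wait(&cond, &lock);
	iflocked = 1;
	pthread_mutex_unlock(&lock);
}

static void ifunlock(void)       /* analogue of the unlock side */
{
	pthread_mutex_lock(&lock);
	iflocked = 0;
	pthread_cond_signal(&cond);  /* exclusive waiters: wake just one */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	iflock();
	printf("flush lock held\n");
	ifunlock();
	return 0;
}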
index 9dda7cc328485014eb86baf48a013f3833faf27f..b21022499c2e8f302699f80ca2af344301fee941 100644 (file)
@@ -299,11 +299,8 @@ xfs_iformat(
 {
        xfs_attr_shortform_t    *atp;
        int                     size;
-       int                     error;
+       int                     error = 0;
        xfs_fsize_t             di_size;
-       ip->i_df.if_ext_max =
-               XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
-       error = 0;
 
        if (unlikely(be32_to_cpu(dip->di_nextents) +
                     be16_to_cpu(dip->di_anextents) >
@@ -350,7 +347,6 @@ xfs_iformat(
                        return XFS_ERROR(EFSCORRUPTED);
                }
                ip->i_d.di_size = 0;
-               ip->i_size = 0;
                ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
                break;
 
@@ -409,10 +405,10 @@ xfs_iformat(
        }
        if (!XFS_DFORK_Q(dip))
                return 0;
+
        ASSERT(ip->i_afp == NULL);
        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
-       ip->i_afp->if_ext_max =
-               XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
+
        switch (dip->di_aformat) {
        case XFS_DINODE_FMT_LOCAL:
                atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
@@ -604,10 +600,11 @@ xfs_iformat_btree(
         * or the number of extents is greater than the number of
         * blocks.
         */
-       if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
-           || XFS_BMDR_SPACE_CALC(nrecs) >
-                       XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
-           || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
+       if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
+                       XFS_IFORK_MAXEXT(ip, whichfork) ||
+                    XFS_BMDR_SPACE_CALC(nrecs) >
+                       XFS_DFORK_SIZE(dip, ip->i_mount, whichfork) ||
+                    XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
                xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
                        (unsigned long long) ip->i_ino);
                XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
@@ -835,12 +832,6 @@ xfs_iread(
                 * with the uninitialized part of it.
                 */
                ip->i_d.di_mode = 0;
-               /*
-                * Initialize the per-fork minima and maxima for a new
-                * inode here.  xfs_iformat will do it for old inodes.
-                */
-               ip->i_df.if_ext_max =
-                       XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
        }
 
        /*
@@ -861,7 +852,6 @@ xfs_iread(
        }
 
        ip->i_delayed_blks = 0;
-       ip->i_size = ip->i_d.di_size;
 
        /*
         * Mark the buffer containing the inode as something to keep
@@ -1051,7 +1041,6 @@ xfs_ialloc(
        }
 
        ip->i_d.di_size = 0;
-       ip->i_size = 0;
        ip->i_d.di_nextents = 0;
        ASSERT(ip->i_d.di_nblocks == 0);
 
@@ -1165,52 +1154,6 @@ xfs_ialloc(
        return 0;
 }
 
-/*
- * Check to make sure that there are no blocks allocated to the
- * file beyond the size of the file.  We don't check this for
- * files with fixed size extents or real time extents, but we
- * at least do it for regular files.
- */
-#ifdef DEBUG
-STATIC void
-xfs_isize_check(
-       struct xfs_inode        *ip,
-       xfs_fsize_t             isize)
-{
-       struct xfs_mount        *mp = ip->i_mount;
-       xfs_fileoff_t           map_first;
-       int                     nimaps;
-       xfs_bmbt_irec_t         imaps[2];
-       int                     error;
-
-       if (!S_ISREG(ip->i_d.di_mode))
-               return;
-
-       if (XFS_IS_REALTIME_INODE(ip))
-               return;
-
-       if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
-               return;
-
-       nimaps = 2;
-       map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
-       /*
-        * The filesystem could be shutting down, so bmapi may return
-        * an error.
-        */
-       error = xfs_bmapi_read(ip, map_first,
-                        (XFS_B_TO_FSB(mp,
-                              (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) - map_first),
-                        imaps, &nimaps, XFS_BMAPI_ENTIRE);
-       if (error)
-               return;
-       ASSERT(nimaps == 1);
-       ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
-}
-#else  /* DEBUG */
-#define xfs_isize_check(ip, isize)
-#endif /* DEBUG */
-
 /*
  * Free up the underlying blocks past new_size.  The new size must be smaller
  * than the current size.  This routine can be used both for the attribute and
@@ -1252,12 +1195,14 @@ xfs_itruncate_extents(
        int                     done = 0;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
-       ASSERT(new_size <= ip->i_size);
+       ASSERT(new_size <= XFS_ISIZE(ip));
        ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
        ASSERT(ip->i_itemp != NULL);
        ASSERT(ip->i_itemp->ili_lock_flags == 0);
        ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
 
+       trace_xfs_itruncate_extents_start(ip, new_size);
+
        /*
         * Since it is possible for space to become allocated beyond
         * the end of the file (in a crash where the space is allocated
@@ -1325,6 +1270,14 @@ xfs_itruncate_extents(
                        goto out;
        }
 
+       /*
+        * Always re-log the inode so that our permanent transaction can keep
+        * on rolling it forward in the log.
+        */
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       trace_xfs_itruncate_extents_end(ip, new_size);
+
 out:
        *tpp = tp;
        return error;
@@ -1338,74 +1291,6 @@ xfs_itruncate_extents(
        goto out;
 }
 
-int
-xfs_itruncate_data(
-       struct xfs_trans        **tpp,
-       struct xfs_inode        *ip,
-       xfs_fsize_t             new_size)
-{
-       int                     error;
-
-       trace_xfs_itruncate_data_start(ip, new_size);
-
-       /*
-        * The first thing we do is set the size to new_size permanently on
-        * disk.  This way we don't have to worry about anyone ever being able
-        * to look at the data being freed even in the face of a crash.
-        * What we're getting around here is the case where we free a block, it
-        * is allocated to another file, it is written to, and then we crash.
-        * If the new data gets written to the file but the log buffers
-        * containing the free and reallocation don't, then we'd end up with
-        * garbage in the blocks being freed.  As long as we make the new_size
-        * permanent before actually freeing any blocks it doesn't matter if
-        * they get written to.
-        */
-       if (ip->i_d.di_nextents > 0) {
-               /*
-                * If we are not changing the file size then do not update
-                * the on-disk file size - we may be called from
-                * xfs_inactive_free_eofblocks().  If we update the on-disk
-                * file size and then the system crashes before the contents
-                * of the file are flushed to disk then the files may be
-                * full of holes (ie NULL files bug).
-                */
-               if (ip->i_size != new_size) {
-                       ip->i_d.di_size = new_size;
-                       ip->i_size = new_size;
-                       xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
-               }
-       }
-
-       error = xfs_itruncate_extents(tpp, ip, XFS_DATA_FORK, new_size);
-       if (error)
-               return error;
-
-       /*
-        * If we are not changing the file size then do not update the on-disk
-        * file size - we may be called from xfs_inactive_free_eofblocks().
-        * If we update the on-disk file size and then the system crashes
-        * before the contents of the file are flushed to disk then the files
-        * may be full of holes (ie NULL files bug).
-        */
-       xfs_isize_check(ip, new_size);
-       if (ip->i_size != new_size) {
-               ip->i_d.di_size = new_size;
-               ip->i_size = new_size;
-       }
-
-       ASSERT(new_size != 0 || ip->i_delayed_blks == 0);
-       ASSERT(new_size != 0 || ip->i_d.di_nextents == 0);
-
-       /*
-        * Always re-log the inode so that our permanent transaction can keep
-        * on rolling it forward in the log.
-        */
-       xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
-
-       trace_xfs_itruncate_data_end(ip, new_size);
-       return 0;
-}
-
 /*
  * This is called when the inode's link count goes to 0.
  * We place the on-disk inode on a list in the AGI.  It
@@ -1824,8 +1709,7 @@ xfs_ifree(
        ASSERT(ip->i_d.di_nlink == 0);
        ASSERT(ip->i_d.di_nextents == 0);
        ASSERT(ip->i_d.di_anextents == 0);
-       ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
-              (!S_ISREG(ip->i_d.di_mode)));
+       ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
        ASSERT(ip->i_d.di_nblocks == 0);
 
        /*
@@ -1844,8 +1728,6 @@ xfs_ifree(
        ip->i_d.di_flags = 0;
        ip->i_d.di_dmevmask = 0;
        ip->i_d.di_forkoff = 0;         /* mark the attr fork not in use */
-       ip->i_df.if_ext_max =
-               XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
        ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
        ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
        /*
@@ -2151,7 +2033,7 @@ xfs_idestroy_fork(
  * once someone is waiting for it to be unpinned.
  */
 static void
-xfs_iunpin_nowait(
+xfs_iunpin(
        struct xfs_inode        *ip)
 {
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
@@ -2163,14 +2045,29 @@ xfs_iunpin_nowait(
 
 }
 
+static void
+__xfs_iunpin_wait(
+       struct xfs_inode        *ip)
+{
+       wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
+       DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
+
+       xfs_iunpin(ip);
+
+       do {
+               prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+               if (xfs_ipincount(ip))
+                       io_schedule();
+       } while (xfs_ipincount(ip));
+       finish_wait(wq, &wait.wait);
+}
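
The helper above rests on the kernel's shared bit-waitqueue machinery rather than a dedicated per-inode waitqueue. A minimal sketch of the same pattern, assuming a hypothetical flags word and FOO_BUSY_BIT (names invented for illustration):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/bitops.h>

#define FOO_BUSY_BIT    0

static void foo_wait_not_busy(unsigned long *flags)
{
        wait_queue_head_t *wq = bit_waitqueue(flags, FOO_BUSY_BIT);
        DEFINE_WAIT_BIT(wait, flags, FOO_BUSY_BIT);

        do {
                prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
                if (test_bit(FOO_BUSY_BIT, flags))
                        schedule();
        } while (test_bit(FOO_BUSY_BIT, flags));
        finish_wait(wq, &wait.wait);
}

static void foo_clear_busy(unsigned long *flags)
{
        clear_bit(FOO_BUSY_BIT, flags);
        smp_mb__after_clear_bit();      /* order the clear before the wakeup */
        wake_up_bit(flags, FOO_BUSY_BIT);
}

The waker must clear the bit before calling wake_up_bit() on the same address and bit, which is exactly what xfs_inode_item_unpin() does for __XFS_IPINNED_BIT further down in this diff.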
+
 void
 xfs_iunpin_wait(
        struct xfs_inode        *ip)
 {
-       if (xfs_ipincount(ip)) {
-               xfs_iunpin_nowait(ip);
-               wait_event(ip->i_ipin_wait, (xfs_ipincount(ip) == 0));
-       }
+       if (xfs_ipincount(ip))
+               __xfs_iunpin_wait(ip);
 }
 
 /*
@@ -2510,9 +2407,9 @@ xfs_iflush(
        XFS_STATS_INC(xs_iflush_count);
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
-       ASSERT(!completion_done(&ip->i_flush));
+       ASSERT(xfs_isiflocked(ip));
        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
-              ip->i_d.di_nextents > ip->i_df.if_ext_max);
+              ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
 
        iip = ip->i_itemp;
        mp = ip->i_mount;
@@ -2529,7 +2426,7 @@ xfs_iflush(
         * out for us if they occur after the log force completes.
         */
        if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) {
-               xfs_iunpin_nowait(ip);
+               xfs_iunpin(ip);
                xfs_ifunlock(ip);
                return EAGAIN;
        }
@@ -2626,9 +2523,9 @@ xfs_iflush_int(
 #endif
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
-       ASSERT(!completion_done(&ip->i_flush));
+       ASSERT(xfs_isiflocked(ip));
        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
-              ip->i_d.di_nextents > ip->i_df.if_ext_max);
+              ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
 
        iip = ip->i_itemp;
        mp = ip->i_mount;
index f0e6b151ba37e4d0c8cc92f12a84ac03848dd0a1..2f27b745408520b73bab9bd8a1a2ca4ed1f96ea0 100644 (file)
@@ -66,7 +66,6 @@ typedef struct xfs_ifork {
        struct xfs_btree_block  *if_broot;      /* file's incore btree root */
        short                   if_broot_bytes; /* bytes allocated for root */
        unsigned char           if_flags;       /* per-fork flags */
-       unsigned char           if_ext_max;     /* max # of extent records */
        union {
                xfs_bmbt_rec_host_t *if_extents;/* linear map file exts */
                xfs_ext_irec_t  *if_ext_irec;   /* irec map file exts */
@@ -206,12 +205,12 @@ typedef struct xfs_icdinode {
        ((w) == XFS_DATA_FORK ? \
                ((ip)->i_d.di_nextents = (n)) : \
                ((ip)->i_d.di_anextents = (n)))
-
+#define XFS_IFORK_MAXEXT(ip, w) \
+       (XFS_IFORK_SIZE(ip, w) / sizeof(xfs_bmbt_rec_t))
 
 
 #ifdef __KERNEL__
 
-struct bhv_desc;
 struct xfs_buf;
 struct xfs_bmap_free;
 struct xfs_bmbt_irec;
@@ -220,12 +219,6 @@ struct xfs_mount;
 struct xfs_trans;
 struct xfs_dquot;
 
-typedef struct dm_attrs_s {
-       __uint32_t      da_dmevmask;    /* DMIG event mask */
-       __uint16_t      da_dmstate;     /* DMIG state info */
-       __uint16_t      da_pad;         /* DMIG extra padding */
-} dm_attrs_t;
-
 typedef struct xfs_inode {
        /* Inode linking and identification information. */
        struct xfs_mount        *i_mount;       /* fs mount struct ptr */
@@ -244,27 +237,19 @@ typedef struct xfs_inode {
        struct xfs_inode_log_item *i_itemp;     /* logging information */
        mrlock_t                i_lock;         /* inode lock */
        mrlock_t                i_iolock;       /* inode IO lock */
-       struct completion       i_flush;        /* inode flush completion q */
        atomic_t                i_pincount;     /* inode pin count */
-       wait_queue_head_t       i_ipin_wait;    /* inode pinning wait queue */
        spinlock_t              i_flags_lock;   /* inode i_flags lock */
        /* Miscellaneous state. */
-       unsigned short          i_flags;        /* see defined flags below */
+       unsigned long           i_flags;        /* see defined flags below */
        unsigned char           i_update_core;  /* timestamps/size is dirty */
        unsigned int            i_delayed_blks; /* count of delay alloc blks */
 
        xfs_icdinode_t          i_d;            /* most of ondisk inode */
 
-       xfs_fsize_t             i_size;         /* in-memory size */
-       xfs_fsize_t             i_new_size;     /* size when write completes */
-
        /* VFS inode */
        struct inode            i_vnode;        /* embedded VFS inode */
 } xfs_inode_t;
 
-#define XFS_ISIZE(ip)  S_ISREG((ip)->i_d.di_mode) ? \
-                               (ip)->i_size : (ip)->i_d.di_size;
-
 /* Convert from vfs inode to xfs inode */
 static inline struct xfs_inode *XFS_I(struct inode *inode)
 {
@@ -277,6 +262,18 @@ static inline struct inode *VFS_I(struct xfs_inode *ip)
        return &ip->i_vnode;
 }
 
+/*
+ * For regular files we only update the on-disk filesize when actually
+ * writing data back to disk.  Until then only the copy in the VFS inode
+ * is uptodate.
+ */
+static inline xfs_fsize_t XFS_ISIZE(struct xfs_inode *ip)
+{
+       if (S_ISREG(ip->i_d.di_mode))
+               return i_size_read(VFS_I(ip));
+       return ip->i_d.di_size;
+}
+
 /*
  * i_flags helper functions
  */
@@ -331,6 +328,19 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
        return ret;
 }
 
+static inline int
+xfs_iflags_test_and_set(xfs_inode_t *ip, unsigned short flags)
+{
+       int ret;
+
+       spin_lock(&ip->i_flags_lock);
+       ret = ip->i_flags & flags;
+       if (!ret)
+               ip->i_flags |= flags;
+       spin_unlock(&ip->i_flags_lock);
+       return ret;
+}
+
 /*
  * Project quota id helpers (previously projid was 16bit only
  * and using two 16bit values to hold new 32bit projid was chosen
@@ -350,36 +360,20 @@ xfs_set_projid(struct xfs_inode *ip,
        ip->i_d.di_projid_lo = (__uint16_t) (projid & 0xffff);
 }
 
-/*
- * Manage the i_flush queue embedded in the inode.  This completion
- * queue synchronizes processes attempting to flush the in-core
- * inode back to disk.
- */
-static inline void xfs_iflock(xfs_inode_t *ip)
-{
-       wait_for_completion(&ip->i_flush);
-}
-
-static inline int xfs_iflock_nowait(xfs_inode_t *ip)
-{
-       return try_wait_for_completion(&ip->i_flush);
-}
-
-static inline void xfs_ifunlock(xfs_inode_t *ip)
-{
-       complete(&ip->i_flush);
-}
-
 /*
  * In-core inode flags.
  */
-#define XFS_IRECLAIM           0x0001  /* started reclaiming this inode */
-#define XFS_ISTALE             0x0002  /* inode has been staled */
-#define XFS_IRECLAIMABLE       0x0004  /* inode can be reclaimed */
-#define XFS_INEW               0x0008  /* inode has just been allocated */
-#define XFS_IFILESTREAM                0x0010  /* inode is in a filestream directory */
-#define XFS_ITRUNCATED         0x0020  /* truncated down so flush-on-close */
-#define XFS_IDIRTY_RELEASE     0x0040  /* dirty release already seen */
+#define XFS_IRECLAIM           (1 << 0) /* started reclaiming this inode */
+#define XFS_ISTALE             (1 << 1) /* inode has been staled */
+#define XFS_IRECLAIMABLE       (1 << 2) /* inode can be reclaimed */
+#define XFS_INEW               (1 << 3) /* inode has just been allocated */
+#define XFS_IFILESTREAM                (1 << 4) /* inode is in a filestream dir. */
+#define XFS_ITRUNCATED         (1 << 5) /* truncated down so flush-on-close */
+#define XFS_IDIRTY_RELEASE     (1 << 6) /* dirty release already seen */
+#define __XFS_IFLOCK_BIT       7        /* inode is being flushed right now */
+#define XFS_IFLOCK             (1 << __XFS_IFLOCK_BIT)
+#define __XFS_IPINNED_BIT      8        /* wakeup key for zero pin count */
+#define XFS_IPINNED            (1 << __XFS_IPINNED_BIT)
 
 /*
  * Per-lifetime flags need to be reset when re-using a reclaimable inode during
@@ -391,6 +385,34 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
         XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | \
         XFS_IFILESTREAM);
 
+/*
+ * Synchronize processes attempting to flush the in-core inode back to disk.
+ */
+
+extern void __xfs_iflock(struct xfs_inode *ip);
+
+static inline int xfs_iflock_nowait(struct xfs_inode *ip)
+{
+       return !xfs_iflags_test_and_set(ip, XFS_IFLOCK);
+}
+
+static inline void xfs_iflock(struct xfs_inode *ip)
+{
+       if (!xfs_iflock_nowait(ip))
+               __xfs_iflock(ip);
+}
+
+static inline void xfs_ifunlock(struct xfs_inode *ip)
+{
+       xfs_iflags_clear(ip, XFS_IFLOCK);
+       wake_up_bit(&ip->i_flags, __XFS_IFLOCK_BIT);
+}
+
+static inline int xfs_isiflocked(struct xfs_inode *ip)
+{
+       return xfs_iflags_test(ip, XFS_IFLOCK);
+}
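
Taken together these helpers behave like a try-lock/lock/unlock triple built on a single flag bit. A short usage sketch with a hypothetical caller, following the surrounding XFS convention of positive errno returns:

static int foo_flush_inode(struct xfs_inode *ip, bool can_block)
{
        if (!can_block) {
                if (!xfs_iflock_nowait(ip))
                        return EAGAIN;  /* someone else is flushing */
        } else {
                xfs_iflock(ip);         /* sleeps in __xfs_iflock() */
        }

        /* ... write the inode back ... */

        xfs_ifunlock(ip);               /* clears XFS_IFLOCK, wakes waiters */
        return 0;
}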
+
 /*
  * Flags for inode locking.
  * Bit ranges: 1<<1  - 1<<16-1 -- iolock/ilock modes (bitfield)
@@ -491,8 +513,6 @@ int         xfs_ifree(struct xfs_trans *, xfs_inode_t *,
                           struct xfs_bmap_free *);
 int            xfs_itruncate_extents(struct xfs_trans **, struct xfs_inode *,
                                      int, xfs_fsize_t);
-int            xfs_itruncate_data(struct xfs_trans **, struct xfs_inode *,
-                                  xfs_fsize_t);
 int            xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
 
 void           xfs_iext_realloc(xfs_inode_t *, int, int);
index cfd6c7f8cc3c09450e1ad6dfff372f12938d1d42..91d71dcd4852eed6339bd1ceb54a8dbdf04cd27a 100644 (file)
@@ -79,8 +79,6 @@ xfs_inode_item_size(
                break;
 
        case XFS_DINODE_FMT_BTREE:
-               ASSERT(ip->i_df.if_ext_max ==
-                      XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
                iip->ili_format.ilf_fields &=
                        ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT |
                          XFS_ILOG_DEV | XFS_ILOG_UUID);
@@ -557,7 +555,7 @@ xfs_inode_item_unpin(
        trace_xfs_inode_unpin(ip, _RET_IP_);
        ASSERT(atomic_read(&ip->i_pincount) > 0);
        if (atomic_dec_and_test(&ip->i_pincount))
-               wake_up(&ip->i_ipin_wait);
+               wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
 }
 
 /*
@@ -719,7 +717,7 @@ xfs_inode_item_pushbuf(
         * If a flush is not in progress anymore, chances are that the
         * inode was taken off the AIL. So, just get out.
         */
-       if (completion_done(&ip->i_flush) ||
+       if (!xfs_isiflocked(ip) ||
            !(lip->li_flags & XFS_LI_IN_AIL)) {
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
                return true;
@@ -752,7 +750,7 @@ xfs_inode_item_push(
        struct xfs_inode        *ip = iip->ili_inode;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
-       ASSERT(!completion_done(&ip->i_flush));
+       ASSERT(xfs_isiflocked(ip));
 
        /*
         * Since we were able to lock the inode's flush lock and
index 9afa282aa937b473aad0d90fc6f2a0bfb659dd29..246c7d57c6f96c876778128e8d21c90fca692ce9 100644 (file)
@@ -57,26 +57,26 @@ xfs_iomap_eof_align_last_fsb(
        xfs_fileoff_t   *last_fsb)
 {
        xfs_fileoff_t   new_last_fsb = 0;
-       xfs_extlen_t    align;
+       xfs_extlen_t    align = 0;
        int             eof, error;
 
-       if (XFS_IS_REALTIME_INODE(ip))
-               ;
-       /*
-        * If mounted with the "-o swalloc" option, roundup the allocation
-        * request to a stripe width boundary if the file size is >=
-        * stripe width and we are allocating past the allocation eof.
-        */
-       else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) &&
-               (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_swidth)))
-               new_last_fsb = roundup_64(*last_fsb, mp->m_swidth);
-       /*
-        * Roundup the allocation request to a stripe unit (m_dalign) boundary
-        * if the file size is >= stripe unit size, and we are allocating past
-        * the allocation eof.
-        */
-       else if (mp->m_dalign && (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_dalign)))
-               new_last_fsb = roundup_64(*last_fsb, mp->m_dalign);
+       if (!XFS_IS_REALTIME_INODE(ip)) {
+               /*
+                * Round up the allocation request to a stripe unit
+                * (m_dalign) boundary if the file size is >= stripe unit
+                * size, and we are allocating past the allocation eof.
+                *
+                * If mounted with the "-o swalloc" option the alignment is
+        * increased from the stripe unit size to the stripe width.
+                */
+               if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
+                       align = mp->m_swidth;
+               else if (mp->m_dalign)
+                       align = mp->m_dalign;
+
+               if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align))
+                       new_last_fsb = roundup_64(*last_fsb, align);
+       }
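
As a quick worked example with made-up numbers: if the stripe unit gives align = 16 filesystem blocks and the caller's *last_fsb is 41, roundup_64(41, 16) returns 48, so the allocation request is extended to end on the next stripe-unit boundary; mounting with "-o swalloc" merely swaps in the (larger) stripe width as the alignment.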
 
        /*
         * Always round up the allocation request to an extent boundary
@@ -154,7 +154,7 @@ xfs_iomap_write_direct(
 
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
-       if ((offset + count) > ip->i_size) {
+       if ((offset + count) > XFS_ISIZE(ip)) {
                error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
                if (error)
                        goto error_out;
@@ -211,7 +211,7 @@ xfs_iomap_write_direct(
        xfs_trans_ijoin(tp, ip, 0);
 
        bmapi_flag = 0;
-       if (offset < ip->i_size || extsz)
+       if (offset < XFS_ISIZE(ip) || extsz)
                bmapi_flag |= XFS_BMAPI_PREALLOC;
 
        /*
@@ -286,7 +286,7 @@ xfs_iomap_eof_want_preallocate(
        int             found_delalloc = 0;
 
        *prealloc = 0;
-       if ((offset + count) <= ip->i_size)
+       if (offset + count <= XFS_ISIZE(ip))
                return 0;
 
        /*
@@ -340,7 +340,7 @@ xfs_iomap_prealloc_size(
                 * if we pass in alloc_blocks = 0. Hence the "+ 1" to
                 * ensure we always pass in a non-zero value.
                 */
-               alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size) + 1;
+               alloc_blocks = XFS_B_TO_FSB(mp, XFS_ISIZE(ip)) + 1;
                alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
                                        rounddown_pow_of_two(alloc_blocks));
 
@@ -564,7 +564,7 @@ xfs_iomap_write_allocate(
                         * back....
                         */
                        nimaps = 1;
-                       end_fsb = XFS_B_TO_FSB(mp, ip->i_size);
+                       end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
                        error = xfs_bmap_last_offset(NULL, ip, &last_block,
                                                        XFS_DATA_FORK);
                        if (error)
index f9babd17922377e422024ac07d9910cb78bc1708..ab302539e5b9603b8a67bb9f4399c03625fe1fd7 100644 (file)
@@ -750,6 +750,7 @@ xfs_setattr_size(
        struct xfs_mount        *mp = ip->i_mount;
        struct inode            *inode = VFS_I(ip);
        int                     mask = iattr->ia_valid;
+       xfs_off_t               oldsize, newsize;
        struct xfs_trans        *tp;
        int                     error;
        uint                    lock_flags;
@@ -777,11 +778,13 @@ xfs_setattr_size(
                lock_flags |= XFS_IOLOCK_EXCL;
        xfs_ilock(ip, lock_flags);
 
+       oldsize = inode->i_size;
+       newsize = iattr->ia_size;
+
        /*
         * Short circuit the truncate case for zero length files.
         */
-       if (iattr->ia_size == 0 &&
-           ip->i_size == 0 && ip->i_d.di_nextents == 0) {
+       if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
                if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
                        goto out_unlock;
 
@@ -807,14 +810,14 @@ xfs_setattr_size(
         * the inode to the transaction, because the inode cannot be unlocked
         * once it is a part of the transaction.
         */
-       if (iattr->ia_size > ip->i_size) {
+       if (newsize > oldsize) {
                /*
                 * Do the first part of growing a file: zero any data in the
                 * last block that is beyond the old EOF.  We need to do this
                 * before the inode is joined to the transaction to modify
                 * i_size.
                 */
-               error = xfs_zero_eof(ip, iattr->ia_size, ip->i_size);
+               error = xfs_zero_eof(ip, newsize, oldsize);
                if (error)
                        goto out_unlock;
        }
@@ -833,8 +836,8 @@ xfs_setattr_size(
         * here and prevents waiting for other data not within the range we
         * care about here.
         */
-       if (ip->i_size != ip->i_d.di_size && iattr->ia_size > ip->i_d.di_size) {
-               error = xfs_flush_pages(ip, ip->i_d.di_size, iattr->ia_size, 0,
+       if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) {
+               error = xfs_flush_pages(ip, ip->i_d.di_size, newsize, 0,
                                        FI_NONE);
                if (error)
                        goto out_unlock;
@@ -845,8 +848,7 @@ xfs_setattr_size(
         */
        inode_dio_wait(inode);
 
-       error = -block_truncate_page(inode->i_mapping, iattr->ia_size,
-                                    xfs_get_blocks);
+       error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks);
        if (error)
                goto out_unlock;
 
@@ -857,7 +859,7 @@ xfs_setattr_size(
        if (error)
                goto out_trans_cancel;
 
-       truncate_setsize(inode, iattr->ia_size);
+       truncate_setsize(inode, newsize);
 
        commit_flags = XFS_TRANS_RELEASE_LOG_RES;
        lock_flags |= XFS_ILOCK_EXCL;
@@ -876,19 +878,29 @@ xfs_setattr_size(
         * these flags set.  For all other operations the VFS set these flags
         * explicitly if it wants a timestamp update.
         */
-       if (iattr->ia_size != ip->i_size &&
-           (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
+       if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
                iattr->ia_ctime = iattr->ia_mtime =
                        current_fs_time(inode->i_sb);
                mask |= ATTR_CTIME | ATTR_MTIME;
        }
 
-       if (iattr->ia_size > ip->i_size) {
-               ip->i_d.di_size = iattr->ia_size;
-               ip->i_size = iattr->ia_size;
-       } else if (iattr->ia_size <= ip->i_size ||
-                  (iattr->ia_size == 0 && ip->i_d.di_nextents)) {
-               error = xfs_itruncate_data(&tp, ip, iattr->ia_size);
+       /*
+        * The first thing we do is set the size to new_size permanently on
+        * disk.  This way we don't have to worry about anyone ever being able
+        * to look at the data being freed even in the face of a crash.
+        * What we're getting around here is the case where we free a block, it
+        * is allocated to another file, it is written to, and then we crash.
+        * If the new data gets written to the file but the log buffers
+        * containing the free and reallocation don't, then we'd end up with
+        * garbage in the blocks being freed.  As long as we make the new size
+        * permanent before actually freeing any blocks it doesn't matter if
+        * they get written to.
+        */
+       ip->i_d.di_size = newsize;
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       if (newsize <= oldsize) {
+               error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, newsize);
                if (error)
                        goto out_trans_abort;
 
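The same "size first, free second" ordering described in the comment above now appears open-coded at every truncate call site in this series. Reduced to a sketch with a hypothetical helper (not part of the patch):

static int foo_shrink(struct xfs_trans **tpp, struct xfs_inode *ip,
                      xfs_fsize_t newsize)
{
        /* Step 1: make the smaller size permanent in the log. */
        ip->i_d.di_size = newsize;
        xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

        /* Step 2: only now free the extents beyond the new size. */
        return xfs_itruncate_extents(tpp, ip, XFS_DATA_FORK, newsize);
}

If the system crashes between the two steps, the worst case is unused blocks left attached past EOF, never stale data exposed to readers.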
index 5cc3dde1bc9039de237a102c466b1fb4aa249cf8..eafbcff81f3af43c9dae0a73175bc71abba4a7c5 100644 (file)
@@ -31,6 +31,7 @@
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
+#include "xfs_inode_item.h"
 #include "xfs_itable.h"
 #include "xfs_bmap.h"
 #include "xfs_rtalloc.h"
@@ -263,13 +264,18 @@ xfs_qm_scall_trunc_qfile(
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);
 
-       error = xfs_itruncate_data(&tp, ip, 0);
+       ip->i_d.di_size = 0;
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
        if (error) {
                xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
                                     XFS_TRANS_ABORT);
                goto out_unlock;
        }
 
+       ASSERT(ip->i_d.di_nextents == 0);
+
        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 
index 281961c1d81a73df18ab8dcf31c39e6be3d94588..ee5b695c99a700275683d26ac78acd6351c9cc1f 100644 (file)
@@ -828,14 +828,6 @@ xfs_fs_inode_init_once(
        /* xfs inode */
        atomic_set(&ip->i_pincount, 0);
        spin_lock_init(&ip->i_flags_lock);
-       init_waitqueue_head(&ip->i_ipin_wait);
-       /*
-        * Because we want to use a counting completion, complete
-        * the flush completion once to allow a single access to
-        * the flush completion without blocking.
-        */
-       init_completion(&ip->i_flush);
-       complete(&ip->i_flush);
 
        mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                     "xfsino", ip->i_ino);
index 72c01a1c16e7d16ca0a49e284addf7d884dbfea6..40b75eecd2b4b376253e0e9408e42bc475e63f9b 100644 (file)
@@ -707,14 +707,13 @@ xfs_reclaim_inode_grab(
                return 1;
 
        /*
-        * do some unlocked checks first to avoid unnecessary lock traffic.
-        * The first is a flush lock check, the second is a already in reclaim
-        * check. Only do these checks if we are not going to block on locks.
+        * If we are asked for non-blocking operation, do unlocked checks to
+        * see if the inode is already being flushed or in reclaim, to avoid
+        * lock traffic.
         */
        if ((flags & SYNC_TRYLOCK) &&
-           (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
+           __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
                return 1;
-       }
 
        /*
         * The radix tree lock here protects a thread in xfs_iget from racing
index a9d5b1e06efee95920e0bc0dd7b75c6f867b537b..6b6df5802e957009f8c3f657411bf6e6e89cfad6 100644 (file)
@@ -891,7 +891,6 @@ DECLARE_EVENT_CLASS(xfs_file_class,
                __field(dev_t, dev)
                __field(xfs_ino_t, ino)
                __field(xfs_fsize_t, size)
-               __field(xfs_fsize_t, new_size)
                __field(loff_t, offset)
                __field(size_t, count)
                __field(int, flags)
@@ -900,17 +899,15 @@ DECLARE_EVENT_CLASS(xfs_file_class,
                __entry->dev = VFS_I(ip)->i_sb->s_dev;
                __entry->ino = ip->i_ino;
                __entry->size = ip->i_d.di_size;
-               __entry->new_size = ip->i_new_size;
                __entry->offset = offset;
                __entry->count = count;
                __entry->flags = flags;
        ),
-       TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
+       TP_printk("dev %d:%d ino 0x%llx size 0x%llx "
                  "offset 0x%llx count 0x%zx ioflags %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  __entry->size,
-                 __entry->new_size,
                  __entry->offset,
                  __entry->count,
                  __print_flags(__entry->flags, "|", XFS_IO_FLAGS))
@@ -978,7 +975,6 @@ DECLARE_EVENT_CLASS(xfs_imap_class,
                __field(dev_t, dev)
                __field(xfs_ino_t, ino)
                __field(loff_t, size)
-               __field(loff_t, new_size)
                __field(loff_t, offset)
                __field(size_t, count)
                __field(int, type)
@@ -990,7 +986,6 @@ DECLARE_EVENT_CLASS(xfs_imap_class,
                __entry->dev = VFS_I(ip)->i_sb->s_dev;
                __entry->ino = ip->i_ino;
                __entry->size = ip->i_d.di_size;
-               __entry->new_size = ip->i_new_size;
                __entry->offset = offset;
                __entry->count = count;
                __entry->type = type;
@@ -998,13 +993,11 @@ DECLARE_EVENT_CLASS(xfs_imap_class,
                __entry->startblock = irec ? irec->br_startblock : 0;
                __entry->blockcount = irec ? irec->br_blockcount : 0;
        ),
-       TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
-                 "offset 0x%llx count %zd type %s "
-                 "startoff 0x%llx startblock %lld blockcount 0x%llx",
+       TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset 0x%llx count %zd "
+                 "type %s startoff 0x%llx startblock %lld blockcount 0x%llx",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  __entry->size,
-                 __entry->new_size,
                  __entry->offset,
                  __entry->count,
                  __print_symbolic(__entry->type, XFS_IO_TYPES),
@@ -1031,26 +1024,23 @@ DECLARE_EVENT_CLASS(xfs_simple_io_class,
                __field(xfs_ino_t, ino)
                __field(loff_t, isize)
                __field(loff_t, disize)
-               __field(loff_t, new_size)
                __field(loff_t, offset)
                __field(size_t, count)
        ),
        TP_fast_assign(
                __entry->dev = VFS_I(ip)->i_sb->s_dev;
                __entry->ino = ip->i_ino;
-               __entry->isize = ip->i_size;
+               __entry->isize = VFS_I(ip)->i_size;
                __entry->disize = ip->i_d.di_size;
-               __entry->new_size = ip->i_new_size;
                __entry->offset = offset;
                __entry->count = count;
        ),
-       TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx new_size 0x%llx "
+       TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx "
                  "offset 0x%llx count %zd",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  __entry->isize,
                  __entry->disize,
-                 __entry->new_size,
                  __entry->offset,
                  __entry->count)
 );
@@ -1090,8 +1080,8 @@ DECLARE_EVENT_CLASS(xfs_itrunc_class,
 DEFINE_EVENT(xfs_itrunc_class, name, \
        TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
        TP_ARGS(ip, new_size))
-DEFINE_ITRUNC_EVENT(xfs_itruncate_data_start);
-DEFINE_ITRUNC_EVENT(xfs_itruncate_data_end);
+DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_start);
+DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_end);
 
 TRACE_EVENT(xfs_pagecache_inval,
        TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
@@ -1568,7 +1558,6 @@ DECLARE_EVENT_CLASS(xfs_swap_extent_class,
                __field(xfs_ino_t, ino)
                __field(int, format)
                __field(int, nex)
-               __field(int, max_nex)
                __field(int, broot_size)
                __field(int, fork_off)
        ),
@@ -1578,18 +1567,16 @@ DECLARE_EVENT_CLASS(xfs_swap_extent_class,
                __entry->ino = ip->i_ino;
                __entry->format = ip->i_d.di_format;
                __entry->nex = ip->i_d.di_nextents;
-               __entry->max_nex = ip->i_df.if_ext_max;
                __entry->broot_size = ip->i_df.if_broot_bytes;
                __entry->fork_off = XFS_IFORK_BOFF(ip);
        ),
        TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, "
-                 "Max in-fork extents %d, broot size %d, fork offset %d",
+                 "broot size %d, fork offset %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  __print_symbolic(__entry->which, XFS_SWAPEXT_INODES),
                  __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR),
                  __entry->nex,
-                 __entry->max_nex,
                  __entry->broot_size,
                  __entry->fork_off)
 )
index f2fea868d4db5da562804b2a0a9bd212608af058..0cf52da9d2468a547a614f55cf0191a396f82574 100644 (file)
@@ -175,7 +175,7 @@ xfs_free_eofblocks(
         * Figure out if there are any blocks beyond the end
         * of the file.  If not, then there is nothing to do.
         */
-       end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_size));
+       end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
        last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
        if (last_fsb <= end_fsb)
                return 0;
@@ -226,7 +226,14 @@ xfs_free_eofblocks(
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);
 
-               error = xfs_itruncate_data(&tp, ip, ip->i_size);
+               /*
+                * Do not update the on-disk file size.  If we update the
+                * on-disk file size and then the system crashes before the
+                * contents of the file are flushed to disk then the files
+                * may be full of holes (i.e. the NULL files bug).
+                */
+               error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
+                                             XFS_ISIZE(ip));
                if (error) {
                        /*
                         * If we get an error at this point we simply don't
@@ -540,8 +547,8 @@ xfs_release(
                return 0;
 
        if ((S_ISREG(ip->i_d.di_mode) &&
-            ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
-              ip->i_delayed_blks > 0)) &&
+            (VFS_I(ip)->i_size > 0 ||
+             (VN_CACHED(VFS_I(ip)) > 0 || ip->i_delayed_blks > 0)) &&
             (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
            (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
 
@@ -618,7 +625,7 @@ xfs_inactive(
         * only one with a reference to the inode.
         */
        truncate = ((ip->i_d.di_nlink == 0) &&
-           ((ip->i_d.di_size != 0) || (ip->i_size != 0) ||
+           ((ip->i_d.di_size != 0) || XFS_ISIZE(ip) != 0 ||
             (ip->i_d.di_nextents > 0) || (ip->i_delayed_blks > 0)) &&
            S_ISREG(ip->i_d.di_mode));
 
@@ -632,12 +639,12 @@ xfs_inactive(
 
        if (ip->i_d.di_nlink != 0) {
                if ((S_ISREG(ip->i_d.di_mode) &&
-                     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
-                       ip->i_delayed_blks > 0)) &&
-                     (ip->i_df.if_flags & XFS_IFEXTENTS) &&
-                    (!(ip->i_d.di_flags &
+                   (VFS_I(ip)->i_size > 0 ||
+                    (VN_CACHED(VFS_I(ip)) > 0 || ip->i_delayed_blks > 0)) &&
+                   (ip->i_df.if_flags & XFS_IFEXTENTS) &&
+                   (!(ip->i_d.di_flags &
                                (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
-                     (ip->i_delayed_blks != 0)))) {
+                    ip->i_delayed_blks != 0))) {
                        error = xfs_free_eofblocks(mp, ip, 0);
                        if (error)
                                return VN_INACTIVE_CACHE;
@@ -670,13 +677,18 @@ xfs_inactive(
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);
 
-               error = xfs_itruncate_data(&tp, ip, 0);
+               ip->i_d.di_size = 0;
+               xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+               error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
                if (error) {
                        xfs_trans_cancel(tp,
                                XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
                        return VN_INACTIVE_CACHE;
                }
+
+               ASSERT(ip->i_d.di_nextents == 0);
        } else if (S_ISLNK(ip->i_d.di_mode)) {
 
                /*
@@ -1961,11 +1973,11 @@ xfs_zero_remaining_bytes(
         * since nothing can read beyond eof.  The space will
         * be zeroed when the file is extended anyway.
         */
-       if (startoff >= ip->i_size)
+       if (startoff >= XFS_ISIZE(ip))
                return 0;
 
-       if (endoff > ip->i_size)
-               endoff = ip->i_size;
+       if (endoff > XFS_ISIZE(ip))
+               endoff = XFS_ISIZE(ip);
 
        bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
                                        mp->m_rtdev_targp : mp->m_ddev_targp,
@@ -2260,7 +2272,7 @@ xfs_change_file_space(
                bf->l_start += offset;
                break;
        case 2: /*SEEK_END*/
-               bf->l_start += ip->i_size;
+               bf->l_start += XFS_ISIZE(ip);
                break;
        default:
                return XFS_ERROR(EINVAL);
@@ -2277,7 +2289,7 @@ xfs_change_file_space(
        bf->l_whence = 0;
 
        startoffset = bf->l_start;
-       fsize = ip->i_size;
+       fsize = XFS_ISIZE(ip);
 
        /*
         * XFS_IOC_RESVSP and XFS_IOC_UNRESVSP will reserve or unreserve
index 9eabffbc4e50c4b2ae7a1049970d7f7bc39f12ed..033f6aa670de5086ba64dcde7ee6ab7c80e269a7 100644 (file)
@@ -134,7 +134,7 @@ struct pl08x_txd {
        struct dma_async_tx_descriptor tx;
        struct list_head node;
        struct list_head dsg_list;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        dma_addr_t llis_bus;
        struct pl08x_lli *llis_va;
        /* Default cctl value for LLIs */
@@ -197,7 +197,7 @@ struct pl08x_dma_chan {
        dma_addr_t dst_addr;
        u32 src_cctl;
        u32 dst_cctl;
-       enum dma_data_direction runtime_direction;
+       enum dma_transfer_direction runtime_direction;
        dma_cookie_t lc;
        struct list_head pend_list;
        struct pl08x_txd *at;
index 426ab9f4dd853b18d3e8df1b6a3fd2831fec0a57..9ff7a2c48b508103576d175d88fed1c5b3593592 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <linux/types.h>
 #include <linux/elf-em.h>
+#include <linux/ptrace.h>
 
 /* The netlink messages for the audit system is divided into blocks:
  * 1000 - 1099 are for commanding the audit system
  * AUDIT_UNUSED_BITS is updated if need be. */
 #define AUDIT_UNUSED_BITS      0x07FFFC00
 
+/* AUDIT_FIELD_COMPARE rule list */
+#define AUDIT_COMPARE_UID_TO_OBJ_UID   1
+#define AUDIT_COMPARE_GID_TO_OBJ_GID   2
+#define AUDIT_COMPARE_EUID_TO_OBJ_UID  3
+#define AUDIT_COMPARE_EGID_TO_OBJ_GID  4
+#define AUDIT_COMPARE_AUID_TO_OBJ_UID  5
+#define AUDIT_COMPARE_SUID_TO_OBJ_UID  6
+#define AUDIT_COMPARE_SGID_TO_OBJ_GID  7
+#define AUDIT_COMPARE_FSUID_TO_OBJ_UID 8
+#define AUDIT_COMPARE_FSGID_TO_OBJ_GID 9
+
+#define AUDIT_COMPARE_UID_TO_AUID      10
+#define AUDIT_COMPARE_UID_TO_EUID      11
+#define AUDIT_COMPARE_UID_TO_FSUID     12
+#define AUDIT_COMPARE_UID_TO_SUID      13
+
+#define AUDIT_COMPARE_AUID_TO_FSUID    14
+#define AUDIT_COMPARE_AUID_TO_SUID     15
+#define AUDIT_COMPARE_AUID_TO_EUID     16
+
+#define AUDIT_COMPARE_EUID_TO_SUID     17
+#define AUDIT_COMPARE_EUID_TO_FSUID    18
+
+#define AUDIT_COMPARE_SUID_TO_FSUID    19
+
+#define AUDIT_COMPARE_GID_TO_EGID      20
+#define AUDIT_COMPARE_GID_TO_FSGID     21
+#define AUDIT_COMPARE_GID_TO_SGID      22
+
+#define AUDIT_COMPARE_EGID_TO_FSGID    23
+#define AUDIT_COMPARE_EGID_TO_SGID     24
+#define AUDIT_COMPARE_SGID_TO_FSGID    25
+
+#define AUDIT_MAX_FIELD_COMPARE                AUDIT_COMPARE_SGID_TO_FSGID
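
For illustration, a hypothetical userspace rule built on these codes puts AUDIT_FIELD_COMPARE in a field slot, the comparison operator in the matching fieldflags slot, and one of the AUDIT_COMPARE_* values in the value slot (a sketch only, reusing the existing struct audit_rule_data layout):

#include <stdlib.h>
#include <linux/audit.h>

static struct audit_rule_data *build_uid_eq_objuid_rule(void)
{
        struct audit_rule_data *rule = calloc(1, sizeof(*rule));

        if (!rule)
                return NULL;
        rule->flags = AUDIT_FILTER_EXIT;
        rule->action = AUDIT_ALWAYS;
        rule->field_count = 1;
        rule->fields[0] = AUDIT_FIELD_COMPARE;
        rule->fieldflags[0] = AUDIT_EQUAL;              /* "=" operator */
        rule->values[0] = AUDIT_COMPARE_UID_TO_OBJ_UID; /* uid = obj_uid */
        return rule;    /* caller sends it in an AUDIT_ADD_RULE message */
}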
 
 /* Rule fields */
                                /* These are useful when checking the
 #define AUDIT_PERM     106
 #define AUDIT_DIR      107
 #define AUDIT_FILETYPE 108
+#define AUDIT_OBJ_UID  109
+#define AUDIT_OBJ_GID  110
+#define AUDIT_FIELD_COMPARE    111
 
 #define AUDIT_ARG0      200
 #define AUDIT_ARG1      (AUDIT_ARG0+1)
@@ -408,28 +446,24 @@ struct audit_field {
        void                            *lsm_rule;
 };
 
-#define AUDITSC_INVALID 0
-#define AUDITSC_SUCCESS 1
-#define AUDITSC_FAILURE 2
-#define AUDITSC_RESULT(x) ( ((long)(x))<0?AUDITSC_FAILURE:AUDITSC_SUCCESS )
 extern int __init audit_register_class(int class, unsigned *list);
 extern int audit_classify_syscall(int abi, unsigned syscall);
 extern int audit_classify_arch(int arch);
 #ifdef CONFIG_AUDITSYSCALL
 /* These are defined in auditsc.c */
                                /* Public API */
-extern void audit_finish_fork(struct task_struct *child);
 extern int  audit_alloc(struct task_struct *task);
-extern void audit_free(struct task_struct *task);
-extern void audit_syscall_entry(int arch,
-                               int major, unsigned long a0, unsigned long a1,
-                               unsigned long a2, unsigned long a3);
-extern void audit_syscall_exit(int failed, long return_code);
+extern void __audit_free(struct task_struct *task);
+extern void __audit_syscall_entry(int arch,
+                                 int major, unsigned long a0, unsigned long a1,
+                                 unsigned long a2, unsigned long a3);
+extern void __audit_syscall_exit(int ret_success, long ret_value);
 extern void __audit_getname(const char *name);
 extern void audit_putname(const char *name);
 extern void __audit_inode(const char *name, const struct dentry *dentry);
 extern void __audit_inode_child(const struct dentry *dentry,
                                const struct inode *parent);
+extern void __audit_seccomp(unsigned long syscall);
 extern void __audit_ptrace(struct task_struct *t);
 
 static inline int audit_dummy_context(void)
@@ -437,6 +471,27 @@ static inline int audit_dummy_context(void)
        void *p = current->audit_context;
        return !p || *(int *)p;
 }
+static inline void audit_free(struct task_struct *task)
+{
+       if (unlikely(task->audit_context))
+               __audit_free(task);
+}
+static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
+                                      unsigned long a1, unsigned long a2,
+                                      unsigned long a3)
+{
+       if (unlikely(!audit_dummy_context()))
+               __audit_syscall_entry(arch, major, a0, a1, a2, a3);
+}
+static inline void audit_syscall_exit(void *pt_regs)
+{
+       if (unlikely(current->audit_context)) {
+               int success = is_syscall_success(pt_regs);
+               int return_code = regs_return_value(pt_regs);
+
+               __audit_syscall_exit(success, return_code);
+       }
+}
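
A hypothetical arch-side call site (not from this patch) shows why the wrapper exists: the common no-audit path costs a single branch before the out-of-line __audit_syscall_exit() is ever reached.

#include <linux/audit.h>
#include <linux/ptrace.h>

void syscall_trace_leave(struct pt_regs *regs)
{
        audit_syscall_exit(regs);       /* one branch when auditing is off */
        /* ... other exit-path tracing ... */
}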
 static inline void audit_getname(const char *name)
 {
        if (unlikely(!audit_dummy_context()))
@@ -453,6 +508,12 @@ static inline void audit_inode_child(const struct dentry *dentry,
 }
 void audit_core_dumps(long signr);
 
+static inline void audit_seccomp(unsigned long syscall)
+{
+       if (unlikely(!audit_dummy_context()))
+               __audit_seccomp(syscall);
+}
+
 static inline void audit_ptrace(struct task_struct *t)
 {
        if (unlikely(!audit_dummy_context()))
@@ -463,17 +524,16 @@ static inline void audit_ptrace(struct task_struct *t)
 extern unsigned int audit_serial(void);
 extern int auditsc_get_stamp(struct audit_context *ctx,
                              struct timespec *t, unsigned int *serial);
-extern int  audit_set_loginuid(struct task_struct *task, uid_t loginuid);
+extern int  audit_set_loginuid(uid_t loginuid);
 #define audit_get_loginuid(t) ((t)->loginuid)
 #define audit_get_sessionid(t) ((t)->sessionid)
 extern void audit_log_task_context(struct audit_buffer *ab);
 extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
 extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
-extern int audit_bprm(struct linux_binprm *bprm);
-extern void audit_socketcall(int nargs, unsigned long *args);
-extern int audit_sockaddr(int len, void *addr);
+extern int __audit_bprm(struct linux_binprm *bprm);
+extern void __audit_socketcall(int nargs, unsigned long *args);
+extern int __audit_sockaddr(int len, void *addr);
 extern void __audit_fd_pair(int fd1, int fd2);
-extern int audit_set_macxattr(const char *name);
 extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
 extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout);
 extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification);
@@ -499,6 +559,23 @@ static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid
        if (unlikely(!audit_dummy_context()))
                __audit_ipc_set_perm(qbytes, uid, gid, mode);
 }
+static inline int audit_bprm(struct linux_binprm *bprm)
+{
+       if (unlikely(!audit_dummy_context()))
+               return __audit_bprm(bprm);
+       return 0;
+}
+static inline void audit_socketcall(int nargs, unsigned long *args)
+{
+       if (unlikely(!audit_dummy_context()))
+               __audit_socketcall(nargs, args);
+}
+static inline int audit_sockaddr(int len, void *addr)
+{
+       if (unlikely(!audit_dummy_context()))
+               return __audit_sockaddr(len, addr);
+       return 0;
+}
 static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
 {
        if (unlikely(!audit_dummy_context()))
@@ -544,12 +621,11 @@ static inline void audit_mmap_fd(int fd, int flags)
 
 extern int audit_n_rules;
 extern int audit_signals;
-#else
-#define audit_finish_fork(t)
+#else /* CONFIG_AUDITSYSCALL */
 #define audit_alloc(t) ({ 0; })
 #define audit_free(t) do { ; } while (0)
 #define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0)
-#define audit_syscall_exit(f,r) do { ; } while (0)
+#define audit_syscall_exit(r) do { ; } while (0)
 #define audit_dummy_context() 1
 #define audit_getname(n) do { ; } while (0)
 #define audit_putname(n) do { ; } while (0)
@@ -558,6 +634,7 @@ extern int audit_signals;
 #define audit_inode(n,d) do { (void)(d); } while (0)
 #define audit_inode_child(i,p) do { ; } while (0)
 #define audit_core_dumps(i) do { ; } while (0)
+#define audit_seccomp(i) do { ; } while (0)
 #define auditsc_get_stamp(c,t,s) (0)
 #define audit_get_loginuid(t) (-1)
 #define audit_get_sessionid(t) (-1)
@@ -568,7 +645,6 @@ extern int audit_signals;
 #define audit_socketcall(n,a) ((void)0)
 #define audit_fd_pair(n,a) ((void)0)
 #define audit_sockaddr(len, addr) ({ 0; })
-#define audit_set_macxattr(n) do { ; } while (0)
 #define audit_mq_open(o,m,a) ((void)0)
 #define audit_mq_sendrecv(d,l,p,t) ((void)0)
 #define audit_mq_notify(d,n) ((void)0)
@@ -579,7 +655,7 @@ extern int audit_signals;
 #define audit_ptrace(t) ((void)0)
 #define audit_n_rules 0
 #define audit_signals 0
-#endif
+#endif /* CONFIG_AUDITSYSCALL */
 
 #ifdef CONFIG_AUDIT
 /* These are defined in audit.c */
index f4b8346b1a331f90fc9f29431b84b9176e4e6acb..83c209f39493adf3ef4ae659c2cc46b3770c2948 100644 (file)
@@ -162,7 +162,7 @@ struct bcma_driver {
 
        int (*probe)(struct bcma_device *dev);
        void (*remove)(struct bcma_device *dev);
-       int (*suspend)(struct bcma_device *dev, pm_message_t state);
+       int (*suspend)(struct bcma_device *dev);
        int (*resume)(struct bcma_device *dev);
        void (*shutdown)(struct bcma_device *dev);
 
index efae755017d7de93d4bc40b28c29488aa959bf46..b01558b15814f6d94be21272d385df3a1c4d23b5 100644 (file)
@@ -46,7 +46,7 @@ struct signature_hdr {
        char            mpi[0];
 } __packed;
 
-#if defined(CONFIG_DIGSIG) || defined(CONFIG_DIGSIG_MODULE)
+#if defined(CONFIG_SIGNATURE) || defined(CONFIG_SIGNATURE_MODULE)
 
 int digsig_verify(struct key *keyring, const char *sig, int siglen,
                                        const char *digest, int digestlen);
@@ -59,6 +59,6 @@ static inline int digsig_verify(struct key *keyring, const char *sig,
        return -EOPNOTSUPP;
 }
 
-#endif /* CONFIG_DIGSIG */
+#endif /* CONFIG_SIGNATURE */
 
 #endif /* _DIGSIG_H */
index 75f53f874b24a0c0abb790f501f2f60ace48e17e..679b349d9b66695f65bb3597b2a530e3e9709600 100644 (file)
@@ -23,7 +23,6 @@
 
 #include <linux/device.h>
 #include <linux/uio.h>
-#include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
 #include <linux/bitmap.h>
 #include <asm/page.h>
@@ -72,11 +71,93 @@ enum dma_transaction_type {
        DMA_ASYNC_TX,
        DMA_SLAVE,
        DMA_CYCLIC,
+       DMA_INTERLEAVE,
+/* last transaction type for creation of the capabilities mask */
+       DMA_TX_TYPE_END,
 };
 
-/* last transaction type for creation of the capabilities mask */
-#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
+/**
+ * enum dma_transfer_direction - dma transfer mode and direction indicator
+ * @DMA_MEM_TO_MEM: Async/Memcpy mode
+ * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
+ * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
+ * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
+ */
+enum dma_transfer_direction {
+       DMA_MEM_TO_MEM,
+       DMA_MEM_TO_DEV,
+       DMA_DEV_TO_MEM,
+       DMA_DEV_TO_DEV,
+       DMA_TRANS_NONE,
+};
+
+/**
+ * Interleaved Transfer Request
+ * ----------------------------
+ * A chunk is a collection of contiguous bytes to be transferred.
+ * The gap (in bytes) between two chunks is called the inter-chunk gap (ICG).
+ * ICGs may or may not change between chunks.
+ * A FRAME is the smallest series of contiguous {chunk,icg} pairs
+ *  that, when repeated an integral number of times, specifies the transfer.
+ * A transfer template is a specification of a frame, the number of times
+ *  it is to be repeated and other per-transfer attributes.
+ *
+ * Practically, a client driver would prepare a template for each
+ *  type of transfer it is going to need during its lifetime and
+ *  set only 'src_start' and 'dst_start' before submitting the requests.
+ *
+ *
+ *  |      Frame-1        |       Frame-2       | ~ |       Frame-'numf'  |
+ *  |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
+ *
+ *    ==  Chunk size
+ *    ... ICG
+ */
+
+/**
+ * struct data_chunk - Element of scatter-gather list that makes a frame.
+ * @size: Number of bytes to read from source.
+ *       size_dst := fn(op, size_src), so it doesn't mean much for the
+ *       destination.
+ * @icg: Number of bytes to jump after the last src/dst address of this
+ *      chunk and before the first src/dst address of the next chunk.
+ *      Ignored for dst (assumed 0) if dst_inc is true and dst_sgl is false.
+ *      Ignored for src (assumed 0) if src_inc is true and src_sgl is false.
+ */
+struct data_chunk {
+       size_t size;
+       size_t icg;
+};
 
+/**
+ * struct dma_interleaved_template - Template to convey to the DMAC the
+ *      transfer pattern and attributes.
+ * @src_start: Bus address of source for the first chunk.
+ * @dst_start: Bus address of destination for the first chunk.
+ * @dir: Specifies the types of the source and the destination.
+ * @src_inc: If the source address increments after reading from it.
+ * @dst_inc: If the destination address increments after writing to it.
+ * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
+ *             Otherwise, source is read contiguously (icg ignored).
+ *             Ignored if src_inc is false.
+ * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
+ *             Otherwise, destination is filled contiguously (icg ignored).
+ *             Ignored if dst_inc is false.
+ * @numf: Number of frames in this template.
+ * @frame_size: Number of chunks in a frame i.e, size of sgl[].
+ * @sgl: Array of {chunk,icg} pairs that make up a frame.
+ */
+struct dma_interleaved_template {
+       dma_addr_t src_start;
+       dma_addr_t dst_start;
+       enum dma_transfer_direction dir;
+       bool src_inc;
+       bool dst_inc;
+       bool src_sgl;
+       bool dst_sgl;
+       size_t numf;
+       size_t frame_size;
+       struct data_chunk sgl[0];
+};
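
A hypothetical client-side sketch of filling such a template for a scattered memory-to-device transfer; addresses, sizes and repeat counts are invented for illustration:

#include <linux/slab.h>
#include <linux/dmaengine.h>

static struct dma_interleaved_template *make_template(dma_addr_t src,
                                                      dma_addr_t dst)
{
        struct dma_interleaved_template *xt;

        /* the zero-length sgl[] lets the chunks be allocated in one go */
        xt = kzalloc(sizeof(*xt) + 2 * sizeof(struct data_chunk), GFP_KERNEL);
        if (!xt)
                return NULL;

        xt->src_start = src;
        xt->dst_start = dst;
        xt->dir = DMA_MEM_TO_DEV;
        xt->src_inc = true;
        xt->src_sgl = true;     /* honour per-chunk ICG on the source */
        xt->dst_inc = false;    /* device FIFO: fixed address */
        xt->numf = 8;           /* repeat the frame eight times */
        xt->frame_size = 2;     /* two {chunk,icg} pairs per frame */
        xt->sgl[0].size = 64;   xt->sgl[0].icg = 16;
        xt->sgl[1].size = 32;   xt->sgl[1].icg = 0;
        return xt;              /* pass to device_prep_interleaved_dma() */
}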
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
@@ -269,7 +350,7 @@ enum dma_slave_buswidth {
  * struct, if applicable.
  */
 struct dma_slave_config {
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        dma_addr_t src_addr;
        dma_addr_t dst_addr;
        enum dma_slave_buswidth src_addr_width;
@@ -433,6 +514,7 @@ struct dma_tx_state {
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
  *     The function takes a buffer of size buf_len. The callback function will
  *     be called after period_len bytes have been transferred.
+ * @device_prep_interleaved_dma: prepare a transfer described by a generic
+ *     interleaved template.
  * @device_control: manipulate all pending operations on a channel, returns
  *     zero or error code
  * @device_tx_status: poll for transaction completion, the optional
@@ -492,11 +574,14 @@ struct dma_device {
 
        struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
                struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
                struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-               size_t period_len, enum dma_data_direction direction);
+               size_t period_len, enum dma_transfer_direction direction);
+       struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+               struct dma_chan *chan, struct dma_interleaved_template *xt,
+               unsigned long flags);
        int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                unsigned long arg);
 
@@ -522,7 +607,7 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
 
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
        struct dma_chan *chan, void *buf, size_t len,
-       enum dma_data_direction dir, unsigned long flags)
+       enum dma_transfer_direction dir, unsigned long flags)
 {
        struct scatterlist sg;
        sg_init_one(&sg, buf, len);
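
A minimal submission sketch using the new direction type; the channel, buffer and FIFO address are assumed to come from the (hypothetical) caller, and error handling is mostly elided:

#include <linux/dmaengine.h>

static void foo_start_tx(struct dma_chan *chan, void *buf, size_t len,
                         dma_addr_t fifo_phys)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo_phys,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 8,
        };
        struct dma_async_tx_descriptor *desc;

        dmaengine_slave_config(chan, &cfg);
        desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return;                 /* error handling elided */
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
}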
index 4bfe0a2f7d50cc218bce24040154286a582aaf94..f2c64f92c4a006394e21e022bcf6b5c03590c930 100644 (file)
@@ -127,7 +127,7 @@ struct dw_cyclic_desc {
 
 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-               enum dma_data_direction direction);
+               enum dma_transfer_direction direction);
 void dw_dma_cyclic_free(struct dma_chan *chan);
 int dw_dma_cyclic_start(struct dma_chan *chan);
 void dw_dma_cyclic_stop(struct dma_chan *chan);
index 183a6af7715d341bb5ffa9c8ebb3428d44f93cc8..bfc014c57351c5bf1d96e850691b1aad5ad4b284 100644 (file)
@@ -293,6 +293,9 @@ static inline bool key_is_instantiated(const struct key *key)
        (rcu_dereference_protected((KEY)->payload.rcudata,              \
                                   rwsem_is_locked(&((struct key *)(KEY))->sem)))
 
+#define rcu_assign_keypointer(KEY, PAYLOAD)                            \
+       (rcu_assign_pointer((KEY)->payload.rcudata, PAYLOAD))
+
 #ifdef CONFIG_SYSCTL
 extern ctl_table key_sysctls[];
 #endif
index abc0120b09b772ff90fe539016295a00dfb08107..9c07dcebded747493041c8e6a5c18d68bc43fabf 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/bug.h>
 #include <linux/atomic.h>
+#include <linux/kernel.h>
 
 struct kref {
        atomic_t refcount;
index 32085249e9cbf54d0d65ebc63e0e08cb8b6a132f..0549d2115507124405b06f5ea96f072880a3e475 100644 (file)
@@ -42,6 +42,7 @@
 #define AUTOFS_MINOR           235
 #define MAPPER_CTRL_MINOR      236
 #define LOOP_CTRL_MINOR                237
+#define VHOST_NET_MINOR                238
 #define MISC_DYNAMIC_MINOR     255
 
 struct device;
diff --git a/include/linux/mtd/gpmi-nand.h b/include/linux/mtd/gpmi-nand.h
new file mode 100644 (file)
index 0000000..69b6dbf
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef __MACH_MXS_GPMI_NAND_H__
+#define __MACH_MXS_GPMI_NAND_H__
+
+/* The size of the resources is fixed. */
+#define GPMI_NAND_RES_SIZE     6
+
+/* Resource names for the GPMI NAND driver. */
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "GPMI NAND GPMI Registers"
+#define GPMI_NAND_GPMI_INTERRUPT_RES_NAME  "GPMI NAND GPMI Interrupt"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "GPMI NAND BCH Registers"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "GPMI NAND BCH Interrupt"
+#define GPMI_NAND_DMA_CHANNELS_RES_NAME    "GPMI NAND DMA Channels"
+#define GPMI_NAND_DMA_INTERRUPT_RES_NAME   "GPMI NAND DMA Interrupt"
+
+/**
+ * struct gpmi_nand_platform_data - GPMI NAND driver platform data.
+ *
+ * This structure communicates platform-specific information to the GPMI NAND
+ * driver that can't be expressed as resources.
+ *
+ * @platform_init:           A pointer to a function the driver will call to
+ *                           initialize the platform (e.g., set up the pin mux).
+ * @min_prop_delay_in_ns:    Minimum propagation delay of GPMI signals to and
+ *                           from the NAND Flash device, in nanoseconds.
+ * @max_prop_delay_in_ns:    Maximum propagation delay of GPMI signals to and
+ *                           from the NAND Flash device, in nanoseconds.
+ * @max_chip_count:          The maximum number of chips for which the driver
+ *                           should configure the hardware. This value most
+ *                           likely reflects the number of pins that are
+ *                           connected to a NAND Flash device. If this is
+ *                           greater than the SoC hardware can support, the
+ *                           driver will print a message and fail to initialize.
+ * @partitions:              An optional pointer to an array of partition
+ *                           descriptions.
+ * @partition_count:         The number of elements in the partitions array.
+ */
+struct gpmi_nand_platform_data {
+       /* SoC hardware information. */
+       int             (*platform_init)(void);
+
+       /* NAND Flash information. */
+       unsigned int    min_prop_delay_in_ns;
+       unsigned int    max_prop_delay_in_ns;
+       unsigned int    max_chip_count;
+
+       /* Medium information. */
+       struct          mtd_partition *partitions;
+       unsigned        partition_count;
+};
+#endif
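A board file would then hand this to the platform device; a hypothetical sketch (names, delay values and partition layout are illustrative, not from the patch):

    static struct mtd_partition board_nand_parts[] = {
            { .name = "boot",   .offset = 0,                  .size = SZ_16M },
            { .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
    };

    static struct gpmi_nand_platform_data board_gpmi_pdata = {
            .platform_init        = board_gpmi_pinmux_init,  /* hypothetical pinmux hook */
            .min_prop_delay_in_ns = 5,
            .max_prop_delay_in_ns = 9,
            .max_chip_count       = 1,
            .partitions           = board_nand_parts,
            .partition_count      = ARRAY_SIZE(board_nand_parts),
    };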
index 9e3a2838291bfe6aee8f6b3e0d88b43ace455abf..0d3dd66322ecbb24529303f6634f36e5ce6f390d 100644 (file)
@@ -83,10 +83,6 @@ enum ip_conntrack_status {
        /* Conntrack is a fake untracked entry */
        IPS_UNTRACKED_BIT = 12,
        IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
-
-       /* Conntrack has a userspace helper. */
-       IPS_USERSPACE_HELPER_BIT = 13,
-       IPS_USERSPACE_HELPER = (1 << IPS_USERSPACE_HELPER_BIT),
 };
 
 /* Connection tracking event types */
index 6390f0992f36f0723393d282c6d39d3f68abb12e..b56e76811c04380e9779dbe82c2cfa4a5b0c6abd 100644 (file)
@@ -3,8 +3,7 @@
 
 #include <linux/types.h>
 
-#define XT_CT_NOTRACK          0x1
-#define XT_CT_USERSPACE_HELPER 0x2
+#define XT_CT_NOTRACK  0x1
 
 struct xt_ct_target_info {
        __u16 flags;
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
new file mode 100644 (file)
index 0000000..9490a00
--- /dev/null
@@ -0,0 +1,434 @@
+/*
+ * Definitions for the NVM Express interface
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _LINUX_NVME_H
+#define _LINUX_NVME_H
+
+#include <linux/types.h>
+
+struct nvme_bar {
+       __u64                   cap;    /* Controller Capabilities */
+       __u32                   vs;     /* Version */
+       __u32                   intms;  /* Interrupt Mask Set */
+       __u32                   intmc;  /* Interrupt Mask Clear */
+       __u32                   cc;     /* Controller Configuration */
+       __u32                   rsvd1;  /* Reserved */
+       __u32                   csts;   /* Controller Status */
+       __u32                   rsvd2;  /* Reserved */
+       __u32                   aqa;    /* Admin Queue Attributes */
+       __u64                   asq;    /* Admin SQ Base Address */
+       __u64                   acq;    /* Admin CQ Base Address */
+};
+
+#define NVME_CAP_TIMEOUT(cap)  (((cap) >> 24) & 0xff)
+#define NVME_CAP_STRIDE(cap)   (((cap) >> 32) & 0xf)
+
+enum {
+       NVME_CC_ENABLE          = 1 << 0,
+       NVME_CC_CSS_NVM         = 0 << 4,
+       NVME_CC_MPS_SHIFT       = 7,
+       NVME_CC_ARB_RR          = 0 << 11,
+       NVME_CC_ARB_WRRU        = 1 << 11,
+       NVME_CC_ARB_VS          = 7 << 11,
+       NVME_CC_SHN_NONE        = 0 << 14,
+       NVME_CC_SHN_NORMAL      = 1 << 14,
+       NVME_CC_SHN_ABRUPT      = 2 << 14,
+       NVME_CC_IOSQES          = 6 << 16,
+       NVME_CC_IOCQES          = 4 << 20,
+       NVME_CSTS_RDY           = 1 << 0,
+       NVME_CSTS_CFS           = 1 << 1,
+       NVME_CSTS_SHST_NORMAL   = 0 << 2,
+       NVME_CSTS_SHST_OCCUR    = 1 << 2,
+       NVME_CSTS_SHST_CMPLT    = 2 << 2,
+};
+
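Together, CAP, CC and CSTS drive controller bring-up; a condensed sketch of the enable handshake these bits imply (admin queue registers are assumed to be programmed already, readq assumed available, shutdown and error paths elided):

    static int nvme_enable_sketch(struct nvme_bar __iomem *bar)
    {
            u64 cap = readq(&bar->cap);
            /* CAP.TO is in 500ms units: worst case for CSTS.RDY to rise */
            unsigned long timeout = jiffies + (NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2;

            writel(NVME_CC_ENABLE | NVME_CC_CSS_NVM | NVME_CC_ARB_RR |
                   (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT |
                   NVME_CC_IOSQES | NVME_CC_IOCQES, &bar->cc);

            while (!(readl(&bar->csts) & NVME_CSTS_RDY)) {
                    if (time_after(jiffies, timeout))
                            return -ENODEV;
                    msleep(100);
            }
            return 0;
    }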
+struct nvme_id_power_state {
+       __le16                  max_power;      /* centiwatts */
+       __u16                   rsvd2;
+       __le32                  entry_lat;      /* microseconds */
+       __le32                  exit_lat;       /* microseconds */
+       __u8                    read_tput;
+       __u8                    read_lat;
+       __u8                    write_tput;
+       __u8                    write_lat;
+       __u8                    rsvd16[16];
+};
+
+#define NVME_VS(major, minor)  (major << 16 | minor)
+
+struct nvme_id_ctrl {
+       __le16                  vid;
+       __le16                  ssvid;
+       char                    sn[20];
+       char                    mn[40];
+       char                    fr[8];
+       __u8                    rab;
+       __u8                    ieee[3];
+       __u8                    mic;
+       __u8                    mdts;
+       __u8                    rsvd78[178];
+       __le16                  oacs;
+       __u8                    acl;
+       __u8                    aerl;
+       __u8                    frmw;
+       __u8                    lpa;
+       __u8                    elpe;
+       __u8                    npss;
+       __u8                    rsvd264[248];
+       __u8                    sqes;
+       __u8                    cqes;
+       __u8                    rsvd514[2];
+       __le32                  nn;
+       __le16                  oncs;
+       __le16                  fuses;
+       __u8                    fna;
+       __u8                    vwc;
+       __le16                  awun;
+       __le16                  awupf;
+       __u8                    rsvd530[1518];
+       struct nvme_id_power_state      psd[32];
+       __u8                    vs[1024];
+};
+
+struct nvme_lbaf {
+       __le16                  ms;
+       __u8                    ds;
+       __u8                    rp;
+};
+
+struct nvme_id_ns {
+       __le64                  nsze;
+       __le64                  ncap;
+       __le64                  nuse;
+       __u8                    nsfeat;
+       __u8                    nlbaf;
+       __u8                    flbas;
+       __u8                    mc;
+       __u8                    dpc;
+       __u8                    dps;
+       __u8                    rsvd30[98];
+       struct nvme_lbaf        lbaf[16];
+       __u8                    rsvd192[192];
+       __u8                    vs[3712];
+};
+
+enum {
+       NVME_NS_FEAT_THIN       = 1 << 0,
+       NVME_LBAF_RP_BEST       = 0,
+       NVME_LBAF_RP_BETTER     = 1,
+       NVME_LBAF_RP_GOOD       = 2,
+       NVME_LBAF_RP_DEGRADED   = 3,
+};
+
+struct nvme_lba_range_type {
+       __u8                    type;
+       __u8                    attributes;
+       __u8                    rsvd2[14];
+       __u64                   slba;
+       __u64                   nlb;
+       __u8                    guid[16];
+       __u8                    rsvd48[16];
+};
+
+enum {
+       NVME_LBART_TYPE_FS      = 0x01,
+       NVME_LBART_TYPE_RAID    = 0x02,
+       NVME_LBART_TYPE_CACHE   = 0x03,
+       NVME_LBART_TYPE_SWAP    = 0x04,
+
+       NVME_LBART_ATTRIB_TEMP  = 1 << 0,
+       NVME_LBART_ATTRIB_HIDE  = 1 << 1,
+};
+
+/* I/O commands */
+
+enum nvme_opcode {
+       nvme_cmd_flush          = 0x00,
+       nvme_cmd_write          = 0x01,
+       nvme_cmd_read           = 0x02,
+       nvme_cmd_write_uncor    = 0x04,
+       nvme_cmd_compare        = 0x05,
+       nvme_cmd_dsm            = 0x09,
+};
+
+struct nvme_common_command {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u32                   cdw2[2];
+       __le64                  metadata;
+       __le64                  prp1;
+       __le64                  prp2;
+       __u32                   cdw10[6];
+};
+
+struct nvme_rw_command {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u64                   rsvd2;
+       __le64                  metadata;
+       __le64                  prp1;
+       __le64                  prp2;
+       __le64                  slba;
+       __le16                  length;
+       __le16                  control;
+       __le32                  dsmgmt;
+       __le32                  reftag;
+       __le16                  apptag;
+       __le16                  appmask;
+};
+
+enum {
+       NVME_RW_LR                      = 1 << 15,
+       NVME_RW_FUA                     = 1 << 14,
+       NVME_RW_DSM_FREQ_UNSPEC         = 0,
+       NVME_RW_DSM_FREQ_TYPICAL        = 1,
+       NVME_RW_DSM_FREQ_RARE           = 2,
+       NVME_RW_DSM_FREQ_READS          = 3,
+       NVME_RW_DSM_FREQ_WRITES         = 4,
+       NVME_RW_DSM_FREQ_RW             = 5,
+       NVME_RW_DSM_FREQ_ONCE           = 6,
+       NVME_RW_DSM_FREQ_PREFETCH       = 7,
+       NVME_RW_DSM_FREQ_TEMP           = 8,
+       NVME_RW_DSM_LATENCY_NONE        = 0 << 4,
+       NVME_RW_DSM_LATENCY_IDLE        = 1 << 4,
+       NVME_RW_DSM_LATENCY_NORM        = 2 << 4,
+       NVME_RW_DSM_LATENCY_LOW         = 3 << 4,
+       NVME_RW_DSM_SEQ_REQ             = 1 << 6,
+       NVME_RW_DSM_COMPRESSED          = 1 << 7,
+};
+
+/* Admin commands */
+
+enum nvme_admin_opcode {
+       nvme_admin_delete_sq            = 0x00,
+       nvme_admin_create_sq            = 0x01,
+       nvme_admin_get_log_page         = 0x02,
+       nvme_admin_delete_cq            = 0x04,
+       nvme_admin_create_cq            = 0x05,
+       nvme_admin_identify             = 0x06,
+       nvme_admin_abort_cmd            = 0x08,
+       nvme_admin_set_features         = 0x09,
+       nvme_admin_get_features         = 0x0a,
+       nvme_admin_async_event          = 0x0c,
+       nvme_admin_activate_fw          = 0x10,
+       nvme_admin_download_fw          = 0x11,
+       nvme_admin_format_nvm           = 0x80,
+       nvme_admin_security_send        = 0x81,
+       nvme_admin_security_recv        = 0x82,
+};
+
+enum {
+       NVME_QUEUE_PHYS_CONTIG  = (1 << 0),
+       NVME_CQ_IRQ_ENABLED     = (1 << 1),
+       NVME_SQ_PRIO_URGENT     = (0 << 1),
+       NVME_SQ_PRIO_HIGH       = (1 << 1),
+       NVME_SQ_PRIO_MEDIUM     = (2 << 1),
+       NVME_SQ_PRIO_LOW        = (3 << 1),
+       NVME_FEAT_ARBITRATION   = 0x01,
+       NVME_FEAT_POWER_MGMT    = 0x02,
+       NVME_FEAT_LBA_RANGE     = 0x03,
+       NVME_FEAT_TEMP_THRESH   = 0x04,
+       NVME_FEAT_ERR_RECOVERY  = 0x05,
+       NVME_FEAT_VOLATILE_WC   = 0x06,
+       NVME_FEAT_NUM_QUEUES    = 0x07,
+       NVME_FEAT_IRQ_COALESCE  = 0x08,
+       NVME_FEAT_IRQ_CONFIG    = 0x09,
+       NVME_FEAT_WRITE_ATOMIC  = 0x0a,
+       NVME_FEAT_ASYNC_EVENT   = 0x0b,
+       NVME_FEAT_SW_PROGRESS   = 0x0c,
+};
+
+struct nvme_identify {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u64                   rsvd2[2];
+       __le64                  prp1;
+       __le64                  prp2;
+       __le32                  cns;
+       __u32                   rsvd11[5];
+};
+
+struct nvme_features {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u64                   rsvd2[2];
+       __le64                  prp1;
+       __le64                  prp2;
+       __le32                  fid;
+       __le32                  dword11;
+       __u32                   rsvd12[4];
+};
+
+struct nvme_create_cq {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __u32                   rsvd1[5];
+       __le64                  prp1;
+       __u64                   rsvd8;
+       __le16                  cqid;
+       __le16                  qsize;
+       __le16                  cq_flags;
+       __le16                  irq_vector;
+       __u32                   rsvd12[4];
+};
+
+struct nvme_create_sq {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __u32                   rsvd1[5];
+       __le64                  prp1;
+       __u64                   rsvd8;
+       __le16                  sqid;
+       __le16                  qsize;
+       __le16                  sq_flags;
+       __le16                  cqid;
+       __u32                   rsvd12[4];
+};
+
+struct nvme_delete_queue {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __u32                   rsvd1[9];
+       __le16                  qid;
+       __u16                   rsvd10;
+       __u32                   rsvd11[5];
+};
+
+struct nvme_download_firmware {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __u32                   rsvd1[5];
+       __le64                  prp1;
+       __le64                  prp2;
+       __le32                  numd;
+       __le32                  offset;
+       __u32                   rsvd12[4];
+};
+
+struct nvme_command {
+       union {
+               struct nvme_common_command common;
+               struct nvme_rw_command rw;
+               struct nvme_identify identify;
+               struct nvme_features features;
+               struct nvme_create_cq create_cq;
+               struct nvme_create_sq create_sq;
+               struct nvme_delete_queue delete_queue;
+               struct nvme_download_firmware dlfw;
+       };
+};
+
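Every submission queue entry is one of these fixed-size variants viewed through the union; filling a read via the rw view might look like this (a sketch: nsid, dma_addr, slba and nblocks are assumed locals):

    struct nvme_command c;

    memset(&c, 0, sizeof(c));
    c.rw.opcode = nvme_cmd_read;
    c.rw.nsid   = cpu_to_le32(nsid);
    c.rw.prp1   = cpu_to_le64(dma_addr);
    c.rw.slba   = cpu_to_le64(slba);
    c.rw.length = cpu_to_le16(nblocks - 1);     /* 0's-based block count */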
+enum {
+       NVME_SC_SUCCESS                 = 0x0,
+       NVME_SC_INVALID_OPCODE          = 0x1,
+       NVME_SC_INVALID_FIELD           = 0x2,
+       NVME_SC_CMDID_CONFLICT          = 0x3,
+       NVME_SC_DATA_XFER_ERROR         = 0x4,
+       NVME_SC_POWER_LOSS              = 0x5,
+       NVME_SC_INTERNAL                = 0x6,
+       NVME_SC_ABORT_REQ               = 0x7,
+       NVME_SC_ABORT_QUEUE             = 0x8,
+       NVME_SC_FUSED_FAIL              = 0x9,
+       NVME_SC_FUSED_MISSING           = 0xa,
+       NVME_SC_INVALID_NS              = 0xb,
+       NVME_SC_LBA_RANGE               = 0x80,
+       NVME_SC_CAP_EXCEEDED            = 0x81,
+       NVME_SC_NS_NOT_READY            = 0x82,
+       NVME_SC_CQ_INVALID              = 0x100,
+       NVME_SC_QID_INVALID             = 0x101,
+       NVME_SC_QUEUE_SIZE              = 0x102,
+       NVME_SC_ABORT_LIMIT             = 0x103,
+       NVME_SC_ABORT_MISSING           = 0x104,
+       NVME_SC_ASYNC_LIMIT             = 0x105,
+       NVME_SC_FIRMWARE_SLOT           = 0x106,
+       NVME_SC_FIRMWARE_IMAGE          = 0x107,
+       NVME_SC_INVALID_VECTOR          = 0x108,
+       NVME_SC_INVALID_LOG_PAGE        = 0x109,
+       NVME_SC_INVALID_FORMAT          = 0x10a,
+       NVME_SC_BAD_ATTRIBUTES          = 0x180,
+       NVME_SC_WRITE_FAULT             = 0x280,
+       NVME_SC_READ_ERROR              = 0x281,
+       NVME_SC_GUARD_CHECK             = 0x282,
+       NVME_SC_APPTAG_CHECK            = 0x283,
+       NVME_SC_REFTAG_CHECK            = 0x284,
+       NVME_SC_COMPARE_FAILED          = 0x285,
+       NVME_SC_ACCESS_DENIED           = 0x286,
+};
+
+struct nvme_completion {
+       __le32  result;         /* Used by admin commands to return data */
+       __u32   rsvd;
+       __le16  sq_head;        /* how much of this queue may be reclaimed */
+       __le16  sq_id;          /* submission queue that generated this entry */
+       __u16   command_id;     /* of the command which completed */
+       __le16  status;         /* did the command fail, and if so, why? */
+};
+
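On the completion side, bit 0 of the status field is the phase tag, so the status code proper lives in bits 15:1; a decoding sketch (cqe assumed to point at a consumed entry):

    u16 status = le16_to_cpu(cqe->status) >> 1;

    if (status != NVME_SC_SUCCESS)
            pr_warn("nvme command %u failed: sc %#x\n",
                    cqe->command_id, status);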
+struct nvme_user_io {
+       __u8    opcode;
+       __u8    flags;
+       __u16   control;
+       __u16   nblocks;
+       __u16   rsvd;
+       __u64   metadata;
+       __u64   addr;
+       __u64   slba;
+       __u32   dsmgmt;
+       __u32   reftag;
+       __u16   apptag;
+       __u16   appmask;
+};
+
+struct nvme_admin_cmd {
+       __u8    opcode;
+       __u8    flags;
+       __u16   rsvd1;
+       __u32   nsid;
+       __u32   cdw2;
+       __u32   cdw3;
+       __u64   metadata;
+       __u64   addr;
+       __u32   metadata_len;
+       __u32   data_len;
+       __u32   cdw10;
+       __u32   cdw11;
+       __u32   cdw12;
+       __u32   cdw13;
+       __u32   cdw14;
+       __u32   cdw15;
+       __u32   timeout_ms;
+       __u32   result;
+};
+
+#define NVME_IOCTL_ID          _IO('N', 0x40)
+#define NVME_IOCTL_ADMIN_CMD   _IOWR('N', 0x41, struct nvme_admin_cmd)
+#define NVME_IOCTL_SUBMIT_IO   _IOW('N', 0x42, struct nvme_user_io)
+
+#endif /* _LINUX_NVME_H */
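From userspace these ioctls are reachable on the per-namespace block node; a minimal sketch (the device path is illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/nvme.h>

    int main(void)
    {
            int fd = open("/dev/nvme0n1", O_RDONLY);

            if (fd < 0)
                    return 1;
            printf("nsid = %d\n", ioctl(fd, NVME_IOCTL_ID));
            return 0;
    }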
index a27e56ca41a4cf2ae505e08710c88f745f476510..c2f1f6a5fcb8a67f52c90c8397c7384534d3010a 100644 (file)
 
 #include <linux/compiler.h>            /* For unlikely.  */
 #include <linux/sched.h>               /* For struct task_struct.  */
+#include <linux/err.h>                 /* for IS_ERR_VALUE */
 
 
 extern long arch_ptrace(struct task_struct *child, long request,
@@ -266,6 +267,15 @@ static inline void ptrace_release_task(struct task_struct *task)
 #define force_successful_syscall_return() do { } while (0)
 #endif
 
+#ifndef is_syscall_success
+/*
+ * On most systems we can tell if a syscall is a success based on whether the
+ * return value is an error value.  Some systems, like ia64 and powerpc, have
+ * different indicators of success/failure and must define their own.
+ */
+#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
+#endif
+
 /*
  * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
  *
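For reference, the audit exit path consumes this macro roughly as follows (a paraphrase of the audit.h wrapper, not part of this hunk):

    static inline void audit_syscall_exit(struct pt_regs *regs)
    {
            if (unlikely(current->audit_context)) {
                    int success = is_syscall_success(regs);
                    long return_code = regs_return_value(regs);

                    __audit_syscall_exit(success, return_code);
            }
    }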
index cb2dd118cc0ffb91e62ad7746d230b0376f442dd..8cd7fe59cf1a6f957c9376f4c5b5333fdf4f817e 100644 (file)
@@ -30,7 +30,7 @@ struct sh_desc {
        struct sh_dmae_regs hw;
        struct list_head node;
        struct dma_async_tx_descriptor async_tx;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        dma_cookie_t cookie;
        size_t partial;
        int chunks;
@@ -48,6 +48,7 @@ struct sh_dmae_channel {
        unsigned int    offset;
        unsigned int    dmars;
        unsigned int    dmars_bit;
+       unsigned int    chclr_offset;
 };
 
 struct sh_dmae_pdata {
@@ -68,6 +69,7 @@ struct sh_dmae_pdata {
        unsigned int dmaor_is_32bit:1;
        unsigned int needs_tend_set:1;
        unsigned int no_dmars:1;
+       unsigned int chclr_present:1;
 };
 
 /* DMA register */
diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
new file mode 100644 (file)
index 0000000..29d9593
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _SIRFSOC_DMA_H_
+#define _SIRFSOC_DMA_H_
+
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
+
+#endif
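A peripheral driver hands this filter to the dmaengine core to claim a fixed channel; a sketch (the wanted channel id is illustrative):

    dma_cap_mask_t mask;
    struct dma_chan *chan;

    dma_cap_zero(mask);
    dma_cap_set(DMA_SLAVE, mask);
    chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
                               (void *)(unsigned long)12);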
index ecdaeb98b293727274b6511ee7ef523c00324564..5cf685086dd3f6d728b6a0374f096a080b1bfc1d 100644 (file)
@@ -312,7 +312,6 @@ struct tty_driver {
         */
        struct tty_struct **ttys;
        struct ktermios **termios;
-       struct ktermios **termios_locked;
        void *driver_state;
 
        /*
index 89c290b69a5c6bf345f7d6e27d3b1921301b87fc..29e1920e7339867124e3605e1afd1a2777d66e8a 100644 (file)
 #define TUNER_PHILIPS_FMD1216MEX_MK3   78
 #define TUNER_PHILIPS_FM1216MK5                79
 #define TUNER_PHILIPS_FQ1216LME_MK3    80      /* Active loopthrough, no FM */
-#define TUNER_XC4000                   81      /* Xceive Silicon Tuner */
 
 #define TUNER_PARTSNIC_PTI_5NF05       81
 #define TUNER_PHILIPS_CU1216L           82
 #define TUNER_PHILIPS_FQ1236_MK5       85      /* NTSC, TDA9885, no FM radio */
 #define TUNER_TENA_TNF_5337            86
 
+#define TUNER_XC4000                   87      /* Xceive Silicon Tuner */
+
 /* tv card specific */
 #define TDA9887_PRESENT                (1<<0)
 #define TDA9887_PORT1_INACTIVE                 (1<<1)
index da1f064a81b3744688545acef85fcaf507fe6c82..9b582437fbeab11535ef3d024ba11c381ea777b1 100644 (file)
@@ -78,7 +78,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
                                      __u32 mark, __u8 tos, __u8 scope,
                                      __u8 proto, __u8 flags,
                                      __be32 daddr, __be32 saddr,
-                                     __be16 dport, __be32 sport)
+                                     __be16 dport, __be16 sport)
 {
        fl4->flowi4_oif = oif;
        fl4->flowi4_iif = 0;
index 5d1a758e05950a7669157914a15b6025da11a7cc..6a3922fe0be0b840c3d8a416cb6919aa28f78b7a 100644 (file)
@@ -857,7 +857,7 @@ struct fc_lport {
        enum fc_lport_state            state;
        unsigned long                  boot_time;
        struct fc_host_statistics      host_stats;
-       struct fcoe_dev_stats          *dev_stats;
+       struct fcoe_dev_stats __percpu *dev_stats;
        u8                             retry_count;
 
        /* Fabric information */
index b31702ac15beb3f31ea7f2b5c1c36dd2b64b2f70..84f3001a568d9edf435c0465b7d421bbc74e858c 100644 (file)
@@ -16,6 +16,8 @@ struct btrfs_delayed_ref_node;
 struct btrfs_delayed_tree_ref;
 struct btrfs_delayed_data_ref;
 struct btrfs_delayed_ref_head;
+struct btrfs_block_group_cache;
+struct btrfs_free_cluster;
 struct map_lookup;
 struct extent_buffer;
 
@@ -44,6 +46,17 @@ struct extent_buffer;
        obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) ||                \
              (obj <= BTRFS_CSUM_TREE_OBJECTID )) ? __show_root_type(obj) : "-"
 
+#define BTRFS_GROUP_FLAGS      \
+       { BTRFS_BLOCK_GROUP_DATA,       "DATA"}, \
+       { BTRFS_BLOCK_GROUP_SYSTEM,     "SYSTEM"}, \
+       { BTRFS_BLOCK_GROUP_METADATA,   "METADATA"}, \
+       { BTRFS_BLOCK_GROUP_RAID0,      "RAID0"}, \
+       { BTRFS_BLOCK_GROUP_RAID1,      "RAID1"}, \
+       { BTRFS_BLOCK_GROUP_DUP,        "DUP"}, \
+       { BTRFS_BLOCK_GROUP_RAID10,     "RAID10"}
+
+#define BTRFS_UUID_SIZE 16
+
 TRACE_EVENT(btrfs_transaction_commit,
 
        TP_PROTO(struct btrfs_root *root),
@@ -621,6 +634,34 @@ TRACE_EVENT(btrfs_cow_block,
                  __entry->cow_level)
 );
 
+TRACE_EVENT(btrfs_space_reservation,
+
+       TP_PROTO(struct btrfs_fs_info *fs_info, char *type, u64 val,
+                u64 bytes, int reserve),
+
+       TP_ARGS(fs_info, type, val, bytes, reserve),
+
+       TP_STRUCT__entry(
+               __array(        u8,     fsid,   BTRFS_UUID_SIZE )
+               __string(       type,   type                    )
+               __field(        u64,    val                     )
+               __field(        u64,    bytes                   )
+               __field(        int,    reserve                 )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE);
+               __assign_str(type, type);
+               __entry->val            = val;
+               __entry->bytes          = bytes;
+               __entry->reserve        = reserve;
+       ),
+
+       TP_printk("%pU: %s: %Lu %s %Lu", __entry->fsid, __get_str(type),
+                 __entry->val, __entry->reserve ? "reserve" : "release",
+                 __entry->bytes)
+);
+
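A call site fires this event with the space-info flags in the val column, roughly as below (fs_info, space_info and num_bytes are assumed locals, in the form fs/btrfs uses):

    trace_btrfs_space_reservation(fs_info, "space_info",
                                  space_info->flags, num_bytes, 1);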
 DECLARE_EVENT_CLASS(btrfs__reserved_extent,
 
        TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
@@ -659,6 +700,168 @@ DEFINE_EVENT(btrfs__reserved_extent,  btrfs_reserved_extent_free,
        TP_ARGS(root, start, len)
 );
 
+TRACE_EVENT(find_free_extent,
+
+       TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
+                u64 data),
+
+       TP_ARGS(root, num_bytes, empty_size, data),
+
+       TP_STRUCT__entry(
+               __field(        u64,    root_objectid           )
+               __field(        u64,    num_bytes               )
+               __field(        u64,    empty_size              )
+               __field(        u64,    data                    )
+       ),
+
+       TP_fast_assign(
+               __entry->root_objectid  = root->root_key.objectid;
+               __entry->num_bytes      = num_bytes;
+               __entry->empty_size     = empty_size;
+               __entry->data           = data;
+       ),
+
+       TP_printk("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
+                 "flags = %Lu(%s)", show_root_type(__entry->root_objectid),
+                 __entry->num_bytes, __entry->empty_size, __entry->data,
+                 __print_flags((unsigned long)__entry->data, "|",
+                                BTRFS_GROUP_FLAGS))
+);
+
+DECLARE_EVENT_CLASS(btrfs__reserve_extent,
+
+       TP_PROTO(struct btrfs_root *root,
+                struct btrfs_block_group_cache *block_group, u64 start,
+                u64 len),
+
+       TP_ARGS(root, block_group, start, len),
+
+       TP_STRUCT__entry(
+               __field(        u64,    root_objectid           )
+               __field(        u64,    bg_objectid             )
+               __field(        u64,    flags                   )
+               __field(        u64,    start                   )
+               __field(        u64,    len                     )
+       ),
+
+       TP_fast_assign(
+               __entry->root_objectid  = root->root_key.objectid;
+               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->flags          = block_group->flags;
+               __entry->start          = start;
+               __entry->len            = len;
+       ),
+
+       TP_printk("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
+                 "start = %Lu, len = %Lu",
+                 show_root_type(__entry->root_objectid), __entry->bg_objectid,
+                 __entry->flags, __print_flags((unsigned long)__entry->flags,
+                                               "|", BTRFS_GROUP_FLAGS),
+                 __entry->start, __entry->len)
+);
+
+DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
+
+       TP_PROTO(struct btrfs_root *root,
+                struct btrfs_block_group_cache *block_group, u64 start,
+                u64 len),
+
+       TP_ARGS(root, block_group, start, len)
+);
+
+DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
+
+       TP_PROTO(struct btrfs_root *root,
+                struct btrfs_block_group_cache *block_group, u64 start,
+                u64 len),
+
+       TP_ARGS(root, block_group, start, len)
+);
+
+TRACE_EVENT(btrfs_find_cluster,
+
+       TP_PROTO(struct btrfs_block_group_cache *block_group, u64 start,
+                u64 bytes, u64 empty_size, u64 min_bytes),
+
+       TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
+
+       TP_STRUCT__entry(
+               __field(        u64,    bg_objectid             )
+               __field(        u64,    flags                   )
+               __field(        u64,    start                   )
+               __field(        u64,    bytes                   )
+               __field(        u64,    empty_size              )
+               __field(        u64,    min_bytes               )
+       ),
+
+       TP_fast_assign(
+               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->flags          = block_group->flags;
+               __entry->start          = start;
+               __entry->bytes          = bytes;
+               __entry->empty_size     = empty_size;
+               __entry->min_bytes      = min_bytes;
+       ),
+
+       TP_printk("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
+                 " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
+                 __entry->flags,
+                 __print_flags((unsigned long)__entry->flags, "|",
+                               BTRFS_GROUP_FLAGS), __entry->start,
+                 __entry->bytes, __entry->empty_size,  __entry->min_bytes)
+);
+
+TRACE_EVENT(btrfs_failed_cluster_setup,
+
+       TP_PROTO(struct btrfs_block_group_cache *block_group),
+
+       TP_ARGS(block_group),
+
+       TP_STRUCT__entry(
+               __field(        u64,    bg_objectid             )
+       ),
+
+       TP_fast_assign(
+               __entry->bg_objectid    = block_group->key.objectid;
+       ),
+
+       TP_printk("block_group = %Lu", __entry->bg_objectid)
+);
+
+TRACE_EVENT(btrfs_setup_cluster,
+
+       TP_PROTO(struct btrfs_block_group_cache *block_group,
+                struct btrfs_free_cluster *cluster, u64 size, int bitmap),
+
+       TP_ARGS(block_group, cluster, size, bitmap),
+
+       TP_STRUCT__entry(
+               __field(        u64,    bg_objectid             )
+               __field(        u64,    flags                   )
+               __field(        u64,    start                   )
+               __field(        u64,    max_size                )
+               __field(        u64,    size                    )
+               __field(        int,    bitmap                  )
+       ),
+
+       TP_fast_assign(
+               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->flags          = block_group->flags;
+               __entry->start          = cluster->window_start;
+               __entry->max_size       = cluster->max_size;
+               __entry->size           = size;
+               __entry->bitmap         = bitmap;
+       ),
+
+       TP_printk("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
+                 "size = %Lu, max_size = %Lu, bitmap = %d",
+                 __entry->bg_objectid,
+                 __entry->flags,
+                 __print_flags((unsigned long)__entry->flags, "|",
+                               BTRFS_GROUP_FLAGS), __entry->start,
+                 __entry->size, __entry->max_size, __entry->bitmap)
+);
+
 #endif /* _TRACE_BTRFS_H */
 
 /* This part must be outside protection */
index 6ac2236244c381f3bef900c600d1f9c1ba41c7f5..3f42cd66f0f87a25510dd000aed37de5c0d5cc71 100644 (file)
@@ -355,7 +355,7 @@ config AUDIT
 
 config AUDITSYSCALL
        bool "Enable system-call auditing support"
-       depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH)
+       depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || ARM)
        default y if SECURITY_SELINUX
        help
          Enable low-overhead system-call auditing infrastructure that
@@ -372,6 +372,20 @@ config AUDIT_TREE
        depends on AUDITSYSCALL
        select FSNOTIFY
 
+config AUDIT_LOGINUID_IMMUTABLE
+       bool "Make audit loginuid immutable"
+       depends on AUDIT
+       help
+         This option controls whether changing the audit loginuid requires
+         CAP_AUDIT_CONTROL, or whether no special permission is needed but
+         the loginuid may only be set if it was never previously set.  On
+         systems which use systemd or a similar central process to restart
+         login services, this should be set to true.  On older systems, in
+         which an admin would typically have to directly stop and start
+         processes, this should be set to false.  Setting this to true
+         allows one to drop potentially dangerous capabilities from the
+         login tasks, but may not be backwards compatible with older init
+         systems.
+
 source "kernel/irq/Kconfig"
 
 menu "RCU Subsystem"
index 57e3f5107937f89951be2482040e294bee659bf5..bb0eb5bb9a0a8761286dfc29cd1aa5b8587e2801 100644 (file)
@@ -631,7 +631,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
        }
 
        *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
-       audit_log_format(*ab, "user pid=%d uid=%u auid=%u ses=%u",
+       audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
                         pid, uid, auid, ses);
        if (sid) {
                rc = security_secid_to_secctx(sid, &ctx, &len);
@@ -1423,7 +1423,7 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
        char *p, *pathname;
 
        if (prefix)
-               audit_log_format(ab, " %s", prefix);
+               audit_log_format(ab, "%s", prefix);
 
        /* We will allow 11 spaces for ' (deleted)' to be appended */
        pathname = kmalloc(PATH_MAX+11, ab->gfp_mask);
index 91e7071c4d2c4f06376e7ad5c485a2aa98294edd..81676680337158e20ce6077a522b64cdfa15f59f 100644 (file)
@@ -36,12 +36,8 @@ enum audit_state {
        AUDIT_DISABLED,         /* Do not create per-task audit_context.
                                 * No syscall-specific audit records can
                                 * be generated. */
-       AUDIT_SETUP_CONTEXT,    /* Create the per-task audit_context,
-                                * but don't necessarily fill it in at
-                                * syscall entry time (i.e., filter
-                                * instead). */
        AUDIT_BUILD_CONTEXT,    /* Create the per-task audit_context,
-                                * and always fill it in at syscall
+                                * and fill it in at syscall
                                 * entry time.  This makes a full
                                 * syscall record available if some
                                 * other part of the kernel decides it
index f8277c80d678bfeaefb74ad02805b0a4a2cebaa2..a6c3f1abd206c9d9736cbe5834483e36fd1d62ff 100644 (file)
@@ -235,13 +235,15 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
        switch(listnr) {
        default:
                goto exit_err;
-       case AUDIT_FILTER_USER:
-       case AUDIT_FILTER_TYPE:
 #ifdef CONFIG_AUDITSYSCALL
        case AUDIT_FILTER_ENTRY:
+               if (rule->action == AUDIT_ALWAYS)
+                       goto exit_err;
        case AUDIT_FILTER_EXIT:
        case AUDIT_FILTER_TASK:
 #endif
+       case AUDIT_FILTER_USER:
+       case AUDIT_FILTER_TYPE:
                ;
        }
        if (unlikely(rule->action == AUDIT_POSSIBLE)) {
@@ -385,7 +387,7 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
                                goto exit_free;
                        break;
                case AUDIT_FILETYPE:
-                       if ((f->val & ~S_IFMT) > S_IFMT)
+                       if (f->val & ~S_IFMT)
                                goto exit_free;
                        break;
                case AUDIT_INODE:
@@ -459,6 +461,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                case AUDIT_ARG1:
                case AUDIT_ARG2:
                case AUDIT_ARG3:
+               case AUDIT_OBJ_UID:
+               case AUDIT_OBJ_GID:
                        break;
                case AUDIT_ARCH:
                        entry->rule.arch_f = f;
@@ -522,7 +526,6 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                                goto exit_free;
                        break;
                case AUDIT_FILTERKEY:
-                       err = -EINVAL;
                        if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
                                goto exit_free;
                        str = audit_unpack_string(&bufp, &remain, f->val);
@@ -536,7 +539,11 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                                goto exit_free;
                        break;
                case AUDIT_FILETYPE:
-                       if ((f->val & ~S_IFMT) > S_IFMT)
+                       if (f->val & ~S_IFMT)
+                               goto exit_free;
+                       break;
+               case AUDIT_FIELD_COMPARE:
+                       if (f->val > AUDIT_MAX_FIELD_COMPARE)
                                goto exit_free;
                        break;
                default:
index e7fe2b0d29b3cd0676caccc6af116a76afee947e..caaea6e944f859595020a29265038e9669a2501f 100644 (file)
 
 #include "audit.h"
 
+/* flags stating the success for a syscall */
+#define AUDITSC_INVALID 0
+#define AUDITSC_SUCCESS 1
+#define AUDITSC_FAILURE 2
+
 /* AUDIT_NAMES is the number of slots we reserve in the audit_context
- * for saving names from getname(). */
-#define AUDIT_NAMES    20
+ * for saving names from getname().  If we need more names, they are
+ * allocated dynamically and added to the list anchored by names_list. */
+#define AUDIT_NAMES    5
 
 /* Indicates that audit should log the full pathname. */
 #define AUDIT_NAME_FULL -1
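The allocator backing this scheme pulls from preallocated_names first and falls back to the heap, marking heap entries via the should_free flag added below; a paraphrase of the helper this commit adds elsewhere in the file (not shown in these hunks):

    static struct audit_names *audit_alloc_name(struct audit_context *context)
    {
            struct audit_names *aname;

            if (context->name_count < AUDIT_NAMES) {
                    aname = &context->preallocated_names[context->name_count];
                    memset(aname, 0, sizeof(*aname));
            } else {
                    aname = kzalloc(sizeof(*aname), GFP_NOFS);
                    if (!aname)
                            return NULL;
                    aname->should_free = true;
            }

            aname->ino = (unsigned long)-1;
            list_add_tail(&aname->list, &context->names_list);
            context->name_count++;
            return aname;
    }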
@@ -101,9 +107,8 @@ struct audit_cap_data {
  *
  * Further, in fs/namei.c:path_lookup() we store the inode and device. */
 struct audit_names {
+       struct list_head list;          /* audit_context->names_list */
        const char      *name;
-       int             name_len;       /* number of name's characters to log */
-       unsigned        name_put;       /* call __putname() for this name */
        unsigned long   ino;
        dev_t           dev;
        umode_t         mode;
@@ -113,6 +118,14 @@ struct audit_names {
        u32             osid;
        struct audit_cap_data fcap;
        unsigned int    fcap_ver;
+       int             name_len;       /* number of name's characters to log */
+       bool            name_put;       /* call __putname() for this name */
+       /*
+        * This audit_names entry was dynamically allocated rather than
+        * taken from the preallocated array in the task audit context,
+        * so it must be freed on syscall exit.
+        */
+       bool            should_free;
 };
 
 struct audit_aux_data {
@@ -174,8 +187,17 @@ struct audit_context {
        long                return_code;/* syscall return code */
        u64                 prio;
        int                 return_valid; /* return code is valid */
-       int                 name_count;
-       struct audit_names  names[AUDIT_NAMES];
+       /*
+        * The names_list is the list of all audit_names collected during this
+        * syscall.  The first AUDIT_NAMES entries in the names_list will
+        * actually be from the preallocated_names array for performance
+        * reasons.  Except during allocation they should never be referenced
+        * through the preallocated_names array and should only be found/used
+        * by walking the names_list.
+        */
+       struct audit_names  preallocated_names[AUDIT_NAMES];
+       int                 name_count; /* total records in names_list */
+       struct list_head    names_list; /* anchor for struct audit_names->list */
        char *              filterkey;  /* key for rule that triggered record */
        struct path         pwd;
        struct audit_context *previous; /* For nested syscalls */
@@ -305,21 +327,21 @@ static int audit_match_perm(struct audit_context *ctx, int mask)
        }
 }
 
-static int audit_match_filetype(struct audit_context *ctx, int which)
+static int audit_match_filetype(struct audit_context *ctx, int val)
 {
-       unsigned index = which & ~S_IFMT;
-       umode_t mode = which & S_IFMT;
+       struct audit_names *n;
+       umode_t mode = (umode_t)val;
 
        if (unlikely(!ctx))
                return 0;
 
-       if (index >= ctx->name_count)
-               return 0;
-       if (ctx->names[index].ino == -1)
-               return 0;
-       if ((ctx->names[index].mode ^ mode) & S_IFMT)
-               return 0;
-       return 1;
+       list_for_each_entry(n, &ctx->names_list, list) {
+               if ((n->ino != -1) &&
+                   ((n->mode & S_IFMT) == mode))
+                       return 1;
+       }
+
+       return 0;
 }
 
 /*
@@ -441,6 +463,134 @@ static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
        return 0;
 }
 
+static int audit_compare_id(uid_t uid1,
+                           struct audit_names *name,
+                           unsigned long name_offset,
+                           struct audit_field *f,
+                           struct audit_context *ctx)
+{
+       struct audit_names *n;
+       unsigned long addr;
+       uid_t uid2;
+       int rc;
+
+       BUILD_BUG_ON(sizeof(uid_t) != sizeof(gid_t));
+
+       if (name) {
+               addr = (unsigned long)name;
+               addr += name_offset;
+
+               uid2 = *(uid_t *)addr;
+               rc = audit_comparator(uid1, f->op, uid2);
+               if (rc)
+                       return rc;
+       }
+
+       if (ctx) {
+               list_for_each_entry(n, &ctx->names_list, list) {
+                       addr = (unsigned long)n;
+                       addr += name_offset;
+
+                       uid2 = *(uid_t *)addr;
+
+                       rc = audit_comparator(uid1, f->op, uid2);
+                       if (rc)
+                               return rc;
+               }
+       }
+       return 0;
+}
+
+static int audit_field_compare(struct task_struct *tsk,
+                              const struct cred *cred,
+                              struct audit_field *f,
+                              struct audit_context *ctx,
+                              struct audit_names *name)
+{
+       switch (f->val) {
+       /* process to file object comparisons */
+       case AUDIT_COMPARE_UID_TO_OBJ_UID:
+               return audit_compare_id(cred->uid,
+                                       name, offsetof(struct audit_names, uid),
+                                       f, ctx);
+       case AUDIT_COMPARE_GID_TO_OBJ_GID:
+               return audit_compare_id(cred->gid,
+                                       name, offsetof(struct audit_names, gid),
+                                       f, ctx);
+       case AUDIT_COMPARE_EUID_TO_OBJ_UID:
+               return audit_compare_id(cred->euid,
+                                       name, offsetof(struct audit_names, uid),
+                                       f, ctx);
+       case AUDIT_COMPARE_EGID_TO_OBJ_GID:
+               return audit_compare_id(cred->egid,
+                                       name, offsetof(struct audit_names, gid),
+                                       f, ctx);
+       case AUDIT_COMPARE_AUID_TO_OBJ_UID:
+               return audit_compare_id(tsk->loginuid,
+                                       name, offsetof(struct audit_names, uid),
+                                       f, ctx);
+       case AUDIT_COMPARE_SUID_TO_OBJ_UID:
+               return audit_compare_id(cred->suid,
+                                       name, offsetof(struct audit_names, uid),
+                                       f, ctx);
+       case AUDIT_COMPARE_SGID_TO_OBJ_GID:
+               return audit_compare_id(cred->sgid,
+                                       name, offsetof(struct audit_names, gid),
+                                       f, ctx);
+       case AUDIT_COMPARE_FSUID_TO_OBJ_UID:
+               return audit_compare_id(cred->fsuid,
+                                       name, offsetof(struct audit_names, uid),
+                                       f, ctx);
+       case AUDIT_COMPARE_FSGID_TO_OBJ_GID:
+               return audit_compare_id(cred->fsgid,
+                                       name, offsetof(struct audit_names, gid),
+                                       f, ctx);
+       /* uid comparisons */
+       case AUDIT_COMPARE_UID_TO_AUID:
+               return audit_comparator(cred->uid, f->op, tsk->loginuid);
+       case AUDIT_COMPARE_UID_TO_EUID:
+               return audit_comparator(cred->uid, f->op, cred->euid);
+       case AUDIT_COMPARE_UID_TO_SUID:
+               return audit_comparator(cred->uid, f->op, cred->suid);
+       case AUDIT_COMPARE_UID_TO_FSUID:
+               return audit_comparator(cred->uid, f->op, cred->fsuid);
+       /* auid comparisons */
+       case AUDIT_COMPARE_AUID_TO_EUID:
+               return audit_comparator(tsk->loginuid, f->op, cred->euid);
+       case AUDIT_COMPARE_AUID_TO_SUID:
+               return audit_comparator(tsk->loginuid, f->op, cred->suid);
+       case AUDIT_COMPARE_AUID_TO_FSUID:
+               return audit_comparator(tsk->loginuid, f->op, cred->fsuid);
+       /* euid comparisons */
+       case AUDIT_COMPARE_EUID_TO_SUID:
+               return audit_comparator(cred->euid, f->op, cred->suid);
+       case AUDIT_COMPARE_EUID_TO_FSUID:
+               return audit_comparator(cred->euid, f->op, cred->fsuid);
+       /* suid comparisons */
+       case AUDIT_COMPARE_SUID_TO_FSUID:
+               return audit_comparator(cred->suid, f->op, cred->fsuid);
+       /* gid comparisons */
+       case AUDIT_COMPARE_GID_TO_EGID:
+               return audit_comparator(cred->gid, f->op, cred->egid);
+       case AUDIT_COMPARE_GID_TO_SGID:
+               return audit_comparator(cred->gid, f->op, cred->sgid);
+       case AUDIT_COMPARE_GID_TO_FSGID:
+               return audit_comparator(cred->gid, f->op, cred->fsgid);
+       /* egid comparisons */
+       case AUDIT_COMPARE_EGID_TO_SGID:
+               return audit_comparator(cred->egid, f->op, cred->sgid);
+       case AUDIT_COMPARE_EGID_TO_FSGID:
+               return audit_comparator(cred->egid, f->op, cred->fsgid);
+       /* sgid comparison */
+       case AUDIT_COMPARE_SGID_TO_FSGID:
+               return audit_comparator(cred->sgid, f->op, cred->fsgid);
+       default:
+               WARN(1, "Missing AUDIT_COMPARE define.  Report as a bug\n");
+               return 0;
+       }
+       return 0;
+}
+
 /* Determine if any context name data matches a rule's watch data */
 /* Compare a task_struct with an audit_rule.  Return 1 on match, 0
  * otherwise.
@@ -457,13 +607,14 @@ static int audit_filter_rules(struct task_struct *tsk,
                              bool task_creation)
 {
        const struct cred *cred;
-       int i, j, need_sid = 1;
+       int i, need_sid = 1;
        u32 sid;
 
        cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation);
 
        for (i = 0; i < rule->field_count; i++) {
                struct audit_field *f = &rule->fields[i];
+               struct audit_names *n;
                int result = 0;
 
                switch (f->type) {
@@ -522,12 +673,14 @@ static int audit_filter_rules(struct task_struct *tsk,
                        }
                        break;
                case AUDIT_DEVMAJOR:
-                       if (name)
-                               result = audit_comparator(MAJOR(name->dev),
-                                                         f->op, f->val);
-                       else if (ctx) {
-                               for (j = 0; j < ctx->name_count; j++) {
-                                       if (audit_comparator(MAJOR(ctx->names[j].dev),  f->op, f->val)) {
+                       if (name) {
+                               if (audit_comparator(MAJOR(name->dev), f->op, f->val) ||
+                                   audit_comparator(MAJOR(name->rdev), f->op, f->val))
+                                       ++result;
+                       } else if (ctx) {
+                               list_for_each_entry(n, &ctx->names_list, list) {
+                                       if (audit_comparator(MAJOR(n->dev), f->op, f->val) ||
+                                           audit_comparator(MAJOR(n->rdev), f->op, f->val)) {
                                                ++result;
                                                break;
                                        }
@@ -535,12 +688,14 @@ static int audit_filter_rules(struct task_struct *tsk,
                        }
                        break;
                case AUDIT_DEVMINOR:
-                       if (name)
-                               result = audit_comparator(MINOR(name->dev),
-                                                         f->op, f->val);
-                       else if (ctx) {
-                               for (j = 0; j < ctx->name_count; j++) {
-                                       if (audit_comparator(MINOR(ctx->names[j].dev), f->op, f->val)) {
+                       if (name) {
+                               if (audit_comparator(MINOR(name->dev), f->op, f->val) ||
+                                   audit_comparator(MINOR(name->rdev), f->op, f->val))
+                                       ++result;
+                       } else if (ctx) {
+                               list_for_each_entry(n, &ctx->names_list, list) {
+                                       if (audit_comparator(MINOR(n->dev), f->op, f->val) ||
+                                           audit_comparator(MINOR(n->rdev), f->op, f->val)) {
                                                ++result;
                                                break;
                                        }
@@ -551,8 +706,32 @@ static int audit_filter_rules(struct task_struct *tsk,
                        if (name)
                                result = (name->ino == f->val);
                        else if (ctx) {
-                               for (j = 0; j < ctx->name_count; j++) {
-                                       if (audit_comparator(ctx->names[j].ino, f->op, f->val)) {
+                               list_for_each_entry(n, &ctx->names_list, list) {
+                                       if (audit_comparator(n->ino, f->op, f->val)) {
+                                               ++result;
+                                               break;
+                                       }
+                               }
+                       }
+                       break;
+               case AUDIT_OBJ_UID:
+                       if (name) {
+                               result = audit_comparator(name->uid, f->op, f->val);
+                       } else if (ctx) {
+                               list_for_each_entry(n, &ctx->names_list, list) {
+                                       if (audit_comparator(n->uid, f->op, f->val)) {
+                                               ++result;
+                                               break;
+                                       }
+                               }
+                       }
+                       break;
+               case AUDIT_OBJ_GID:
+                       if (name) {
+                               result = audit_comparator(name->gid, f->op, f->val);
+                       } else if (ctx) {
+                               list_for_each_entry(n, &ctx->names_list, list) {
+                                       if (audit_comparator(n->gid, f->op, f->val)) {
                                                ++result;
                                                break;
                                        }
@@ -607,11 +786,10 @@ static int audit_filter_rules(struct task_struct *tsk,
                                                   name->osid, f->type, f->op,
                                                   f->lsm_rule, ctx);
                                } else if (ctx) {
-                                       for (j = 0; j < ctx->name_count; j++) {
-                                               if (security_audit_rule_match(
-                                                     ctx->names[j].osid,
-                                                     f->type, f->op,
-                                                     f->lsm_rule, ctx)) {
+                                       list_for_each_entry(n, &ctx->names_list, list) {
+                                               if (security_audit_rule_match(n->osid, f->type,
+                                                                             f->op, f->lsm_rule,
+                                                                             ctx)) {
                                                        ++result;
                                                        break;
                                                }
@@ -643,8 +821,10 @@ static int audit_filter_rules(struct task_struct *tsk,
                case AUDIT_FILETYPE:
                        result = audit_match_filetype(ctx, f->val);
                        break;
+               case AUDIT_FIELD_COMPARE:
+                       result = audit_field_compare(tsk, cred, f, ctx, name);
+                       break;
                }
-
                if (!result)
                        return 0;
        }
@@ -722,40 +902,53 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
        return AUDIT_BUILD_CONTEXT;
 }
 
-/* At syscall exit time, this filter is called if any audit_names[] have been
+/*
+ * Given an audit_names entry, check the inode hash table for a matching rule.
+ * Called holding the rcu read lock to protect the use of audit_inode_hash.
+ */
+static int audit_filter_inode_name(struct task_struct *tsk,
+                                  struct audit_names *n,
+                                  struct audit_context *ctx) {
+       int word, bit;
+       int h = audit_hash_ino((u32)n->ino);
+       struct list_head *list = &audit_inode_hash[h];
+       struct audit_entry *e;
+       enum audit_state state;
+
+       word = AUDIT_WORD(ctx->major);
+       bit  = AUDIT_BIT(ctx->major);
+
+       if (list_empty(list))
+               return 0;
+
+       list_for_each_entry_rcu(e, list, list) {
+               if ((e->rule.mask[word] & bit) == bit &&
+                   audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) {
+                       ctx->current_state = state;
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+/* At syscall exit time, this filter is called if any audit_names have been
  * collected during syscall processing.  We only check rules in sublists at hash
- * buckets applicable to the inode numbers in audit_names[].
+ * buckets applicable to the inode numbers in audit_names.
  * Regarding audit_state, same rules apply as for audit_filter_syscall().
  */
 void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
 {
-       int i;
-       struct audit_entry *e;
-       enum audit_state state;
+       struct audit_names *n;
 
        if (audit_pid && tsk->tgid == audit_pid)
                return;
 
        rcu_read_lock();
-       for (i = 0; i < ctx->name_count; i++) {
-               int word = AUDIT_WORD(ctx->major);
-               int bit  = AUDIT_BIT(ctx->major);
-               struct audit_names *n = &ctx->names[i];
-               int h = audit_hash_ino((u32)n->ino);
-               struct list_head *list = &audit_inode_hash[h];
-
-               if (list_empty(list))
-                       continue;
 
-               list_for_each_entry_rcu(e, list, list) {
-                       if ((e->rule.mask[word] & bit) == bit &&
-                           audit_filter_rules(tsk, &e->rule, ctx, n,
-                                              &state, false)) {
-                               rcu_read_unlock();
-                               ctx->current_state = state;
-                               return;
-                       }
-               }
+       list_for_each_entry(n, &ctx->names_list, list) {
+               if (audit_filter_inode_name(tsk, n, ctx))
+                       break;
        }
        rcu_read_unlock();
 }
@@ -766,7 +959,7 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk,
 {
        struct audit_context *context = tsk->audit_context;
 
-       if (likely(!context))
+       if (!context)
                return NULL;
        context->return_valid = return_valid;
 
@@ -799,7 +992,7 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk,
 
 static inline void audit_free_names(struct audit_context *context)
 {
-       int i;
+       struct audit_names *n, *next;
 
 #if AUDIT_DEBUG == 2
        if (context->put_count + context->ino_count != context->name_count) {
@@ -810,10 +1003,9 @@ static inline void audit_free_names(struct audit_context *context)
                       context->serial, context->major, context->in_syscall,
                       context->name_count, context->put_count,
                       context->ino_count);
-               for (i = 0; i < context->name_count; i++) {
-                       printk(KERN_ERR "names[%d] = %p = %s\n", i,
-                              context->names[i].name,
-                              context->names[i].name ?: "(null)");
+               list_for_each_entry(n, &context->names_list, list) {
+                       printk(KERN_ERR "names = %p = %s\n",
+                              n->name, n->name ?: "(null)");
                }
                dump_stack();
                return;
@@ -824,9 +1016,12 @@ static inline void audit_free_names(struct audit_context *context)
        context->ino_count  = 0;
 #endif
 
-       for (i = 0; i < context->name_count; i++) {
-               if (context->names[i].name && context->names[i].name_put)
-                       __putname(context->names[i].name);
+       list_for_each_entry_safe(n, next, &context->names_list, list) {
+               list_del(&n->list);
+               if (n->name && n->name_put)
+                       __putname(n->name);
+               if (n->should_free)
+                       kfree(n);
        }
        context->name_count = 0;
        path_put(&context->pwd);
@@ -864,6 +1059,7 @@ static inline struct audit_context *audit_alloc_context(enum audit_state state)
                return NULL;
        audit_zero_context(context, state);
        INIT_LIST_HEAD(&context->killed_trees);
+       INIT_LIST_HEAD(&context->names_list);
        return context;
 }
 
@@ -886,7 +1082,7 @@ int audit_alloc(struct task_struct *tsk)
                return 0; /* Return if not auditing. */
 
        state = audit_filter_task(tsk, &key);
-       if (likely(state == AUDIT_DISABLED))
+       if (state == AUDIT_DISABLED)
                return 0;
 
        if (!(context = audit_alloc_context(state))) {
@@ -975,7 +1171,7 @@ static void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk
                while (vma) {
                        if ((vma->vm_flags & VM_EXECUTABLE) &&
                            vma->vm_file) {
-                               audit_log_d_path(ab, "exe=",
+                               audit_log_d_path(ab, " exe=",
                                                 &vma->vm_file->f_path);
                                break;
                        }
@@ -1166,8 +1362,8 @@ static void audit_log_execve_info(struct audit_context *context,
                                  struct audit_buffer **ab,
                                  struct audit_aux_data_execve *axi)
 {
-       int i;
-       size_t len, len_sent = 0;
+       int i, len;
+       size_t len_sent = 0;
        const char __user *p;
        char *buf;
 
@@ -1324,6 +1520,68 @@ static void show_special(struct audit_context *context, int *call_panic)
        audit_log_end(ab);
 }
 
+static void audit_log_name(struct audit_context *context, struct audit_names *n,
+                          int record_num, int *call_panic)
+{
+       struct audit_buffer *ab;
+       ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
+       if (!ab)
+               return; /* audit_panic has been called */
+
+       audit_log_format(ab, "item=%d", record_num);
+
+       if (n->name) {
+               switch (n->name_len) {
+               case AUDIT_NAME_FULL:
+                       /* log the full path */
+                       audit_log_format(ab, " name=");
+                       audit_log_untrustedstring(ab, n->name);
+                       break;
+               case 0:
+                       /* name was specified as a relative path and the
+                        * directory component is the cwd */
+                       audit_log_d_path(ab, " name=", &context->pwd);
+                       break;
+               default:
+                       /* log the name's directory component */
+                       audit_log_format(ab, " name=");
+                       audit_log_n_untrustedstring(ab, n->name,
+                                                   n->name_len);
+               }
+       } else
+               audit_log_format(ab, " name=(null)");
+
+       if (n->ino != (unsigned long)-1) {
+               audit_log_format(ab, " inode=%lu"
+                                " dev=%02x:%02x mode=%#ho"
+                                " ouid=%u ogid=%u rdev=%02x:%02x",
+                                n->ino,
+                                MAJOR(n->dev),
+                                MINOR(n->dev),
+                                n->mode,
+                                n->uid,
+                                n->gid,
+                                MAJOR(n->rdev),
+                                MINOR(n->rdev));
+       }
+       if (n->osid != 0) {
+               char *ctx = NULL;
+               u32 len;
+               if (security_secid_to_secctx(
+                       n->osid, &ctx, &len)) {
+                       audit_log_format(ab, " osid=%u", n->osid);
+                       *call_panic = 2;
+               } else {
+                       audit_log_format(ab, " obj=%s", ctx);
+                       security_release_secctx(ctx, len);
+               }
+       }
+
+       audit_log_fcaps(ab, n);
+
+       audit_log_end(ab);
+}
+
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
        const struct cred *cred;
@@ -1331,6 +1589,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
        struct audit_buffer *ab;
        struct audit_aux_data *aux;
        const char *tty;
+       struct audit_names *n;
 
        /* tsk == current */
        context->pid = tsk->pid;
@@ -1466,70 +1725,14 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
        if (context->pwd.dentry && context->pwd.mnt) {
                ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD);
                if (ab) {
-                       audit_log_d_path(ab, "cwd=", &context->pwd);
+                       audit_log_d_path(ab, " cwd=", &context->pwd);
                        audit_log_end(ab);
                }
        }
-       for (i = 0; i < context->name_count; i++) {
-               struct audit_names *n = &context->names[i];
 
-               ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
-               if (!ab)
-                       continue; /* audit_panic has been called */
-
-               audit_log_format(ab, "item=%d", i);
-
-               if (n->name) {
-                       switch(n->name_len) {
-                       case AUDIT_NAME_FULL:
-                               /* log the full path */
-                               audit_log_format(ab, " name=");
-                               audit_log_untrustedstring(ab, n->name);
-                               break;
-                       case 0:
-                               /* name was specified as a relative path and the
-                                * directory component is the cwd */
-                               audit_log_d_path(ab, "name=", &context->pwd);
-                               break;
-                       default:
-                               /* log the name's directory component */
-                               audit_log_format(ab, " name=");
-                               audit_log_n_untrustedstring(ab, n->name,
-                                                           n->name_len);
-                       }
-               } else
-                       audit_log_format(ab, " name=(null)");
-
-               if (n->ino != (unsigned long)-1) {
-                       audit_log_format(ab, " inode=%lu"
-                                        " dev=%02x:%02x mode=%#ho"
-                                        " ouid=%u ogid=%u rdev=%02x:%02x",
-                                        n->ino,
-                                        MAJOR(n->dev),
-                                        MINOR(n->dev),
-                                        n->mode,
-                                        n->uid,
-                                        n->gid,
-                                        MAJOR(n->rdev),
-                                        MINOR(n->rdev));
-               }
-               if (n->osid != 0) {
-                       char *ctx = NULL;
-                       u32 len;
-                       if (security_secid_to_secctx(
-                               n->osid, &ctx, &len)) {
-                               audit_log_format(ab, " osid=%u", n->osid);
-                               call_panic = 2;
-                       } else {
-                               audit_log_format(ab, " obj=%s", ctx);
-                               security_release_secctx(ctx, len);
-                       }
-               }
-
-               audit_log_fcaps(ab, n);
-
-               audit_log_end(ab);
-       }
+       i = 0;
+       list_for_each_entry(n, &context->names_list, list)
+               audit_log_name(context, n, i++, &call_panic);
 
        /* Send end of event record to help user space know we are finished */
        ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
@@ -1545,12 +1748,12 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
  *
  * Called from copy_process and do_exit
  */
-void audit_free(struct task_struct *tsk)
+void __audit_free(struct task_struct *tsk)
 {
        struct audit_context *context;
 
        context = audit_get_context(tsk, 0, 0);
-       if (likely(!context))
+       if (!context)
                return;
 
        /* Check for system calls that do not go through the exit
@@ -1583,7 +1786,7 @@ void audit_free(struct task_struct *tsk)
  * will only be written if another part of the kernel requests that it
  * be written).
  */
-void audit_syscall_entry(int arch, int major,
+void __audit_syscall_entry(int arch, int major,
                         unsigned long a1, unsigned long a2,
                         unsigned long a3, unsigned long a4)
 {
@@ -1591,7 +1794,7 @@ void audit_syscall_entry(int arch, int major,
        struct audit_context *context = tsk->audit_context;
        enum audit_state     state;
 
-       if (unlikely(!context))
+       if (!context)
                return;
 
        /*
@@ -1648,7 +1851,7 @@ void audit_syscall_entry(int arch, int major,
                context->prio = 0;
                state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]);
        }
-       if (likely(state == AUDIT_DISABLED))
+       if (state == AUDIT_DISABLED)
                return;
 
        context->serial     = 0;
@@ -1658,30 +1861,9 @@ void audit_syscall_entry(int arch, int major,
        context->ppid       = 0;
 }
 
-void audit_finish_fork(struct task_struct *child)
-{
-       struct audit_context *ctx = current->audit_context;
-       struct audit_context *p = child->audit_context;
-       if (!p || !ctx)
-               return;
-       if (!ctx->in_syscall || ctx->current_state != AUDIT_RECORD_CONTEXT)
-               return;
-       p->arch = ctx->arch;
-       p->major = ctx->major;
-       memcpy(p->argv, ctx->argv, sizeof(ctx->argv));
-       p->ctime = ctx->ctime;
-       p->dummy = ctx->dummy;
-       p->in_syscall = ctx->in_syscall;
-       p->filterkey = kstrdup(ctx->filterkey, GFP_KERNEL);
-       p->ppid = current->pid;
-       p->prio = ctx->prio;
-       p->current_state = ctx->current_state;
-}
-
 /**
  * audit_syscall_exit - deallocate audit context after a system call
- * @valid: success/failure flag
- * @return_code: syscall return value
+ * @pt_regs: syscall registers
  *
  * Tear down after system call.  If the audit context has been marked as
  * auditable (either because of the AUDIT_RECORD_CONTEXT state from
@@ -1689,14 +1871,18 @@ void audit_finish_fork(struct task_struct *child)
  * message), then write out the syscall information.  In all cases,
  * free the names stored from getname().
  */
-void audit_syscall_exit(int valid, long return_code)
+void __audit_syscall_exit(int success, long return_code)
 {
        struct task_struct *tsk = current;
        struct audit_context *context;
 
-       context = audit_get_context(tsk, valid, return_code);
+       if (success)
+               success = AUDITSC_SUCCESS;
+       else
+               success = AUDITSC_FAILURE;
 
-       if (likely(!context))
+       context = audit_get_context(tsk, success, return_code);
+       if (!context)
                return;
 
        if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT)
@@ -1821,6 +2007,30 @@ static void handle_path(const struct dentry *dentry)
 #endif
 }
 
+static struct audit_names *audit_alloc_name(struct audit_context *context)
+{
+       struct audit_names *aname;
+
+       if (context->name_count < AUDIT_NAMES) {
+               aname = &context->preallocated_names[context->name_count];
+               memset(aname, 0, sizeof(*aname));
+       } else {
+               aname = kzalloc(sizeof(*aname), GFP_NOFS);
+               if (!aname)
+                       return NULL;
+               aname->should_free = true;
+       }
+
+       aname->ino = (unsigned long)-1;
+       list_add_tail(&aname->list, &context->names_list);
+
+       context->name_count++;
+#if AUDIT_DEBUG
+       context->ino_count++;
+#endif
+       return aname;
+}
+
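audit_alloc_name() above is a hybrid allocator: the common case is served from a small array embedded in the audit context, and only overflow entries go to the slab, with should_free recording which entries must be kfree'd in audit_free_names(). A user-space sketch of the same pattern, with hypothetical names and sizes:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    #define POOL_SIZE 5                     /* stands in for AUDIT_NAMES */

    struct entry {
            bool should_free;               /* heap entry: free at teardown */
            /* ... payload ... */
    };

    struct context {
            int count;
            struct entry pool[POOL_SIZE];
    };

    static struct entry *alloc_entry(struct context *ctx)
    {
            struct entry *e;

            if (ctx->count < POOL_SIZE) {
                    e = &ctx->pool[ctx->count];
                    memset(e, 0, sizeof(*e));
            } else {
                    e = calloc(1, sizeof(*e));      /* zeroed, like kzalloc */
                    if (!e)
                            return NULL;
                    e->should_free = true;
            }
            ctx->count++;
            return e;
    }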
 /**
  * audit_getname - add a name to the list
  * @name: name to add
@@ -1831,9 +2041,7 @@ static void handle_path(const struct dentry *dentry)
 void __audit_getname(const char *name)
 {
        struct audit_context *context = current->audit_context;
-
-       if (IS_ERR(name) || !name)
-               return;
+       struct audit_names *n;
 
        if (!context->in_syscall) {
 #if AUDIT_DEBUG == 2
@@ -1843,13 +2051,15 @@ void __audit_getname(const char *name)
 #endif
                return;
        }
-       BUG_ON(context->name_count >= AUDIT_NAMES);
-       context->names[context->name_count].name = name;
-       context->names[context->name_count].name_len = AUDIT_NAME_FULL;
-       context->names[context->name_count].name_put = 1;
-       context->names[context->name_count].ino  = (unsigned long)-1;
-       context->names[context->name_count].osid = 0;
-       ++context->name_count;
+
+       n = audit_alloc_name(context);
+       if (!n)
+               return;
+
+       n->name = name;
+       n->name_len = AUDIT_NAME_FULL;
+       n->name_put = true;
+
        if (!context->pwd.dentry)
                get_fs_pwd(current->fs, &context->pwd);
 }
@@ -1871,12 +2081,13 @@ void audit_putname(const char *name)
                printk(KERN_ERR "%s:%d(:%d): __putname(%p)\n",
                       __FILE__, __LINE__, context->serial, name);
                if (context->name_count) {
-                       int i;
-                       for (i = 0; i < context->name_count; i++)
-                               printk(KERN_ERR "name[%d] = %p = %s\n", i,
-                                      context->names[i].name,
-                                      context->names[i].name ?: "(null)");
-               }
+                       struct audit_names *n;
+                       int i = 0;
+
+                       list_for_each_entry(n, &context->names_list, list)
+                               printk(KERN_ERR "name[%d] = %p = %s\n", i++,
+                                      n->name, n->name ?: "(null)");
+               }
 #endif
                __putname(name);
        }
@@ -1897,39 +2108,11 @@ void audit_putname(const char *name)
 #endif
 }
 
-static int audit_inc_name_count(struct audit_context *context,
-                               const struct inode *inode)
-{
-       if (context->name_count >= AUDIT_NAMES) {
-               if (inode)
-                       printk(KERN_DEBUG "audit: name_count maxed, losing inode data: "
-                              "dev=%02x:%02x, inode=%lu\n",
-                              MAJOR(inode->i_sb->s_dev),
-                              MINOR(inode->i_sb->s_dev),
-                              inode->i_ino);
-
-               else
-                       printk(KERN_DEBUG "name_count maxed, losing inode data\n");
-               return 1;
-       }
-       context->name_count++;
-#if AUDIT_DEBUG
-       context->ino_count++;
-#endif
-       return 0;
-}
-
-
 static inline int audit_copy_fcaps(struct audit_names *name, const struct dentry *dentry)
 {
        struct cpu_vfs_cap_data caps;
        int rc;
 
-       memset(&name->fcap.permitted, 0, sizeof(kernel_cap_t));
-       memset(&name->fcap.inheritable, 0, sizeof(kernel_cap_t));
-       name->fcap.fE = 0;
-       name->fcap_ver = 0;
-
        if (!dentry)
                return 0;
 
@@ -1969,30 +2152,25 @@ static void audit_copy_inode(struct audit_names *name, const struct dentry *dent
  */
 void __audit_inode(const char *name, const struct dentry *dentry)
 {
-       int idx;
        struct audit_context *context = current->audit_context;
        const struct inode *inode = dentry->d_inode;
+       struct audit_names *n;
 
        if (!context->in_syscall)
                return;
-       if (context->name_count
-           && context->names[context->name_count-1].name
-           && context->names[context->name_count-1].name == name)
-               idx = context->name_count - 1;
-       else if (context->name_count > 1
-                && context->names[context->name_count-2].name
-                && context->names[context->name_count-2].name == name)
-               idx = context->name_count - 2;
-       else {
-               /* FIXME: how much do we care about inodes that have no
-                * associated name? */
-               if (audit_inc_name_count(context, inode))
-                       return;
-               idx = context->name_count - 1;
-               context->names[idx].name = NULL;
+
+       list_for_each_entry_reverse(n, &context->names_list, list) {
+               if (n->name && (n->name == name))
+                       goto out;
        }
+
+       /* unable to find the name from a previous getname() */
+       n = audit_alloc_name(context);
+       if (!n)
+               return;
+out:
        handle_path(dentry);
-       audit_copy_inode(&context->names[idx], dentry, inode);
+       audit_copy_inode(n, dentry, inode);
 }
 
 /**
@@ -2011,11 +2189,11 @@ void __audit_inode(const char *name, const struct dentry *dentry)
 void __audit_inode_child(const struct dentry *dentry,
                         const struct inode *parent)
 {
-       int idx;
        struct audit_context *context = current->audit_context;
        const char *found_parent = NULL, *found_child = NULL;
        const struct inode *inode = dentry->d_inode;
        const char *dname = dentry->d_name.name;
+       struct audit_names *n;
        int dirlen = 0;
 
        if (!context->in_syscall)
@@ -2025,9 +2203,7 @@ void __audit_inode_child(const struct dentry *dentry,
                handle_one(inode);
 
        /* parent is more likely, look for it first */
-       for (idx = 0; idx < context->name_count; idx++) {
-               struct audit_names *n = &context->names[idx];
-
+       list_for_each_entry(n, &context->names_list, list) {
                if (!n->name)
                        continue;
 
@@ -2040,9 +2216,7 @@ void __audit_inode_child(const struct dentry *dentry,
        }
 
        /* no matching parent, look for matching child */
-       for (idx = 0; idx < context->name_count; idx++) {
-               struct audit_names *n = &context->names[idx];
-
+       list_for_each_entry(n, &context->names_list, list) {
                if (!n->name)
                        continue;
 
@@ -2060,34 +2234,29 @@ void __audit_inode_child(const struct dentry *dentry,
 
 add_names:
        if (!found_parent) {
-               if (audit_inc_name_count(context, parent))
+               n = audit_alloc_name(context);
+               if (!n)
                        return;
-               idx = context->name_count - 1;
-               context->names[idx].name = NULL;
-               audit_copy_inode(&context->names[idx], NULL, parent);
+               audit_copy_inode(n, NULL, parent);
        }
 
        if (!found_child) {
-               if (audit_inc_name_count(context, inode))
+               n = audit_alloc_name(context);
+               if (!n)
                        return;
-               idx = context->name_count - 1;
 
                /* Re-use the name belonging to the slot for a matching parent
                 * directory. All names for this context are relinquished in
                 * audit_free_names() */
                if (found_parent) {
-                       context->names[idx].name = found_parent;
-                       context->names[idx].name_len = AUDIT_NAME_FULL;
+                       n->name = found_parent;
+                       n->name_len = AUDIT_NAME_FULL;
                        /* don't call __putname() */
-                       context->names[idx].name_put = 0;
-               } else {
-                       context->names[idx].name = NULL;
+                       n->name_put = false;
                }
 
                if (inode)
-                       audit_copy_inode(&context->names[idx], NULL, inode);
-               else
-                       context->names[idx].ino = (unsigned long)-1;
+                       audit_copy_inode(n, NULL, inode);
        }
 }
 EXPORT_SYMBOL_GPL(__audit_inode_child);
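Note the found_parent branch above: the child entry aliases the parent's string and sets name_put to false, so __putname() runs exactly once per string when audit_free_names() tears the list down. A small user-space sketch of that ownership convention, types hypothetical:

    #include <stdbool.h>
    #include <stdlib.h>

    struct name_ref {
            const char *name;
            bool name_put;          /* true: this entry owns the storage */
    };

    static void drop_refs(struct name_ref *refs, int count)
    {
            int i;

            for (i = 0; i < count; i++)
                    if (refs[i].name && refs[i].name_put)
                            free((void *)refs[i].name);     /* freed once */
    }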
@@ -2121,19 +2290,28 @@ int auditsc_get_stamp(struct audit_context *ctx,
 static atomic_t session_id = ATOMIC_INIT(0);
 
 /**
- * audit_set_loginuid - set a task's audit_context loginuid
- * @task: task whose audit context is being modified
+ * audit_set_loginuid - set current task's audit_context loginuid
  * @loginuid: loginuid value
  *
  * Returns 0.
  *
  * Called (set) from fs/proc/base.c::proc_loginuid_write().
  */
-int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
+int audit_set_loginuid(uid_t loginuid)
 {
-       unsigned int sessionid = atomic_inc_return(&session_id);
+       struct task_struct *task = current;
        struct audit_context *context = task->audit_context;
+       unsigned int sessionid;
+
+#ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE
+       if (task->loginuid != -1)
+               return -EPERM;
+#else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
+       if (!capable(CAP_AUDIT_CONTROL))
+               return -EPERM;
+#endif  /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
 
+       sessionid = atomic_inc_return(&session_id);
        if (context && context->in_syscall) {
                struct audit_buffer *ab;
 
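The new checks change who may set the loginuid: with CONFIG_AUDIT_LOGINUID_IMMUTABLE it can be set only once, otherwise the caller needs CAP_AUDIT_CONTROL. A rough user-space illustration, assuming the usual /proc/self/loginuid interface that pam_loginuid writes to:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/proc/self/loginuid", O_WRONLY);

            if (fd < 0)
                    return 1;
            /* With CONFIG_AUDIT_LOGINUID_IMMUTABLE this fails with EPERM
             * once a loginuid has been set; otherwise it requires
             * CAP_AUDIT_CONTROL. */
            if (write(fd, "1000", 4) < 0)
                    perror("write loginuid");
            close(fd);
            return 0;
    }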
@@ -2271,14 +2449,11 @@ void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mo
        context->ipc.has_perm = 1;
 }
 
-int audit_bprm(struct linux_binprm *bprm)
+int __audit_bprm(struct linux_binprm *bprm)
 {
        struct audit_aux_data_execve *ax;
        struct audit_context *context = current->audit_context;
 
-       if (likely(!audit_enabled || !context || context->dummy))
-               return 0;
-
        ax = kmalloc(sizeof(*ax), GFP_KERNEL);
        if (!ax)
                return -ENOMEM;
@@ -2299,13 +2474,10 @@ int audit_bprm(struct linux_binprm *bprm)
  * @args: args array
  *
  */
-void audit_socketcall(int nargs, unsigned long *args)
+void __audit_socketcall(int nargs, unsigned long *args)
 {
        struct audit_context *context = current->audit_context;
 
-       if (likely(!context || context->dummy))
-               return;
-
        context->type = AUDIT_SOCKETCALL;
        context->socketcall.nargs = nargs;
        memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long));
@@ -2331,13 +2503,10 @@ void __audit_fd_pair(int fd1, int fd2)
  *
  * Returns 0 on success or if there is no context; < 0 on error.
  */
-int audit_sockaddr(int len, void *a)
+int __audit_sockaddr(int len, void *a)
 {
        struct audit_context *context = current->audit_context;
 
-       if (likely(!context || context->dummy))
-               return 0;
-
        if (!context->sockaddr) {
                void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL);
                if (!p)
@@ -2499,6 +2668,26 @@ void __audit_mmap_fd(int fd, int flags)
        context->type = AUDIT_MMAP;
 }
 
+static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+{
+       uid_t auid, uid;
+       gid_t gid;
+       unsigned int sessionid;
+
+       auid = audit_get_loginuid(current);
+       sessionid = audit_get_sessionid(current);
+       current_uid_gid(&uid, &gid);
+
+       audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
+                        auid, uid, gid, sessionid);
+       audit_log_task_context(ab);
+       audit_log_format(ab, " pid=%d comm=", current->pid);
+       audit_log_untrustedstring(ab, current->comm);
+       audit_log_format(ab, " reason=");
+       audit_log_string(ab, reason);
+       audit_log_format(ab, " sig=%ld", signr);
+}
+
 /**
  * audit_core_dumps - record information about processes that end abnormally
  * @signr: signal value
@@ -2509,10 +2698,6 @@ void __audit_mmap_fd(int fd, int flags)
 void audit_core_dumps(long signr)
 {
        struct audit_buffer *ab;
-       u32 sid;
-       uid_t auid = audit_get_loginuid(current), uid;
-       gid_t gid;
-       unsigned int sessionid = audit_get_sessionid(current);
 
        if (!audit_enabled)
                return;
@@ -2521,24 +2706,17 @@ void audit_core_dumps(long signr)
                return;
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
-       current_uid_gid(&uid, &gid);
-       audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
-                        auid, uid, gid, sessionid);
-       security_task_getsecid(current, &sid);
-       if (sid) {
-               char *ctx = NULL;
-               u32 len;
+       audit_log_abend(ab, "memory violation", signr);
+       audit_log_end(ab);
+}
 
-               if (security_secid_to_secctx(sid, &ctx, &len))
-                       audit_log_format(ab, " ssid=%u", sid);
-               else {
-                       audit_log_format(ab, " subj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
-       audit_log_format(ab, " pid=%d comm=", current->pid);
-       audit_log_untrustedstring(ab, current->comm);
-       audit_log_format(ab, " sig=%ld", signr);
+void __audit_seccomp(unsigned long syscall)
+{
+       struct audit_buffer *ab;
+
+       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
+       audit_log_abend(ab, "seccomp", SIGKILL);
+       audit_log_format(ab, " syscall=%ld", syscall);
        audit_log_end(ab);
 }
 
index 0fcf1c14a297c57d7cda541b71438441dd1adbde..3f1adb6c647015d80aa6b5b138f118fde7484d11 100644 (file)
@@ -384,7 +384,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
                BUG();
        }
 
-       if (has_ns_capability(current, ns, cap)) {
+       if (security_capable(current_cred(), ns, cap) == 0) {
                current->flags |= PF_SUPERPRIV;
                return true;
        }
index c44738267be770118b64203eb3cbd411d9656686..294b1709170d1e890c80d064c65d38c1b12a88c1 100644 (file)
@@ -964,8 +964,7 @@ void do_exit(long code)
        acct_collect(code, group_dead);
        if (group_dead)
                tty_audit_exit();
-       if (unlikely(tsk->audit_context))
-               audit_free(tsk);
+       audit_free(tsk);
 
        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);
index f3fa18887cc9b8d7fbde14f0e6fe36f57f791b96..051f090d40c1230462d4b4d53c05133879805f08 100644 (file)
@@ -1527,8 +1527,6 @@ long do_fork(unsigned long clone_flags,
                        init_completion(&vfork);
                }
 
-               audit_finish_fork(p);
-
                /*
                 * We set PF_STARTING at creation in case tracing wants to
                 * use this to distinguish a fully live task from one that
index 57d4b13b631de36161349feb99ae6f76a2984b06..e8d76c5895ea15f3bb4867a85f48851dedf90cd4 100644 (file)
@@ -6,6 +6,7 @@
  * This defines a simple but solid secure-computing mode.
  */
 
+#include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/sched.h>
 #include <linux/compat.h>
@@ -54,6 +55,7 @@ void __secure_computing(int this_syscall)
 #ifdef SECCOMP_DEBUG
        dump_stack();
 #endif
+       audit_seccomp(this_syscall);
        do_exit(SIGKILL);
 }
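audit_seccomp() as called here is not defined anywhere in this diff. By analogy with the other hooks that were just renamed to a __audit_* slow path, it is presumably a static inline wrapper in include/linux/audit.h that short-circuits when only a dummy context exists; a sketch of what such a wrapper would look like, not the verbatim header:

    #ifdef CONFIG_AUDITSYSCALL
    extern void __audit_seccomp(unsigned long syscall);

    static inline void audit_seccomp(unsigned long syscall)
    {
            /* only take the call when a real audit context exists */
            if (unlikely(!audit_dummy_context()))
                    __audit_seccomp(syscall);
    }
    #else
    static inline void audit_seccomp(unsigned long syscall)
    {
    }
    #endif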
 
index 201e1b33d721bf2ff83e6cd5ca817ea243861c39..169eb7c598e54d42a78018ac2b1338811674afd5 100644 (file)
@@ -286,25 +286,24 @@ config CORDIC
          calculations are in fixed point. Module will be called cordic.
 
 config MPILIB
-       tristate "Multiprecision maths library"
+       tristate
        help
          Multiprecision maths library from GnuPG.
          It is used to implement RSA digital signature verification,
          which is used by IMA/EVM digital signature extension.
 
 config MPILIB_EXTRA
-       bool "Multiprecision maths library - additional sources"
+       bool
        depends on MPILIB
        help
-         Multiprecision maths library from GnuPG.
-         It is used to implement RSA digital signature verification,
-         which is used by IMA/EVM digital signature extension.
-         This code in unnecessary for RSA digital signature verification,
-         and can be compiled if needed.
+         Additional sources for the multiprecision maths library from GnuPG.
+         This code is unnecessary for RSA digital signature verification,
+         but can be compiled if needed.
 
-config DIGSIG
-       tristate "In-kernel signature checker"
-       depends on KEYS
+config SIGNATURE
+       tristate
+       depends on KEYS && CRYPTO
+       select CRYPTO_SHA1
        select MPILIB
        help
          Digital signature verification. Currently only RSA is supported.
index dace162c7e1c2f063498ded701e5576f3c16b75a..d71aae1b01b364d7972837a562e765f6945e1955 100644 (file)
@@ -119,7 +119,7 @@ obj-$(CONFIG_CORDIC) += cordic.o
 obj-$(CONFIG_DQL) += dynamic_queue_limits.o
 
 obj-$(CONFIG_MPILIB) += mpi/
-obj-$(CONFIG_DIGSIG) += digsig.o
+obj-$(CONFIG_SIGNATURE) += digsig.o
 
 hostprogs-y    := gen_crc32table
 clean-files    := crc32table.h
index 602207be985379f7e15e94ea546b8d70b0b2a7b9..3dbff4dcde35191a62b0245ac7ee185bc8fc7084 100644 (file)
@@ -373,7 +373,6 @@ static void mem_cgroup_put(struct mem_cgroup *memcg);
 
 /* Writing them here to avoid exposing memcg's inner layout */
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
-#ifdef CONFIG_INET
 #include <net/sock.h>
 #include <net/ip.h>
 
@@ -420,6 +419,7 @@ void sock_release_memcg(struct sock *sk)
        }
 }
 
+#ifdef CONFIG_INET
 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
 {
        if (!memcg || mem_cgroup_is_root(memcg))
index f963f6b1884fd96dc12a96743a2f04669194cb3a..5ba0c844d508cbe549788e2219b4dd9ab1383149 100644 (file)
@@ -146,7 +146,7 @@ void br_fdb_cleanup(unsigned long _data)
        unsigned long next_timer = jiffies + br->ageing_time;
        int i;
 
-       spin_lock_bh(&br->hash_lock);
+       spin_lock(&br->hash_lock);
        for (i = 0; i < BR_HASH_SIZE; i++) {
                struct net_bridge_fdb_entry *f;
                struct hlist_node *h, *n;
@@ -162,7 +162,7 @@ void br_fdb_cleanup(unsigned long _data)
                                next_timer = this_timer;
                }
        }
-       spin_unlock_bh(&br->hash_lock);
+       spin_unlock(&br->hash_lock);
 
        mod_timer(&br->gc_timer, round_jiffies_up(next_timer));
 }
index 61570ee76fe6eca7824431663917a32827c90c9b..673728add60bf878c7b582a770bf2f3a48dc69bf 100644 (file)
@@ -146,15 +146,17 @@ void caif_flow_cb(struct sk_buff *skb)
        spin_lock_bh(&caifd->flow_lock);
        send_xoff = caifd->xoff;
        caifd->xoff = 0;
-       if (!WARN_ON(caifd->xoff_skb_dtor == NULL)) {
-               WARN_ON(caifd->xoff_skb != skb);
-               dtor = caifd->xoff_skb_dtor;
-               caifd->xoff_skb = NULL;
-               caifd->xoff_skb_dtor = NULL;
-       }
+       dtor = caifd->xoff_skb_dtor;
+
+       if (WARN_ON(caifd->xoff_skb != skb))
+               skb = NULL;
+
+       caifd->xoff_skb = NULL;
+       caifd->xoff_skb_dtor = NULL;
+
        spin_unlock_bh(&caifd->flow_lock);
 
-       if (dtor)
+       if (dtor && skb)
                dtor(skb);
 
        if (send_xoff)
index 5fc9eca8cd4149b2cc102ea32b0ae131db4e8452..fd7cbf5aa8956732f51637eeab3f512e00743fcb 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/slab.h>
-#include <linux/netdevice.h>
 #include <linux/mii.h>
 #include <linux/usb.h>
 #include <linux/usb/usbnet.h>
@@ -27,7 +26,7 @@ MODULE_LICENSE("GPL");
 #define CFUSB_ALIGNMENT 4      /* Number of bytes to align. */
 #define CFUSB_MAX_HEADLEN (CFUSB_PAD_DESCR_SZ + CFUSB_ALIGNMENT-1)
 #define STE_USB_VID 0x04cc     /* USB Vendor ID for ST-Ericsson */
-#define STE_USB_PID_CAIF 0x2306        /* Product id for CAIF Modems */
+#define STE_USB_PID_CAIF 0x230f        /* Product id for CAIF Modems */
 
 struct cfusbl {
        struct cflayer layer;
index f494675471a91b7f093665e6098ee7b3a91e6e4e..115dee1d985d40c5998abd4ddd57ae82123127c3 100644 (file)
@@ -1887,6 +1887,23 @@ void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
 EXPORT_SYMBOL(skb_set_dev);
 #endif /* CONFIG_NET_NS */
 
+static void skb_warn_bad_offload(const struct sk_buff *skb)
+{
+       static const netdev_features_t null_features = 0;
+       struct net_device *dev = skb->dev;
+       const char *driver = "";
+
+       if (dev && dev->dev.parent)
+               driver = dev_driver_string(dev->dev.parent);
+
+       WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
+            "gso_type=%d ip_summed=%d\n",
+            driver, dev ? &dev->features : &null_features,
+            skb->sk ? &skb->sk->sk_route_caps : &null_features,
+            skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
+            skb_shinfo(skb)->gso_type, skb->ip_summed);
+}
+
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
@@ -1900,8 +1917,8 @@ int skb_checksum_help(struct sk_buff *skb)
                goto out_set_summed;
 
        if (unlikely(skb_shinfo(skb)->gso_size)) {
-               /* Let GSO fix up the checksum. */
-               goto out_set_summed;
+               skb_warn_bad_offload(skb);
+               return -EINVAL;
        }
 
        offset = skb_checksum_start_offset(skb);
@@ -1961,16 +1978,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
        __skb_pull(skb, skb->mac_len);
 
        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
-               struct net_device *dev = skb->dev;
-               struct ethtool_drvinfo info = {};
-
-               if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
-                       dev->ethtool_ops->get_drvinfo(dev, &info);
-
-               WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d ip_summed=%d\n",
-                    info.driver, dev ? &dev->features : NULL,
-                    skb->sk ? &skb->sk->sk_route_caps : NULL,
-                    skb->len, skb->data_len, skb->ip_summed);
+               skb_warn_bad_offload(skb);
 
                if (skb_header_cloned(skb) &&
                    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
index f3dbd4f596a4904a99650d18ae73c9a963126aa8..a1727cda03d7bec9b565de4647dd3cff754fcbe8 100644 (file)
@@ -929,7 +929,7 @@ static ssize_t bql_show_inflight(struct netdev_queue *queue,
 }
 
 static struct netdev_queue_attribute bql_inflight_attribute =
-       __ATTR(inflight, S_IRUGO | S_IWUSR, bql_show_inflight, NULL);
+       __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
 
 #define BQL_ATTR(NAME, FIELD)                                          \
 static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,           \
index 6fd44606fdd130a12712ac3f67ea6f92befab574..99b2596531bbc2a9a714bf262a8c79c61f1726b5 100644 (file)
@@ -46,7 +46,7 @@ __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
 
        memcpy(hash, saddr, 16);
        for (i = 0; i < 4; i++)
-               secret[i] = net_secret[i] + daddr[i];
+               secret[i] = net_secret[i] + (__force u32)daddr[i];
        secret[4] = net_secret[4] +
                (((__force u16)sport << 16) + (__force u16)dport);
        for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
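The (__force u32) casts address sparse endianness warnings: daddr points at __be32 values and the hash mixing deliberately consumes the raw big-endian bits. A plain user-space sketch of the distinction the annotations enforce:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t wire = htonl(0x7f000001);  /* big-endian wire format */
            uint32_t host = ntohl(wire);        /* explicit byte swap */
            uint32_t raw  = wire;               /* reinterpret the bits:
                                                 * what (__force u32) says */

            printf("host=%08x raw=%08x\n", host, raw);
            return 0;
    }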
index 86f13c67ea8579d32a4f076063ea1777c75e166b..bf4a9c4808e1faaa7461a472f224a4d3c8edf379 100644 (file)
@@ -136,7 +136,7 @@ static int addr_compare(const struct inetpeer_addr *a,
        for (i = 0; i < n; i++) {
                if (a->addr.a6[i] == b->addr.a6[i])
                        continue;
-               if (a->addr.a6[i] < b->addr.a6[i])
+               if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
                        return -1;
                return 1;
        }
@@ -447,6 +447,7 @@ struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
                p->rate_last = 0;
                p->pmtu_expires = 0;
                p->pmtu_orig = 0;
+               p->redirect_genid = 0;
                memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
 
 
index 7e4ec9fc2cef3c38bd0e659a8c36548ff25fe429..6e412a60a91f27acb3ea2241a860c6a18401fed8 100644 (file)
@@ -141,7 +141,7 @@ __be32 ic_servaddr = NONE;  /* Boot server IP address */
 __be32 root_server_addr = NONE;        /* Address of NFS server */
 u8 root_server_path[256] = { 0, };     /* Path to mount as root */
 
-u32 ic_dev_xid;                /* Device under configuration */
+__be32 ic_dev_xid;             /* Device under configuration */
 
 /* vendor class identifier */
 static char vendor_class_identifier[253] __initdata;
@@ -859,9 +859,9 @@ static int __init ic_bootp_string(char *dest, char *src, int len, int max)
  */
 static void __init ic_do_bootp_ext(u8 *ext)
 {
-       u8 servers;
-       int i;
-       u16 mtu;
+       u8 servers;
+       int i;
+       __be16 mtu;
 
 #ifdef IPCONFIG_DEBUG
        u8 *c;
index 43d4c3b223699aee36de3c3c79c6d82b5236a4c0..aea5a199c37a341be3fab08500f1c35a34cbf215 100644 (file)
@@ -140,13 +140,14 @@ static void ping_v4_unhash(struct sock *sk)
                write_lock_bh(&ping_table.lock);
                hlist_nulls_del(&sk->sk_nulls_node);
                sock_put(sk);
-               isk->inet_num = isk->inet_sport = 0;
+               isk->inet_num = 0;
+               isk->inet_sport = 0;
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
                write_unlock_bh(&ping_table.lock);
        }
 }
 
-static struct sock *ping_v4_lookup(struct net *net, u32 saddr, u32 daddr,
+static struct sock *ping_v4_lookup(struct net *net, __be32 saddr, __be32 daddr,
                                   u16 ident, int dif)
 {
        struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
@@ -154,15 +155,15 @@ static struct sock *ping_v4_lookup(struct net *net, u32 saddr, u32 daddr,
        struct inet_sock *isk;
        struct hlist_nulls_node *hnode;
 
-       pr_debug("try to find: num = %d, daddr = %ld, dif = %d\n",
-                        (int)ident, (unsigned long)daddr, dif);
+       pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
+                        (int)ident, &daddr, dif);
        read_lock_bh(&ping_table.lock);
 
        ping_portaddr_for_each_entry(sk, hnode, hslot) {
                isk = inet_sk(sk);
 
-               pr_debug("found: %p: num = %d, daddr = %ld, dif = %d\n", sk,
-                        (int)isk->inet_num, (unsigned long)isk->inet_rcv_saddr,
+               pr_debug("found: %p: num = %d, daddr = %pI4, dif = %d\n", sk,
+                        (int)isk->inet_num, &isk->inet_rcv_saddr,
                         sk->sk_bound_dev_if);
 
                pr_debug("iterate\n");
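These conversions switch the debug output to the %pI4 printk extension, which takes a pointer to a big-endian __be32 and renders a dotted quad; the old cast to unsigned long printed a byte-order-dependent integer. Illustrative fragment, kernel context assumed:

    __be32 addr = htonl(INADDR_LOOPBACK);

    pr_debug("addr = %pI4\n", &addr);   /* prints "addr = 127.0.0.1" */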
@@ -254,7 +255,7 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));
 
        chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
-       if (addr->sin_addr.s_addr == INADDR_ANY)
+       if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
                chk_addr_ret = RTN_LOCAL;
 
        if ((sysctl_ip_nonlocal_bind == 0 &&
@@ -278,9 +279,9 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                goto out;
        }
 
-       pr_debug("after bind(): num = %d, daddr = %ld, dif = %d\n",
+       pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n",
                (int)isk->inet_num,
-               (unsigned long) isk->inet_rcv_saddr,
+               &isk->inet_rcv_saddr,
                (int)sk->sk_bound_dev_if);
 
        err = 0;
@@ -407,7 +408,7 @@ void ping_err(struct sk_buff *skb, u32 info)
 struct pingfakehdr {
        struct icmphdr icmph;
        struct iovec *iov;
-       u32 wcheck;
+       __wsum wcheck;
 };
 
 static int ping_getfrag(void *from, char * to,
@@ -459,7 +460,7 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        struct rtable *rt = NULL;
        struct ip_options_data opt_copy;
        int free = 0;
-       u32 saddr, daddr, faddr;
+       __be32 saddr, daddr, faddr;
        u8  tos;
        int err;
 
@@ -696,8 +697,8 @@ void ping_rcv(struct sk_buff *skb)
        struct net *net = dev_net(skb->dev);
        struct iphdr *iph = ip_hdr(skb);
        struct icmphdr *icmph = icmp_hdr(skb);
-       u32 saddr = iph->saddr;
-       u32 daddr = iph->daddr;
+       __be32 saddr = iph->saddr;
+       __be32 daddr = iph->daddr;
 
        /* We assume the packet has already been checked by icmp_rcv */
 
index e5e18cb8a58686a2331480c53edac2a77b901e8c..8a949f19deb6dc93542396138004a5a6b2bfeeab 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/udp.h>
 #include <net/udp.h>
 #include <net/udplite.h>
-#include <linux/inet_diag.h>
 #include <linux/sock_diag.h>
 
 static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
index ae08aee1773c678187f8be84414f95ac472aa426..251e7cd75e89787f5346037a278bfd7e5b8c8ced 100644 (file)
@@ -575,7 +575,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
        }
        if (np->rxopt.bits.rxorigdstaddr) {
                struct sockaddr_in6 sin6;
-               u16 *ports = (u16 *) skb_transport_header(skb);
+               __be16 *ports = (__be16 *) skb_transport_header(skb);
 
                if (skb_transport_offset(skb) + 4 <= skb->len) {
                        /* All current transport protocols have the port numbers in the
index fdeb6d03da812d136874fccc870b90c69efee7b1..da2e92d05c15a5052ea2cc19ba9bfc5751c23cf6 100644 (file)
@@ -237,8 +237,8 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
        struct inet6_dev *idev = (struct inet6_dev *)seq->private;
 
        seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
-       snmp6_seq_show_item(seq, (void __percpu **)idev->stats.ipv6, NULL,
-                           snmp6_ipstats_list);
+       snmp6_seq_show_item64(seq, (void __percpu **)idev->stats.ipv6,
+                           snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
        snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
                            snmp6_icmp6_list);
        snmp6_seq_show_icmpv6msg(seq, idev->stats.icmpv6msgdev->mibs);
index 07361dfa80852cbbe4db66027f8da5ef13ade4c1..8c2e3ab58f2af211c04f17c337929bd2802cbe27 100644 (file)
@@ -1091,6 +1091,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        else {
                neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr);
                if (IS_ERR(neigh)) {
+                       in6_dev_put(idev);
                        dst_free(&rt->dst);
                        return ERR_CAST(neigh);
                }
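The added in6_dev_put() plugs a reference leak: icmp6_dst_alloc() takes a reference on idev earlier, and the early return on a failed neighbour lookup skipped the release. The general shape of this class of fix, with hypothetical names:

    #include <stdbool.h>

    struct idev { int refcnt; };

    static bool neigh_lookup_fails(void) { return true; }   /* stand-in */

    static bool alloc_route(struct idev *idev)
    {
            idev->refcnt++;                 /* reference taken up front */
            if (neigh_lookup_fails()) {
                    idev->refcnt--;         /* release on the error path */
                    return false;
            }
            return true;
    }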
index e60df48fa4d4d235016398706bf3b32e5633c1e0..296620d6ca0c0c2388f05ffaffd9c5d832eb3cbd 100644 (file)
@@ -791,7 +791,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
                        ret = sta_info_move_state_checked(sta,
                                        IEEE80211_STA_AUTHORIZED);
-               else
+               else if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
                        ret = sta_info_move_state_checked(sta,
                                        IEEE80211_STA_ASSOC);
                if (ret)
index f407427c642f4a6fdd1d6b38fb2462b244f00da1..7514091207696b9066e416305da8fb8084f651b0 100644 (file)
@@ -1979,6 +1979,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
                mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3,
                                    0, reason, fwd_hdr->addr2, sdata);
                IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
+               kfree_skb(fwd_skb);
                return RX_DROP_MONITOR;
        }
 
index 3c428d4839c7c5cca61ba535197068f3b5c10dbd..ff11f6bf8266dc1a01b1b16330b416a054d702de 100644 (file)
@@ -238,9 +238,11 @@ static void sta_unblock(struct work_struct *wk)
        if (sta->dead)
                return;
 
-       if (!test_sta_flag(sta, WLAN_STA_PS_STA))
+       if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
+               local_bh_disable();
                ieee80211_sta_ps_deliver_wakeup(sta);
-       else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) {
+               local_bh_enable();
+       } else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) {
                clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
 
                local_bh_disable();
index edcd1c7ab83f94fcaffdb049a93540bdf77c5baf..e05667cd5e766057c22770670834e6f2b19e3301 100644 (file)
@@ -1001,8 +1001,6 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-
        if (!tx->key)
                return TX_CONTINUE;
 
@@ -1017,13 +1015,7 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
        case WLAN_CIPHER_SUITE_AES_CMAC:
                return ieee80211_crypto_aes_cmac_encrypt(tx);
        default:
-               /* handle hw-only algorithm */
-               if (info->control.hw_key) {
-                       ieee80211_tx_set_protected(tx);
-                       return TX_CONTINUE;
-               }
-               break;
-
+               return ieee80211_crypto_hw_encrypt(tx);
        }
 
        return TX_DROP;
index 422b79851ec510ef4272f9f2565bb56a0e002aef..b758350919ff4641e69127a220f3c450fb07fcc9 100644 (file)
@@ -643,3 +643,22 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
 
        return RX_CONTINUE;
 }
+
+ieee80211_tx_result
+ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx)
+{
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *info = NULL;
+
+       skb_queue_walk(&tx->skbs, skb) {
+               info  = IEEE80211_SKB_CB(skb);
+
+               /* handle hw-only algorithm */
+               if (!info->control.hw_key)
+                       return TX_DROP;
+       }
+
+       ieee80211_tx_set_protected(tx);
+
+       return TX_CONTINUE;
+}
index baba0608313ef5419cccff809d28166b1178bf7b..07e33f899c71fc52f9ff1771cd283226a8a64084 100644 (file)
@@ -32,5 +32,7 @@ ieee80211_tx_result
 ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx);
 ieee80211_rx_result
 ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx);
+ieee80211_tx_result
+ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx);
 
 #endif /* WPA_H */
index 86137b558f455d9e1740a977aa09ea1e294bb241..32dbf0fa89db7661e1ee22cee519c273a229c9ca 100644 (file)
@@ -77,35 +77,42 @@ find_set_type(const char *name, u8 family, u8 revision)
 }
 
 /* Unlock, try to load a set type module and lock again */
-static int
-try_to_load_type(const char *name)
+static bool
+load_settype(const char *name)
 {
        nfnl_unlock();
        pr_debug("try to load ip_set_%s\n", name);
        if (request_module("ip_set_%s", name) < 0) {
                pr_warning("Can't find ip_set type %s\n", name);
                nfnl_lock();
-               return -IPSET_ERR_FIND_TYPE;
+               return false;
        }
        nfnl_lock();
-       return -EAGAIN;
+       return true;
 }
 
 /* Find a set type and reference it */
+#define find_set_type_get(name, family, revision, found)       \
+       __find_set_type_get(name, family, revision, found, false)
+
 static int
-find_set_type_get(const char *name, u8 family, u8 revision,
-                 struct ip_set_type **found)
+__find_set_type_get(const char *name, u8 family, u8 revision,
+                   struct ip_set_type **found, bool retry)
 {
        struct ip_set_type *type;
        int err;
 
+       if (retry && !load_settype(name))
+               return -IPSET_ERR_FIND_TYPE;
+
        rcu_read_lock();
        *found = find_set_type(name, family, revision);
        if (*found) {
                err = !try_module_get((*found)->me) ? -EFAULT : 0;
                goto unlock;
        }
-       /* Make sure the type is loaded but we don't support the revision */
+       /* The type may already be loaded,
+        * just not with the requested revision */
        list_for_each_entry_rcu(type, &ip_set_type_list, list)
                if (STREQ(type->name, name)) {
                        err = -IPSET_ERR_FIND_TYPE;
@@ -113,7 +120,8 @@ find_set_type_get(const char *name, u8 family, u8 revision,
                }
        rcu_read_unlock();
 
-       return try_to_load_type(name);
+       return retry ? -IPSET_ERR_FIND_TYPE :
+               __find_set_type_get(name, family, revision, found, true);
 
 unlock:
        rcu_read_unlock();
@@ -124,12 +132,19 @@ find_set_type_get(const char *name, u8 family, u8 revision,
  * If we succeeded, the supported minimal and maximum revisions are
  * filled out.
  */
+#define find_set_type_minmax(name, family, min, max) \
+       __find_set_type_minmax(name, family, min, max, false)
+
 static int
-find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
+__find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
+                      bool retry)
 {
        struct ip_set_type *type;
        bool found = false;
 
+       if (retry && !load_settype(name))
+               return -IPSET_ERR_FIND_TYPE;
+
        *min = 255; *max = 0;
        rcu_read_lock();
        list_for_each_entry_rcu(type, &ip_set_type_list, list)
@@ -145,7 +160,8 @@ find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
        if (found)
                return 0;
 
-       return try_to_load_type(name);
+       return retry ? -IPSET_ERR_FIND_TYPE :
+               __find_set_type_minmax(name, family, min, max, true);
 }
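The rework above replaces the old -EAGAIN-driven retry with a retry flag: look the type up, and on a miss try exactly once more after loading the module. A compact user-space sketch of the pattern, function names hypothetical:

    #include <stdbool.h>
    #include <stddef.h>

    struct type;

    extern struct type *find_type(const char *name);
    extern bool load_module(const char *name);

    static struct type *find_type_get(const char *name, bool retry)
    {
            struct type *t;

            if (retry && !load_module(name))
                    return NULL;

            t = find_type(name);
            if (t || retry)
                    return t;       /* found, or already retried once */

            return find_type_get(name, true);
    }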
 
 #define family_name(f) ((f) == AF_INET ? "inet" : \
@@ -1126,6 +1142,7 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
        if (ret || !cb->args[2]) {
                pr_debug("release set %s\n", ip_set_list[index]->name);
                ip_set_put_byindex(index);
+               cb->args[2] = 0;
        }
 out:
        if (nlh) {
index 299fec91f74189a562f7fcb0d47914d9b279c286..bbe23baa19b64f4df7b2532b1471614a5315cc26 100644 (file)
@@ -121,18 +121,6 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
        int ret = 0;
 
        if (tmpl != NULL) {
-               /* we've got a userspace helper. */
-               if (tmpl->status & IPS_USERSPACE_HELPER) {
-                       help = nf_ct_helper_ext_add(ct, flags);
-                       if (help == NULL) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-                       rcu_assign_pointer(help->helper, NULL);
-                       __set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status);
-                       ret = 0;
-                       goto out;
-               }
                help = nfct_help(tmpl);
                if (help != NULL)
                        helper = help->helper;
index 2a4834b83332afa2ebf87b37102cd2a9d6dcd8d1..9307b033c0c9d9ff35c60b31755aaceef8d89087 100644 (file)
@@ -2042,10 +2042,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
        }
        help = nfct_help(ct);
        if (!help) {
-               err = -EOPNOTSUPP;
-               goto out;
-       }
-       if (test_bit(IPS_USERSPACE_HELPER_BIT, &ct->status)) {
                if (!cda[CTA_EXPECT_TIMEOUT]) {
                        err = -EINVAL;
                        goto out;
index 8e87123f1373a1a4a42ddb6211c1737d2346047d..0221d10de75a517dbc4c5e5c7d40b432abef15a3 100644 (file)
@@ -62,8 +62,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
        int ret = 0;
        u8 proto;
 
-       if (info->flags & ~(XT_CT_NOTRACK | XT_CT_USERSPACE_HELPER))
-               return -EOPNOTSUPP;
+       if (info->flags & ~XT_CT_NOTRACK)
+               return -EINVAL;
 
        if (info->flags & XT_CT_NOTRACK) {
                ct = nf_ct_untracked_get();
@@ -92,9 +92,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
                                  GFP_KERNEL))
                goto err3;
 
-       if (info->flags & XT_CT_USERSPACE_HELPER) {
-               __set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status);
-       } else if (info->helper[0]) {
+       if (info->helper[0]) {
                ret = -ENOENT;
                proto = xt_ct_find_proto(par);
                if (!proto) {
index 8e4992101875086cd412154d102793c99ffa6bed..d95f9c963cde01cfcd4d6541de352c85f44fc6f8 100644 (file)
@@ -445,7 +445,6 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
 {
        __be16 _ports[2], *ports;
        u8 nexthdr;
-       __be16 frag_off;
        int poff;
 
        memset(dst, 0, sizeof(*dst));
@@ -466,6 +465,9 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
                break;
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        case NFPROTO_IPV6:
+       {
+               __be16 frag_off;
+
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
                        memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
                               sizeof(dst->ip6.dst));
@@ -485,6 +487,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
                if ((int)protoff < 0)
                        return -1;
                break;
+       }
 #endif
        default:
                BUG();
index 9a2725114e99bad3a1e69351ac55ec764a295cc6..ce64c18b8c79a99ef4251e7c9d2a752105207725 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira Networks.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -36,7 +36,6 @@
 #include <linux/rcupdate.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
-#include <linux/version.h>
 #include <linux/ethtool.h>
 #include <linux/wait.h>
 #include <asm/system.h>
@@ -1397,9 +1396,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
        int i = 0;
 
        list_for_each_entry(dp, &dps, list_node) {
-               if (i < skip)
-                       continue;
-               if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
+               if (i >= skip &&
+                   ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         OVS_DP_CMD_NEW) < 0)
                        break;
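The rewritten condition matters if, as the surrounding code suggests, the index increment sits at the bottom of the loop body: the old form, if (i < skip) continue;, would jump past it, so a resumed dump could never advance beyond skip. Folding the skip test into one condition keeps the increment unconditional; a sketch of the corrected loop shape:

    #include <linux/list.h>

    struct dp_like { struct list_head list_node; };

    extern int emit_one(struct dp_like *dp);    /* hypothetical helper */

    static int dump_from(struct list_head *dps, int skip)
    {
            struct dp_like *dp;
            int i = 0;

            list_for_each_entry(dp, dps, list_node) {
                    if (i >= skip && emit_one(dp) < 0)
                            break;
                    i++;    /* advances for skipped entries too */
            }
            return i;       /* the next dump pass resumes here */
    }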
index 5b9f884b7055a6c75205fc00082f2f541202df7f..c73370cc1f02df24d32026c0e775efef72451dfc 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/u64_stats_sync.h>
-#include <linux/version.h>
 
 #include "flow.h"
 
index fe7f020a843efa2519f47c80ca4b340e14299917..1252c3081ef12740a0b818fbd58ae3ab6e9b870e 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/in.h>
 #include <linux/rcupdate.h>
 #include <linux/if_arp.h>
-#include <linux/if_ether.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
index 8fc28b86f2b3e4666fd2ef18d63ad785aed2c25d..322b8d206693dde98a30f1d36a6db4d800e0f17a 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/skbuff.h>
-#include <linux/version.h>
 
 #include "datapath.h"
 #include "vport-internal_dev.h"
index 7f0ef3794c515064c67d719b8f65c564aa82295d..6c066ba25dc71c24c8a94e4acc50014d610f77a6 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/rcupdate.h>
 #include <linux/rtnetlink.h>
 #include <linux/compat.h>
-#include <linux/version.h>
 
 #include "vport.h"
 #include "vport-internal_dev.h"
index d384ea921482088dea0c7c13f54e2d3c57c18f26..5bd1cc1b4a54dcae8681bf7e0c092d374614ab6d 100644 (file)
@@ -3,11 +3,11 @@ config INTEGRITY
        def_bool y
        depends on IMA || EVM
 
-config INTEGRITY_DIGSIG
+config INTEGRITY_SIGNATURE
        boolean "Digital signature verification using multiple keyrings"
        depends on INTEGRITY && KEYS
        default n
-       select DIGSIG
+       select SIGNATURE
        help
          This option enables digital signature verification support
          using multiple keyrings. It defines separate keyrings for each
index bece0563ee5e019bc9fa56bd2dff4e426cf262b7..d43799cc14f69a67da22249d637b5f9d1334b357 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 obj-$(CONFIG_INTEGRITY) += integrity.o
-obj-$(CONFIG_INTEGRITY_DIGSIG) += digsig.o
+obj-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
 
 integrity-y := iint.o
 
index c5c5a72c30be30b0477446cd311e8f329cb7dd30..2ad942fb1e236e694eb50538ac71a33a90e8d40a 100644 (file)
@@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
                audit_log_format(ab, " name=");
                audit_log_untrustedstring(ab, fname);
        }
-       if (inode)
-               audit_log_format(ab, " dev=%s ino=%lu",
-                                inode->i_sb->s_id, inode->i_ino);
+       if (inode) {
+               audit_log_format(ab, " dev=");
+               audit_log_untrustedstring(ab, inode->i_sb->s_id);
+               audit_log_format(ab, " ino=%lu", inode->i_ino);
+       }
        audit_log_format(ab, " res=%d", !result ? 0 : 1);
        audit_log_end(ab);
 }
index 4da6ba81d1532aa4690cc95b99ba682ddb0254b1..7a25ecec5aaac6b8d00c2bf0f926deaeea561163 100644 (file)
@@ -51,7 +51,7 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
 #define INTEGRITY_KEYRING_IMA          2
 #define INTEGRITY_KEYRING_MAX          3
 
-#ifdef CONFIG_INTEGRITY_DIGSIG
+#ifdef CONFIG_INTEGRITY_SIGNATURE
 
 int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
                                        const char *digest, int digestlen);
@@ -65,7 +65,7 @@ static inline int integrity_digsig_verify(const unsigned int id,
        return -EOPNOTSUPP;
 }
 
-#endif /* CONFIG_INTEGRITY_DIGSIG */
+#endif /* CONFIG_INTEGRITY_SIGNATURE */
 
 /* set during initialization */
 extern int iint_initialized;
index 41144f71d6154f612570f8f2c5243269becf68a4..2d1bb8af7696d5d431394cfbe8ac6d5e40f24da2 100644 (file)
@@ -314,7 +314,7 @@ static struct key *request_user_key(const char *master_desc, u8 **master_key,
                goto error;
 
        down_read(&ukey->sem);
-       upayload = rcu_dereference(ukey->payload.data);
+       upayload = ukey->payload.data;
        *master_key = upayload->data;
        *master_keylen = upayload->datalen;
 error:
@@ -810,7 +810,7 @@ static int encrypted_instantiate(struct key *key, const void *data,
                goto out;
        }
 
-       rcu_assign_pointer(key->payload.data, epayload);
+       rcu_assign_keypointer(key, epayload);
 out:
        kfree(datablob);
        return ret;
@@ -874,7 +874,7 @@ static int encrypted_update(struct key *key, const void *data, size_t datalen)
        memcpy(new_epayload->payload_data, epayload->payload_data,
               epayload->payload_datalen);
 
-       rcu_assign_pointer(key->payload.data, new_epayload);
+       rcu_assign_keypointer(key, new_epayload);
        call_rcu(&epayload->rcu, encrypted_rcu_free);
 out:
        kfree(buf);
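
The encrypted-key hunks (and the trusted-key ones below) replace open-coded RCU operations on key->payload.data with the rcu_assign_keypointer() helper, and drop rcu_dereference() from reads that are already serialised by the key semaphore rather than by RCU. A sketch of both halves of that pattern, assuming the 3.3-era key API; the payload type and free callback are illustrative, not from this diff:

    /* reader: the payload cannot change while ukey->sem is held,
     * so a plain load is correct and avoids a bogus RCU annotation */
    down_read(&ukey->sem);
    upayload = ukey->payload.data;
    /* ... copy out upayload->data ... */
    up_read(&ukey->sem);

    /* writer: publish the new payload through the helper, then let
     * RCU reclaim the old copy once readers are done with it */
    rcu_assign_keypointer(key, new_payload);
    call_rcu(&old_payload->rcu, example_payload_rcu_free);  /* illustrative callback */
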
index df87272e3f519ca6909e9f9e4ab8132554ae9abb..013f7e5d3a2fce42d026fd87a4a0d6e94af1d53f 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <keys/trusted-type.h>
+#include <keys/encrypted-type.h>
+#include "encrypted.h"
 
 /*
  * request_trusted_key - request the trusted key
@@ -37,7 +39,7 @@ struct key *request_trusted_key(const char *trusted_desc,
                goto error;
 
        down_read(&tkey->sem);
-       tpayload = rcu_dereference(tkey->payload.data);
+       tpayload = tkey->payload.data;
        *master_key = tpayload->key;
        *master_keylen = tpayload->key_len;
 error:
index bf4d8da5a79502f626a849473e4af18e229eb074..a42b45531aac173a337498cfc26fd09ded18baee 100644 (file)
@@ -145,7 +145,9 @@ static void key_gc_keyring(struct key *keyring, time_t limit)
        if (!klist)
                goto unlock_dont_gc;
 
-       for (loop = klist->nkeys - 1; loop >= 0; loop--) {
+       loop = klist->nkeys;
+       smp_rmb();
+       for (loop--; loop >= 0; loop--) {
                key = klist->keys[loop];
                if (test_bit(KEY_FLAG_DEAD, &key->flags) ||
                    (key->expiry > 0 && key->expiry <= limit))
index 37a7f3b28852e098f57ab1cd98eb2eccdad5c7c5..d605f75292e4390da5d7f8cd763266004288ecf2 100644 (file)
@@ -319,7 +319,7 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
        struct key *keyring, *key;
        key_ref_t key_ref;
        long err;
-       int sp, kix;
+       int sp, nkeys, kix;
 
        keyring = key_ref_to_ptr(keyring_ref);
        possessed = is_key_possessed(keyring_ref);
@@ -380,7 +380,9 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
                goto not_this_keyring;
 
        /* iterate through the keys in this keyring first */
-       for (kix = 0; kix < keylist->nkeys; kix++) {
+       nkeys = keylist->nkeys;
+       smp_rmb();
+       for (kix = 0; kix < nkeys; kix++) {
                key = keylist->keys[kix];
                kflags = key->flags;
 
@@ -421,7 +423,9 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
        /* search through the keyrings nested in this one */
        kix = 0;
 ascend:
-       for (; kix < keylist->nkeys; kix++) {
+       nkeys = keylist->nkeys;
+       smp_rmb();
+       for (; kix < nkeys; kix++) {
                key = keylist->keys[kix];
                if (key->type != &key_type_keyring)
                        continue;
@@ -515,7 +519,7 @@ key_ref_t __keyring_search_one(key_ref_t keyring_ref,
        struct keyring_list *klist;
        unsigned long possessed;
        struct key *keyring, *key;
-       int loop;
+       int nkeys, loop;
 
        keyring = key_ref_to_ptr(keyring_ref);
        possessed = is_key_possessed(keyring_ref);
@@ -524,7 +528,9 @@ key_ref_t __keyring_search_one(key_ref_t keyring_ref,
 
        klist = rcu_dereference(keyring->payload.subscriptions);
        if (klist) {
-               for (loop = 0; loop < klist->nkeys; loop++) {
+               nkeys = klist->nkeys;
+               smp_rmb();
+               for (loop = 0; loop < nkeys ; loop++) {
                        key = klist->keys[loop];
 
                        if (key->type == ktype &&
@@ -622,7 +628,7 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
 
        struct keyring_list *keylist;
        struct key *subtree, *key;
-       int sp, kix, ret;
+       int sp, nkeys, kix, ret;
 
        rcu_read_lock();
 
@@ -645,7 +651,9 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
 
 ascend:
        /* iterate through the remaining keys in this keyring */
-       for (; kix < keylist->nkeys; kix++) {
+       nkeys = keylist->nkeys;
+       smp_rmb();
+       for (; kix < nkeys; kix++) {
                key = keylist->keys[kix];
 
                if (key == A)
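
The smp_rmb() additions in gc.c above and in the three keyring.c loops all follow one pairing: the existing keyring writer stores the new key into keys[] and issues smp_wmb() before incrementing nkeys, so a lock-free reader must snapshot nkeys and issue smp_rmb() before indexing keys[]. A hedged sketch of the two sides (inspect() is a stand-in; the writer side reflects the usual __key_link() sequence as understood here):

    static void publish_key(struct keyring_list *klist, struct key *key)
    {
        /* writer, under the keyring's write lock */
        klist->keys[klist->nkeys] = key;
        smp_wmb();                  /* slot contents visible before the count */
        klist->nkeys++;
    }

    static void scan_keys(struct keyring_list *klist)
    {
        int nkeys, kix;

        /* lock-free reader, under rcu_read_lock() */
        nkeys = klist->nkeys;       /* snapshot the count once */
        smp_rmb();                  /* pairs with the writer's smp_wmb() */
        for (kix = 0; kix < nkeys; kix++)
            inspect(klist->keys[kix]);  /* slots below nkeys are initialised */
    }
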
index 0ed5fdf238a22c6712e5375207f5bd6d8a168ef6..2d5d041f2049f323e5072c701f068d695f3f2c6c 100644 (file)
@@ -993,7 +993,7 @@ static int trusted_instantiate(struct key *key, const void *data,
        kfree(datablob);
        kfree(options);
        if (!ret)
-               rcu_assign_pointer(key->payload.data, payload);
+               rcu_assign_keypointer(key, payload);
        else
                kfree(payload);
        return ret;
@@ -1067,7 +1067,7 @@ static int trusted_update(struct key *key, const void *data, size_t datalen)
                        goto out;
                }
        }
-       rcu_assign_pointer(key->payload.data, new_p);
+       rcu_assign_keypointer(key, new_p);
        call_rcu(&p->rcu, trusted_rcu_free);
 out:
        kfree(datablob);
index 7bd6f138236b3a010d457ab473ea73fd02641c8c..293b8c45b1d1bbf9a28edff35001b8c02eb09191 100644 (file)
@@ -232,13 +232,14 @@ static void dump_common_audit_data(struct audit_buffer *ab,
        case LSM_AUDIT_DATA_PATH: {
                struct inode *inode;
 
-               audit_log_d_path(ab, "path=", &a->u.path);
+               audit_log_d_path(ab, " path=", &a->u.path);
 
                inode = a->u.path.dentry->d_inode;
-               if (inode)
-                       audit_log_format(ab, " dev=%s ino=%lu",
-                                       inode->i_sb->s_id,
-                                       inode->i_ino);
+               if (inode) {
+                       audit_log_format(ab, " dev=");
+                       audit_log_untrustedstring(ab, inode->i_sb->s_id);
+                       audit_log_format(ab, " ino=%lu", inode->i_ino);
+               }
                break;
        }
        case LSM_AUDIT_DATA_DENTRY: {
@@ -248,10 +249,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
 
                inode = a->u.dentry->d_inode;
-               if (inode)
-                       audit_log_format(ab, " dev=%s ino=%lu",
-                                       inode->i_sb->s_id,
-                                       inode->i_ino);
+               if (inode) {
+                       audit_log_format(ab, " dev=");
+                       audit_log_untrustedstring(ab, inode->i_sb->s_id);
+                       audit_log_format(ab, " ino=%lu", inode->i_ino);
+               }
                break;
        }
        case LSM_AUDIT_DATA_INODE: {
@@ -266,8 +268,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                                         dentry->d_name.name);
                        dput(dentry);
                }
-               audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
-                                inode->i_ino);
+               audit_log_format(ab, " dev=");
+               audit_log_untrustedstring(ab, inode->i_sb->s_id);
+               audit_log_format(ab, " ino=%lu", inode->i_ino);
                break;
        }
        case LSM_AUDIT_DATA_TASK:
@@ -315,7 +318,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                                                .dentry = u->dentry,
                                                .mnt = u->mnt
                                        };
-                                       audit_log_d_path(ab, "path=", &path);
+                                       audit_log_d_path(ab, " path=", &path);
                                        break;
                                }
                                if (!u->addr)
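
All of the lsm_audit.c hunks, like the integrity_audit.c one earlier, converge on the same idiom: literal record text goes through audit_log_format(), while any string userspace can influence goes through audit_log_untrustedstring(), which quotes or hex-encodes the value so a crafted name cannot forge extra key=value fields in the record. The resulting shape, as used above:

    audit_log_format(ab, " dev=");                      /* trusted literal */
    audit_log_untrustedstring(ab, inode->i_sb->s_id);   /* escaped if needed */
    audit_log_format(ab, " ino=%lu", inode->i_ino);     /* numeric, safe via %lu */
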
index 4a9b4b2eb755c0129d408b371ebb3f102b862662..867558c983349d144c5f46a729af55a2a72dbe0a 100644 (file)
@@ -492,13 +492,13 @@ static bool tomoyo_correct_word2(const char *string, size_t len)
                                if (d < '0' || d > '7' || e < '0' || e > '7')
                                        break;
                                c = tomoyo_make_byte(c, d, e);
-                               if (tomoyo_invalid(c))
-                                       continue; /* pattern is not \000 */
+                               if (c <= ' ' || c >= 127)
+                                       continue;
                        }
                        goto out;
                } else if (in_repetition && c == '/') {
                        goto out;
-               } else if (tomoyo_invalid(c)) {
+               } else if (c <= ' ' || c >= 127) {
                        goto out;
                }
        }
index 6fd9391b3a6cd1c4aeb60d791effd9edd985ac18..4fa1dbd8ee8381e1fbc4260c779cfecdea1d1923 100644 (file)
@@ -133,7 +133,7 @@ static int atmel_abdac_prepare_dma(struct atmel_abdac *dac,
        period_len = frames_to_bytes(runtime, runtime->period_size);
 
        cdesc = dw_dma_cyclic_prep(chan, runtime->dma_addr, buffer_len,
-                       period_len, DMA_TO_DEVICE);
+                       period_len, DMA_MEM_TO_DEV);
        if (IS_ERR(cdesc)) {
                dev_dbg(&dac->pdev->dev, "could not prepare cyclic DMA\n");
                return PTR_ERR(cdesc);
index 73516f69ac7ca8a33244cb300df8958ac2d77e20..61dade6983582ce415afdbe3cf88d208bb628c46 100644 (file)
@@ -102,7 +102,7 @@ static void atmel_ac97c_dma_capture_period_done(void *arg)
 
 static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip,
                struct snd_pcm_substream *substream,
-               enum dma_data_direction direction)
+               enum dma_transfer_direction direction)
 {
        struct dma_chan                 *chan;
        struct dw_cyclic_desc           *cdesc;
@@ -118,7 +118,7 @@ static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip,
                return -EINVAL;
        }
 
-       if (direction == DMA_TO_DEVICE)
+       if (direction == DMA_MEM_TO_DEV)
                chan = chip->dma.tx_chan;
        else
                chan = chip->dma.rx_chan;
@@ -133,7 +133,7 @@ static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip,
                return PTR_ERR(cdesc);
        }
 
-       if (direction == DMA_TO_DEVICE) {
+       if (direction == DMA_MEM_TO_DEV) {
                cdesc->period_callback = atmel_ac97c_dma_playback_period_done;
                set_bit(DMA_TX_READY, &chip->flags);
        } else {
@@ -393,7 +393,7 @@ static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream)
        if (cpu_is_at32ap7000()) {
                if (!test_bit(DMA_TX_READY, &chip->flags))
                        retval = atmel_ac97c_prepare_dma(chip, substream,
-                                       DMA_TO_DEVICE);
+                                       DMA_MEM_TO_DEV);
        } else {
                /* Initialize and start the PDC */
                writel(runtime->dma_addr, chip->regs + ATMEL_PDC_TPR);
@@ -484,7 +484,7 @@ static int atmel_ac97c_capture_prepare(struct snd_pcm_substream *substream)
        if (cpu_is_at32ap7000()) {
                if (!test_bit(DMA_RX_READY, &chip->flags))
                        retval = atmel_ac97c_prepare_dma(chip, substream,
-                                       DMA_FROM_DEVICE);
+                                       DMA_DEV_TO_MEM);
        } else {
                /* Initialize and start the PDC */
                writel(runtime->dma_addr, chip->regs + ATMEL_PDC_RPR);
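
The DMA_TO_DEVICE → DMA_MEM_TO_DEV changes in this and the following sound drivers are part of the tree-wide dmaengine migration: buffer mapping keeps enum dma_data_direction, while slave transfers now describe themselves with enum dma_transfer_direction. A hedged sketch of slave-channel setup with the new enum (the FIFO address, widths, and burst size are illustrative, not taken from these drivers):

    #include <linux/dmaengine.h>

    static int setup_playback_channel(struct dma_chan *chan, dma_addr_t fifo_addr)
    {
        struct dma_slave_config cfg = {
            .direction      = DMA_MEM_TO_DEV,               /* memory -> device FIFO */
            .dst_addr       = fifo_addr,                    /* peripheral register */
            .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,   /* 16-bit samples */
            .dst_maxburst   = 4,                            /* words per burst */
        };

        return dmaengine_slave_config(chan, &cfg);          /* 0 on success */
    }
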
index ad409381f8cca21370f4802b1a5044af2f946b71..b413ed05e74deae78fbd97809c99f0fd68f1ee46 100644 (file)
@@ -12,6 +12,9 @@ config SND_HWDEP
 config SND_RAWMIDI
        tristate
 
+config SND_COMPRESS_OFFLOAD
+       tristate
+
 # To be effective this also requires INPUT - users should say:
 #    select SND_JACK if INPUT=y || INPUT=SND
 # to avoid having to force INPUT on.
@@ -154,16 +157,6 @@ config SND_DYNAMIC_MINORS
 
          If you are unsure about this, say N here.
 
-config SND_COMPRESS_OFFLOAD
-       tristate "ALSA Compressed audio offload support"
-       default n
-       help
-         If you want support for offloading compressed audio and have such
-         a hardware, then you should say Y here and also to the DSP driver
-         of your platform.
-
-         If you are unsure about this, say N here.
-
 config SND_SUPPORT_OLD_API
        bool "Support old ALSA API"
        default y
index 762bb108c51c139b71e93cf8d627ebae858a25d2..f13ad536b2d59fe19ab9826b5abc0c301d5133cf 100644 (file)
@@ -268,8 +268,14 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
                card->shortname, chip->io, chip->irq);
 
        // (4) Alloc components.
+       err = snd_vortex_mixer(chip);
+       if (err < 0) {
+               snd_card_free(card);
+               return err;
+       }
        // ADB pcm.
-       if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_ADB, NR_ADB)) < 0) {
+       err = snd_vortex_new_pcm(chip, VORTEX_PCM_ADB, NR_PCM);
+       if (err < 0) {
                snd_card_free(card);
                return err;
        }
@@ -299,11 +305,6 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
                return err;
        }
 #endif
-       // snd_ac97_mixer and Vortex mixer.
-       if ((err = snd_vortex_mixer(chip)) < 0) {
-               snd_card_free(card);
-               return err;
-       }
        if ((err = snd_vortex_midi(chip)) < 0) {
                snd_card_free(card);
                return err;
index 02f6e08f7592e8bcd6c539598f397ebbf88856aa..bb938153a964811b41f471f064fbd293fb660e77 100644 (file)
 #define MIX_SPDIF(x) (vortex->mixspdif[x])
 
 #define NR_WTPB 0x20           /* WT channels per each bank. */
+#define NR_PCM 0x10
 
 /* Structs */
 typedef struct {
index 0488633ea87474c608591601201030a65a1c91f5..0ef2f97122080f206699a01e32b7f1f1ce7b6825 100644 (file)
@@ -168,6 +168,7 @@ static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
                        runtime->hw = snd_vortex_playback_hw_adb;
 #ifdef CHIP_AU8830
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+                       VORTEX_IS_QUAD(vortex) &&
                        VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB) {
                        runtime->hw.channels_max = 4;
                        snd_pcm_hw_constraint_list(runtime, 0,
index 0852e204a4c8439e557bd553dcde3aae1f7c173a..fb35474c1203658fb137b650da9dec0b3b1876ad 100644 (file)
@@ -2498,6 +2498,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
        SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
+       SND_PCI_QUIRK(0x10de, 0xcb89, "Macbook Pro 7,1", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
index 87e684fa830f83df49bfec3aa5e894d8c97bfa07..3556408d6ece45236ff5b13217f5f7d29905c6cc 100644 (file)
@@ -1596,7 +1596,7 @@ static const struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
                                "Dell Studio 1557", STAC_DELL_M6_DMIC),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
-                               "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
+                               "Dell Studio XPS 1645", STAC_DELL_M6_DMIC),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
                                "Dell Studio 1558", STAC_DELL_M6_DMIC),
        {} /* terminator */
index 478303e6c2b0801c16c2ae52ee4eff0996fac472..63cff90706bf1749b9b7154f1c6ec81c903f68fd 100644 (file)
@@ -177,6 +177,7 @@ static void wm8776_registers_init(struct oxygen *chip)
        struct xonar_wm87x6 *data = chip->model_data;
 
        wm8776_write(chip, WM8776_RESET, 0);
+       wm8776_write(chip, WM8776_PHASESWAP, WM8776_PH_MASK);
        wm8776_write(chip, WM8776_DACCTRL1, WM8776_DZCEN |
                     WM8776_PL_LEFT_LEFT | WM8776_PL_RIGHT_RIGHT);
        wm8776_write(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0);
index d7bd91831611cb3a7a45395a76a82f34b10177df..f8863ebb4304628ed119b97bfe732c5825430714 100644 (file)
@@ -1457,5 +1457,5 @@ static void __exit sgtl5000_exit(void)
 module_exit(sgtl5000_exit);
 
 MODULE_DESCRIPTION("Freescale SGTL5000 ALSA SoC Codec Driver");
-MODULE_AUTHOR("Zeng Zhaoming <zhaoming.zeng@freescale.com>");
+MODULE_AUTHOR("Zeng Zhaoming <zengzm.kernel@gmail.com>");
 MODULE_LICENSE("GPL");
index 2b40c93601ed493a11bf0222dd5ba0c09754355b..7c7fd925db8da78c1ad6bb08bdde66eb60d7210b 100644 (file)
@@ -444,6 +444,12 @@ static int _wm8993_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
        /* Enable the FLL */
        snd_soc_write(codec, WM8993_FLL_CONTROL_1, reg1 | WM8993_FLL_ENA);
 
+       /* Both overestimates */
+       if (Fref < 1000000)
+               msleep(3);
+       else
+               msleep(1);
+
        dev_dbg(codec->dev, "FLL enabled at %dHz->%dHz\n", Fref, Fout);
 
        wm8993->fll_fref = Fref;
index 3fc96130d1a6b0ccb76470e979eb34896682e965..de83904498730dbf43447cdcc4ef42cd34a9d0d7 100644 (file)
@@ -113,9 +113,9 @@ static int ep93xx_pcm_open(struct snd_pcm_substream *substream)
        rtd->dma_data.name = dma_params->name;
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               rtd->dma_data.direction = DMA_TO_DEVICE;
+               rtd->dma_data.direction = DMA_MEM_TO_DEV;
        else
-               rtd->dma_data.direction = DMA_FROM_DEVICE;
+               rtd->dma_data.direction = DMA_DEV_TO_MEM;
 
        rtd->dma_chan = dma_request_channel(mask, ep93xx_pcm_dma_filter,
                                            &rtd->dma_data);
index 1cf2fe889f6adaa885c77bf2c74da9dd9f56a3d8..5780c9b9d569cf78f82edb63104322c896b7c234 100644 (file)
@@ -88,11 +88,13 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream,
        iprtd->dma_data.dma_request = dma_params->dma;
 
        /* Try to grab a DMA channel */
-       dma_cap_zero(mask);
-       dma_cap_set(DMA_SLAVE, mask);
-       iprtd->dma_chan = dma_request_channel(mask, filter, iprtd);
-       if (!iprtd->dma_chan)
-               return -EINVAL;
+       if (!iprtd->dma_chan) {
+               dma_cap_zero(mask);
+               dma_cap_set(DMA_SLAVE, mask);
+               iprtd->dma_chan = dma_request_channel(mask, filter, iprtd);
+               if (!iprtd->dma_chan)
+                       return -EINVAL;
+       }
 
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
@@ -107,12 +109,12 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream,
        }
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               slave_config.direction = DMA_TO_DEVICE;
+               slave_config.direction = DMA_MEM_TO_DEV;
                slave_config.dst_addr = dma_params->dma_addr;
                slave_config.dst_addr_width = buswidth;
                slave_config.dst_maxburst = dma_params->burstsize;
        } else {
-               slave_config.direction = DMA_FROM_DEVICE;
+               slave_config.direction = DMA_DEV_TO_MEM;
                slave_config.src_addr = dma_params->dma_addr;
                slave_config.src_addr_width = buswidth;
                slave_config.src_maxburst = dma_params->burstsize;
@@ -159,7 +161,7 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
                        iprtd->period_bytes * iprtd->periods,
                        iprtd->period_bytes,
                        substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-                       DMA_TO_DEVICE : DMA_FROM_DEVICE);
+                       DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
        if (!iprtd->desc) {
                dev_err(&chan->dev->device, "cannot prepare slave dma\n");
                return -EINVAL;
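
The imx-ssi hunk above guards the channel request because ALSA may invoke hw_params() several times before a stream is torn down; only the first call should allocate. The matching release conventionally sits in hw_free(); a hedged reconstruction of that counterpart (names follow the driver's style but are not taken from this diff):

    static int snd_imx_pcm_hw_free(struct snd_pcm_substream *substream)
    {
        struct imx_pcm_runtime_data *iprtd = substream->runtime->private_data;

        if (iprtd->dma_chan) {
            dma_release_channel(iprtd->dma_chan);   /* undo dma_request_channel() */
            iprtd->dma_chan = NULL;                 /* allow a fresh request later */
        }
        return 0;
    }
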
index 0e12f4e0a76d60ac10dab9b70ad4e12f80e0d0c7..105f42a394df6460e80899f22c567f3ac6192c7f 100644 (file)
@@ -136,7 +136,7 @@ static int snd_mxs_pcm_hw_params(struct snd_pcm_substream *substream,
                        iprtd->period_bytes * iprtd->periods,
                        iprtd->period_bytes,
                        substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-                       DMA_TO_DEVICE : DMA_FROM_DEVICE);
+                       DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
        if (!iprtd->desc) {
                dev_err(&chan->dev->device, "cannot prepare slave dma\n");
                return -EINVAL;
index 427ae0d9817bb95cb09fbab000d6a2d0a35d32c8..e4ba17ce6b32cf3c8d3d3a0762d70c7b7c969b3c 100644 (file)
@@ -86,7 +86,7 @@ static void dma_enqueue(struct snd_pcm_substream *substream)
        dma_info.cap = (samsung_dma_has_circular() ? DMA_CYCLIC : DMA_SLAVE);
        dma_info.direction =
                (substream->stream == SNDRV_PCM_STREAM_PLAYBACK
-               ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+               ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
        dma_info.fp = audio_buffdone;
        dma_info.fp_param = substream;
        dma_info.period = prtd->dma_period;
@@ -171,7 +171,7 @@ static int dma_hw_params(struct snd_pcm_substream *substream,
                dma_info.client = prtd->params->client;
                dma_info.direction =
                        (substream->stream == SNDRV_PCM_STREAM_PLAYBACK
-                       ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+                       ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
                dma_info.width = prtd->params->dma_size;
                dma_info.fifo = prtd->params->dma_addr;
                prtd->params->ch = prtd->params->ops->request(
index f8f681690a712d6c978ec607196fdd956c6fca2a..0193e595d415da8b82e7bf383b243dbdba25f88e 100644 (file)
@@ -131,7 +131,7 @@ static int siu_pcm_wr_set(struct siu_port *port_info,
        sg_dma_address(&sg) = buff;
 
        desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan,
-               &sg, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               &sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(dev, "Failed to allocate a dma descriptor\n");
                return -ENOMEM;
@@ -181,7 +181,7 @@ static int siu_pcm_rd_set(struct siu_port *port_info,
        sg_dma_address(&sg) = buff;
 
        desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan,
-               &sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               &sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(dev, "Failed to allocate dma descriptor\n");
                return -ENOMEM;
index 3986520b4677244b9bd592b85f2350889bda5cdf..b5ecf6d2321446198551c6d2cc6d7728c81be07a 100644 (file)
@@ -907,6 +907,10 @@ static void soc_remove_dai_link(struct snd_soc_card *card, int num, int order)
                        if (err < 0)
                                printk(KERN_ERR "asoc: failed to remove %s\n", platform->name);
                }
+
+               /* Make sure all DAPM widgets are freed */
+               snd_soc_dapm_free(&platform->dapm);
+
                platform->probed = 0;
                list_del(&platform->card_list);
                module_put(platform->dev->driver->owner);
index 3ad1f59b80281cfc7c9ace89ae8a30d34317fe7a..1f55ded4047f03b9a538af971c01018f0fb5df10 100644 (file)
@@ -1426,7 +1426,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
                        dapm->target_bias_level = SND_SOC_BIAS_ON;
                        break;
                case SND_SOC_DAPM_STREAM_STOP:
-                       if (dapm->codec->active)
+                       if (dapm->codec && dapm->codec->active)
                                dapm->target_bias_level = SND_SOC_BIAS_ON;
                        else
                                dapm->target_bias_level = SND_SOC_BIAS_STANDBY;
index 93931def0dce62b3a5e162d7c759ac82f33745f2..21554611557c380ca391a9c384407a9744a056a1 100644 (file)
@@ -134,7 +134,7 @@ txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
        sg_dma_address(&sg) = buf_dma_addr;
        desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
                dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-               DMA_TO_DEVICE : DMA_FROM_DEVICE,
+               DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(&chan->dev->device, "cannot prepare slave dma\n");