Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 13 Aug 2018 18:21:34 +0000 (11:21 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 13 Aug 2018 18:21:34 +0000 (11:21 -0700)
Pull scheduler fix from Thomas Gleixner:
 "A single bugfix to prevent a pinned thread which queues stomp machine
  work to be preempted by the stopper thread on its CPU which causes a
  live lock as it is unable to wake the second CPUs stopper thread"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  stop_machine: Atomically queue and wake stopper threads

197 files changed:
Documentation/RCU/Design/Data-Structures/Data-Structures.html
Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
Documentation/RCU/stallwarn.txt
Documentation/RCU/whatisRCU.txt
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/interrupt-controller/ingenic,intc.txt
Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/include/asm/efi.h
arch/arm/include/asm/irq.h
arch/arm/include/asm/mach/arch.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/irq.c
arch/arm/kernel/setup.c
arch/arm64/Kconfig
arch/arm64/crypto/aes-ce-ccm-core.S
arch/arm64/crypto/ghash-ce-core.S
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/irq.h
arch/arm64/kernel/irq.c
arch/m68k/Kconfig
arch/m68k/apollo/config.c
arch/m68k/atari/config.c
arch/m68k/atari/time.c
arch/m68k/bvme6000/config.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/Kbuild
arch/m68k/include/asm/bitops.h
arch/m68k/include/asm/dma-mapping.h [deleted file]
arch/m68k/include/asm/io.h
arch/m68k/include/asm/io_mm.h
arch/m68k/include/asm/io_no.h
arch/m68k/include/asm/kmap.h
arch/m68k/include/asm/machdep.h
arch/m68k/include/asm/macintosh.h
arch/m68k/include/asm/page_no.h
arch/m68k/kernel/dma.c
arch/m68k/kernel/setup_mm.c
arch/m68k/kernel/setup_no.c
arch/m68k/mac/config.c
arch/m68k/mac/misc.c
arch/m68k/mm/init.c
arch/m68k/mm/mcfmmu.c
arch/m68k/mm/motorola.c
arch/m68k/mvme147/config.c
arch/m68k/mvme16x/config.c
arch/m68k/q40/config.c
arch/m68k/sun3/config.c
arch/openrisc/Kconfig
arch/openrisc/include/asm/irq.h
arch/openrisc/kernel/irq.c
arch/parisc/Kconfig
arch/parisc/include/asm/barrier.h [new file with mode: 0644]
arch/parisc/kernel/entry.S
arch/parisc/kernel/pacache.S
arch/parisc/kernel/syscall.S
arch/s390/Kconfig
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/eboot.h
arch/x86/crypto/aegis128-aesni-glue.c
arch/x86/crypto/aegis128l-aesni-glue.c
arch/x86/crypto/aegis256-aesni-glue.c
arch/x86/crypto/morus1280-avx2-glue.c
arch/x86/crypto/morus1280-sse2-glue.c
arch/x86/crypto/morus640-sse2-glue.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/quirks.c
drivers/block/zram/zram_drv.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/cper.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/esrt.c
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/runtime-wrappers.c
drivers/gpio/gpiolib-acpi.c
drivers/i2c/busses/i2c-xlp9xx.c
drivers/input/keyboard/hilkbd.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
drivers/irqchip/irq-gic-v3-its-pci-msi.c
drivers/irqchip/irq-gic-v3-its-platform-msi.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-ingenic.c
drivers/irqchip/irq-stm32-exti.c
drivers/net/ethernet/8390/mac8390.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/xen-netfront.c
drivers/nubus/bus.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/sr.c
drivers/scsi/vmw_pvscsi.c
drivers/vhost/vhost.c
drivers/video/fbdev/efifb.c
fs/dcache.c
fs/efivarfs/inode.c
fs/namespace.c
include/linux/cpu.h
include/linux/efi.h
include/linux/irqchip/arm-gic-v3.h
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/srcu.h
include/linux/torture.h
include/net/af_vsock.h
include/net/llc.h
include/trace/events/rcu.h
init/main.c
kernel/bpf/cpumap.c
kernel/bpf/devmap.c
kernel/bpf/sockmap.c
kernel/cpu.c
kernel/irq/Kconfig
kernel/irq/irqdesc.c
kernel/irq/manage.c
kernel/irq/proc.c
kernel/locking/locktorture.c
kernel/rcu/rcu.h
kernel/rcu/rcuperf.c
kernel/rcu/rcutorture.c
kernel/rcu/srcutree.c
kernel/rcu/tiny.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h
kernel/rcu/update.c
kernel/torture.c
lib/Kconfig.ubsan
lib/debugobjects.c
mm/memory.c
net/dccp/ccids/ccid2.c
net/dsa/slave.c
net/ipv6/ip6_tunnel.c
net/ipv6/route.c
net/llc/llc_core.c
net/packet/af_packet.c
net/rxrpc/ar-internal.h
net/rxrpc/conn_event.c
net/rxrpc/net_ns.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/rxrpc/rxkad.c
net/smc/af_smc.c
net/tipc/net.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/vmci_transport.c
samples/bpf/xdp_redirect_cpu_kern.c
samples/bpf/xdp_redirect_cpu_user.c
scripts/Makefile.ubsan
tools/lib/bpf/btf.c
tools/lib/bpf/btf.h
tools/testing/selftests/bpf/test_sockmap.c
tools/testing/selftests/rcutorture/bin/configinit.sh
tools/testing/selftests/rcutorture/bin/kvm-build.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/bin/parse-console.sh
tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot [deleted file]
tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh

Documentation/RCU/Design/Data-Structures/Data-Structures.html
index 6c06e10bd04bd107380e391cee17c5f50a366633..f5120a00f5116bc72fda540b427d4d774069a13b 100644
@@ -380,31 +380,26 @@ and therefore need no protection.
 as follows:
 
 <pre>
-  1   unsigned long gpnum;
-  2   unsigned long completed;
+  1   unsigned long gp_seq;
 </pre>
 
 <p>RCU grace periods are numbered, and
-the <tt>-&gt;gpnum</tt> field contains the number of the grace
-period that started most recently.
-The <tt>-&gt;completed</tt> field contains the number of the
-grace period that completed most recently.
-If the two fields are equal, the RCU grace period that most recently
-started has already completed, and therefore the corresponding
-flavor of RCU is idle.
-If <tt>-&gt;gpnum</tt> is one greater than <tt>-&gt;completed</tt>,
-then <tt>-&gt;gpnum</tt> gives the number of the current RCU
-grace period, which has not yet completed.
-Any other combination of values indicates that something is broken.
-These two fields are protected by the root <tt>rcu_node</tt>'s
+the <tt>-&gt;gp_seq</tt> field contains the current grace-period
+sequence number.
+The bottom two bits are the state of the current grace period,
+which can be zero for not yet started or one for in progress.
+In other words, if the bottom two bits of <tt>-&gt;gp_seq</tt> are
+zero, the corresponding flavor of RCU is idle.
+Any other value in the bottom two bits indicates that something is broken.
+This field is protected by the root <tt>rcu_node</tt> structure's
 <tt>-&gt;lock</tt> field.
 
-</p><p>There are <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt> fields
+</p><p>There are <tt>-&gt;gp_seq</tt> fields
 in the <tt>rcu_node</tt> and <tt>rcu_data</tt> structures
 as well.
 The fields in the <tt>rcu_state</tt> structure represent the
-most current values, and those of the other structures are compared
-in order to detect the start of a new grace period in a distributed
+most current value, and those of the other structures are compared
+in order to detect the beginnings and ends of grace periods in a distributed
 fashion.
 The values flow from <tt>rcu_state</tt> to <tt>rcu_node</tt>
 (down the tree from the root to the leaves) to <tt>rcu_data</tt>.
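
The new encoding is easiest to see in code.  Below is a minimal
user-space sketch of the scheme described above; the kernel's own
helpers in kernel/rcu/rcu.h, rcu_seq_start() and rcu_seq_end() (which
appear in this patch's diagrams further down), additionally use
WRITE_ONCE() and memory barriers:

        #define RCU_SEQ_CTR_SHIFT       2
        #define RCU_SEQ_STATE_MASK      ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

        /* Bottom two bits: 0 = no grace period in progress, 1 = in progress. */
        static inline unsigned long rcu_seq_state(unsigned long s)
        {
                return s & RCU_SEQ_STATE_MASK;
        }

        /* Start a grace period: the state bits go from 0 to 1. */
        static inline void rcu_seq_start(unsigned long *sp)
        {
                *sp += 1;
        }

        /* End a grace period: advance the count, clear the state bits. */
        static inline void rcu_seq_end(unsigned long *sp)
        {
                *sp = ((*sp >> RCU_SEQ_CTR_SHIFT) + 1) << RCU_SEQ_CTR_SHIFT;
        }
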
@@ -512,27 +507,47 @@ than to be heisenbugged out of existence.
 as follows:
 
 <pre>
-  1   unsigned long gpnum;
-  2   unsigned long completed;
+  1   unsigned long gp_seq;
+  2   unsigned long gp_seq_needed;
 </pre>
 
-<p>These fields are the counterparts of the fields of the same name in
-the <tt>rcu_state</tt> structure.
-They each may lag up to one behind their <tt>rcu_state</tt>
-counterparts.
-If a given <tt>rcu_node</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_node</tt>
+<p>The <tt>rcu_node</tt> structures' <tt>-&gt;gp_seq</tt> fields are
+the counterparts of the field of the same name in the <tt>rcu_state</tt>
+structure.
+They each may lag up to one step behind their <tt>rcu_state</tt>
+counterpart.
+If the bottom two bits of a given <tt>rcu_node</tt> structure's
+<tt>-&gt;gp_seq</tt> field are zero, then this <tt>rcu_node</tt>
 structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_node</tt> believes
-is still being waited for.
+</p><p>The <tt>-&gt;gp_seq</tt> field of each <tt>rcu_node</tt>
+structure is updated at the beginning and the end
+of each grace period.
+
+<p>The <tt>-&gt;gp_seq_needed</tt> fields record the
+furthest-in-the-future grace period request seen by the corresponding
+<tt>rcu_node</tt> structure.  The request is considered fulfilled when
+the value of the <tt>-&gt;gp_seq</tt> field equals or exceeds that of
+the <tt>-&gt;gp_seq_needed</tt> field.
 
-</p><p>The <tt>&gt;gpnum</tt> field of each <tt>rcu_node</tt>
-structure is updated at the beginning
-of each grace period, and the <tt>-&gt;completed</tt> fields are
-updated at the end of each grace period.
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Suppose that this <tt>rcu_node</tt> structure doesn't see
+       a request for a very long time.
+       Won't wrapping of the <tt>-&gt;gp_seq</tt> field cause
+       problems?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       No, because if the <tt>-&gt;gp_seq_needed</tt> field lags behind the
+       <tt>-&gt;gp_seq</tt> field, the <tt>-&gt;gp_seq_needed</tt> field
+       will be updated at the end of the grace period.
+       Modulo-arithmetic comparisons therefore will always get the
+       correct answer, even with wrapping.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
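
The "modulo-arithmetic comparisons" mentioned in the Quick Quiz answer
can be sketched as follows; the kernel's ULONG_CMP_GE() macro in
include/linux/rcupdate.h takes the same approach (gp_request_fulfilled()
here is a simplified stand-in for illustration, not the kernel's exact
API):

        #include <limits.h>

        /*
         * Wrap-safe "a is at or past b": unsigned subtraction wraps
         * modulo 2^BITS, so the test remains correct across counter
         * wrap as long as the two values stay within half the counter
         * space of each other.
         */
        #define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))

        /* A request recorded in ->gp_seq_needed is fulfilled once: */
        static inline int gp_request_fulfilled(unsigned long gp_seq,
                                               unsigned long gp_seq_needed)
        {
                return ULONG_CMP_GE(gp_seq, gp_seq_needed);
        }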
 
 <h5>Quiescent-State Tracking</h5>
 
@@ -626,9 +641,8 @@ normal and expedited grace periods, respectively.
        </ol>
 
        <p><font color="ffffff">So the locking is absolutely required in
-       order to coordinate
-       clearing of the bits with the grace-period numbers in
-       <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt>.
+       order to coordinate clearing of the bits with updating of the
+       grace-period sequence number in <tt>-&gt;gp_seq</tt>.
 </font></td></tr>
 <tr><td>&nbsp;</td></tr>
 </table>
@@ -1038,15 +1052,15 @@ out any <tt>rcu_data</tt> structure for which this flag is not set.
 as follows:
 
 <pre>
-  1   unsigned long completed;
-  2   unsigned long gpnum;
+  1   unsigned long gp_seq;
+  2   unsigned long gp_seq_needed;
   3   bool cpu_no_qs;
   4   bool core_needs_qs;
   5   bool gpwrap;
   6   unsigned long rcu_qs_ctr_snap;
 </pre>
 
-<p>The <tt>completed</tt> and <tt>gpnum</tt>
+<p>The <tt>-&gt;gp_seq</tt> and <tt>-&gt;gp_seq_needed</tt>
 fields are the counterparts of the fields of the same name
 in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures.
 They may each lag up to one behind their <tt>rcu_node</tt>
@@ -1054,15 +1068,9 @@ counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
 <tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
 arbitrarily far behind for CPUs in dyntick-idle mode (but these counters
 will catch up upon exit from dyntick-idle mode).
-If a given <tt>rcu_data</tt> structure's <tt>-&gt;gpnum</tt> and
-<tt>-&gt;complete</tt> fields are equal, then this <tt>rcu_data</tt>
+If the lower two bits of a given <tt>rcu_data</tt> structure's
+<tt>-&gt;gp_seq</tt> are zero, then this <tt>rcu_data</tt>
 structure believes that RCU is idle.
-Otherwise, as with the <tt>rcu_state</tt> and <tt>rcu_node</tt>
-structure,
-the <tt>-&gt;gpnum</tt> field will be one greater than the
-<tt>-&gt;complete</tt> fields, with <tt>-&gt;gpnum</tt>
-indicating which grace period this <tt>rcu_data</tt> believes
-is still being waited for.
 
 <table>
 <tr><th>&nbsp;</th></tr>
@@ -1070,13 +1078,13 @@ is still being waited for.
 <tr><td>
        All this replication of the grace period numbers can only cause
        massive confusion.
-       Why not just keep a global pair of counters and be done with it???
+       Why not just keep a global sequence number and be done with it???
 </td></tr>
 <tr><th align="left">Answer:</th></tr>
 <tr><td bgcolor="#ffffff"><font color="ffffff">
-       Because if there was only a single global pair of grace-period
-	numbers, there would need to be a single global lock to allow
-	safely accessing and updating them.
+	Because if there were only a single global sequence
+	number, there would need to be a single global lock to allow
+	safely accessing and updating it.
        And if we are not going to have a single global lock, we need
        to carefully manage the numbers on a per-node basis.
        Recall from the answer to a previous Quick Quiz that the consequences
@@ -1091,8 +1099,8 @@ CPU has not yet passed through a quiescent state,
 while the <tt>-&gt;core_needs_qs</tt> flag indicates that the
 RCU core needs a quiescent state from the corresponding CPU.
 The <tt>-&gt;gpwrap</tt> field indicates that the corresponding
-CPU has remained idle for so long that the <tt>completed</tt>
-and <tt>gpnum</tt> counters are in danger of overflow, which
+CPU has remained idle for so long that the
+<tt>gp_seq</tt> counter is in danger of overflow, which
 will cause the CPU to disregard the values of its counters on
 its next exit from idle.
 Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
@@ -1130,10 +1138,10 @@ The CPU advances the callbacks in its <tt>rcu_data</tt> structure
 whenever it notices that another RCU grace period has completed.
 The CPU detects the completion of an RCU grace period by noticing
 that the value of its <tt>rcu_data</tt> structure's
-<tt>-&gt;completed</tt> field differs from that of its leaf
+<tt>-&gt;gp_seq</tt> field differs from that of its leaf
 <tt>rcu_node</tt> structure.
 Recall that each <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field is updated at the end of each
+<tt>-&gt;gp_seq</tt> field is updated at the beginning and end of each
 grace period.
 
 <p>
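
The detection step just described reduces to an inequality between two
copies of the sequence number.  A hypothetical sketch follows (the
kernel's real logic lives in note_gp_changes() and handles far more
cases):

        /* Toy types; the real structures carry many more fields. */
        struct rcu_node { unsigned long gp_seq; };
        struct rcu_data { unsigned long gp_seq; struct rcu_node *mynode; };

        /*
         * A CPU notices that a grace period has started or ended when
         * its private copy of the sequence number no longer matches
         * the copy in its leaf rcu_node structure.
         */
        static int gp_seq_changed(struct rcu_data *rdp)
        {
                return rdp->gp_seq != rdp->mynode->gp_seq;
        }
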
Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
index 8651b0b4fd79f37caef5572037656d1eb2a43cda..a346ce0116eb5ebc04733e85f074cfd00240c886 100644
@@ -357,7 +357,7 @@ parts, starting in this section with the various phases of
 grace-period initialization.
 
 <p>The first ordering-related grace-period initialization action is to
-increment the <tt>rcu_state</tt> structure's <tt>-&gt;gpnum</tt>
+advance the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt>
 grace-period-number counter, as shown below:
 
 </p><p><img src="TreeRCU-gp-init-1.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -388,7 +388,7 @@ its last CPU and if the next <tt>rcu_node</tt> structure has no online CPUs).
 
 <p>The final <tt>rcu_gp_init()</tt> pass through the <tt>rcu_node</tt>
 tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
-<tt>-&gt;gpnum</tt> field to the newly incremented value from the
+<tt>-&gt;gp_seq</tt> field to the newly advanced value from the
 <tt>rcu_state</tt> structure, as shown in the following diagram.
 
 </p><p><img src="TreeRCU-gp-init-3.svg" alt="TreeRCU-gp-init-1.svg" width="75%">
@@ -398,9 +398,9 @@ tree traverses breadth-first, setting each <tt>rcu_node</tt> structure's
 to notice that a new grace period has started, as described in the next
 section.
 But because the grace-period kthread started the grace period at the
-root (with the increment of the <tt>rcu_state</tt> structure's
-<tt>-&gt;gpnum</tt> field) before setting each leaf <tt>rcu_node</tt>
-structure's <tt>-&gt;gpnum</tt> field, each CPU's observation of
+root (with the advancing of the <tt>rcu_state</tt> structure's
+<tt>-&gt;gp_seq</tt> field) before setting each leaf <tt>rcu_node</tt>
+structure's <tt>-&gt;gp_seq</tt> field, each CPU's observation of
 the start of the grace period will happen after the actual start
 of the grace period.
 
@@ -466,7 +466,7 @@ section that the grace period must wait on.
 <tr><td>
        But a RCU read-side critical section might have started
        after the beginning of the grace period
-       (the <tt>-&gt;gpnum++</tt> from earlier), so why should
+       (the advancing of <tt>-&gt;gp_seq</tt> from earlier), so why should
        the grace period wait on such a critical section?
 </td></tr>
 <tr><th align="left">Answer:</th></tr>
@@ -609,10 +609,8 @@ states outstanding from other CPUs.
 <h4><a name="Grace-Period Cleanup">Grace-Period Cleanup</a></h4>
 
 <p>Grace-period cleanup first scans the <tt>rcu_node</tt> tree
-breadth-first setting all the <tt>-&gt;completed</tt> fields equal
-to the number of the newly completed grace period, then it sets
-the <tt>rcu_state</tt> structure's <tt>-&gt;completed</tt> field,
-again to the number of the newly completed grace period.
+breadth-first advancing all the <tt>-&gt;gp_seq</tt> fields, then it
+advances the <tt>rcu_state</tt> structure's <tt>-&gt;gp_seq</tt> field.
 The ordering effects are shown below:
 
 </p><p><img src="TreeRCU-gp-cleanup.svg" alt="TreeRCU-gp-cleanup.svg" width="75%">
@@ -634,7 +632,7 @@ grace-period cleanup is complete, the next grace period can begin.
        CPU has reported its quiescent state, but it may be some
        milliseconds before RCU becomes aware of this.
        The latest reasonable candidate is once the <tt>rcu_state</tt>
-       structure's <tt>-&gt;completed</tt> field has been updated,
+       structure's <tt>-&gt;gp_seq</tt> field has been updated,
        but it is quite possible that some CPUs have already completed
        phase two of their updates by that time.
        In short, if you are going to work with RCU, you need to
@@ -647,7 +645,7 @@ grace-period cleanup is complete, the next grace period can begin.
 <h4><a name="Callback Invocation">Callback Invocation</a></h4>
 
 <p>Once a given CPU's leaf <tt>rcu_node</tt> structure's
-<tt>-&gt;completed</tt> field has been updated, that CPU can begin
+<tt>-&gt;gp_seq</tt> field has been updated, that CPU can begin
 invoking its RCU callbacks that were waiting for this grace period
 to end.
 These callbacks are identified by <tt>rcu_advance_cbs()</tt>,
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-cleanup.svg
index 754f426b297ab8a4611641d1c326871ce80899d9..bf84fbab27eece0856b030d34f421d608d460bce 100644
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.70710678"
-     inkscape:cx="617.89017"
-     inkscape:cy="542.52419"
-     inkscape:window-x="86"
-     inkscape:window-y="28"
+     inkscape:zoom="0.78716603"
+     inkscape:cx="513.06403"
+     inkscape:cy="623.1214"
+     inkscape:window-x="102"
+     inkscape:window-y="38"
      inkscape:window-maximized="0"
      inkscape:current-layer="g3188-3"
      fit-margin-top="5"
      id="g3188">
     <text
        xml:space="preserve"
-       x="3199.1516"
+       x="3145.9592"
        y="13255.592"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3143">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     <g
        id="g3107"
        transform="translate(947.90548,11584.029)">
     </g>
     <text
        xml:space="preserve"
-       x="5324.5371"
-       y="15414.598"
+       x="5264.4731"
+       y="15428.84"
        font-style="normal"
        font-weight="bold"
        font-size="192"
-       id="text202-753"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       id="text202-36-7"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      style="fill:none;stroke-width:0.025in"
        sodipodi:linespacing="125%"><tspan
          style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
          id="tspan3104-6-5-6-0">Leaf</tspan></text>
-    <text
-       xml:space="preserve"
-       x="7479.5796"
-       y="17699.943"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-9"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
     <path
        sodipodi:nodetypes="cc"
        inkscape:connector-curvature="0"
        style="fill:none;stroke-width:0.025in"
        transform="translate(-737.93887,7732.6672)"
        id="g3188-3">
-      <text
-         xml:space="preserve"
-         x="3225.7478"
-         y="13175.802"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-60"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;completed =</text>
       <g
          id="g3107-62"
          transform="translate(947.90548,11584.029)">
          sodipodi:linespacing="125%"><tspan
            style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
            id="tspan3104-6-5-7">Root</tspan></text>
-      <text
-         xml:space="preserve"
-         x="3225.7478"
-         y="13390.038"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-60-3"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">       rnp-&gt;completed</text>
       <flowRoot
          xml:space="preserve"
          id="flowRoot3356"
              height="63.63961"
              x="332.34018"
              y="681.87292" /></flowRegion><flowPara
-           id="flowPara3362" /></flowRoot>    </g>
+           id="flowPara3362" /></flowRoot>      <text
+         xml:space="preserve"
+         x="3156.6121"
+         y="13317.754"
+         font-style="normal"
+         font-weight="bold"
+         font-size="192"
+         id="text202-36-6"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+    </g>
     <g
        style="fill:none;stroke-width:0.025in"
        transform="translate(-858.40227,7769.0342)"
        id="path3414-8-3-6-6"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="cc" />
+    <text
+       xml:space="preserve"
+       x="7418.769"
+       y="17646.104"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-70"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-93">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      transform="translate(-1642.5377,-11611.245)"
     </g>
     <text
        xml:space="preserve"
-       x="5327.3057"
+       x="5274.1133"
        y="15428.84"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-36"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      transform="translate(-151.71746,-11647.612)"
          id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7486.4907"
-       y="17670.119"
+       x="7408.5918"
+       y="17619.504"
        font-style="normal"
        font-weight="bold"
        font-size="192"
-       id="text202-6"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       id="text202-36-2"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <g
      transform="translate(-6817.1997,-11647.612)"
          id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7474.1382"
-       y="17688.926"
+       x="7416.8003"
+       y="17619.504"
        font-style="normal"
        font-weight="bold"
        font-size="192"
-       id="text202-5"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+       id="text202-36-3"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-56">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
   </g>
   <path
      style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
      id="path3414-8-3-6"
      inkscape:connector-curvature="0"
      sodipodi:nodetypes="cc" />
-  <text
-     xml:space="preserve"
-     x="7318.9653"
-     y="6031.6353"
-     font-style="normal"
-     font-weight="bold"
-     font-size="192"
-     id="text202-2"
-     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
   <g
      style="fill:none;stroke-width:0.025in"
      id="g4504-3-9"
      id="path3134-9-0-3-5"
      d="m 6875.6003,15833.906 1595.7755,0"
      style="fill:none;stroke:#969696;stroke-width:53.19251633;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send-36)" />
+  <text
+     xml:space="preserve"
+     x="7275.2612"
+     y="5971.8916"
+     font-style="normal"
+     font-weight="bold"
+     font-size="192"
+     id="text202-36-1"
+     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+       style="font-size:172.87567139px"
+       id="tspan3166-2">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
 </svg>
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-1.svg
index 0161262904ece0892946636996e7f17ed0ad1cdb..8c207550818f3f544a4eb2292ffd7a2353946189 100644
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.70710678"
-     inkscape:cx="617.89019"
-     inkscape:cy="636.57143"
-     inkscape:window-x="697"
+     inkscape:zoom="2.6330492"
+     inkscape:cx="524.82797"
+     inkscape:cy="519.31194"
+     inkscape:window-x="79"
      inkscape:window-y="28"
      inkscape:window-maximized="0"
-     inkscape:current-layer="svg2"
+     inkscape:current-layer="g3188"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
      id="g3188">
     <text
        xml:space="preserve"
-       x="3305.5364"
+       x="3119.363"
        y="13255.592"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
     <g
        id="g3107"
        transform="translate(947.90548,11584.029)">
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp-init-3.svg
index de6ecc51b00e0e7073273ce7e2d6a9ef755ab91d..d24d7d555dbce63f0534ec07b7870ea94545f4e1 100644
@@ -19,7 +19,7 @@
    id="svg2"
    version="1.1"
    inkscape:version="0.48.4 r9939"
-   sodipodi:docname="TreeRCU-gp-init-2.svg">
+   sodipodi:docname="TreeRCU-gp-init-3.svg">
   <metadata
      id="metadata212">
     <rdf:RDF>
      inkscape:window-width="1087"
      inkscape:window-height="1144"
      id="namedview208"
-     showgrid="false"
-     inkscape:zoom="0.70710678"
+     showgrid="true"
+     inkscape:zoom="0.68224756"
      inkscape:cx="617.89019"
      inkscape:cy="625.84293"
-     inkscape:window-x="697"
+     inkscape:window-x="54"
      inkscape:window-y="28"
      inkscape:window-maximized="0"
-     inkscape:current-layer="svg2"
+     inkscape:current-layer="g3153"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
-     fit-margin-bottom="5" />
+     fit-margin-bottom="5">
+    <inkscape:grid
+       type="xygrid"
+       id="grid3090" />
+  </sodipodi:namedview>
   <path
      sodipodi:nodetypes="cccccccccccccccccccccccc"
      inkscape:connector-curvature="0"
      id="g3188">
     <text
        xml:space="preserve"
-       x="3305.5364"
+       x="3145.9592"
        y="13255.592"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     <g
        id="g3107"
        transform="translate(947.90548,11584.029)">
     </g>
     <text
        xml:space="preserve"
-       x="5392.3345"
-       y="15407.104"
+       x="5253.6904"
+       y="15407.032"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-6"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <g
      style="fill:none;stroke-width:0.025in"
          id="tspan3104-6-5-6-0">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7536.4883"
-       y="17640.934"
+       x="7415.4365"
+       y="17670.572"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-9"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <g
      transform="translate(-1642.5375,-11610.962)"
     </g>
     <text
        xml:space="preserve"
-       x="5378.4146"
-       y="15436.927"
+       x="5258.0688"
+       y="15412.313"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-3"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <g
      transform="translate(-151.71726,-11647.329)"
          id="tspan3104-6-5-6-0-92">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7520.1294"
-       y="17673.639"
+       x="7405.2607"
+       y="17670.572"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-35"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <g
      transform="translate(-6817.1998,-11647.329)"
          id="tspan3104-6-5-6-0-1">Leaf</tspan></text>
     <text
        xml:space="preserve"
-       x="7521.4663"
-       y="17666.062"
+       x="7413.4688"
+       y="17670.566"
        font-style="normal"
        font-weight="bold"
        font-size="192"
        id="text202-75"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
   <path
      style="fill:none;stroke:#000000;stroke-width:13.29812908px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
      sodipodi:nodetypes="cc" />
   <text
      xml:space="preserve"
-     x="7370.856"
-     y="5997.5972"
+     x="7271.9297"
+     y="6023.2412"
      font-style="normal"
      font-weight="bold"
      font-size="192"
      id="text202-62"
-     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+     style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
 </svg>
Documentation/RCU/Design/Memory-Ordering/TreeRCU-gp.svg
index b13b7b01bb3ab1e7818dcc644a8dfb5b89e11068..acd73c7ad0f4c168bff494fbdf44a03e98a44f8c 100644
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.6004608"
-     inkscape:cx="826.65969"
-     inkscape:cy="483.3047"
-     inkscape:window-x="66"
-     inkscape:window-y="28"
+     inkscape:zoom="0.81932583"
+     inkscape:cx="840.45848"
+     inkscape:cy="5052.4242"
+     inkscape:window-x="787"
+     inkscape:window-y="24"
      inkscape:window-maximized="0"
-     inkscape:current-layer="svg2"
+     inkscape:current-layer="g4"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
        style="fill:none;stroke-width:0.025in"
        transform="translate(1749.0282,658.72243)"
        id="g3188">
-      <text
-         xml:space="preserve"
-         x="3305.5364"
-         y="13255.592"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-5"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">rsp-&gt;gpnum++</text>
       <g
          id="g3107-62"
          transform="translate(947.90548,11584.029)">
          sodipodi:linespacing="125%"><tspan
            style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
            id="tspan3104-6-5-7">Root</tspan></text>
+      <text
+         xml:space="preserve"
+         x="3137.9988"
+         y="13271.316"
+         font-style="normal"
+         font-weight="bold"
+         font-size="192"
+         id="text202-626"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3071">rcu_seq_start(rsp-&gt;gp_seq)</tspan></text>
     </g>
     <rect
        ry="0"
        style="fill:none;stroke-width:0.025in"
        transform="translate(1739.0986,17188.625)"
        id="g3188-6">
-      <text
-         xml:space="preserve"
-         x="3305.5364"
-         y="13255.592"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-1"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
       <g
          id="g3107-5"
          transform="translate(947.90548,11584.029)">
          sodipodi:linespacing="125%"><tspan
            style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
            id="tspan3104-6-5-1">Root</tspan></text>
+      <text
+         xml:space="preserve"
+         x="3147.9268"
+         y="13240.524"
+         font-style="normal"
+         font-weight="bold"
+         font-size="192"
+         id="text202-1"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        style="fill:none;stroke-width:0.025in"
       </g>
       <text
          xml:space="preserve"
-         x="5392.3345"
-         y="15407.104"
+         x="5263.1094"
+         y="15411.646"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-6-7"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-92"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        style="fill:none;stroke-width:0.025in"
            id="tspan3104-6-5-6-0-94">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7536.4883"
-         y="17640.934"
+         x="7417.4053"
+         y="17655.502"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-9"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-759"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        transform="translate(-2353.8462,17224.992)"
       </g>
       <text
          xml:space="preserve"
-         x="5378.4146"
-         y="15436.927"
+         x="5246.1548"
+         y="15411.648"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-3"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-87"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        transform="translate(-863.02613,17188.625)"
            id="tspan3104-6-5-6-0-92-6">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7520.1294"
-         y="17673.639"
+         x="7433.8257"
+         y="17682.098"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-35"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-2"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <g
        transform="translate(-7528.5085,17188.625)"
            id="tspan3104-6-5-6-0-1-8">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7521.4663"
-         y="17666.062"
+         x="7415.4404"
+         y="17682.098"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-75-1"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
+         id="text202-0"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
     </g>
     <path
        style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
        id="path3414-8-3-6-4"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="cc" />
-    <text
-       xml:space="preserve"
-       x="6659.5469"
-       y="34833.551"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-62"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gpnum = rsp-&gt;gpnum</text>
     <path
        sodipodi:nodetypes="ccc"
        inkscape:connector-curvature="0"
          font-weight="bold"
          font-size="192"
          id="text202-6-6-5"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
       <text
          xml:space="preserve"
          x="5035.4155"
        style="fill:none;stroke-width:0.025in"
        transform="translate(1874.038,53203.538)"
        id="g3188-7">
-      <text
-         xml:space="preserve"
-         x="3199.1516"
-         y="13255.592"
-         font-style="normal"
-         font-weight="bold"
-         font-size="192"
-         id="text202-82"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
       <g
          id="g3107-53"
          transform="translate(947.90548,11584.029)">
          sodipodi:linespacing="125%"><tspan
            style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
            id="tspan3104-6-5-19">Root</tspan></text>
+      <text
+         xml:space="preserve"
+         x="3175.896"
+         y="13240.11"
+         font-style="normal"
+         font-weight="bold"
+         font-size="192"
+         id="text202-36-3"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <rect
        ry="0"
       </g>
       <text
          xml:space="preserve"
-         x="5324.5371"
-         y="15414.598"
+         x="5264.4829"
+         y="15411.231"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-753"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+         id="text202-36-7"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-5">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <g
        style="fill:none;stroke-width:0.025in"
        sodipodi:linespacing="125%"><tspan
          style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
          id="tspan3104-6-5-6-0-4">Leaf</tspan></text>
-    <text
-       xml:space="preserve"
-       x="10084.225"
-       y="70903.312"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-9-0"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
     <path
        sodipodi:nodetypes="ccc"
        inkscape:connector-curvature="0"
        id="path3134-9-0-3-9"
        d="m 6315.6122,72629.054 -20.9533,8108.684 1648.968,0"
        style="fill:none;stroke:#969696;stroke-width:53.19251251;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-opacity:1;stroke-dasharray:none;marker-end:url(#Arrow1Send)" />
-    <text
-       xml:space="preserve"
-       x="5092.4683"
-       y="74111.672"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-60"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rsp-&gt;completed =</text>
     <g
        style="fill:none;stroke-width:0.025in"
        id="g3107-62-6"
        sodipodi:linespacing="125%"><tspan
          style="font-size:159.57754517px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:start;line-height:125%;writing-mode:lr-tb;text-anchor:start;font-family:Liberation Sans;-inkscape-font-specification:Liberation Sans"
          id="tspan3104-6-5-7-7">Root</tspan></text>
-    <text
-       xml:space="preserve"
-       x="5092.4683"
-       y="74325.906"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-60-3"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">       rnp-&gt;completed</text>
     <g
        style="fill:none;stroke-width:0.025in"
        transform="translate(1746.2528,60972.572)"
       </g>
       <text
          xml:space="preserve"
-         x="5327.3057"
-         y="15428.84"
+         x="5274.1216"
+         y="15411.231"
          font-style="normal"
          font-weight="bold"
          font-size="192"
          id="text202-36"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-6">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <g
        transform="translate(-728.08545,53203.538)"
            id="tspan3104-6-5-6-0-92-5">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7486.4907"
-         y="17670.119"
+         x="7435.1987"
+         y="17708.281"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-6-2"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+         id="text202-36-9"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-1">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <g
        transform="translate(-7393.5687,53203.538)"
            id="tspan3104-6-5-6-0-1-5">Leaf</tspan></text>
       <text
          xml:space="preserve"
-         x="7474.1382"
-         y="17688.926"
+         x="7416.8125"
+         y="17708.281"
          font-style="normal"
          font-weight="bold"
          font-size="192"
-         id="text202-5-1"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
+         id="text202-36-35"
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+           style="font-size:172.87567139px"
+           id="tspan3166-62">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
     </g>
     <path
        style="fill:none;stroke:#000000;stroke-width:13.29812813px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;marker-end:url(#Arrow2Lend)"
        id="path3414-8-3-6-67"
        inkscape:connector-curvature="0"
        sodipodi:nodetypes="cc" />
-    <text
-       xml:space="preserve"
-       x="6742.6001"
-       y="70882.617"
-       font-style="normal"
-       font-weight="bold"
-       font-size="192"
-       id="text202-2"
-       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;completed = -&gt;gpnum</text>
     <g
        style="fill:none;stroke-width:0.025in"
        id="g4504-3-9-6"
        font-size="192"
        id="text202-7-9-6-6-7"
        style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rcu_do_batch()</text>
+    <text
+       xml:space="preserve"
+       x="6698.9019"
+       y="70885.211"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-2"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-7">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
+    <text
+       xml:space="preserve"
+       x="10023.457"
+       y="70885.234"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-0"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-9">rcu_seq_end(&amp;rnp-&gt;gp_seq)</tspan></text>
+    <text
+       xml:space="preserve"
+       x="5023.3389"
+       y="74209.773"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-36-36"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier"><tspan
+         style="font-size:172.87567139px"
+         id="tspan3166-0">rcu_seq_end(&amp;rsp-&gt;gp_seq)</tspan></text>
+    <text
+       xml:space="preserve"
+       x="6562.5884"
+       y="34870.727"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       id="text202-3"
+       style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">-&gt;gp_seq = rsp-&gt;gp_seq</text>
   </g>
 </svg>
Documentation/RCU/Design/Memory-Ordering/TreeRCU-qs.svg
index de3992f4cbe1bfcb3c91667621d435ada38768ac..149bec2a4493d53c69f2e0589cbff4bcc1342486 100644
      inkscape:window-height="1144"
      id="namedview208"
      showgrid="true"
-     inkscape:zoom="0.70710678"
-     inkscape:cx="616.47598"
-     inkscape:cy="595.41964"
-     inkscape:window-x="813"
+     inkscape:zoom="0.96484375"
+     inkscape:cx="507.0191"
+     inkscape:cy="885.62207"
+     inkscape:window-x="47"
      inkscape:window-y="28"
      inkscape:window-maximized="0"
-     inkscape:current-layer="g4405"
+     inkscape:current-layer="g3115"
      fit-margin-top="5"
      fit-margin-right="5"
      fit-margin-left="5"
          font-weight="bold"
          font-size="192"
          id="text202-6-6"
-         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gpnum</text>
+         style="font-size:192px;font-style:normal;font-weight:bold;text-anchor:start;fill:#000000;stroke-width:0.025in;font-family:Courier">rdp-&gt;gp_seq</text>
       <text
          xml:space="preserve"
          x="5035.4155"
Documentation/RCU/stallwarn.txt
index 4259f95c32616eb04f23037475163e4195f2ddf7..f99cf11b314b2c5667a917460d092c4f83be0a3d 100644
@@ -172,7 +172,7 @@ it will print a message similar to the following:
        INFO: rcu_sched detected stalls on CPUs/tasks:
        2-...: (3 GPs behind) idle=06c/0/0 softirq=1453/1455 fqs=0
        16-...: (0 ticks this GP) idle=81c/0/0 softirq=764/764 fqs=0
-       (detected by 32, t=2603 jiffies, g=7073, c=7072, q=625)
+       (detected by 32, t=2603 jiffies, g=7075, q=625)
 
 This message indicates that CPU 32 detected that CPUs 2 and 16 were both
 causing stalls, and that the stall was affecting RCU-sched.  This message
@@ -215,11 +215,10 @@ CPU since the last time that this CPU noted the beginning of a grace
 period.
 
 The "detected by" line indicates which CPU detected the stall (in this
-case, CPU 32), how many jiffies have elapsed since the start of the
-grace period (in this case 2603), the number of the last grace period
-to start and to complete (7073 and 7072, respectively), and an estimate
-of the total number of RCU callbacks queued across all CPUs (625 in
-this case).
+case, CPU 32), how many jiffies have elapsed since the start of the grace
+period (in this case 2603), the grace-period sequence number (7075), and
+an estimate of the total number of RCU callbacks queued across all CPUs
+(625 in this case).
 
 In kernels with CONFIG_RCU_FAST_NO_HZ, more information is printed
 for each CPU:
@@ -266,15 +265,16 @@ If the relevant grace-period kthread has been unable to run prior to
 the stall warning, as was the case in the "All QSes seen" line above,
 the following additional line is printed:
 
-       kthread starved for 23807 jiffies! g7073 c7072 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1
+       kthread starved for 23807 jiffies! g7075 f0x0 RCU_GP_WAIT_FQS(3) ->state=0x1 ->cpu=5
 
 Starving the grace-period kthreads of CPU time can of course result
 in RCU CPU stall warnings even when all CPUs and tasks have passed
-through the required quiescent states.  The "g" and "c" numbers flag the
-number of the last grace period started and completed, respectively,
-the "f" precedes the ->gp_flags command to the grace-period kthread,
-the "RCU_GP_WAIT_FQS" indicates that the kthread is waiting for a short
-timeout, and the "state" precedes value of the task_struct ->state field.
+through the required quiescent states.  The "g" number shows the current
+grace-period sequence number, the "f" precedes the ->gp_flags command
+to the grace-period kthread, the "RCU_GP_WAIT_FQS" indicates that the
+kthread is waiting for a short timeout, the "state" precedes the value of the
+task_struct ->state field, and the "cpu" indicates that the grace-period
+kthread last ran on CPU 5.
 
 
 Multiple Warnings From One Stall
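
For reference, the single gp_seq value that replaces the old "g" and "c"
pair packs the grace-period phase into its low-order bits, which is why
one number (7075 above) can stand in for both the started and completed
counters.  A simplified sketch of the sequence helpers (assuming the
kernel/rcu/rcu.h layout from this merge window; not verbatim):

	#define RCU_SEQ_CTR_SHIFT	2
	#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

	/* Low bits hold phase: zero means idle, nonzero means GP in progress. */
	static inline int rcu_seq_state(unsigned long s)
	{
		return s & RCU_SEQ_STATE_MASK;
	}

	/* Start a grace period: bump the value so the phase bits go nonzero. */
	static inline void rcu_seq_start(unsigned long *sp)
	{
		WRITE_ONCE(*sp, *sp + 1);
		smp_mb(); /* Order update-side accesses after the increment. */
	}

	/* End a grace period: round up to the next idle (phase == 0) value. */
	static inline void rcu_seq_end(unsigned long *sp)
	{
		smp_mb(); /* Order update-side accesses before the increment. */
		WRITE_ONCE(*sp, (*sp | RCU_SEQ_STATE_MASK) + 1);
	}
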
index 65eb856526b7c308b08088a31957d70baa21b5a7..c2a7facf7ff937a2265ad3103bd267d7aabf0d4b 100644 (file)
@@ -588,6 +588,7 @@ It is extremely simple:
        void synchronize_rcu(void)
        {
                write_lock(&rcu_gp_mutex);
+               smp_mb__after_spinlock();
                write_unlock(&rcu_gp_mutex);
        }
 
@@ -609,12 +610,15 @@ don't forget about them when submitting patches making use of RCU!]
 
 The rcu_read_lock() and rcu_read_unlock() primitive read-acquire
 and release a global reader-writer lock.  The synchronize_rcu()
-primitive write-acquires this same lock, then immediately releases
-it.  This means that once synchronize_rcu() exits, all RCU read-side
-critical sections that were in progress before synchronize_rcu() was
-called are guaranteed to have completed -- there is no way that
-synchronize_rcu() would have been able to write-acquire the lock
-otherwise.
+primitive write-acquires this same lock, then releases it.  This means
+that once synchronize_rcu() exits, all RCU read-side critical sections
+that were in progress before synchronize_rcu() was called are guaranteed
+to have completed -- there is no way that synchronize_rcu() would have
+been able to write-acquire the lock otherwise.  The smp_mb__after_spinlock()
+promotes synchronize_rcu() to a full memory barrier in compliance with
+the "Memory-Barrier Guarantees" listed in:
+
+       Documentation/RCU/Design/Requirements/Requirements.html.
 
 It is possible to nest rcu_read_lock(), since reader-writer locks may
 be recursively acquired.  Note also that rcu_read_lock() is immune
@@ -816,11 +820,13 @@ RCU list traversal:
        list_next_rcu
        list_for_each_entry_rcu
        list_for_each_entry_continue_rcu
+       list_for_each_entry_from_rcu
        hlist_first_rcu
        hlist_next_rcu
        hlist_pprev_rcu
        hlist_for_each_entry_rcu
        hlist_for_each_entry_rcu_bh
+       hlist_for_each_entry_from_rcu
        hlist_for_each_entry_continue_rcu
        hlist_for_each_entry_continue_rcu_bh
        hlist_nulls_first_rcu
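
Putting the pieces of the toy implementation above together, the
reader-writer-lock version of RCU looks as follows (a sketch; the lock
definition and the reader functions are assumed from the surrounding
text of whatisRCU.txt):

	static DEFINE_RWLOCK(rcu_gp_mutex);

	void rcu_read_lock(void)
	{
		read_lock(&rcu_gp_mutex);
	}

	void rcu_read_unlock(void)
	{
		read_unlock(&rcu_gp_mutex);
	}

	void synchronize_rcu(void)
	{
		write_lock(&rcu_gp_mutex);
		smp_mb__after_spinlock();	/* full-barrier guarantee */
		write_unlock(&rcu_gp_mutex);
	}

Because every pre-existing reader holds rcu_gp_mutex for reading, the
write acquisition cannot succeed until those readers have finished.
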
index 533ff5c68970aef7b71e976941e8b305f250f2d5..c370f5f0eb38b0cb8b785a361d2b45977ef1a311 100644 (file)
                        Set time (s) after boot for CPU-hotplug testing.
 
        rcutorture.onoff_interval= [KNL]
-                       Set time (s) between CPU-hotplug operations, or
-                       zero to disable CPU-hotplug testing.
+                       Set time (jiffies) between CPU-hotplug operations,
+                       or zero to disable CPU-hotplug testing.
 
        rcutorture.shuffle_interval= [KNL]
                        Set task-shuffle interval (s).  Shuffling tasks
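
Note that the unit change silently rescales existing boot lines: a value
that is kept as-is now requests far more frequent hotplug operations.
For example, a command line containing

	rcutorture.onoff_interval=3

used to mean one CPU-hotplug operation every 3 seconds, but now means
one every 3 jiffies (3 ms at HZ=1000, 30 ms at HZ=100).
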
index 5f89fb635a1b3dd409390931571336c9ca9d4137..f97fd8ab5e4594fde9014e3f4614f41069a01f07 100644 (file)
@@ -4,6 +4,7 @@ Required properties:
 
 - compatible : should be "ingenic,<socname>-intc". Valid strings are:
     ingenic,jz4740-intc
+    ingenic,jz4725b-intc
     ingenic,jz4770-intc
     ingenic,jz4775-intc
     ingenic,jz4780-intc
index 20f121daa9106f177a9224ecd12140e6d5f30781..697ca2f26d1b5d8b0d649709234958a2d0f3ada4 100644 (file)
@@ -7,6 +7,7 @@ Required properties:
     - "renesas,irqc-r8a73a4" (R-Mobile APE6)
     - "renesas,irqc-r8a7743" (RZ/G1M)
     - "renesas,irqc-r8a7745" (RZ/G1E)
+    - "renesas,irqc-r8a77470" (RZ/G1C)
     - "renesas,irqc-r8a7790" (R-Car H2)
     - "renesas,irqc-r8a7791" (R-Car M2-W)
     - "renesas,irqc-r8a7792" (R-Car V2H)
@@ -16,6 +17,7 @@ Required properties:
     - "renesas,intc-ex-r8a7796" (R-Car M3-W)
     - "renesas,intc-ex-r8a77965" (R-Car M3-N)
     - "renesas,intc-ex-r8a77970" (R-Car V3M)
+    - "renesas,intc-ex-r8a77980" (R-Car V3H)
     - "renesas,intc-ex-r8a77995" (R-Car D3)
 - #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
   interrupts.txt in this directory
index 7cebd5bba8a8eb15305966228440f77174744a97..d0d729016d65bf0bbaf90127867d73be2563091c 100644 (file)
@@ -5930,7 +5930,7 @@ F:        Documentation/dev-tools/gcov.rst
 
 GDB KERNEL DEBUGGING HELPER SCRIPTS
 M:     Jan Kiszka <jan.kiszka@siemens.com>
-M:     Kieran Bingham <kieran@bingham.xyz>
+M:     Kieran Bingham <kbingham@kernel.org>
 S:     Supported
 F:     scripts/gdb/
 
@@ -12039,9 +12039,9 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:     Documentation/RCU/
 X:     Documentation/RCU/torture.txt
 F:     include/linux/rcu*
-X:     include/linux/srcu.h
+X:     include/linux/srcu*.h
 F:     kernel/rcu/
-X:     kernel/torture.c
+X:     kernel/rcu/srcu*.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
@@ -13078,8 +13078,8 @@ L:      linux-kernel@vger.kernel.org
 W:     http://www.rdrop.com/users/paulmck/RCU/
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
-F:     include/linux/srcu.h
-F:     kernel/rcu/srcu.c
+F:     include/linux/srcu*.h
+F:     kernel/rcu/srcu*.c
 
 SERIAL LOW-POWER INTER-CHIP MEDIA BUS (SLIMbus)
 M:     Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
@@ -14438,6 +14438,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:     Documentation/RCU/torture.txt
 F:     kernel/torture.c
 F:     kernel/rcu/rcutorture.c
+F:     kernel/rcu/rcuperf.c
 F:     kernel/locking/locktorture.c
 
 TOSHIBA ACPI EXTRAS DRIVER
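
The widened srcu* globs above reroute patch traffic from the generic RCU
entry to the SRCU entry; the effect can be checked with the in-tree
helper (illustrative invocation, output omitted):

	$ ./scripts/get_maintainer.pl -f include/linux/srcutree.h

After this change the file matches the SRCU entry via its
"F: include/linux/srcu*.h" pattern and is excluded from the RCU entry by
the corresponding "X:" pattern.
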
index 7a3c4548162b7f818943107d84ce81348c49b322..863f58503beed45ca5ec40606a6b313fe4ae6b14 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
index 843edfd000be7210ebef62e529ba074df5ee242f..d7a81284c272d0e405bf4b1e200623e314585283 100644 (file)
@@ -337,8 +337,8 @@ config ARCH_MULTIPLATFORM
        select TIMER_OF
        select COMMON_CLK
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select MIGHT_HAVE_PCI
-       select MULTI_IRQ_HANDLER
        select PCI_DOMAINS if PCI
        select SPARSE_IRQ
        select USE_OF
@@ -465,9 +465,9 @@ config ARCH_DOVE
        bool "Marvell Dove"
        select CPU_PJ4
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
        select MIGHT_HAVE_PCI
-       select MULTI_IRQ_HANDLER
        select MVEBU_MBUS
        select PINCTRL
        select PINCTRL_DOVE
@@ -512,8 +512,8 @@ config ARCH_LPC32XX
        select COMMON_CLK
        select CPU_ARM926T
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
-       select MULTI_IRQ_HANDLER
        select SPARSE_IRQ
        select USE_OF
        help
@@ -532,11 +532,11 @@ config ARCH_PXA
        select TIMER_OF
        select CPU_XSCALE if !CPU_XSC3
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIO_PXA
        select GPIOLIB
        select HAVE_IDE
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
        select PLAT_PXA
        select SPARSE_IRQ
        help
@@ -572,11 +572,11 @@ config ARCH_SA1100
        select CPU_FREQ
        select CPU_SA1100
        select GENERIC_CLOCKEVENTS
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
        select HAVE_IDE
        select IRQ_DOMAIN
        select ISA
-       select MULTI_IRQ_HANDLER
        select NEED_MACH_MEMORY_H
        select SPARSE_IRQ
        help
@@ -590,10 +590,10 @@ config ARCH_S3C24XX
        select GENERIC_CLOCKEVENTS
        select GPIO_SAMSUNG
        select GPIOLIB
+       select GENERIC_IRQ_MULTI_HANDLER
        select HAVE_S3C2410_I2C if I2C
        select HAVE_S3C2410_WATCHDOG if WATCHDOG
        select HAVE_S3C_RTC if RTC_CLASS
-       select MULTI_IRQ_HANDLER
        select NEED_MACH_IO_H
        select SAMSUNG_ATAGS
        select USE_OF
@@ -627,10 +627,10 @@ config ARCH_OMAP1
        select CLKSRC_MMIO
        select GENERIC_CLOCKEVENTS
        select GENERIC_IRQ_CHIP
+       select GENERIC_IRQ_MULTI_HANDLER
        select GPIOLIB
        select HAVE_IDE
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
        select NEED_MACH_IO_H if PCCARD
        select NEED_MACH_MEMORY_H
        select SPARSE_IRQ
@@ -921,11 +921,6 @@ config IWMMXT
          Enable support for iWMMXt context switching at run time if
          running on a CPU that supports it.
 
-config MULTI_IRQ_HANDLER
-       bool
-       help
-         Allow each machine to specify it's own IRQ handler at run time.
-
 if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
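
The arch-private MULTI_IRQ_HANDLER machinery removed above is replaced
by the GENERIC_IRQ_MULTI_HANDLER support now selected by each platform;
the registration helper lives in the core genirq code instead of in
every architecture.  A sketch of the generic replacement (assuming the
kernel/irq/handle.c shape merged this cycle; not verbatim):

	#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;

	int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
	{
		if (WARN_ON(handle_arch_irq))
			return -EBUSY;	/* a handler may be set only once */

		handle_arch_irq = handle_irq;
		return 0;
	}
	#endif
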
index 17f1f1a814ff60968fdff9a18005c4448bc66690..38badaae8d9d712fe97ca5ac2f6b6de7198a44ec 100644 (file)
@@ -58,6 +58,9 @@ void efi_virtmap_unload(void);
 #define efi_call_runtime(f, ...)       sys_table_arg->runtime->f(__VA_ARGS__)
 #define efi_is_64bit()                 (false)
 
+#define efi_table_attr(table, attr, instance)                          \
+       ((table##_t *)instance)->attr
+
 #define efi_call_proto(protocol, f, instance, ...)                     \
        ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
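
efi_table_attr is the read-side counterpart of efi_call_proto: it
fetches a member rather than calling one.  On ARM there is no mixed
mode, so it reduces to a cast plus a field access.  A hypothetical use
(the variable names here are illustrative only):

	/* expands to ((efi_boot_services_t *)bt)->hdr.signature */
	u64 sig = efi_table_attr(efi_boot_services, hdr, bt).signature;
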
 
index b6f319606e306ad00864e81e3ead98a96cdf76ef..c883fcbe93b67ef68bfc18a6e48d4ec53c37cdd0 100644 (file)
@@ -31,11 +31,6 @@ extern void asm_do_IRQ(unsigned int, struct pt_regs *);
 void handle_IRQ(unsigned int, struct pt_regs *);
 void init_IRQ(void);
 
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-extern void (*handle_arch_irq)(struct pt_regs *);
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-#endif
-
 #ifdef CONFIG_SMP
 extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
                                           bool exclude_self);
index 5c1ad11aa39264aee7e9210cbf6747adab443930..bb8851208e1755b2c8eceff2054140f1a9933d01 100644 (file)
@@ -59,7 +59,7 @@ struct machine_desc {
        void                    (*init_time)(void);
        void                    (*init_machine)(void);
        void                    (*init_late)(void);
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
        void                    (*handle_irq)(struct pt_regs *);
 #endif
        void                    (*restart)(enum reboot_mode, const char *);
index 179a9f6bd1e31c63564fd3e67444d41916939617..e85a3af9ddeb5694b793363f8245ba1ad5f99899 100644 (file)
@@ -22,7 +22,7 @@
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
 #include <asm/vfpmacros.h>
-#ifndef CONFIG_MULTI_IRQ_HANDLER
+#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
 #include <mach/entry-macro.S>
 #endif
 #include <asm/thread_notify.h>
@@ -39,7 +39,7 @@
  * Interrupt handling.
  */
        .macro  irq_handler
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
        ldr     r1, =handle_arch_irq
        mov     r0, sp
        badr    lr, 9997f
@@ -1226,9 +1226,3 @@ vector_addrexcptn:
        .globl  cr_alignment
 cr_alignment:
        .space  4
-
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-       .globl  handle_arch_irq
-handle_arch_irq:
-       .space  4
-#endif
index ece04a457486c5998d312bce4f3c69b97e0e7b64..9908dacf9229fbfa694ceebdfb2ed1b534c3f522 100644 (file)
@@ -102,16 +102,6 @@ void __init init_IRQ(void)
        uniphier_cache_init();
 }
 
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
-       if (handle_arch_irq)
-               return;
-
-       handle_arch_irq = handle_irq;
-}
-#endif
-
 #ifdef CONFIG_SPARSE_IRQ
 int __init arch_probe_nr_irqs(void)
 {
index 35ca494c028cc841e1077bf59428b11d9d817fbb..4c249cb261f3913112792cd6cad0a7e2df17ff4f 100644 (file)
@@ -1145,7 +1145,7 @@ void __init setup_arch(char **cmdline_p)
 
        reserve_crashkernel();
 
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
        handle_arch_irq = mdesc->handle_irq;
 #endif
 
index 42c090cf02927283ccb2dc597e30205a11050ecb..3d1011957823108d5b988b983fe8225423cc7060 100644 (file)
@@ -74,6 +74,7 @@ config ARM64
        select GENERIC_CPU_AUTOPROBE
        select GENERIC_EARLY_IOREMAP
        select GENERIC_IDLE_POLL_SETUP
+       select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
        select GENERIC_IRQ_SHOW_LEVEL
@@ -264,9 +265,6 @@ config ARCH_SUPPORTS_UPROBES
 config ARCH_PROC_KCORE_TEXT
        def_bool y
 
-config MULTI_IRQ_HANDLER
-       def_bool y
-
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
index 88f5aef7934c77a5213fdc85a7f8fe435fc76e70..e3a375c4cb83c383242ac6b9cc8b3247939e0947 100644 (file)
         *                           u32 *macp, u8 const rk[], u32 rounds);
         */
 ENTRY(ce_aes_ccm_auth_data)
-       frame_push      7
-
-       mov     x19, x0
-       mov     x20, x1
-       mov     x21, x2
-       mov     x22, x3
-       mov     x23, x4
-       mov     x24, x5
-
-       ldr     w25, [x22]                      /* leftover from prev round? */
+       ldr     w8, [x3]                        /* leftover from prev round? */
        ld1     {v0.16b}, [x0]                  /* load mac */
-       cbz     w25, 1f
-       sub     w25, w25, #16
+       cbz     w8, 1f
+       sub     w8, w8, #16
        eor     v1.16b, v1.16b, v1.16b
-0:     ldrb    w7, [x20], #1                   /* get 1 byte of input */
-       subs    w21, w21, #1
-       add     w25, w25, #1
+0:     ldrb    w7, [x1], #1                    /* get 1 byte of input */
+       subs    w2, w2, #1
+       add     w8, w8, #1
        ins     v1.b[0], w7
        ext     v1.16b, v1.16b, v1.16b, #1      /* rotate in the input bytes */
        beq     8f                              /* out of input? */
-       cbnz    w25, 0b
+       cbnz    w8, 0b
        eor     v0.16b, v0.16b, v1.16b
-1:     ld1     {v3.4s}, [x23]                  /* load first round key */
-       prfm    pldl1strm, [x20]
-       cmp     w24, #12                        /* which key size? */
-       add     x6, x23, #16
-       sub     w7, w24, #2                     /* modified # of rounds */
+1:     ld1     {v3.4s}, [x4]                   /* load first round key */
+       prfm    pldl1strm, [x1]
+       cmp     w5, #12                         /* which key size? */
+       add     x6, x4, #16
+       sub     w7, w5, #2                      /* modified # of rounds */
        bmi     2f
        bne     5f
        mov     v5.16b, v3.16b
@@ -64,43 +55,33 @@ ENTRY(ce_aes_ccm_auth_data)
        ld1     {v5.4s}, [x6], #16              /* load next round key */
        bpl     3b
        aese    v0.16b, v4.16b
-       subs    w21, w21, #16                   /* last data? */
+       subs    w2, w2, #16                     /* last data? */
        eor     v0.16b, v0.16b, v5.16b          /* final round */
        bmi     6f
-       ld1     {v1.16b}, [x20], #16            /* load next input block */
+       ld1     {v1.16b}, [x1], #16             /* load next input block */
        eor     v0.16b, v0.16b, v1.16b          /* xor with mac */
-       beq     6f
-
-       if_will_cond_yield_neon
-       st1     {v0.16b}, [x19]                 /* store mac */
-       do_cond_yield_neon
-       ld1     {v0.16b}, [x19]                 /* reload mac */
-       endif_yield_neon
-
-       b       1b
-6:     st1     {v0.16b}, [x19]                 /* store mac */
+       bne     1b
+6:     st1     {v0.16b}, [x0]                  /* store mac */
        beq     10f
-       adds    w21, w21, #16
+       adds    w2, w2, #16
        beq     10f
-       mov     w25, w21
-7:     ldrb    w7, [x20], #1
+       mov     w8, w2
+7:     ldrb    w7, [x1], #1
        umov    w6, v0.b[0]
        eor     w6, w6, w7
-       strb    w6, [x19], #1
-       subs    w21, w21, #1
+       strb    w6, [x0], #1
+       subs    w2, w2, #1
        beq     10f
        ext     v0.16b, v0.16b, v0.16b, #1      /* rotate out the mac bytes */
        b       7b
-8:     mov     w7, w25
-       add     w25, w25, #16
+8:     mov     w7, w8
+       add     w8, w8, #16
 9:     ext     v1.16b, v1.16b, v1.16b, #1
        adds    w7, w7, #1
        bne     9b
        eor     v0.16b, v0.16b, v1.16b
-       st1     {v0.16b}, [x19]
-10:    str     w25, [x22]
-
-       frame_pop
+       st1     {v0.16b}, [x0]
+10:    str     w8, [x3]
        ret
 ENDPROC(ce_aes_ccm_auth_data)
 
@@ -145,29 +126,19 @@ ENTRY(ce_aes_ccm_final)
 ENDPROC(ce_aes_ccm_final)
 
        .macro  aes_ccm_do_crypt,enc
-       frame_push      8
-
-       mov     x19, x0
-       mov     x20, x1
-       mov     x21, x2
-       mov     x22, x3
-       mov     x23, x4
-       mov     x24, x5
-       mov     x25, x6
-
-       ldr     x26, [x25, #8]                  /* load lower ctr */
-       ld1     {v0.16b}, [x24]                 /* load mac */
-CPU_LE(        rev     x26, x26                )       /* keep swabbed ctr in reg */
+       ldr     x8, [x6, #8]                    /* load lower ctr */
+       ld1     {v0.16b}, [x5]                  /* load mac */
+CPU_LE(        rev     x8, x8                  )       /* keep swabbed ctr in reg */
 0:     /* outer loop */
-       ld1     {v1.8b}, [x25]                  /* load upper ctr */
-       prfm    pldl1strm, [x20]
-       add     x26, x26, #1
-       rev     x9, x26
-       cmp     w23, #12                        /* which key size? */
-       sub     w7, w23, #2                     /* get modified # of rounds */
+       ld1     {v1.8b}, [x6]                   /* load upper ctr */
+       prfm    pldl1strm, [x1]
+       add     x8, x8, #1
+       rev     x9, x8
+       cmp     w4, #12                         /* which key size? */
+       sub     w7, w4, #2                      /* get modified # of rounds */
        ins     v1.d[1], x9                     /* no carry in lower ctr */
-       ld1     {v3.4s}, [x22]                  /* load first round key */
-       add     x10, x22, #16
+       ld1     {v3.4s}, [x3]                   /* load first round key */
+       add     x10, x3, #16
        bmi     1f
        bne     4f
        mov     v5.16b, v3.16b
@@ -194,9 +165,9 @@ CPU_LE(     rev     x26, x26                )       /* keep swabbed ctr in reg */
        bpl     2b
        aese    v0.16b, v4.16b
        aese    v1.16b, v4.16b
-       subs    w21, w21, #16
-       bmi     7f                              /* partial block? */
-       ld1     {v2.16b}, [x20], #16            /* load next input block */
+       subs    w2, w2, #16
+       bmi     6f                              /* partial block? */
+       ld1     {v2.16b}, [x1], #16             /* load next input block */
        .if     \enc == 1
        eor     v2.16b, v2.16b, v5.16b          /* final round enc+mac */
        eor     v1.16b, v1.16b, v2.16b          /* xor with crypted ctr */
@@ -205,29 +176,18 @@ CPU_LE(   rev     x26, x26                )       /* keep swabbed ctr in reg */
        eor     v1.16b, v2.16b, v5.16b          /* final round enc */
        .endif
        eor     v0.16b, v0.16b, v2.16b          /* xor mac with pt ^ rk[last] */
-       st1     {v1.16b}, [x19], #16            /* write output block */
-       beq     5f
-
-       if_will_cond_yield_neon
-       st1     {v0.16b}, [x24]                 /* store mac */
-       do_cond_yield_neon
-       ld1     {v0.16b}, [x24]                 /* reload mac */
-       endif_yield_neon
-
-       b       0b
-5:
-CPU_LE(        rev     x26, x26                        )
-       st1     {v0.16b}, [x24]                 /* store mac */
-       str     x26, [x25, #8]                  /* store lsb end of ctr (BE) */
-
-6:     frame_pop
-       ret
-
-7:     eor     v0.16b, v0.16b, v5.16b          /* final round mac */
+       st1     {v1.16b}, [x0], #16             /* write output block */
+       bne     0b
+CPU_LE(        rev     x8, x8                  )
+       st1     {v0.16b}, [x5]                  /* store mac */
+       str     x8, [x6, #8]                    /* store lsb end of ctr (BE) */
+5:     ret
+
+6:     eor     v0.16b, v0.16b, v5.16b          /* final round mac */
        eor     v1.16b, v1.16b, v5.16b          /* final round enc */
-       st1     {v0.16b}, [x24]                 /* store mac */
-       add     w21, w21, #16                   /* process partial tail block */
-8:     ldrb    w9, [x20], #1                   /* get 1 byte of input */
+       st1     {v0.16b}, [x5]                  /* store mac */
+       add     w2, w2, #16                     /* process partial tail block */
+7:     ldrb    w9, [x1], #1                    /* get 1 byte of input */
        umov    w6, v1.b[0]                     /* get top crypted ctr byte */
        umov    w7, v0.b[0]                     /* get top mac byte */
        .if     \enc == 1
@@ -237,13 +197,13 @@ CPU_LE(   rev     x26, x26                        )
        eor     w9, w9, w6
        eor     w7, w7, w9
        .endif
-       strb    w9, [x19], #1                   /* store out byte */
-       strb    w7, [x24], #1                   /* store mac byte */
-       subs    w21, w21, #1
-       beq     6b
+       strb    w9, [x0], #1                    /* store out byte */
+       strb    w7, [x5], #1                    /* store mac byte */
+       subs    w2, w2, #1
+       beq     5b
        ext     v0.16b, v0.16b, v0.16b, #1      /* shift out mac byte */
        ext     v1.16b, v1.16b, v1.16b, #1      /* shift out ctr byte */
-       b       8b
+       b       7b
        .endm
 
        /*
index dcffb9e77589cd843bb04691a46d117013f12ba1..c723647b37db0387f58d3ea88f899147fdbc2727 100644 (file)
@@ -322,55 +322,41 @@ ENDPROC(pmull_ghash_update_p8)
        .endm
 
        .macro          pmull_gcm_do_crypt, enc
-       frame_push      10
+       ld1             {SHASH.2d}, [x4]
+       ld1             {XL.2d}, [x1]
+       ldr             x8, [x5, #8]                    // load lower counter
 
-       mov             x19, x0
-       mov             x20, x1
-       mov             x21, x2
-       mov             x22, x3
-       mov             x23, x4
-       mov             x24, x5
-       mov             x25, x6
-       mov             x26, x7
-       .if             \enc == 1
-       ldr             x27, [sp, #96]                  // first stacked arg
-       .endif
-
-       ldr             x28, [x24, #8]                  // load lower counter
-CPU_LE(        rev             x28, x28        )
-
-0:     mov             x0, x25
-       load_round_keys w26, x0
-       ld1             {SHASH.2d}, [x23]
-       ld1             {XL.2d}, [x20]
+       load_round_keys w7, x6
 
        movi            MASK.16b, #0xe1
        ext             SHASH2.16b, SHASH.16b, SHASH.16b, #8
+CPU_LE(        rev             x8, x8          )
        shl             MASK.2d, MASK.2d, #57
        eor             SHASH2.16b, SHASH2.16b, SHASH.16b
 
        .if             \enc == 1
-       ld1             {KS.16b}, [x27]
+       ldr             x10, [sp]
+       ld1             {KS.16b}, [x10]
        .endif
 
-1:     ld1             {CTR.8b}, [x24]                 // load upper counter
-       ld1             {INP.16b}, [x22], #16
-       rev             x9, x28
-       add             x28, x28, #1
-       sub             w19, w19, #1
+0:     ld1             {CTR.8b}, [x5]                  // load upper counter
+       ld1             {INP.16b}, [x3], #16
+       rev             x9, x8
+       add             x8, x8, #1
+       sub             w0, w0, #1
        ins             CTR.d[1], x9                    // set lower counter
 
        .if             \enc == 1
        eor             INP.16b, INP.16b, KS.16b        // encrypt input
-       st1             {INP.16b}, [x21], #16
+       st1             {INP.16b}, [x2], #16
        .endif
 
        rev64           T1.16b, INP.16b
 
-       cmp             w26, #12
-       b.ge            4f                              // AES-192/256?
+       cmp             w7, #12
+       b.ge            2f                              // AES-192/256?
 
-2:     enc_round       CTR, v21
+1:     enc_round       CTR, v21
 
        ext             T2.16b, XL.16b, XL.16b, #8
        ext             IN1.16b, T1.16b, T1.16b, #8
@@ -425,39 +411,27 @@ CPU_LE(   rev             x28, x28        )
 
        .if             \enc == 0
        eor             INP.16b, INP.16b, KS.16b
-       st1             {INP.16b}, [x21], #16
+       st1             {INP.16b}, [x2], #16
        .endif
 
-       cbz             w19, 3f
+       cbnz            w0, 0b
 
-       if_will_cond_yield_neon
-       st1             {XL.2d}, [x20]
-       .if             \enc == 1
-       st1             {KS.16b}, [x27]
-       .endif
-       do_cond_yield_neon
-       b               0b
-       endif_yield_neon
+CPU_LE(        rev             x8, x8          )
+       st1             {XL.2d}, [x1]
+       str             x8, [x5, #8]                    // store lower counter
 
-       b               1b
-
-3:     st1             {XL.2d}, [x20]
        .if             \enc == 1
-       st1             {KS.16b}, [x27]
+       st1             {KS.16b}, [x10]
        .endif
 
-CPU_LE(        rev             x28, x28        )
-       str             x28, [x24, #8]                  // store lower counter
-
-       frame_pop
        ret
 
-4:     b.eq            5f                              // AES-192?
+2:     b.eq            3f                              // AES-192?
        enc_round       CTR, v17
        enc_round       CTR, v18
-5:     enc_round       CTR, v19
+3:     enc_round       CTR, v19
        enc_round       CTR, v20
-       b               2b
+       b               1b
        .endm
 
        /*
index 192d791f11036747704fedbc0839b37a4c1e49fc..7ed320895d1f463d1e95cd9ec6328a49eed765ae 100644 (file)
@@ -87,6 +87,9 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
 #define efi_call_runtime(f, ...)       sys_table_arg->runtime->f(__VA_ARGS__)
 #define efi_is_64bit()                 (true)
 
+#define efi_table_attr(table, attr, instance)                          \
+       ((table##_t *)instance)->attr
+
 #define efi_call_proto(protocol, f, instance, ...)                     \
        ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
 
index a0fee6985e6a75b621644ea4e7d19f8b4f220643..b2b0c6405eb082fea7c99e607e7f2c9d9cca4ee7 100644 (file)
@@ -8,8 +8,6 @@
 
 struct pt_regs;
 
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
 static inline int nr_legacy_irqs(void)
 {
        return 0;
index 60e5fc661f745b899ff6137b4c3f002de867a390..780a12f59a8f8c3426c3a4274e32ae9c3d829ab0 100644 (file)
@@ -42,16 +42,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        return 0;
 }
 
-void (*handle_arch_irq)(struct pt_regs *) = NULL;
-
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
-       if (handle_arch_irq)
-               return;
-
-       handle_arch_irq = handle_irq;
-}
-
 #ifdef CONFIG_VMAP_STACK
 static void init_irq_stacks(void)
 {
index 785612b576f7df4de1f6287426ce79a49aed8255..b29f93774d95590b1008815fbfc129afa826d4a7 100644 (file)
@@ -2,6 +2,7 @@
 config M68K
        bool
        default y
+       select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
        select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
        select ARCH_NO_COHERENT_DMA_MMAP if !MMU
        select HAVE_IDE
@@ -24,6 +25,10 @@ config M68K
        select MODULES_USE_ELF_RELA
        select OLD_SIGSUSPEND3
        select OLD_SIGACTION
+       select DMA_NONCOHERENT_OPS if HAS_DMA
+       select HAVE_MEMBLOCK
+       select ARCH_DISCARD_MEMBLOCK
+       select NO_BOOTMEM
 
 config CPU_BIG_ENDIAN
        def_bool y
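
Selecting ARCH_HAS_SYNC_DMA_FOR_DEVICE and DMA_NONCOHERENT_OPS moves
m68k onto the common noncoherent DMA code, so the architecture only has
to supply the cache-maintenance hook.  A sketch of that hook (assuming
the arch/m68k/kernel/dma.c conversion in this series; not verbatim):

	void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
			size_t size, enum dma_data_direction dir)
	{
		switch (dir) {
		case DMA_BIDIRECTIONAL:
		case DMA_TO_DEVICE:
			cache_push(handle, size);	/* write back before device reads */
			break;
		case DMA_FROM_DEVICE:
			cache_clear(handle, size);	/* discard before device writes */
			break;
		default:
			pr_err_ratelimited("dma_sync: unsupported dir %u\n", dir);
			break;
		}
	}
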
index b2a6bc63f8cd1aab4806113812bf4c3987bbd8f8..aef8d42e078ddffe50f9557702bd493810a0ef13 100644 (file)
@@ -31,7 +31,6 @@ extern void dn_sched_init(irq_handler_t handler);
 extern void dn_init_IRQ(void);
 extern u32 dn_gettimeoffset(void);
 extern int dn_dummy_hwclk(int, struct rtc_time *);
-extern int dn_dummy_set_clock_mmss(unsigned long);
 extern void dn_dummy_reset(void);
 #ifdef CONFIG_HEARTBEAT
 static void dn_heartbeat(int on);
@@ -156,7 +155,6 @@ void __init config_apollo(void)
        arch_gettimeoffset   = dn_gettimeoffset;
        mach_max_dma_address = 0xffffffff;
        mach_hwclk           = dn_dummy_hwclk; /* */
-       mach_set_clock_mmss  = dn_dummy_set_clock_mmss; /* */
        mach_reset           = dn_dummy_reset;  /* */
 #ifdef CONFIG_HEARTBEAT
        mach_heartbeat = dn_heartbeat;
@@ -240,12 +238,6 @@ int dn_dummy_hwclk(int op, struct rtc_time *t) {
 
 }
 
-int dn_dummy_set_clock_mmss(unsigned long nowtime)
-{
-       pr_info("set_clock_mmss\n");
-       return 0;
-}
-
 void dn_dummy_reset(void) {
 
   dn_serial_print("The end !\n");
index 565c6f06ab0b8193ef02f8028d6a8baf8ff00b06..bd96702a1ad0977334a31ea21d8c782c7ddf1f00 100644 (file)
@@ -81,9 +81,6 @@ extern void atari_sched_init(irq_handler_t);
 extern u32 atari_gettimeoffset(void);
 extern int atari_mste_hwclk (int, struct rtc_time *);
 extern int atari_tt_hwclk (int, struct rtc_time *);
-extern int atari_mste_set_clock_mmss (unsigned long);
-extern int atari_tt_set_clock_mmss (unsigned long);
-
 
 /* ++roman: This is a more elaborate test for an SCC chip, since the plain
  * Medusa board generates DTACK at the SCC's standard addresses, but a SCC
@@ -362,13 +359,11 @@ void __init config_atari(void)
                ATARIHW_SET(TT_CLK);
                pr_cont(" TT_CLK");
                mach_hwclk = atari_tt_hwclk;
-               mach_set_clock_mmss = atari_tt_set_clock_mmss;
        }
        if (hwreg_present(&mste_rtc.sec_ones)) {
                ATARIHW_SET(MSTE_CLK);
                pr_cont(" MSTE_CLK");
                mach_hwclk = atari_mste_hwclk;
-               mach_set_clock_mmss = atari_mste_set_clock_mmss;
        }
        if (!MACH_IS_MEDUSA && hwreg_present(&dma_wd.fdc_speed) &&
            hwreg_write(&dma_wd.fdc_speed, 0)) {
index c549b48174ec8342620816d846ea7d492f36d0b1..9cca64286464c31ae9b961609f67c3afbbb25d73 100644 (file)
@@ -285,69 +285,6 @@ int atari_tt_hwclk( int op, struct rtc_time *t )
     return( 0 );
 }
 
-
-int atari_mste_set_clock_mmss (unsigned long nowtime)
-{
-    short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-    struct MSTE_RTC val;
-    unsigned char rtc_minutes;
-
-    mste_read(&val);
-    rtc_minutes= val.min_ones + val.min_tens * 10;
-    if ((rtc_minutes < real_minutes
-         ? real_minutes - rtc_minutes
-         : rtc_minutes - real_minutes) < 30)
-    {
-        val.sec_ones = real_seconds % 10;
-        val.sec_tens = real_seconds / 10;
-        val.min_ones = real_minutes % 10;
-        val.min_tens = real_minutes / 10;
-        mste_write(&val);
-    }
-    else
-        return -1;
-    return 0;
-}
-
-int atari_tt_set_clock_mmss (unsigned long nowtime)
-{
-    int retval = 0;
-    short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-    unsigned char save_control, save_freq_select, rtc_minutes;
-
-    save_control = RTC_READ (RTC_CONTROL); /* tell the clock it's being set */
-    RTC_WRITE (RTC_CONTROL, save_control | RTC_SET);
-
-    save_freq_select = RTC_READ (RTC_FREQ_SELECT); /* stop and reset prescaler */
-    RTC_WRITE (RTC_FREQ_SELECT, save_freq_select | RTC_DIV_RESET2);
-
-    rtc_minutes = RTC_READ (RTC_MINUTES);
-    if (!(save_control & RTC_DM_BINARY))
-       rtc_minutes = bcd2bin(rtc_minutes);
-
-    /* Since we're only adjusting minutes and seconds, don't interfere
-       with hour overflow.  This avoids messing with unknown time zones
-       but requires your RTC not to be off by more than 30 minutes.  */
-    if ((rtc_minutes < real_minutes
-         ? real_minutes - rtc_minutes
-         : rtc_minutes - real_minutes) < 30)
-        {
-            if (!(save_control & RTC_DM_BINARY))
-                {
-                   real_seconds = bin2bcd(real_seconds);
-                   real_minutes = bin2bcd(real_minutes);
-                }
-            RTC_WRITE (RTC_SECONDS, real_seconds);
-            RTC_WRITE (RTC_MINUTES, real_minutes);
-        }
-    else
-        retval = -1;
-
-    RTC_WRITE (RTC_FREQ_SELECT, save_freq_select);
-    RTC_WRITE (RTC_CONTROL, save_control);
-    return retval;
-}
-
 /*
  * Local variables:
  *  c-indent-level: 4
index 2cfff47650407479cae907205df89ea039934a37..143ee9fa3893ecce16e13c87309354a6d5c6211e 100644 (file)
@@ -41,7 +41,6 @@ static void bvme6000_get_model(char *model);
 extern void bvme6000_sched_init(irq_handler_t handler);
 extern u32 bvme6000_gettimeoffset(void);
 extern int bvme6000_hwclk (int, struct rtc_time *);
-extern int bvme6000_set_clock_mmss (unsigned long);
 extern void bvme6000_reset (void);
 void bvme6000_set_vectors (void);
 
@@ -113,7 +112,6 @@ void __init config_bvme6000(void)
     mach_init_IRQ        = bvme6000_init_IRQ;
     arch_gettimeoffset   = bvme6000_gettimeoffset;
     mach_hwclk           = bvme6000_hwclk;
-    mach_set_clock_mmss         = bvme6000_set_clock_mmss;
     mach_reset          = bvme6000_reset;
     mach_get_model       = bvme6000_get_model;
 
@@ -305,46 +303,3 @@ int bvme6000_hwclk(int op, struct rtc_time *t)
 
        return 0;
 }
-
-/*
- * Set the minutes and seconds from seconds value 'nowtime'.  Fail if
- * clock is out by > 30 minutes.  Logic lifted from atari code.
- * Algorithm is to wait for the 10ms register to change, and then to
- * wait a short while, and then set it.
- */
-
-int bvme6000_set_clock_mmss (unsigned long nowtime)
-{
-       int retval = 0;
-       short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-       unsigned char rtc_minutes, rtc_tenms;
-       volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
-       unsigned char msr = rtc->msr & 0xc0;
-       unsigned long flags;
-       volatile int i;
-
-       rtc->msr = 0;           /* Ensure clock accessible */
-       rtc_minutes = bcd2bin (rtc->bcd_min);
-
-       if ((rtc_minutes < real_minutes
-               ? real_minutes - rtc_minutes
-                       : rtc_minutes - real_minutes) < 30)
-       {
-               local_irq_save(flags);
-               rtc_tenms = rtc->bcd_tenms;
-               while (rtc_tenms == rtc->bcd_tenms)
-                       ;
-               for (i = 0; i < 1000; i++)
-                       ;
-               rtc->bcd_min = bin2bcd(real_minutes);
-               rtc->bcd_sec = bin2bcd(real_seconds);
-               local_irq_restore(flags);
-       }
-       else
-               retval = -1;
-
-       rtc->msr = msr;
-
-       return retval;
-}
-
index a874e54404d10c94243c8b47bdde41afd353cc4f..1d5483f6e457b0a0fe62d3722e1a0e3377063823 100644 (file)
@@ -52,6 +52,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -98,18 +99,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -122,6 +119,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -200,7 +198,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -231,7 +228,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -260,7 +256,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -356,6 +352,7 @@ CONFIG_A2091_SCSI=y
 CONFIG_GVP11_SCSI=y
 CONFIG_SCSI_A4000T=y
 CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
 CONFIG_BLK_DEV_DM=m
@@ -363,6 +360,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -402,8 +400,8 @@ CONFIG_A2065=y
 CONFIG_ARIADNE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CIRRUS is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -412,8 +410,10 @@ CONFIG_ARIADNE=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
 CONFIG_HYDRA=y
 CONFIG_APNE=y
 CONFIG_ZORRO8390=y
@@ -426,9 +426,9 @@ CONFIG_ZORRO8390=y
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -478,6 +478,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -499,7 +500,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -600,6 +601,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -622,6 +624,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -657,6 +664,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 8ce39e23aa4272440426a263c97833814d0a0122..52a0af127951f5f0ed11e9022822fe6992d1140c 100644 (file)
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -381,14 +378,15 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -400,9 +398,9 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -440,6 +438,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -458,7 +457,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -559,6 +558,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -581,6 +581,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -616,6 +621,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 346c4e75edf869b25d6910ee2ffe01b3ef334a60..b3103e51268a31759ae841a28559720a6f1aadac 100644 (file)
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -391,14 +388,15 @@ CONFIG_VETH=m
 CONFIG_ATARILANCE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
 CONFIG_NE2000=y
@@ -411,9 +409,9 @@ CONFIG_NE2000=y
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -480,7 +478,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -581,6 +579,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -603,6 +602,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -638,6 +642,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index fca9c7aa71a34e072420ca0482d34a4a7a850246..fb7d651a4cabe203b02e455f55d0dff0e19b1e55 100644 (file)
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index f9eab174915c59bb5ab8da593b04574cbc765b9c..6b37f5537c3905c5e96b8d65a7653acc490abc65 100644 (file)
@@ -50,6 +50,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -96,18 +97,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -120,6 +117,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -198,7 +196,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -229,7 +226,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -258,7 +254,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -299,6 +294,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -345,6 +341,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -382,14 +379,15 @@ CONFIG_VETH=m
 CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -401,9 +399,9 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -443,6 +441,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -460,7 +459,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -561,6 +560,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -583,6 +583,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -618,6 +623,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index b52e597899eb33f42b911541a0969eb70a61c29c..930cc2965a113c9bdd8ef4de54fc88a1ced4e6b9 100644 (file)
@@ -49,6 +49,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -95,18 +96,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -119,6 +116,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -197,7 +195,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -228,7 +225,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -257,7 +253,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -301,6 +296,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -354,6 +350,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -398,8 +395,8 @@ CONFIG_VETH=m
 CONFIG_MACMACE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -407,6 +404,7 @@ CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 CONFIG_MACSONIC=y
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -420,9 +418,9 @@ CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -465,6 +463,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -482,7 +481,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -583,6 +582,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -605,6 +605,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -640,6 +645,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 2a84eeec5b02af12e72cb0155c7c530a146427ff..e7dd2530012759c3e8ef328044fab541d4a53741 100644 (file)
@@ -59,6 +59,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -105,18 +106,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -129,6 +126,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -207,7 +205,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -238,7 +235,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -267,7 +263,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -311,6 +306,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -373,6 +369,7 @@ CONFIG_A2091_SCSI=y
 CONFIG_GVP11_SCSI=y
 CONFIG_SCSI_A4000T=y
 CONFIG_SCSI_ZORRO7XX=y
+CONFIG_SCSI_ZORRO_ESP=y
 CONFIG_ATARI_SCSI=y
 CONFIG_MAC_SCSI=y
 CONFIG_SCSI_MAC_ESP=y
@@ -387,6 +384,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -438,8 +436,8 @@ CONFIG_SUN3LANCE=y
 CONFIG_MACMACE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -449,9 +447,11 @@ CONFIG_BVME6000_NET=y
 CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 CONFIG_MACSONIC=y
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
+CONFIG_XSURF100=y
 CONFIG_HYDRA=y
 CONFIG_MAC8390=y
 CONFIG_NE2000=y
@@ -466,9 +466,9 @@ CONFIG_ZORRO8390=y
 CONFIG_SMC91X=y
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PLIP=m
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -533,6 +533,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -562,7 +563,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -663,6 +664,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -685,6 +687,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -720,6 +727,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 476e69994340ebdf90a0ae6cd78dce6bf8a6d210..b383327fd77a9411922848696e9f299a0ba8909d 100644 (file)
@@ -47,6 +47,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -93,18 +94,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -117,6 +114,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -195,7 +193,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -226,7 +223,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -255,7 +251,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -296,6 +291,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -343,6 +339,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
 CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 1477cda9146e76927c6e42e038aab6b9da57642e..9783d3deb9e9d8525ba864ca9eeeae02ca6a46cb 100644 (file)
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -344,6 +340,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -380,14 +377,15 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -399,9 +397,9 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -433,6 +431,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -450,7 +449,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -551,6 +550,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -573,6 +573,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -608,6 +613,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index b3a543dc48a072ef31fba56f7f395fee25ea6014..a35d10ee10cb709b9e16b27f97d0f6144b9692b1 100644 (file)
@@ -48,6 +48,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -94,18 +95,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -118,6 +115,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -196,7 +194,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +224,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -256,7 +252,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -297,6 +292,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -350,6 +346,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -388,8 +385,8 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_AMD is not set
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CIRRUS is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
@@ -398,6 +395,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
 CONFIG_NE2000=y
@@ -410,9 +408,9 @@ CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_SMSC is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PLIP=m
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -455,6 +453,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -473,7 +472,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -574,6 +573,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -596,6 +596,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -631,6 +636,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index d543ed5dfa967b5608713e26e321a692926b2ac2..573bf922d44823b767db2208204d07907a9cce5b 100644 (file)
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -385,6 +382,7 @@ CONFIG_SUN3LANCE=y
 CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -574,6 +574,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -609,6 +614,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index a67e54246023e5e4b73ca117df70334e409b9f2b..efb27a7fcc559d5bb778998330d90006a46f9b0b 100644 (file)
@@ -45,6 +45,7 @@ CONFIG_UNIX_DIAG=m
 CONFIG_TLS=m
 CONFIG_XFRM_MIGRATE=y
 CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
 CONFIG_INET=y
 CONFIG_IP_PNP=y
 CONFIG_IP_PNP_DHCP=y
@@ -91,18 +92,14 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_SET=m
 CONFIG_NF_TABLES_INET=y
 CONFIG_NF_TABLES_NETDEV=y
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
-CONFIG_NFT_RT=m
 CONFIG_NFT_NUMGEN=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_FLOW_OFFLOAD=m
-CONFIG_NFT_SET_RBTREE=m
-CONFIG_NFT_SET_HASH=m
-CONFIG_NFT_SET_BITMAP=m
 CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_MASQ=m
@@ -115,6 +112,7 @@ CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
 CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_SOCKET=m
 CONFIG_NFT_DUP_NETDEV=m
 CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +191,6 @@ CONFIG_IP_SET_HASH_NETPORT=m
 CONFIG_IP_SET_HASH_NETIFACE=m
 CONFIG_IP_SET_LIST_SET=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_SOCKET_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_DUP_IPV4=m
 CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +221,6 @@ CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
 CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_NF_SOCKET_IPV6=m
 CONFIG_NFT_CHAIN_ROUTE_IPV6=m
 CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_NFT_MASQ_IPV6=m
@@ -253,7 +249,6 @@ CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
 CONFIG_IP6_NF_TARGET_NPT=m
 CONFIG_NF_TABLES_BRIDGE=y
-CONFIG_NFT_BRIDGE_META=m
 CONFIG_NFT_BRIDGE_REJECT=m
 CONFIG_NF_LOG_BRIDGE=m
 CONFIG_BRIDGE_NF_EBTABLES=m
@@ -294,6 +289,7 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
 CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
+# CONFIG_BATMAN_ADV_BATMAN_V is not set
 CONFIG_BATMAN_ADV_DAT=y
 CONFIG_BATMAN_ADV_NC=y
 CONFIG_BATMAN_ADV_MCAST=y
@@ -341,6 +337,7 @@ CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_ERA=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_RAID=m
@@ -378,14 +375,15 @@ CONFIG_VETH=m
 CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_CADENCE is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
 # CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_NI is not set
@@ -397,9 +395,9 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
 # CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -435,6 +433,7 @@ CONFIG_HIDRAW=y
 CONFIG_UHID=m
 # CONFIG_HID_GENERIC is not set
 # CONFIG_HID_ITE is not set
+# CONFIG_HID_REDRAGON is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_RTC_CLASS=y
 # CONFIG_RTC_NVMEM is not set
@@ -452,7 +451,7 @@ CONFIG_FS_ENCRYPTION=m
 CONFIG_FANOTIFY=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_AUTOFS4_FS=m
+CONFIG_AUTOFS_FS=m
 CONFIG_FUSE_FS=m
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
@@ -553,6 +552,7 @@ CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
+CONFIG_TEST_OVERFLOW=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_HASH=m
 CONFIG_TEST_USER_COPY=m
@@ -575,6 +575,11 @@ CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -610,6 +615,7 @@ CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_DRBG_HASH=y
 CONFIG_CRYPTO_DRBG_CTR=y
index 4d8d68c4e3ddd5193c9826680b60a3b009c7aa91..a4b8d3331a9e2c6c73e0de5120f62303e791854e 100644 (file)
@@ -1,6 +1,7 @@
 generic-y += barrier.h
 generic-y += compat.h
 generic-y += device.h
+generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += extable.h
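
With dma-mapping.h added to generic-y, Kbuild generates a one-line wrapper in arch/m68k/include/generated/asm/ instead of the hand-written header deleted below. Assuming the standard generic-y wrapper mechanism, the generated file is just:

/* arch/m68k/include/generated/asm/dma-mapping.h, emitted by Kbuild */
#include <asm-generic/dma-mapping.h>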
index 93b47b1f6fb420a1d7d52bd096f611b6668eceb0..54009ea710b37a73bb86b0100dc8983cb98fcbcc 100644 (file)
@@ -454,7 +454,7 @@ static inline unsigned long ffz(unsigned long word)
  */
 #if (defined(__mcfisaaplus__) || defined(__mcfisac__)) && \
        !defined(CONFIG_M68000) && !defined(CONFIG_MCPU32)
-static inline int __ffs(int x)
+static inline unsigned long __ffs(unsigned long x)
 {
        __asm__ __volatile__ ("bitrev %0; ff1 %0"
                : "=d" (x)
@@ -493,7 +493,11 @@ static inline int ffs(int x)
                : "dm" (x & -x));
        return 32 - cnt;
 }
-#define __ffs(x) (ffs(x) - 1)
+
+static inline unsigned long __ffs(unsigned long x)
+{
+       return ffs(x) - 1;
+}
 
 /*
  *     fls: find last bit set.
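
Both __ffs() variants now take and return unsigned long, the prototype that generic bit-search code expects; an int version risks truncation or sign-extension once callers treat the result as a word-sized bit offset. A minimal sketch of a hypothetical caller in the generic find_first_bit() style (not from this patch):

#include <linux/bitops.h>

/* Hypothetical: scan an unsigned long array for the first set bit. */
static unsigned long first_set(const unsigned long *words, unsigned long nwords)
{
        unsigned long i;

        for (i = 0; i < nwords; i++)
                if (words[i])
                        return i * BITS_PER_LONG + __ffs(words[i]);
        return nwords * BITS_PER_LONG;  /* no bit set */
}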
diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
deleted file mode 100644 (file)
index e3722ed..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _M68K_DMA_MAPPING_H
-#define _M68K_DMA_MAPPING_H
-
-extern const struct dma_map_ops m68k_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-        return &m68k_dma_ops;
-}
-
-#endif  /* _M68K_DMA_MAPPING_H */
index ca2849afb0877339e18866885b2e8f76401b6ccd..aabe6420ead2a5996d233a00af822d5dff0eac87 100644 (file)
@@ -1,6 +1,13 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _M68K_IO_H
+#define _M68K_IO_H
+
 #if defined(__uClinux__) || defined(CONFIG_COLDFIRE)
 #include <asm/io_no.h>
 #else
 #include <asm/io_mm.h>
 #endif
+
+#include <asm-generic/io.h>
+
+#endif /* _M68K_IO_H */
index fe485f4f5fac4d92a5bbc9ad34d4f217b8b8b416..782b78f8a04890b315685b0ea78cab16a2d219af 100644 (file)
  *    isa_readX(),isa_writeX()  are for ISA memory
  */
 
-#ifndef _IO_H
-#define _IO_H
+#ifndef _M68K_IO_MM_H
+#define _M68K_IO_MM_H
 
 #ifdef __KERNEL__
 
-#define ARCH_HAS_IOREMAP_WT
-
 #include <linux/compiler.h>
 #include <asm/raw_io.h>
 #include <asm/virtconvert.h>
@@ -369,40 +367,6 @@ static inline void isa_delay(void)
 #define writew(val, addr)      out_le16((addr), (val))
 #endif /* CONFIG_ATARI_ROM_ISA */
 
-#if !defined(CONFIG_ISA) && !defined(CONFIG_ATARI_ROM_ISA)
-/*
- * We need to define dummy functions for GENERIC_IOMAP support.
- */
-#define inb(port)          0xff
-#define inb_p(port)        0xff
-#define outb(val,port)     ((void)0)
-#define outb_p(val,port)   ((void)0)
-#define inw(port)          0xffff
-#define inw_p(port)        0xffff
-#define outw(val,port)     ((void)0)
-#define outw_p(val,port)   ((void)0)
-#define inl(port)          0xffffffffUL
-#define inl_p(port)        0xffffffffUL
-#define outl(val,port)     ((void)0)
-#define outl_p(val,port)   ((void)0)
-
-#define insb(port,buf,nr)  ((void)0)
-#define outsb(port,buf,nr) ((void)0)
-#define insw(port,buf,nr)  ((void)0)
-#define outsw(port,buf,nr) ((void)0)
-#define insl(port,buf,nr)  ((void)0)
-#define outsl(port,buf,nr) ((void)0)
-
-/*
- * These should be valid on any ioremap()ed region
- */
-#define readb(addr)      in_8(addr)
-#define writeb(val,addr) out_8((addr),(val))
-#define readw(addr)      in_le16(addr)
-#define writew(val,addr) out_le16((addr),(val))
-
-#endif /* !CONFIG_ISA && !CONFIG_ATARI_ROM_ISA */
-
 #define readl(addr)      in_le32(addr)
 #define writel(val,addr) out_le32((addr),(val))
 
@@ -444,4 +408,4 @@ static inline void isa_delay(void)
 #define writew_relaxed(b, addr)        writew(b, addr)
 #define writel_relaxed(b, addr)        writel(b, addr)
 
-#endif /* _IO_H */
+#endif /* _M68K_IO_MM_H */
index 83a0a6d449f44bdd5fa7396a836cba2339478108..0498192e1d983292e1ac3fe7ada7e154cdebbe98 100644 (file)
@@ -131,19 +131,7 @@ static inline void writel(u32 value, volatile void __iomem *addr)
 #define PCI_SPACE_LIMIT        PCI_IO_MASK
 #endif /* CONFIG_PCI */
 
-/*
- * These are defined in kmap.h as static inline functions. To maintain
- * previous behavior we put these define guards here so io_mm.h doesn't
- * see them.
- */
-#ifdef CONFIG_MMU
-#define memset_io memset_io
-#define memcpy_fromio memcpy_fromio
-#define memcpy_toio memcpy_toio
-#endif
-
 #include <asm/kmap.h>
 #include <asm/virtconvert.h>
-#include <asm-generic/io.h>
 
 #endif /* _M68KNOMMU_IO_H */
index 84b8333db8ad1987ae214dec025eb813d502f6c4..aac7f045f7f0aa8509cf5ef288f8030de8276bce 100644 (file)
@@ -4,6 +4,8 @@
 
 #ifdef CONFIG_MMU
 
+#define ARCH_HAS_IOREMAP_WT
+
 /* Values for nocacheflag and cmode */
 #define IOMAP_FULL_CACHING             0
 #define IOMAP_NOCACHE_SER              1
@@ -16,6 +18,7 @@
  */
 extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
                               int cacheflag);
+#define iounmap iounmap
 extern void iounmap(void __iomem *addr);
 extern void __iounmap(void *addr, unsigned long size);
 
@@ -33,31 +36,35 @@ static inline void __iomem *ioremap_nocache(unsigned long physaddr,
 }
 
 #define ioremap_uc ioremap_nocache
+#define ioremap_wt ioremap_wt
 static inline void __iomem *ioremap_wt(unsigned long physaddr,
                                       unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
 
-#define ioremap_fillcache ioremap_fullcache
+#define ioremap_fullcache ioremap_fullcache
 static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
                                              unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
 }
 
+#define memset_io memset_io
 static inline void memset_io(volatile void __iomem *addr, unsigned char val,
                             int count)
 {
        __builtin_memset((void __force *) addr, val, count);
 }
 
+#define memcpy_fromio memcpy_fromio
 static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
                                 int count)
 {
        __builtin_memcpy(dst, (void __force *) src, count);
 }
 
+#define memcpy_toio memcpy_toio
 static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
                               int count)
 {
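
The self-referential defines added here (#define memset_io memset_io and friends) are the convention asm-generic/io.h uses to detect an arch override: its fallback is only compiled when the name is not already a macro. A simplified sketch of the generic side (not a verbatim copy of include/asm-generic/io.h):

#ifndef memset_io
#define memset_io memset_io
/* Generic fallback, used only when the arch did not define its own. */
static inline void memset_io(volatile void __iomem *addr, int value,
                             size_t size)
{
        memset(__io_virt(addr), value, size);
}
#endif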
index 1605da48ebf2244556be005c1ca3b7e822eeb961..49bd3266b4b1b79ad9581f0b40d454b25228ffcd 100644 (file)
@@ -22,7 +22,6 @@ extern int (*mach_hwclk)(int, struct rtc_time*);
 extern unsigned int (*mach_get_ss)(void);
 extern int (*mach_get_rtc_pll)(struct rtc_pll_info *);
 extern int (*mach_set_rtc_pll)(struct rtc_pll_info *);
-extern int (*mach_set_clock_mmss)(unsigned long);
 extern void (*mach_reset)( void );
 extern void (*mach_halt)( void );
 extern void (*mach_power_off)( void );
index 9b840c03ebb7ab06592a23d89290abecb0baaadb..08cee11180e6998d2069b1591e78ad57288f9134 100644 (file)
@@ -57,7 +57,6 @@ struct mac_model
 #define MAC_SCSI_IIFX          5
 #define MAC_SCSI_DUO           6
 #define MAC_SCSI_LC            7
-#define MAC_SCSI_LATE          8
 
 #define MAC_IDE_NONE           0
 #define MAC_IDE_QUADRA         1
index e644c4daf540e97e30aaba6c2761364ea5f17f55..6bbe52025de3c5c68518371a7d41e23003b6ece1 100644 (file)
@@ -18,7 +18,7 @@ extern unsigned long memory_end;
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
 #define __pa(vaddr)            ((unsigned long)(vaddr))
-#define __va(paddr)            ((void *)(paddr))
+#define __va(paddr)            ((void *)((unsigned long)(paddr)))
 
 #define virt_to_pfn(kaddr)     (__pa(kaddr) >> PAGE_SHIFT)
 #define pfn_to_virt(pfn)       __va((pfn) << PAGE_SHIFT)
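
The extra (unsigned long) in __va() presumably just makes the integer-to-pointer conversion explicit, so callers handing in a differently sized integer type do not trip -Wint-to-pointer-cast (an assumption; the commit message is not shown here). A hypothetical illustration:

#include <linux/types.h>

/* Hypothetical helper: narrowing through unsigned long keeps the cast
 * well-defined and warning-free even if paddr is a wider type. */
static inline void *phys_to_virt_sketch(u64 paddr)
{
        return (void *)(unsigned long)paddr;
}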
index 463572c4943f195b57c9a69e0100a405c3b441a6..e99993c57d6b6c0a7fc8bfeac7fdffb4d25c06c8 100644 (file)
@@ -6,7 +6,7 @@
 
 #undef DEBUG
 
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
@@ -19,7 +19,7 @@
 
 #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
 
-static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                gfp_t flag, unsigned long attrs)
 {
        struct page *page, **map;
@@ -62,7 +62,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        return addr;
 }
 
-static void m68k_dma_free(struct device *dev, size_t size, void *addr,
+void arch_dma_free(struct device *dev, size_t size, void *addr,
                dma_addr_t handle, unsigned long attrs)
 {
        pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
@@ -73,8 +73,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *addr,
 
 #include <asm/cacheflush.h>
 
-static void *m68k_dma_alloc(struct device *dev, size_t size,
-               dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
 {
        void *ret;
 
@@ -89,7 +89,7 @@ static void *m68k_dma_alloc(struct device *dev, size_t size,
        return ret;
 }
 
-static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, unsigned long attrs)
 {
        free_pages((unsigned long)vaddr, get_order(size));
@@ -97,8 +97,8 @@ static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
 
 #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
 
-static void m68k_dma_sync_single_for_device(struct device *dev,
-               dma_addr_t handle, size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
+               size_t size, enum dma_data_direction dir)
 {
        switch (dir) {
        case DMA_BIDIRECTIONAL:
@@ -115,58 +115,6 @@ static void m68k_dma_sync_single_for_device(struct device *dev,
        }
 }
 
-static void m68k_dma_sync_sg_for_device(struct device *dev,
-               struct scatterlist *sglist, int nents, enum dma_data_direction dir)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nents, i) {
-               dma_sync_single_for_device(dev, sg->dma_address, sg->length,
-                                          dir);
-       }
-}
-
-static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, enum dma_data_direction dir,
-               unsigned long attrs)
-{
-       dma_addr_t handle = page_to_phys(page) + offset;
-
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_sync_single_for_device(dev, handle, size, dir);
-
-       return handle;
-}
-
-static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
-               int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sglist, sg, nents, i) {
-               sg->dma_address = sg_phys(sg);
-
-               if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
-                       continue;
-
-               dma_sync_single_for_device(dev, sg->dma_address, sg->length,
-                                          dir);
-       }
-       return nents;
-}
-
-const struct dma_map_ops m68k_dma_ops = {
-       .alloc                  = m68k_dma_alloc,
-       .free                   = m68k_dma_free,
-       .map_page               = m68k_dma_map_page,
-       .map_sg                 = m68k_dma_map_sg,
-       .sync_single_for_device = m68k_dma_sync_single_for_device,
-       .sync_sg_for_device     = m68k_dma_sync_sg_for_device,
-};
-EXPORT_SYMBOL(m68k_dma_ops);
-
 void arch_setup_pdev_archdata(struct platform_device *pdev)
 {
        if (pdev->dev.coherent_dma_mask == DMA_MASK_NONE &&
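
The hunks above retire m68k's private dma_map_ops: with the generic dma-noncoherent support, the arch only supplies arch_dma_alloc(), arch_dma_free() and arch_sync_dma_for_device(), and common code provides the map_page/map_sg plumbing that is deleted here. A minimal sketch (not the actual kernel source) of how a generic map path can use the remaining hook, mirroring the deleted m68k_dma_map_page():

#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>

static dma_addr_t sketch_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t handle = page_to_phys(page) + offset;

        /* Flush/clean CPU caches before the device sees the buffer. */
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                arch_sync_dma_for_device(dev, handle, size, dir);
        return handle;
}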
index f35e3ebd6331f846d4d523f2ea6b58f59cdb5641..5d3596c180f9f75efab10fa416199e554fba3794 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/module.h>
@@ -88,7 +89,6 @@ void (*mach_get_hardware_list) (struct seq_file *m);
 /* machine dependent timer functions */
 int (*mach_hwclk) (int, struct rtc_time*);
 EXPORT_SYMBOL(mach_hwclk);
-int (*mach_set_clock_mmss) (unsigned long);
 unsigned int (*mach_get_ss)(void);
 int (*mach_get_rtc_pll)(struct rtc_pll_info *);
 int (*mach_set_rtc_pll)(struct rtc_pll_info *);
@@ -165,6 +165,8 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
                                        be32_to_cpu(m->addr);
                                m68k_memory[m68k_num_memory].size =
                                        be32_to_cpu(m->size);
+                               memblock_add(m68k_memory[m68k_num_memory].addr,
+                                            m68k_memory[m68k_num_memory].size);
                                m68k_num_memory++;
                        } else
                                pr_warn("%s: too many memory chunks\n",
@@ -224,10 +226,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
 
 void __init setup_arch(char **cmdline_p)
 {
-#ifndef CONFIG_SUN3
-       int i;
-#endif
-
        /* The bootinfo is located right after the kernel */
        if (!CPU_IS_COLDFIRE)
                m68k_parse_bootinfo((const struct bi_record *)_end);
@@ -356,14 +354,9 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 #ifndef CONFIG_SUN3
-       for (i = 1; i < m68k_num_memory; i++)
-               free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
-                                 m68k_memory[i].size);
 #ifdef CONFIG_BLK_DEV_INITRD
        if (m68k_ramdisk.size) {
-               reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
-                                    m68k_ramdisk.addr, m68k_ramdisk.size,
-                                    BOOTMEM_DEFAULT);
+               memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
                initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
                initrd_end = initrd_start + m68k_ramdisk.size;
                pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
index a98af10182016c4cc8426c68f05573d616712833..cfd5475bfc3152aa0c47246db970d8468e4fa944 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/initrd.h>
@@ -51,7 +52,6 @@ char __initdata command_line[COMMAND_LINE_SIZE];
 
 /* machine dependent timer functions */
 void (*mach_sched_init)(irq_handler_t handler) __initdata = NULL;
-int (*mach_set_clock_mmss)(unsigned long);
 int (*mach_hwclk) (int, struct rtc_time*);
 
 /* machine dependent reboot functions */
@@ -86,8 +86,6 @@ void (*mach_power_off)(void);
 
 void __init setup_arch(char **cmdline_p)
 {
-       int bootmap_size;
-
        memory_start = PAGE_ALIGN(_ramstart);
        memory_end = _ramend;
 
@@ -142,6 +140,8 @@ void __init setup_arch(char **cmdline_p)
        pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
                 __bss_stop, memory_start, memory_start, memory_end);
 
+       memblock_add(memory_start, memory_end - memory_start);
+
        /* Keep a copy of command line */
        *cmdline_p = &command_line[0];
        memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
@@ -158,23 +158,10 @@ void __init setup_arch(char **cmdline_p)
        min_low_pfn = PFN_DOWN(memory_start);
        max_pfn = max_low_pfn = PFN_DOWN(memory_end);
 
-       bootmap_size = init_bootmem_node(
-                       NODE_DATA(0),
-                       min_low_pfn,            /* map goes here */
-                       PFN_DOWN(PAGE_OFFSET),
-                       max_pfn);
-       /*
-        * Free the usable memory, we have to make sure we do not free
-        * the bootmem bitmap so we then reserve it after freeing it :-)
-        */
-       free_bootmem(memory_start, memory_end - memory_start);
-       reserve_bootmem(memory_start, bootmap_size, BOOTMEM_DEFAULT);
-
 #if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD)
        if ((initrd_start > 0) && (initrd_start < initrd_end) &&
                        (initrd_end < memory_end))
-               reserve_bootmem(initrd_start, initrd_end - initrd_start,
-                                BOOTMEM_DEFAULT);
+               memblock_reserve(initrd_start, initrd_end - initrd_start);
 #endif /* if defined(CONFIG_BLK_DEV_INITRD) */
 
        /*
index e522307db47ccbd9d371239ea36e2c13488aec06..b02d7254b73a9fe3c68834f36839022548cd06ea 100644 (file)
@@ -57,7 +57,6 @@ static unsigned long mac_orig_videoaddr;
 /* Mac specific timer functions */
 extern u32 mac_gettimeoffset(void);
 extern int mac_hwclk(int, struct rtc_time *);
-extern int mac_set_clock_mmss(unsigned long);
 extern void iop_preinit(void);
 extern void iop_init(void);
 extern void via_init(void);
@@ -158,7 +157,6 @@ void __init config_mac(void)
        mach_get_model = mac_get_model;
        arch_gettimeoffset = mac_gettimeoffset;
        mach_hwclk = mac_hwclk;
-       mach_set_clock_mmss = mac_set_clock_mmss;
        mach_reset = mac_reset;
        mach_halt = mac_poweroff;
        mach_power_off = mac_poweroff;
@@ -709,7 +707,7 @@ static struct mac_model mac_data_table[] = {
                .name           = "PowerBook 520",
                .adb_type       = MAC_ADB_PB2,
                .via_type       = MAC_VIA_QUADRA,
-               .scsi_type      = MAC_SCSI_LATE,
+               .scsi_type      = MAC_SCSI_OLD,
                .scc_type       = MAC_SCC_QUADRA,
                .ether_type     = MAC_ETHER_SONIC,
                .floppy_type    = MAC_FLOPPY_SWIM_ADDR2,
@@ -943,18 +941,6 @@ static const struct resource mac_scsi_old_rsrc[] __initconst = {
        },
 };
 
-static const struct resource mac_scsi_late_rsrc[] __initconst = {
-       {
-               .flags = IORESOURCE_IRQ,
-               .start = IRQ_MAC_SCSI,
-               .end   = IRQ_MAC_SCSI,
-       }, {
-               .flags = IORESOURCE_MEM,
-               .start = 0x50010000,
-               .end   = 0x50011FFF,
-       },
-};
-
 static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
        {
                .flags = IORESOURCE_IRQ,
@@ -1064,11 +1050,6 @@ int __init mac_platform_init(void)
                platform_device_register_simple("mac_scsi", 0,
                        mac_scsi_old_rsrc, ARRAY_SIZE(mac_scsi_old_rsrc));
                break;
-       case MAC_SCSI_LATE:
-               /* XXX PDMA support for PowerBook 500 series needs testing */
-               platform_device_register_simple("mac_scsi", 0,
-                       mac_scsi_late_rsrc, ARRAY_SIZE(mac_scsi_late_rsrc));
-               break;
        case MAC_SCSI_LC:
                /* Addresses from Mac LC data in Designing Cards & Drivers 3ed.
                 * Also from the Developer Notes for Classic II, LC III,
index c68054361615a32eef13ab46c17c99d75793baa0..19e9d8eef1f282c7c930be14dbd1d97b64f0df87 100644 (file)
 
 #include <asm/machdep.h>
 
-/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+/*
+ * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
+ * times wrap in 2040. If we need to handle later times, the read_time functions
+ * need to be changed to interpret wrapped times as post-2040.
+ */
 
 #define RTC_OFFSET 2082844800
 
 static void (*rom_reset)(void);
 
 #ifdef CONFIG_ADB_CUDA
-static long cuda_read_time(void)
+static time64_t cuda_read_time(void)
 {
        struct adb_request req;
-       long time;
+       time64_t time;
 
        if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
                return 0;
        while (!req.complete)
                cuda_poll();
 
-       time = (req.reply[3] << 24) | (req.reply[4] << 16) |
-              (req.reply[5] << 8) | req.reply[6];
+       time = (u32)((req.reply[3] << 24) | (req.reply[4] << 16) |
+                    (req.reply[5] << 8) | req.reply[6]);
+
        return time - RTC_OFFSET;
 }
 
-static void cuda_write_time(long data)
+static void cuda_write_time(time64_t time)
 {
        struct adb_request req;
+       u32 data = lower_32_bits(time + RTC_OFFSET);
 
-       data += RTC_OFFSET;
        if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
                         (data >> 24) & 0xFF, (data >> 16) & 0xFF,
                         (data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -86,26 +91,27 @@ static void cuda_write_pram(int offset, __u8 data)
 #endif /* CONFIG_ADB_CUDA */
 
 #ifdef CONFIG_ADB_PMU68K
-static long pmu_read_time(void)
+static time64_t pmu_read_time(void)
 {
        struct adb_request req;
-       long time;
+       time64_t time;
 
        if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
                return 0;
        while (!req.complete)
                pmu_poll();
 
-       time = (req.reply[1] << 24) | (req.reply[2] << 16) |
-              (req.reply[3] << 8) | req.reply[4];
+       time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) |
+                    (req.reply[3] << 8) | req.reply[4]);
+
        return time - RTC_OFFSET;
 }
 
-static void pmu_write_time(long data)
+static void pmu_write_time(time64_t time)
 {
        struct adb_request req;
+       u32 data = lower_32_bits(time + RTC_OFFSET);
 
-       data += RTC_OFFSET;
        if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
                        (data >> 24) & 0xFF, (data >> 16) & 0xFF,
                        (data >> 8) & 0xFF, data & 0xFF) < 0)
@@ -245,11 +251,11 @@ static void via_write_pram(int offset, __u8 data)
  * is basically any machine with Mac II-style ADB.
  */
 
-static long via_read_time(void)
+static time64_t via_read_time(void)
 {
        union {
                __u8 cdata[4];
-               long idata;
+               __u32 idata;
        } result, last_result;
        int count = 1;
 
@@ -270,7 +276,7 @@ static long via_read_time(void)
                via_pram_command(0x8D, &result.cdata[0]);
 
                if (result.idata == last_result.idata)
-                       return result.idata - RTC_OFFSET;
+                       return (time64_t)result.idata - RTC_OFFSET;
 
                if (++count > 10)
                        break;
@@ -278,8 +284,8 @@ static long via_read_time(void)
                last_result.idata = result.idata;
        }
 
-       pr_err("via_read_time: failed to read a stable value; got 0x%08lx then 0x%08lx\n",
-              last_result.idata, result.idata);
+       pr_err("%s: failed to read a stable value; got 0x%08x then 0x%08x\n",
+              __func__, last_result.idata, result.idata);
 
        return 0;
 }
@@ -291,11 +297,11 @@ static long via_read_time(void)
  * is basically any machine with Mac II-style ADB.
  */
 
-static void via_write_time(long time)
+static void via_write_time(time64_t time)
 {
        union {
                __u8 cdata[4];
-               long idata;
+               __u32 idata;
        } data;
        __u8 temp;
 
@@ -304,7 +310,7 @@ static void via_write_time(long time)
        temp = 0x55;
        via_pram_command(0x35, &temp);
 
-       data.idata = time + RTC_OFFSET;
+       data.idata = lower_32_bits(time + RTC_OFFSET);
        via_pram_command(0x01, &data.cdata[3]);
        via_pram_command(0x05, &data.cdata[2]);
        via_pram_command(0x09, &data.cdata[1]);
@@ -585,12 +591,15 @@ void mac_reset(void)
  * This function translates seconds since 1970 into a proper date.
  *
  * Algorithm cribbed from glibc2.1, __offtime().
+ *
+ * This is roughly the same as rtc_time64_to_tm(), which we should probably
+ * use here, but it's only available when CONFIG_RTC_LIB is enabled.
  */
 #define SECS_PER_MINUTE (60)
 #define SECS_PER_HOUR  (SECS_PER_MINUTE * 60)
 #define SECS_PER_DAY   (SECS_PER_HOUR * 24)
 
-static void unmktime(unsigned long time, long offset,
+static void unmktime(time64_t time, long offset,
                     int *yearp, int *monp, int *dayp,
                     int *hourp, int *minp, int *secp)
 {
@@ -602,11 +611,10 @@ static void unmktime(unsigned long time, long offset,
                /* Leap years.  */
                { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
        };
-       long int days, rem, y, wday, yday;
+       int days, rem, y, wday, yday;
        const unsigned short int *ip;
 
-       days = time / SECS_PER_DAY;
-       rem = time % SECS_PER_DAY;
+       days = div_u64_rem(time, SECS_PER_DAY, &rem);
        rem += offset;
        while (rem < 0) {
                rem += SECS_PER_DAY;
@@ -657,7 +665,7 @@ static void unmktime(unsigned long time, long offset,
 
 int mac_hwclk(int op, struct rtc_time *t)
 {
-       unsigned long now;
+       time64_t now;
 
        if (!op) { /* read */
                switch (macintosh_config->adb_type) {
@@ -693,8 +701,8 @@ int mac_hwclk(int op, struct rtc_time *t)
                         __func__, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
                         t->tm_hour, t->tm_min, t->tm_sec);
 
-               now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
-                            t->tm_hour, t->tm_min, t->tm_sec);
+               now = mktime64(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
+                              t->tm_hour, t->tm_min, t->tm_sec);
 
                switch (macintosh_config->adb_type) {
                case MAC_ADB_IOP:
@@ -719,19 +727,3 @@ int mac_hwclk(int op, struct rtc_time *t)
        }
        return 0;
 }
-
-/*
- * Set minutes/seconds in the hardware clock
- */
-
-int mac_set_clock_mmss (unsigned long nowtime)
-{
-       struct rtc_time now;
-
-       mac_hwclk(0, &now);
-       now.tm_sec = nowtime % 60;
-       now.tm_min = (nowtime / 60) % 60;
-       mac_hwclk(1, &now);
-
-       return 0;
-}
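
The RTC paths above now carry time64_t end to end and truncate only at the hardware boundary with lower_32_bits(), which is what the new comment about the 2040 wrap refers to: the chip stores an unsigned 32-bit count of seconds since 1904. A small sketch of the two conversions (helper names are hypothetical):

#include <linux/kernel.h>
#include <linux/time64.h>

#define RTC_OFFSET 2082844800   /* 1904 epoch to 1970 epoch, as above */

static time64_t mac_rtc_to_time64(u32 hw_secs)
{
        /* zero-extend the hardware value, then shift epochs */
        return (time64_t)hw_secs - RTC_OFFSET;
}

static u32 time64_to_mac_rtc(time64_t t)
{
        /* wraps for times in 2040 and later */
        return lower_32_bits(t + RTC_OFFSET);
}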
index 8827b7f914025595341b7f9daad3770f095de440..38e2b272c220cbfaee5fdd4032c4461ab45c5e2e 100644 (file)
@@ -71,7 +71,6 @@ void __init m68k_setup_node(int node)
                pg_data_table[i] = pg_data_map + node;
        }
 #endif
-       pg_data_map[node].bdata = bootmem_node_data + node;
        node_set_online(node);
 }
 
index 2925d795d71a2bff61796dd651ecbfe70ca6901f..70dde040779b56feaeaa650a7c342a23750a1fce 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -153,31 +154,31 @@ int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
 
 void __init cf_bootmem_alloc(void)
 {
-       unsigned long start_pfn;
        unsigned long memstart;
 
        /* _rambase and _ramend will be naturally page aligned */
        m68k_memory[0].addr = _rambase;
        m68k_memory[0].size = _ramend - _rambase;
 
+       memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+
        /* compute total pages in system */
        num_pages = PFN_DOWN(_ramend - _rambase);
 
        /* page numbers */
        memstart = PAGE_ALIGN(_ramstart);
        min_low_pfn = PFN_DOWN(_rambase);
-       start_pfn = PFN_DOWN(memstart);
        max_pfn = max_low_pfn = PFN_DOWN(_ramend);
        high_memory = (void *)_ramend;
 
+       /* Reserve kernel text/data/bss */
+       memblock_reserve(memstart, memstart - _rambase);
+
        m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
        module_fixup(NULL, __start_fixup, __stop_fixup);
 
-       /* setup bootmem data */
+       /* setup node data */
        m68k_setup_node(0);
-       memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
-               min_low_pfn, max_low_pfn);
-       free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
 }
 
 /*
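
The hunk above follows the standard bootmem-to-memblock migration:
register all RAM with memblock, then reserve whatever is already in use
so early allocations stay clear of it. A hedged kernel-style sketch of
that pattern; the wrapper function and parameter names are illustrative
only:

/* Sketch of the memblock registration pattern used in this series. */
#include <linux/init.h>
#include <linux/memblock.h>

static void __init example_register_ram(phys_addr_t ram_base,
					phys_addr_t ram_size,
					phys_addr_t first_free)
{
	/* make the whole range known to the allocator */
	memblock_add(ram_base, ram_size);

	/* keep early allocations away from the kernel image and
	 * everything else below first_free */
	memblock_reserve(ram_base, first_free - ram_base);
}
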
index e490ecc7842c9db3e8c6ace9d6d11061ed49cad1..4e17ecb5928aae85155c9f6a5b0b0704a24b1100 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/types.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/gfp.h>
 
 #include <asm/setup.h>
@@ -208,7 +209,7 @@ void __init paging_init(void)
 {
        unsigned long zones_size[MAX_NR_ZONES] = { 0, };
        unsigned long min_addr, max_addr;
-       unsigned long addr, size, end;
+       unsigned long addr;
        int i;
 
 #ifdef DEBUG
@@ -253,34 +254,20 @@ void __init paging_init(void)
        min_low_pfn = availmem >> PAGE_SHIFT;
        max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
 
-       for (i = 0; i < m68k_num_memory; i++) {
-               addr = m68k_memory[i].addr;
-               end = addr + m68k_memory[i].size;
-               m68k_setup_node(i);
-               availmem = PAGE_ALIGN(availmem);
-               availmem += init_bootmem_node(NODE_DATA(i),
-                                             availmem >> PAGE_SHIFT,
-                                             addr >> PAGE_SHIFT,
-                                             end >> PAGE_SHIFT);
-       }
+       /* Reserve kernel text/data/bss and the memory allocated in head.S */
+       memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
 
        /*
         * Map the physical memory available into the kernel virtual
-        * address space. First initialize the bootmem allocator with
-        * the memory we already mapped, so map_node() has something
-        * to allocate.
+        * address space. Make sure memblock will not try to allocate
+        * pages beyond the memory we already mapped in head.S
         */
-       addr = m68k_memory[0].addr;
-       size = m68k_memory[0].size;
-       free_bootmem_node(NODE_DATA(0), availmem,
-                         min(m68k_init_mapped_size, size) - (availmem - addr));
-       map_node(0);
-       if (size > m68k_init_mapped_size)
-               free_bootmem_node(NODE_DATA(0), addr + m68k_init_mapped_size,
-                                 size - m68k_init_mapped_size);
-
-       for (i = 1; i < m68k_num_memory; i++)
+       memblock_set_bottom_up(true);
+
+       for (i = 0; i < m68k_num_memory; i++) {
+               m68k_setup_node(i);
                map_node(i);
+       }
 
        flush_tlb_all();
 
index f8a710fd84cd68aea7467653890f1cd14dd45cf4..adea549d240e9eb555200c7ec7db49582373e2b7 100644 (file)
@@ -40,7 +40,6 @@ static void mvme147_get_model(char *model);
 extern void mvme147_sched_init(irq_handler_t handler);
 extern u32 mvme147_gettimeoffset(void);
 extern int mvme147_hwclk (int, struct rtc_time *);
-extern int mvme147_set_clock_mmss (unsigned long);
 extern void mvme147_reset (void);
 
 
@@ -92,7 +91,6 @@ void __init config_mvme147(void)
        mach_init_IRQ           = mvme147_init_IRQ;
        arch_gettimeoffset      = mvme147_gettimeoffset;
        mach_hwclk              = mvme147_hwclk;
-       mach_set_clock_mmss     = mvme147_set_clock_mmss;
        mach_reset              = mvme147_reset;
        mach_get_model          = mvme147_get_model;
 
@@ -164,8 +162,3 @@ int mvme147_hwclk(int op, struct rtc_time *t)
        }
        return 0;
 }
-
-int mvme147_set_clock_mmss (unsigned long nowtime)
-{
-       return 0;
-}
index 4ffd9ef98de4db20cb6b151132f53b75e64e84d5..6ee36a5b528d80b9b519c8ea97775be38114704f 100644 (file)
@@ -46,7 +46,6 @@ static void mvme16x_get_model(char *model);
 extern void mvme16x_sched_init(irq_handler_t handler);
 extern u32 mvme16x_gettimeoffset(void);
 extern int mvme16x_hwclk (int, struct rtc_time *);
-extern int mvme16x_set_clock_mmss (unsigned long);
 extern void mvme16x_reset (void);
 
 int bcd2int (unsigned char b);
@@ -280,7 +279,6 @@ void __init config_mvme16x(void)
     mach_init_IRQ        = mvme16x_init_IRQ;
     arch_gettimeoffset   = mvme16x_gettimeoffset;
     mach_hwclk           = mvme16x_hwclk;
-    mach_set_clock_mmss         = mvme16x_set_clock_mmss;
     mach_reset          = mvme16x_reset;
     mach_get_model       = mvme16x_get_model;
     mach_get_hardware_list = mvme16x_get_hardware_list;
@@ -411,9 +409,3 @@ int mvme16x_hwclk(int op, struct rtc_time *t)
        }
        return 0;
 }
-
-int mvme16x_set_clock_mmss (unsigned long nowtime)
-{
-       return 0;
-}
-
index 71c0867ecf20f201a99950ad584378d2607a0383..96810d91da2bd9cf2318e0dda4b40c092c3ea7d7 100644 (file)
@@ -43,7 +43,6 @@ extern void q40_sched_init(irq_handler_t handler);
 static u32 q40_gettimeoffset(void);
 static int q40_hwclk(int, struct rtc_time *);
 static unsigned int q40_get_ss(void);
-static int q40_set_clock_mmss(unsigned long);
 static int q40_get_rtc_pll(struct rtc_pll_info *pll);
 static int q40_set_rtc_pll(struct rtc_pll_info *pll);
 
@@ -175,7 +174,6 @@ void __init config_q40(void)
        mach_get_ss = q40_get_ss;
        mach_get_rtc_pll = q40_get_rtc_pll;
        mach_set_rtc_pll = q40_set_rtc_pll;
-       mach_set_clock_mmss = q40_set_clock_mmss;
 
        mach_reset = q40_reset;
        mach_get_model = q40_get_model;
@@ -267,34 +265,6 @@ static unsigned int q40_get_ss(void)
        return bcd2bin(Q40_RTC_SECS);
 }
 
-/*
- * Set the minutes and seconds from seconds value 'nowtime'.  Fail if
- * clock is out by > 30 minutes.  Logic lifted from atari code.
- */
-
-static int q40_set_clock_mmss(unsigned long nowtime)
-{
-       int retval = 0;
-       short real_seconds = nowtime % 60, real_minutes = (nowtime / 60) % 60;
-
-       int rtc_minutes;
-
-       rtc_minutes = bcd2bin(Q40_RTC_MINS);
-
-       if ((rtc_minutes < real_minutes ?
-            real_minutes - rtc_minutes :
-            rtc_minutes - real_minutes) < 30) {
-               Q40_RTC_CTRL |= Q40_RTC_WRITE;
-               Q40_RTC_MINS = bin2bcd(real_minutes);
-               Q40_RTC_SECS = bin2bcd(real_seconds);
-               Q40_RTC_CTRL &= ~(Q40_RTC_WRITE);
-       } else
-               retval = -1;
-
-       return retval;
-}
-
-
 /* get and set PLL calibration of RTC clock */
 #define Q40_RTC_PLL_MASK ((1<<5)-1)
 #define Q40_RTC_PLL_SIGN (1<<5)
index 1d28d380e8cc10379b5f155adb1a91194145150a..79a2bb85790615c308ecea9b2c3fdf9807054cbb 100644 (file)
@@ -123,10 +123,6 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start,
        availmem = memory_start;
 
        m68k_setup_node(0);
-       availmem += init_bootmem(start_page, num_pages);
-       availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
-
-       free_bootmem(__pa(availmem), memory_end - (availmem));
 }
 
 
index 9ecad05bfc7343ade8e5164d8fb0194fb494671f..dfb6a79ba7ff7ffd99a23bb2e88b668e1c39d118 100644 (file)
@@ -27,7 +27,6 @@ config OPENRISC
        select GENERIC_STRNLEN_USER
        select GENERIC_SMP_IDLE_THREAD
        select MODULES_USE_ELF_RELA
-       select MULTI_IRQ_HANDLER
        select HAVE_DEBUG_STACKOVERFLOW
        select OR1K_PIC
        select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1
@@ -36,6 +35,7 @@ config OPENRISC
        select ARCH_USE_QUEUED_RWLOCKS
        select OMPIC if SMP
        select ARCH_WANT_FRAME_POINTERS
+       select GENERIC_IRQ_MULTI_HANDLER
 
 config CPU_BIG_ENDIAN
        def_bool y
@@ -69,9 +69,6 @@ config STACKTRACE_SUPPORT
 config LOCKDEP_SUPPORT
        def_bool  y
 
-config MULTI_IRQ_HANDLER
-       def_bool y
-
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
index d9eee0a2b7b4a866c760b5dbcb0a66a7d82c4d3f..eb612b1865d24dd6431063f3bb7d9bc422903040 100644 (file)
@@ -24,6 +24,4 @@
 
 #define NO_IRQ         (-1)
 
-extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
-
 #endif /* __ASM_OPENRISC_IRQ_H__ */
index 35e478a93116802991b5242c1c497097399a222e..5f9445effaf8d9d819dcca1060f459db93e3e1e0 100644 (file)
@@ -41,13 +41,6 @@ void __init init_IRQ(void)
        irqchip_init();
 }
 
-static void (*handle_arch_irq)(struct pt_regs *);
-
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
-       handle_arch_irq = handle_irq;
-}
-
 void __irq_entry do_IRQ(struct pt_regs *regs)
 {
        handle_arch_irq(regs);
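
With the arch-private registration gone, OpenRISC relies on the generic
GENERIC_IRQ_MULTI_HANDLER code, which provides both handle_arch_irq and
set_handle_irq(). A sketch of how an irqchip driver hooks in under that
framework; the my_pic_* names are made up:

/* Sketch of registering a root IRQ handler through the generic
 * framework selected above. */
#include <linux/init.h>
#include <linux/irq.h>

static void my_pic_handle_irq(struct pt_regs *regs)
{
	/* read the pending register and dispatch each source here */
}

static int __init my_pic_init(void)
{
	/* the generic code stores this in handle_arch_irq for do_IRQ() */
	return set_handle_irq(my_pic_handle_irq);
}
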
index 17526bebcbd277765c791b30e04e0096052d78cb..e7705dde953fc72d943154d456edc81b81fab0d4 100644 (file)
@@ -11,7 +11,6 @@ config PARISC
        select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_STRICT_KERNEL_RWX
        select ARCH_HAS_UBSAN_SANITIZE_ALL
-       select ARCH_WANTS_UBSAN_NO_NULL
        select ARCH_SUPPORTS_MEMORY_FAILURE
        select RTC_CLASS
        select RTC_DRV_GENERIC
@@ -195,7 +194,7 @@ config PREFETCH
 
 config MLONGCALLS
        bool "Enable the -mlong-calls compiler option for big kernels"
-       def_bool y if (!MODULES)
+       default y
        depends on PA8X00
        help
          If you configure the kernel to include many drivers built-in instead
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..dbaaca8
--- /dev/null
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* The synchronize caches instruction executes as a nop on systems in
+   which all memory references are performed in order. */
+#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
+
+#if defined(CONFIG_SMP)
+#define mb()           do { synchronize_caches(); } while (0)
+#define rmb()          mb()
+#define wmb()          mb()
+#define dma_rmb()      mb()
+#define dma_wmb()      mb()
+#else
+#define mb()           barrier()
+#define rmb()          barrier()
+#define wmb()          barrier()
+#define dma_rmb()      barrier()
+#define dma_wmb()      barrier()
+#endif
+
+#define __smp_mb()     mb()
+#define __smp_rmb()    mb()
+#define __smp_wmb()    mb()
+
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
index e95207c0565eb12308e12d48c45bd5309ae6e2ae..1b4732e201374adc2721ab7141e5ed9b5bf9bd69 100644 (file)
        /* Release pa_tlb_lock lock without reloading lock address. */
        .macro          tlb_unlock0     spc,tmp
 #ifdef CONFIG_SMP
+       or,COND(=)      %r0,\spc,%r0
+       sync
        or,COND(=)      %r0,\spc,%r0
        stw             \spc,0(\tmp)
 #endif
index 22e6374ece4417e76fa91ed196973f6df2356f48..97451e67d35bf8d5c124af7ad88e3546bb33190d 100644 (file)
@@ -353,6 +353,7 @@ ENDPROC_CFI(flush_data_cache_local)
        .macro  tlb_unlock      la,flags,tmp
 #ifdef CONFIG_SMP
        ldi             1,\tmp
+       sync
        stw             \tmp,0(\la)
        mtsm            \flags
 #endif
index e775f80ae28c5ab8d7fac7456235fa4f415c7215..4886a6db42e98f2b1fc4c58916ee68dfc98df008 100644 (file)
@@ -633,6 +633,7 @@ cas_action:
        sub,<>  %r28, %r25, %r0
 2:     stw,ma  %r24, 0(%r26)
        /* Free lock */
+       sync
        stw,ma  %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        /* Clear thread register indicator */
@@ -647,6 +648,7 @@ cas_action:
 3:             
        /* Error occurred on load or store */
        /* Free lock */
+       sync
        stw     %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        stw     %r0, 4(%sr2,%r20)
@@ -848,6 +850,7 @@ cas2_action:
 
 cas2_end:
        /* Free lock */
+       sync
        stw,ma  %r20, 0(%sr2,%r20)
        /* Enable interrupts */
        ssm     PSW_SM_I, %r0
@@ -858,6 +861,7 @@ cas2_end:
 22:
        /* Error occurred on load or store */
        /* Free lock */
+       sync
        stw     %r20, 0(%sr2,%r20)
        ssm     PSW_SM_I, %r0
        ldo     1(%r0),%r28
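
The recurring pattern in these parisc hunks is a sync inserted before
the store that releases a lock word, so all earlier memory accesses
become globally visible before the lock appears free. In portable terms
that is a store with release ordering; a minimal C11 analogue with a
hypothetical lock variable:

/* C11 analogue of the 'sync' + 'stw' sequence added above. */
#include <stdatomic.h>

static atomic_uint lock_word;

static void unlock_with_release(void)
{
	/* 'sync' then 'stw 1,lock' ~= a single release store */
	atomic_store_explicit(&lock_word, 1, memory_order_release);
}

int main(void)
{
	unlock_with_release();
	return 0;
}
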
index 8a1863d9ed53385cd5e72813e962bb4310c38abc..4fe5b2affa23306d5d7c07086e05d5baab293d62 100644 (file)
@@ -106,7 +106,6 @@ config S390
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_WANTS_DYNAMIC_TASK_STRUCT
-       select ARCH_WANTS_UBSAN_NO_NULL
        select ARCH_WANT_IPC_PARSE_VERSION
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS2
index e98522ea6f09ee2fb52c2d69f333e6d4916940e5..1458b1700fc7e4c580ea963920d199fd7a4fb625 100644 (file)
@@ -34,74 +34,13 @@ static void setup_boot_services##bits(struct efi_config *c)         \
                                                                        \
        table = (typeof(table))sys_table;                               \
                                                                        \
-       c->runtime_services = table->runtime;                           \
-       c->boot_services = table->boottime;                             \
-       c->text_output = table->con_out;                                \
+       c->runtime_services     = table->runtime;                       \
+       c->boot_services        = table->boottime;                      \
+       c->text_output          = table->con_out;                       \
 }
 BOOT_SERVICES(32);
 BOOT_SERVICES(64);
 
-static inline efi_status_t __open_volume32(void *__image, void **__fh)
-{
-       efi_file_io_interface_t *io;
-       efi_loaded_image_32_t *image = __image;
-       efi_file_handle_32_t *fh;
-       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
-       efi_status_t status;
-       void *handle = (void *)(unsigned long)image->device_handle;
-       unsigned long func;
-
-       status = efi_call_early(handle_protocol, handle,
-                               &fs_proto, (void **)&io);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to handle fs_proto\n");
-               return status;
-       }
-
-       func = (unsigned long)io->open_volume;
-       status = efi_early->call(func, io, &fh);
-       if (status != EFI_SUCCESS)
-               efi_printk(sys_table, "Failed to open volume\n");
-
-       *__fh = fh;
-       return status;
-}
-
-static inline efi_status_t __open_volume64(void *__image, void **__fh)
-{
-       efi_file_io_interface_t *io;
-       efi_loaded_image_64_t *image = __image;
-       efi_file_handle_64_t *fh;
-       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
-       efi_status_t status;
-       void *handle = (void *)(unsigned long)image->device_handle;
-       unsigned long func;
-
-       status = efi_call_early(handle_protocol, handle,
-                               &fs_proto, (void **)&io);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to handle fs_proto\n");
-               return status;
-       }
-
-       func = (unsigned long)io->open_volume;
-       status = efi_early->call(func, io, &fh);
-       if (status != EFI_SUCCESS)
-               efi_printk(sys_table, "Failed to open volume\n");
-
-       *__fh = fh;
-       return status;
-}
-
-efi_status_t
-efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh)
-{
-       if (efi_early->is64)
-               return __open_volume64(__image, __fh);
-
-       return __open_volume32(__image, __fh);
-}
-
 void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
 {
        efi_call_proto(efi_simple_text_output_protocol, output_string,
@@ -109,7 +48,7 @@ void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str)
 }
 
 static efi_status_t
-__setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
+preserve_pci_rom_image(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
 {
        struct pci_setup_rom *rom = NULL;
        efi_status_t status;
@@ -134,16 +73,16 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
 
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc mem for rom\n");
+               efi_printk(sys_table, "Failed to allocate memory for 'rom'\n");
                return status;
        }
 
        memset(rom, 0, sizeof(*rom));
 
-       rom->data.type = SETUP_PCI;
-       rom->data.len = size - sizeof(struct setup_data);
-       rom->data.next = 0;
-       rom->pcilen = pci->romsize;
+       rom->data.type  = SETUP_PCI;
+       rom->data.len   = size - sizeof(struct setup_data);
+       rom->data.next  = 0;
+       rom->pcilen     = pci->romsize;
        *__rom = rom;
 
        status = efi_call_proto(efi_pci_io_protocol, pci.read, pci,
@@ -179,96 +118,6 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
        return status;
 }
 
-static void
-setup_efi_pci32(struct boot_params *params, void **pci_handle,
-               unsigned long size)
-{
-       efi_pci_io_protocol_t *pci = NULL;
-       efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
-       u32 *handles = (u32 *)(unsigned long)pci_handle;
-       efi_status_t status;
-       unsigned long nr_pci;
-       struct setup_data *data;
-       int i;
-
-       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
-       while (data && data->next)
-               data = (struct setup_data *)(unsigned long)data->next;
-
-       nr_pci = size / sizeof(u32);
-       for (i = 0; i < nr_pci; i++) {
-               struct pci_setup_rom *rom = NULL;
-               u32 h = handles[i];
-
-               status = efi_call_early(handle_protocol, h,
-                                       &pci_proto, (void **)&pci);
-
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               if (!pci)
-                       continue;
-
-               status = __setup_efi_pci(pci, &rom);
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               if (data)
-                       data->next = (unsigned long)rom;
-               else
-                       params->hdr.setup_data = (unsigned long)rom;
-
-               data = (struct setup_data *)rom;
-
-       }
-}
-
-static void
-setup_efi_pci64(struct boot_params *params, void **pci_handle,
-               unsigned long size)
-{
-       efi_pci_io_protocol_t *pci = NULL;
-       efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
-       u64 *handles = (u64 *)(unsigned long)pci_handle;
-       efi_status_t status;
-       unsigned long nr_pci;
-       struct setup_data *data;
-       int i;
-
-       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
-       while (data && data->next)
-               data = (struct setup_data *)(unsigned long)data->next;
-
-       nr_pci = size / sizeof(u64);
-       for (i = 0; i < nr_pci; i++) {
-               struct pci_setup_rom *rom = NULL;
-               u64 h = handles[i];
-
-               status = efi_call_early(handle_protocol, h,
-                                       &pci_proto, (void **)&pci);
-
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               if (!pci)
-                       continue;
-
-               status = __setup_efi_pci(pci, &rom);
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               if (data)
-                       data->next = (unsigned long)rom;
-               else
-                       params->hdr.setup_data = (unsigned long)rom;
-
-               data = (struct setup_data *)rom;
-
-       }
-}
-
 /*
  * There's no way to return an informative status from this function,
  * because any analysis (and printing of error messages) needs to be
@@ -284,6 +133,9 @@ static void setup_efi_pci(struct boot_params *params)
        void **pci_handle = NULL;
        efi_guid_t pci_proto = EFI_PCI_IO_PROTOCOL_GUID;
        unsigned long size = 0;
+       unsigned long nr_pci;
+       struct setup_data *data;
+       int i;
 
        status = efi_call_early(locate_handle,
                                EFI_LOCATE_BY_PROTOCOL,
@@ -295,7 +147,7 @@ static void setup_efi_pci(struct boot_params *params)
                                        size, (void **)&pci_handle);
 
                if (status != EFI_SUCCESS) {
-                       efi_printk(sys_table, "Failed to alloc mem for pci_handle\n");
+                       efi_printk(sys_table, "Failed to allocate memory for 'pci_handle'\n");
                        return;
                }
 
@@ -307,10 +159,34 @@ static void setup_efi_pci(struct boot_params *params)
        if (status != EFI_SUCCESS)
                goto free_handle;
 
-       if (efi_early->is64)
-               setup_efi_pci64(params, pci_handle, size);
-       else
-               setup_efi_pci32(params, pci_handle, size);
+       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+       while (data && data->next)
+               data = (struct setup_data *)(unsigned long)data->next;
+
+       nr_pci = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
+       for (i = 0; i < nr_pci; i++) {
+               efi_pci_io_protocol_t *pci = NULL;
+               struct pci_setup_rom *rom;
+
+               status = efi_call_early(handle_protocol,
+                                       efi_is_64bit() ? ((u64 *)pci_handle)[i]
+                                                      : ((u32 *)pci_handle)[i],
+                                       &pci_proto, (void **)&pci);
+               if (status != EFI_SUCCESS || !pci)
+                       continue;
+
+               status = preserve_pci_rom_image(pci, &rom);
+               if (status != EFI_SUCCESS)
+                       continue;
+
+               if (data)
+                       data->next = (unsigned long)rom;
+               else
+                       params->hdr.setup_data = (unsigned long)rom;
+
+               data = (struct setup_data *)rom;
+       }
 
 free_handle:
        efi_call_early(free_pool, pci_handle);
@@ -341,8 +217,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
                status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
                                        size + sizeof(struct setup_data), &new);
                if (status != EFI_SUCCESS) {
-                       efi_printk(sys_table,
-                                       "Failed to alloc mem for properties\n");
+                       efi_printk(sys_table, "Failed to allocate memory for 'properties'\n");
                        return;
                }
 
@@ -358,9 +233,9 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
        new->next = 0;
 
        data = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
-       if (!data)
+       if (!data) {
                boot_params->hdr.setup_data = (unsigned long)new;
-       else {
+       } else {
                while (data->next)
                        data = (struct setup_data *)(unsigned long)data->next;
                data->next = (unsigned long)new;
@@ -380,81 +255,55 @@ static void setup_quirks(struct boot_params *boot_params)
        }
 }
 
+/*
+ * See if we have Universal Graphics Adapter (UGA) protocol
+ */
 static efi_status_t
-setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
+setup_uga(struct screen_info *si, efi_guid_t *uga_proto, unsigned long size)
 {
-       struct efi_uga_draw_protocol *uga = NULL, *first_uga;
-       efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
+       efi_status_t status;
+       u32 width, height;
+       void **uga_handle = NULL;
+       efi_uga_draw_protocol_t *uga = NULL, *first_uga;
        unsigned long nr_ugas;
-       u32 *handles = (u32 *)uga_handle;
-       efi_status_t status = EFI_INVALID_PARAMETER;
        int i;
 
-       first_uga = NULL;
-       nr_ugas = size / sizeof(u32);
-       for (i = 0; i < nr_ugas; i++) {
-               efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
-               u32 w, h, depth, refresh;
-               void *pciio;
-               u32 handle = handles[i];
-
-               status = efi_call_early(handle_protocol, handle,
-                                       &uga_proto, (void **)&uga);
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
-
-               status = efi_early->call((unsigned long)uga->get_mode, uga,
-                                        &w, &h, &depth, &refresh);
-               if (status == EFI_SUCCESS && (!first_uga || pciio)) {
-                       *width = w;
-                       *height = h;
-
-                       /*
-                        * Once we've found a UGA supporting PCIIO,
-                        * don't bother looking any further.
-                        */
-                       if (pciio)
-                               break;
-
-                       first_uga = uga;
-               }
-       }
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               size, (void **)&uga_handle);
+       if (status != EFI_SUCCESS)
+               return status;
 
-       return status;
-}
+       status = efi_call_early(locate_handle,
+                               EFI_LOCATE_BY_PROTOCOL,
+                               uga_proto, NULL, &size, uga_handle);
+       if (status != EFI_SUCCESS)
+               goto free_handle;
 
-static efi_status_t
-setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
-{
-       struct efi_uga_draw_protocol *uga = NULL, *first_uga;
-       efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID;
-       unsigned long nr_ugas;
-       u64 *handles = (u64 *)uga_handle;
-       efi_status_t status = EFI_INVALID_PARAMETER;
-       int i;
+       height = 0;
+       width = 0;
 
        first_uga = NULL;
-       nr_ugas = size / sizeof(u64);
+       nr_ugas = size / (efi_is_64bit() ? sizeof(u64) : sizeof(u32));
        for (i = 0; i < nr_ugas; i++) {
                efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID;
                u32 w, h, depth, refresh;
                void *pciio;
-               u64 handle = handles[i];
+               unsigned long handle = efi_is_64bit() ? ((u64 *)uga_handle)[i]
+                                                     : ((u32 *)uga_handle)[i];
 
                status = efi_call_early(handle_protocol, handle,
-                                       &uga_proto, (void **)&uga);
+                                       uga_proto, (void **)&uga);
                if (status != EFI_SUCCESS)
                        continue;
 
+               pciio = NULL;
                efi_call_early(handle_protocol, handle, &pciio_proto, &pciio);
 
-               status = efi_early->call((unsigned long)uga->get_mode, uga,
-                                        &w, &h, &depth, &refresh);
+               status = efi_call_proto(efi_uga_draw_protocol, get_mode, uga,
+                                       &w, &h, &depth, &refresh);
                if (status == EFI_SUCCESS && (!first_uga || pciio)) {
-                       *width = w;
-                       *height = h;
+                       width = w;
+                       height = h;
 
                        /*
                         * Once we've found a UGA supporting PCIIO,
@@ -467,59 +316,28 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height)
                }
        }
 
-       return status;
-}
-
-/*
- * See if we have Universal Graphics Adapter (UGA) protocol
- */
-static efi_status_t setup_uga(struct screen_info *si, efi_guid_t *uga_proto,
-                             unsigned long size)
-{
-       efi_status_t status;
-       u32 width, height;
-       void **uga_handle = NULL;
-
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               size, (void **)&uga_handle);
-       if (status != EFI_SUCCESS)
-               return status;
-
-       status = efi_call_early(locate_handle,
-                               EFI_LOCATE_BY_PROTOCOL,
-                               uga_proto, NULL, &size, uga_handle);
-       if (status != EFI_SUCCESS)
-               goto free_handle;
-
-       height = 0;
-       width = 0;
-
-       if (efi_early->is64)
-               status = setup_uga64(uga_handle, size, &width, &height);
-       else
-               status = setup_uga32(uga_handle, size, &width, &height);
-
        if (!width && !height)
                goto free_handle;
 
        /* EFI framebuffer */
-       si->orig_video_isVGA = VIDEO_TYPE_EFI;
+       si->orig_video_isVGA    = VIDEO_TYPE_EFI;
 
-       si->lfb_depth = 32;
-       si->lfb_width = width;
-       si->lfb_height = height;
+       si->lfb_depth           = 32;
+       si->lfb_width           = width;
+       si->lfb_height          = height;
 
-       si->red_size = 8;
-       si->red_pos = 16;
-       si->green_size = 8;
-       si->green_pos = 8;
-       si->blue_size = 8;
-       si->blue_pos = 0;
-       si->rsvd_size = 8;
-       si->rsvd_pos = 24;
+       si->red_size            = 8;
+       si->red_pos             = 16;
+       si->green_size          = 8;
+       si->green_pos           = 8;
+       si->blue_size           = 8;
+       si->blue_pos            = 0;
+       si->rsvd_size           = 8;
+       si->rsvd_pos            = 24;
 
 free_handle:
        efi_call_early(free_pool, uga_handle);
+
        return status;
 }
 
@@ -586,7 +404,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
        if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
                return NULL;
 
-       if (efi_early->is64)
+       if (efi_is_64bit())
                setup_boot_services64(efi_early);
        else
                setup_boot_services32(efi_early);
@@ -601,7 +419,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
        status = efi_low_alloc(sys_table, 0x4000, 1,
                               (unsigned long *)&boot_params);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc lowmem for boot params\n");
+               efi_printk(sys_table, "Failed to allocate lowmem for boot params\n");
                return NULL;
        }
 
@@ -617,9 +435,9 @@ struct boot_params *make_boot_params(struct efi_config *c)
         * Fill out some of the header fields ourselves because the
         * EFI firmware loader doesn't load the first sector.
         */
-       hdr->root_flags = 1;
-       hdr->vid_mode = 0xffff;
-       hdr->boot_flag = 0xAA55;
+       hdr->root_flags = 1;
+       hdr->vid_mode   = 0xffff;
+       hdr->boot_flag  = 0xAA55;
 
        hdr->type_of_loader = 0x21;
 
@@ -627,6 +445,7 @@ struct boot_params *make_boot_params(struct efi_config *c)
        cmdline_ptr = efi_convert_cmdline(sys_table, image, &options_size);
        if (!cmdline_ptr)
                goto fail;
+
        hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
        /* Fill in upper bits of command line address, NOP on 32 bit  */
        boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
@@ -663,10 +482,12 @@ struct boot_params *make_boot_params(struct efi_config *c)
        boot_params->ext_ramdisk_size  = (u64)ramdisk_size >> 32;
 
        return boot_params;
+
 fail2:
        efi_free(sys_table, options_size, hdr->cmd_line_ptr);
 fail:
        efi_free(sys_table, 0x4000, (unsigned long)boot_params);
+
        return NULL;
 }
 
@@ -678,7 +499,7 @@ static void add_e820ext(struct boot_params *params,
        unsigned long size;
 
        e820ext->type = SETUP_E820_EXT;
-       e820ext->len = nr_entries * sizeof(struct boot_e820_entry);
+       e820ext->len  = nr_entries * sizeof(struct boot_e820_entry);
        e820ext->next = 0;
 
        data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
@@ -692,8 +513,8 @@ static void add_e820ext(struct boot_params *params,
                params->hdr.setup_data = (unsigned long)e820ext;
 }
 
-static efi_status_t setup_e820(struct boot_params *params,
-                              struct setup_data *e820ext, u32 e820ext_size)
+static efi_status_t
+setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_size)
 {
        struct boot_e820_entry *entry = params->e820_table;
        struct efi_info *efi = &params->efi_info;
@@ -814,11 +635,10 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
 }
 
 struct exit_boot_struct {
-       struct boot_params *boot_params;
-       struct efi_info *efi;
-       struct setup_data *e820ext;
-       __u32 e820ext_size;
-       bool is64;
+       struct boot_params      *boot_params;
+       struct efi_info         *efi;
+       struct setup_data       *e820ext;
+       __u32                   e820ext_size;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -845,25 +665,25 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
                first = false;
        }
 
-       signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+       signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
+                                  : EFI32_LOADER_SIGNATURE;
        memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
 
-       p->efi->efi_systab = (unsigned long)sys_table_arg;
-       p->efi->efi_memdesc_size = *map->desc_size;
-       p->efi->efi_memdesc_version = *map->desc_ver;
-       p->efi->efi_memmap = (unsigned long)*map->map;
-       p->efi->efi_memmap_size = *map->map_size;
+       p->efi->efi_systab              = (unsigned long)sys_table_arg;
+       p->efi->efi_memdesc_size        = *map->desc_size;
+       p->efi->efi_memdesc_version     = *map->desc_ver;
+       p->efi->efi_memmap              = (unsigned long)*map->map;
+       p->efi->efi_memmap_size         = *map->map_size;
 
 #ifdef CONFIG_X86_64
-       p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
-       p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
+       p->efi->efi_systab_hi           = (unsigned long)sys_table_arg >> 32;
+       p->efi->efi_memmap_hi           = (unsigned long)*map->map >> 32;
 #endif
 
        return EFI_SUCCESS;
 }
 
-static efi_status_t exit_boot(struct boot_params *boot_params,
-                             void *handle, bool is64)
+static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
 {
        unsigned long map_sz, key, desc_size, buff_size;
        efi_memory_desc_t *mem_map;
@@ -874,17 +694,16 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
        struct efi_boot_memmap map;
        struct exit_boot_struct priv;
 
-       map.map =               &mem_map;
-       map.map_size =          &map_sz;
-       map.desc_size =         &desc_size;
-       map.desc_ver =          &desc_version;
-       map.key_ptr =           &key;
-       map.buff_size =         &buff_size;
-       priv.boot_params =      boot_params;
-       priv.efi =              &boot_params->efi_info;
-       priv.e820ext =          NULL;
-       priv.e820ext_size =     0;
-       priv.is64 =             is64;
+       map.map                 = &mem_map;
+       map.map_size            = &map_sz;
+       map.desc_size           = &desc_size;
+       map.desc_ver            = &desc_version;
+       map.key_ptr             = &key;
+       map.buff_size           = &buff_size;
+       priv.boot_params        = boot_params;
+       priv.efi                = &boot_params->efi_info;
+       priv.e820ext            = NULL;
+       priv.e820ext_size       = 0;
 
        /* Might as well exit boot services now */
        status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -892,10 +711,11 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
        if (status != EFI_SUCCESS)
                return status;
 
-       e820ext = priv.e820ext;
-       e820ext_size = priv.e820ext_size;
+       e820ext                 = priv.e820ext;
+       e820ext_size            = priv.e820ext_size;
+
        /* Historic? */
-       boot_params->alt_mem_k = 32 * 1024;
+       boot_params->alt_mem_k  = 32 * 1024;
 
        status = setup_e820(boot_params, e820ext, e820ext_size);
        if (status != EFI_SUCCESS)
@@ -908,8 +728,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params,
  * On success we return a pointer to a boot_params structure, and NULL
  * on failure.
  */
-struct boot_params *efi_main(struct efi_config *c,
-                            struct boot_params *boot_params)
+struct boot_params *
+efi_main(struct efi_config *c, struct boot_params *boot_params)
 {
        struct desc_ptr *gdt = NULL;
        efi_loaded_image_t *image;
@@ -918,13 +738,11 @@ struct boot_params *efi_main(struct efi_config *c,
        struct desc_struct *desc;
        void *handle;
        efi_system_table_t *_table;
-       bool is64;
 
        efi_early = c;
 
        _table = (efi_system_table_t *)(unsigned long)efi_early->table;
        handle = (void *)(unsigned long)efi_early->image_handle;
-       is64 = efi_early->is64;
 
        sys_table = _table;
 
@@ -932,7 +750,7 @@ struct boot_params *efi_main(struct efi_config *c,
        if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
                goto fail;
 
-       if (is64)
+       if (efi_is_64bit())
                setup_boot_services64(efi_early);
        else
                setup_boot_services32(efi_early);
@@ -957,7 +775,7 @@ struct boot_params *efi_main(struct efi_config *c,
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
                                sizeof(*gdt), (void **)&gdt);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc mem for gdt structure\n");
+               efi_printk(sys_table, "Failed to allocate memory for 'gdt' structure\n");
                goto fail;
        }
 
@@ -965,7 +783,7 @@ struct boot_params *efi_main(struct efi_config *c,
        status = efi_low_alloc(sys_table, gdt->size, 8,
                           (unsigned long *)&gdt->address);
        if (status != EFI_SUCCESS) {
-               efi_printk(sys_table, "Failed to alloc mem for gdt\n");
+               efi_printk(sys_table, "Failed to allocate memory for 'gdt'\n");
                goto fail;
        }
 
@@ -988,7 +806,7 @@ struct boot_params *efi_main(struct efi_config *c,
                hdr->code32_start = bzimage_addr;
        }
 
-       status = exit_boot(boot_params, handle, is64);
+       status = exit_boot(boot_params, handle);
        if (status != EFI_SUCCESS) {
                efi_printk(sys_table, "exit_boot() failed!\n");
                goto fail;
@@ -1002,19 +820,20 @@ struct boot_params *efi_main(struct efi_config *c,
 
        if (IS_ENABLED(CONFIG_X86_64)) {
                /* __KERNEL32_CS */
-               desc->limit0 = 0xffff;
-               desc->base0 = 0x0000;
-               desc->base1 = 0x0000;
-               desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
-               desc->s = DESC_TYPE_CODE_DATA;
-               desc->dpl = 0;
-               desc->p = 1;
-               desc->limit1 = 0xf;
-               desc->avl = 0;
-               desc->l = 0;
-               desc->d = SEG_OP_SIZE_32BIT;
-               desc->g = SEG_GRANULARITY_4KB;
-               desc->base2 = 0x00;
+               desc->limit0    = 0xffff;
+               desc->base0     = 0x0000;
+               desc->base1     = 0x0000;
+               desc->type      = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+               desc->s         = DESC_TYPE_CODE_DATA;
+               desc->dpl       = 0;
+               desc->p         = 1;
+               desc->limit1    = 0xf;
+               desc->avl       = 0;
+               desc->l         = 0;
+               desc->d         = SEG_OP_SIZE_32BIT;
+               desc->g         = SEG_GRANULARITY_4KB;
+               desc->base2     = 0x00;
+
                desc++;
        } else {
                /* Second entry is unused on 32-bit */
@@ -1022,15 +841,16 @@ struct boot_params *efi_main(struct efi_config *c,
        }
 
        /* __KERNEL_CS */
-       desc->limit0 = 0xffff;
-       desc->base0 = 0x0000;
-       desc->base1 = 0x0000;
-       desc->type = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
-       desc->s = DESC_TYPE_CODE_DATA;
-       desc->dpl = 0;
-       desc->p = 1;
-       desc->limit1 = 0xf;
-       desc->avl = 0;
+       desc->limit0    = 0xffff;
+       desc->base0     = 0x0000;
+       desc->base1     = 0x0000;
+       desc->type      = SEG_TYPE_CODE | SEG_TYPE_EXEC_READ;
+       desc->s         = DESC_TYPE_CODE_DATA;
+       desc->dpl       = 0;
+       desc->p         = 1;
+       desc->limit1    = 0xf;
+       desc->avl       = 0;
+
        if (IS_ENABLED(CONFIG_X86_64)) {
                desc->l = 1;
                desc->d = 0;
@@ -1038,41 +858,41 @@ struct boot_params *efi_main(struct efi_config *c,
                desc->l = 0;
                desc->d = SEG_OP_SIZE_32BIT;
        }
-       desc->g = SEG_GRANULARITY_4KB;
-       desc->base2 = 0x00;
+       desc->g         = SEG_GRANULARITY_4KB;
+       desc->base2     = 0x00;
        desc++;
 
        /* __KERNEL_DS */
-       desc->limit0 = 0xffff;
-       desc->base0 = 0x0000;
-       desc->base1 = 0x0000;
-       desc->type = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
-       desc->s = DESC_TYPE_CODE_DATA;
-       desc->dpl = 0;
-       desc->p = 1;
-       desc->limit1 = 0xf;
-       desc->avl = 0;
-       desc->l = 0;
-       desc->d = SEG_OP_SIZE_32BIT;
-       desc->g = SEG_GRANULARITY_4KB;
-       desc->base2 = 0x00;
+       desc->limit0    = 0xffff;
+       desc->base0     = 0x0000;
+       desc->base1     = 0x0000;
+       desc->type      = SEG_TYPE_DATA | SEG_TYPE_READ_WRITE;
+       desc->s         = DESC_TYPE_CODE_DATA;
+       desc->dpl       = 0;
+       desc->p         = 1;
+       desc->limit1    = 0xf;
+       desc->avl       = 0;
+       desc->l         = 0;
+       desc->d         = SEG_OP_SIZE_32BIT;
+       desc->g         = SEG_GRANULARITY_4KB;
+       desc->base2     = 0x00;
        desc++;
 
        if (IS_ENABLED(CONFIG_X86_64)) {
                /* Task segment value */
-               desc->limit0 = 0x0000;
-               desc->base0 = 0x0000;
-               desc->base1 = 0x0000;
-               desc->type = SEG_TYPE_TSS;
-               desc->s = 0;
-               desc->dpl = 0;
-               desc->p = 1;
-               desc->limit1 = 0x0;
-               desc->avl = 0;
-               desc->l = 0;
-               desc->d = 0;
-               desc->g = SEG_GRANULARITY_4KB;
-               desc->base2 = 0x00;
+               desc->limit0    = 0x0000;
+               desc->base0     = 0x0000;
+               desc->base1     = 0x0000;
+               desc->type      = SEG_TYPE_TSS;
+               desc->s         = 0;
+               desc->dpl       = 0;
+               desc->p         = 1;
+               desc->limit1    = 0x0;
+               desc->avl       = 0;
+               desc->l         = 0;
+               desc->d         = 0;
+               desc->g         = SEG_GRANULARITY_4KB;
+               desc->base2     = 0x00;
                desc++;
        }
 
@@ -1082,5 +902,6 @@ struct boot_params *efi_main(struct efi_config *c,
        return boot_params;
 fail:
        efi_printk(sys_table, "efi_main() failed!\n");
+
        return NULL;
 }
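
The eboot.c rework above repeatedly folds duplicated 32-bit and 64-bit
functions into one body that indexes the firmware-provided handle array
at the right width via efi_is_64bit(). The core of that trick as a
standalone sketch; is_64bit stands in for efi_is_64bit() and the handle
values are fake:

/* Standalone sketch of the mixed-width handle walk used by the
 * unified setup_efi_pci() and setup_uga(). */
#include <stdint.h>
#include <stdio.h>

static unsigned long handle_at(const void *buf, unsigned int i,
			       int is_64bit)
{
	return is_64bit ? (unsigned long)((const uint64_t *)buf)[i]
			: (unsigned long)((const uint32_t *)buf)[i];
}

int main(void)
{
	uint32_t handles32[] = { 0x1000, 0x2000 };      /* fake handles */
	unsigned int nr = sizeof(handles32) / sizeof(handles32[0]);

	for (unsigned int i = 0; i < nr; i++)
		printf("handle %u: %#lx\n", i, handle_at(handles32, i, 0));
	return 0;
}
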
index e799dc5c644872e65625b9442d194470a49d663c..8297387c4676124167207dc1383467a0b221c6ff 100644 (file)
 
 #define DESC_TYPE_CODE_DATA    (1 << 0)
 
-struct efi_uga_draw_protocol_32 {
+typedef struct {
        u32 get_mode;
        u32 set_mode;
        u32 blt;
-};
+} efi_uga_draw_protocol_32_t;
 
-struct efi_uga_draw_protocol_64 {
+typedef struct {
        u64 get_mode;
        u64 set_mode;
        u64 blt;
-};
+} efi_uga_draw_protocol_64_t;
 
-struct efi_uga_draw_protocol {
+typedef struct {
        void *get_mode;
        void *set_mode;
        void *blt;
-};
+} efi_uga_draw_protocol_t;
 
 #endif /* BOOT_COMPRESSED_EBOOT_H */
index 5de7c0d46edfc56459280519e8987e28349baee9..acd11b3bf639e0a50013014e5eaeca6b11083c28 100644 (file)
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128_aesni_alg[] = {
        }
 };
 
-static const struct x86_cpu_id aesni_cpu_id[] = {
-       X86_FEATURE_MATCH(X86_FEATURE_AES),
-       X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-       {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
 static int __init crypto_aegis128_aesni_module_init(void)
 {
-       if (!x86_match_cpu(aesni_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_aegis128_aesni_alg,
index 876e4866e63386ec0dd0231c6be35b9f94bc709c..2071c3d1ae07575143cc4d6262e92eaeef9ba560 100644 (file)
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis128l_aesni_alg[] = {
        }
 };
 
-static const struct x86_cpu_id aesni_cpu_id[] = {
-       X86_FEATURE_MATCH(X86_FEATURE_AES),
-       X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-       {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
 static int __init crypto_aegis128l_aesni_module_init(void)
 {
-       if (!x86_match_cpu(aesni_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_aegis128l_aesni_alg,
index 2b5dd3af8f4dc4c20caad2a96825576dc17fe2be..b5f2a8fd5a713ca986e2d3ef24aa1b69d421ced2 100644 (file)
@@ -375,16 +375,12 @@ static struct aead_alg crypto_aegis256_aesni_alg[] = {
        }
 };
 
-static const struct x86_cpu_id aesni_cpu_id[] = {
-       X86_FEATURE_MATCH(X86_FEATURE_AES),
-       X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-       {}
-};
-MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);
-
 static int __init crypto_aegis256_aesni_module_init(void)
 {
-       if (!x86_match_cpu(aesni_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_AES) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_aegis256_aesni_alg,
index f111f36d26dce558ddb1e4ad5427a048011aded2..6634907d6ccdf17eb3c425337ca96bf746927ff7 100644 (file)
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_avx2_final(void *state, void *tag_xor,
 
 MORUS1280_DECLARE_ALGS(avx2, "morus1280-avx2", 400);
 
-static const struct x86_cpu_id avx2_cpu_id[] = {
-    X86_FEATURE_MATCH(X86_FEATURE_AVX2),
-    {}
-};
-MODULE_DEVICE_TABLE(x86cpu, avx2_cpu_id);
-
 static int __init crypto_morus1280_avx2_module_init(void)
 {
-       if (!x86_match_cpu(avx2_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_AVX2) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_morus1280_avx2_algs,
index 839270aa713cab55dfebc55f9d02c53aa13c9f73..95cf857d2cbb1943ba8ce356c48416837f1655d9 100644 (file)
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus1280_sse2_final(void *state, void *tag_xor,
 
 MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
 
-static const struct x86_cpu_id sse2_cpu_id[] = {
-    X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-    {}
-};
-MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
-
 static int __init crypto_morus1280_sse2_module_init(void)
 {
-       if (!x86_match_cpu(sse2_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_morus1280_sse2_algs,
index 26b47e2db8d2149c64b407ee10ccd4f013fcb542..615fb7bc9a323d949d038a8496125eab0c4bc4ba 100644 (file)
@@ -37,15 +37,11 @@ asmlinkage void crypto_morus640_sse2_final(void *state, void *tag_xor,
 
 MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
 
-static const struct x86_cpu_id sse2_cpu_id[] = {
-    X86_FEATURE_MATCH(X86_FEATURE_XMM2),
-    {}
-};
-MODULE_DEVICE_TABLE(x86cpu, sse2_cpu_id);
-
 static int __init crypto_morus640_sse2_module_init(void)
 {
-       if (!x86_match_cpu(sse2_cpu_id))
+       if (!boot_cpu_has(X86_FEATURE_XMM2) ||
+           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
+           !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
        return crypto_register_aeads(crypto_morus640_sse2_algs,
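
Each of these crypto glue changes replaces an x86_match_cpu() table
(whose MODULE_DEVICE_TABLE entry also caused udev to auto-load the
module) with explicit feature checks at init time, including a check
that the OS has enabled the needed xstate. A hedged kernel-context
sketch of the resulting gate; not a buildable module on its own, and
the function name is illustrative:

/* Sketch of the init-time SIMD feature gate used above. */
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/cpufeature.h>
#include <asm/fpu/api.h>

static int __init example_simd_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_XMM2) ||
	    !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
	    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
		return -ENODEV;         /* refuse to load without usable SSE2 */

	/* register the algorithms here */
	return 0;
}
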
index 8c50754c09c1d83c8700824348036715142d11d6..4b767284b7f5e59e529c5c7e1ae90174d7d23654 100644 (file)
@@ -123,8 +123,8 @@ void mce_setup(struct mce *m)
 {
        memset(m, 0, sizeof(struct mce));
        m->cpu = m->extcpu = smp_processor_id();
-       /* We hope get_seconds stays lockless */
-       m->time = get_seconds();
+       /* need the internal __ version to avoid deadlocks */
+       m->time = __ktime_get_real_seconds();
        m->cpuvendor = boot_cpu_data.x86_vendor;
        m->cpuid = cpuid_eax(1);
        m->socketid = cpu_data(m->extcpu).phys_proc_id;
@@ -1104,6 +1104,101 @@ static void mce_unmap_kpfn(unsigned long pfn)
 }
 #endif
 
+
+/*
+ * Cases where we avoid rendezvous handler timeout:
+ * 1) If this CPU is offline.
+ *
+ * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
+ *  skip those CPUs which remain looping in the 1st kernel - see
+ *  crash_nmi_callback().
+ *
+ * Note: there still is a small window between kexec-ing and the new,
+ * kdump kernel establishing a new #MC handler where a broadcasted MCE
+ * might not get handled properly.
+ */
+static bool __mc_check_crashing_cpu(int cpu)
+{
+       if (cpu_is_offline(cpu) ||
+           (crashing_cpu != -1 && crashing_cpu != cpu)) {
+               u64 mcgstatus;
+
+               mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
+               if (mcgstatus & MCG_STATUS_RIPV) {
+                       mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
+                       return true;
+               }
+       }
+       return false;
+}
+
+static void __mc_scan_banks(struct mce *m, struct mce *final,
+                           unsigned long *toclear, unsigned long *valid_banks,
+                           int no_way_out, int *worst)
+{
+       struct mca_config *cfg = &mca_cfg;
+       int severity, i;
+
+       for (i = 0; i < cfg->banks; i++) {
+               __clear_bit(i, toclear);
+               if (!test_bit(i, valid_banks))
+                       continue;
+
+               if (!mce_banks[i].ctl)
+                       continue;
+
+               m->misc = 0;
+               m->addr = 0;
+               m->bank = i;
+
+               m->status = mce_rdmsrl(msr_ops.status(i));
+               if (!(m->status & MCI_STATUS_VAL))
+                       continue;
+
+               /*
+                * Corrected or non-signaled errors are handled by
+                * machine_check_poll(). Leave them alone, unless this panics.
+                */
+               if (!(m->status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
+                       !no_way_out)
+                       continue;
+
+               /* Set taint even when machine check was not enabled. */
+               add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
+               severity = mce_severity(m, cfg->tolerant, NULL, true);
+
+               /*
+                * When machine check was for corrected/deferred handler don't
+                * touch, unless we're panicking.
+                */
+               if ((severity == MCE_KEEP_SEVERITY ||
+                    severity == MCE_UCNA_SEVERITY) && !no_way_out)
+                       continue;
+
+               __set_bit(i, toclear);
+
+               /* Machine check event was not enabled. Clear, but ignore. */
+               if (severity == MCE_NO_SEVERITY)
+                       continue;
+
+               mce_read_aux(m, i);
+
+               /* assuming valid severity level != 0 */
+               m->severity = severity;
+
+               mce_log(m);
+
+               if (severity > *worst) {
+                       *final = *m;
+                       *worst = severity;
+               }
+       }
+
+       /* mce_clear_state will clear *final, save locally for use later */
+       *m = *final;
+}
+
 /*
  * The actual machine check handler. This only handles real
  * exceptions when something got corrupted coming in through int 18.
@@ -1118,68 +1213,45 @@ static void mce_unmap_kpfn(unsigned long pfn)
  */
 void do_machine_check(struct pt_regs *regs, long error_code)
 {
+       DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
+       DECLARE_BITMAP(toclear, MAX_NR_BANKS);
        struct mca_config *cfg = &mca_cfg;
+       int cpu = smp_processor_id();
+       char *msg = "Unknown";
        struct mce m, *final;
-       int i;
        int worst = 0;
-       int severity;
 
        /*
         * Establish sequential order between the CPUs entering the machine
         * check handler.
         */
        int order = -1;
+
        /*
         * If no_way_out gets set, there is no safe way to recover from this
         * MCE.  If mca_cfg.tolerant is cranked up, we'll try anyway.
         */
        int no_way_out = 0;
+
        /*
         * If kill_it gets set, there might be a way to recover from this
         * error.
         */
        int kill_it = 0;
-       DECLARE_BITMAP(toclear, MAX_NR_BANKS);
-       DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
-       char *msg = "Unknown";
 
        /*
         * MCEs are always local on AMD. Same is determined by MCG_STATUS_LMCES
         * on Intel.
         */
        int lmce = 1;
-       int cpu = smp_processor_id();
 
-       /*
-        * Cases where we avoid rendezvous handler timeout:
-        * 1) If this CPU is offline.
-        *
-        * 2) If crashing_cpu was set, e.g. we're entering kdump and we need to
-        *  skip those CPUs which remain looping in the 1st kernel - see
-        *  crash_nmi_callback().
-        *
-        * Note: there still is a small window between kexec-ing and the new,
-        * kdump kernel establishing a new #MC handler where a broadcasted MCE
-        * might not get handled properly.
-        */
-       if (cpu_is_offline(cpu) ||
-           (crashing_cpu != -1 && crashing_cpu != cpu)) {
-               u64 mcgstatus;
-
-               mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);
-               if (mcgstatus & MCG_STATUS_RIPV) {
-                       mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
-                       return;
-               }
-       }
+       if (__mc_check_crashing_cpu(cpu))
+               return;
 
        ist_enter(regs);
 
        this_cpu_inc(mce_exception_count);
 
-       if (!cfg->banks)
-               goto out;
-
        mce_gather_info(&m, regs);
        m.tsc = rdtsc();
 
@@ -1220,67 +1292,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                order = mce_start(&no_way_out);
        }
 
-       for (i = 0; i < cfg->banks; i++) {
-               __clear_bit(i, toclear);
-               if (!test_bit(i, valid_banks))
-                       continue;
-               if (!mce_banks[i].ctl)
-                       continue;
-
-               m.misc = 0;
-               m.addr = 0;
-               m.bank = i;
-
-               m.status = mce_rdmsrl(msr_ops.status(i));
-               if ((m.status & MCI_STATUS_VAL) == 0)
-                       continue;
-
-               /*
-                * Non uncorrected or non signaled errors are handled by
-                * machine_check_poll. Leave them alone, unless this panics.
-                */
-               if (!(m.status & (cfg->ser ? MCI_STATUS_S : MCI_STATUS_UC)) &&
-                       !no_way_out)
-                       continue;
-
-               /*
-                * Set taint even when machine check was not enabled.
-                */
-               add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
-               severity = mce_severity(&m, cfg->tolerant, NULL, true);
-
-               /*
-                * When machine check was for corrected/deferred handler don't
-                * touch, unless we're panicing.
-                */
-               if ((severity == MCE_KEEP_SEVERITY ||
-                    severity == MCE_UCNA_SEVERITY) && !no_way_out)
-                       continue;
-               __set_bit(i, toclear);
-               if (severity == MCE_NO_SEVERITY) {
-                       /*
-                        * Machine check event was not enabled. Clear, but
-                        * ignore.
-                        */
-                       continue;
-               }
-
-               mce_read_aux(&m, i);
-
-               /* assuming valid severity level != 0 */
-               m.severity = severity;
-
-               mce_log(&m);
-
-               if (severity > worst) {
-                       *final = m;
-                       worst = severity;
-               }
-       }
-
-       /* mce_clear_state will clear *final, save locally for use later */
-       m = *final;
+       __mc_scan_banks(&m, final, toclear, valid_banks, no_way_out, &worst);
 
        if (!no_way_out)
                mce_clear_state(toclear);
@@ -1319,7 +1331,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
        if (worst > 0)
                mce_report_event(regs);
        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
-out:
+
        sync_core();
 
        if (worst != MCE_AR_SEVERITY && !kill_it)
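The offline/kdump bail-out deleted above now lives in a helper. A minimal sketch of
__mc_check_crashing_cpu(), reconstructed from the deleted lines (the helper's real
body is outside this hunk, so treat this as an approximation):

static bool __mc_check_crashing_cpu(int cpu)
{
        /* Same offline/kdump rationale as the deleted comment above. */
        if (cpu_is_offline(cpu) ||
            (crashing_cpu != -1 && crashing_cpu != cpu)) {
                u64 mcgstatus = mce_rdmsrl(MSR_IA32_MCG_STATUS);

                if (mcgstatus & MCG_STATUS_RIPV) {
                        mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
                        return true;    /* nothing more to do on this CPU */
                }
        }

        return false;
}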
index 5f2eb32316073f15307aa28ac8b37a17230ea751..ee5d08f25ce45f21aa81550ce317760a2c745900 100644 (file)
@@ -636,6 +636,8 @@ void efi_switch_mm(struct mm_struct *mm)
 #ifdef CONFIG_EFI_MIXED
 extern efi_status_t efi64_thunk(u32, ...);
 
+static DEFINE_SPINLOCK(efi_runtime_lock);
+
 #define runtime_service32(func)                                                 \
 ({                                                                      \
        u32 table = (u32)(unsigned long)efi.systab;                      \
@@ -657,17 +659,14 @@ extern efi_status_t efi64_thunk(u32, ...);
 #define efi_thunk(f, ...)                                              \
 ({                                                                     \
        efi_status_t __s;                                               \
-       unsigned long __flags;                                          \
        u32 __func;                                                     \
                                                                        \
-       local_irq_save(__flags);                                        \
        arch_efi_call_virt_setup();                                     \
                                                                        \
        __func = runtime_service32(f);                                  \
        __s = efi64_thunk(__func, __VA_ARGS__);                         \
                                                                        \
        arch_efi_call_virt_teardown();                                  \
-       local_irq_restore(__flags);                                     \
                                                                        \
        __s;                                                            \
 })
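With local_irq_save()/restore() dropped from efi_thunk() itself, interrupt masking now
comes from the callers: each one takes the new efi_runtime_lock with
spin_lock_irqsave(), which both serializes mixed-mode EFI calls and keeps IRQs off
across the thunk. The resulting caller pattern, repeated throughout the hunks below:

unsigned long flags;

spin_lock_irqsave(&efi_runtime_lock, flags);    /* serialize + mask IRQs */
status = efi_thunk(get_time, phys_tm, phys_tc); /* no local_irq_save() inside */
spin_unlock_irqrestore(&efi_runtime_lock, flags);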
@@ -702,14 +701,17 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
        efi_status_t status;
        u32 phys_tm, phys_tc;
+       unsigned long flags;
 
        spin_lock(&rtc_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_tm = virt_to_phys_or_null(tm);
        phys_tc = virt_to_phys_or_null(tc);
 
        status = efi_thunk(get_time, phys_tm, phys_tc);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);
 
        return status;
@@ -719,13 +721,16 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
 {
        efi_status_t status;
        u32 phys_tm;
+       unsigned long flags;
 
        spin_lock(&rtc_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_tm = virt_to_phys_or_null(tm);
 
        status = efi_thunk(set_time, phys_tm);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);
 
        return status;
@@ -737,8 +742,10 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
 {
        efi_status_t status;
        u32 phys_enabled, phys_pending, phys_tm;
+       unsigned long flags;
 
        spin_lock(&rtc_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_enabled = virt_to_phys_or_null(enabled);
        phys_pending = virt_to_phys_or_null(pending);
@@ -747,6 +754,7 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
        status = efi_thunk(get_wakeup_time, phys_enabled,
                             phys_pending, phys_tm);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);
 
        return status;
@@ -757,13 +765,16 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 {
        efi_status_t status;
        u32 phys_tm;
+       unsigned long flags;
 
        spin_lock(&rtc_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_tm = virt_to_phys_or_null(tm);
 
        status = efi_thunk(set_wakeup_time, enabled, phys_tm);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        spin_unlock(&rtc_lock);
 
        return status;
@@ -781,6 +792,9 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
        efi_status_t status;
        u32 phys_name, phys_vendor, phys_attr;
        u32 phys_data_size, phys_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_data_size = virt_to_phys_or_null(data_size);
        phys_vendor = virt_to_phys_or_null(vendor);
@@ -791,6 +805,8 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
        status = efi_thunk(get_variable, phys_name, phys_vendor,
                           phys_attr, phys_data_size, phys_data);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
 
@@ -800,6 +816,34 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
 {
        u32 phys_name, phys_vendor, phys_data;
        efi_status_t status;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
+
+       phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+       phys_vendor = virt_to_phys_or_null(vendor);
+       phys_data = virt_to_phys_or_null_size(data, data_size);
+
+       /* If data_size is > sizeof(u32) we've got problems */
+       status = efi_thunk(set_variable, phys_name, phys_vendor,
+                          attr, data_size, phys_data);
+
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+       return status;
+}
+
+static efi_status_t
+efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+                                  u32 attr, unsigned long data_size,
+                                  void *data)
+{
+       u32 phys_name, phys_vendor, phys_data;
+       efi_status_t status;
+       unsigned long flags;
+
+       if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+               return EFI_NOT_READY;
 
        phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
        phys_vendor = virt_to_phys_or_null(vendor);
@@ -809,6 +853,8 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
        status = efi_thunk(set_variable, phys_name, phys_vendor,
                           attr, data_size, phys_data);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
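The _nonblocking variant differs only in using spin_trylock_irqsave(): when the lock is
contended it returns EFI_NOT_READY instead of spinning. A hypothetical call site
(illustrative, not part of this patch) would handle that as:

status = efi.set_variable_nonblocking(name, &vendor_guid, attr,
                                      data_size, data);
if (status == EFI_NOT_READY) {
        /* efi_runtime_lock was contended; retry later or give up */
}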
 
@@ -819,6 +865,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
 {
        efi_status_t status;
        u32 phys_name_size, phys_name, phys_vendor;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_name_size = virt_to_phys_or_null(name_size);
        phys_vendor = virt_to_phys_or_null(vendor);
@@ -827,6 +876,8 @@ efi_thunk_get_next_variable(unsigned long *name_size,
        status = efi_thunk(get_next_variable, phys_name_size,
                           phys_name, phys_vendor);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
 
@@ -835,10 +886,15 @@ efi_thunk_get_next_high_mono_count(u32 *count)
 {
        efi_status_t status;
        u32 phys_count;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_count = virt_to_phys_or_null(count);
        status = efi_thunk(get_next_high_mono_count, phys_count);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
 
@@ -847,10 +903,15 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
                       unsigned long data_size, efi_char16_t *data)
 {
        u32 phys_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&efi_runtime_lock, flags);
 
        phys_data = virt_to_phys_or_null_size(data, data_size);
 
        efi_thunk(reset_system, reset_type, status, data_size, phys_data);
+
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
 }
 
 static efi_status_t
@@ -872,10 +933,13 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
 {
        efi_status_t status;
        u32 phys_storage, phys_remaining, phys_max;
+       unsigned long flags;
 
        if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
                return EFI_UNSUPPORTED;
 
+       spin_lock_irqsave(&efi_runtime_lock, flags);
+
        phys_storage = virt_to_phys_or_null(storage_space);
        phys_remaining = virt_to_phys_or_null(remaining_space);
        phys_max = virt_to_phys_or_null(max_variable_size);
@@ -883,6 +947,35 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
        status = efi_thunk(query_variable_info, attr, phys_storage,
                           phys_remaining, phys_max);
 
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+       return status;
+}
+
+static efi_status_t
+efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
+                                         u64 *remaining_space,
+                                         u64 *max_variable_size)
+{
+       efi_status_t status;
+       u32 phys_storage, phys_remaining, phys_max;
+       unsigned long flags;
+
+       if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+               return EFI_UNSUPPORTED;
+
+       if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+               return EFI_NOT_READY;
+
+       phys_storage = virt_to_phys_or_null(storage_space);
+       phys_remaining = virt_to_phys_or_null(remaining_space);
+       phys_max = virt_to_phys_or_null(max_variable_size);
+
+       status = efi_thunk(query_variable_info, attr, phys_storage,
+                          phys_remaining, phys_max);
+
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
        return status;
 }
 
@@ -908,9 +1001,11 @@ void efi_thunk_runtime_setup(void)
        efi.get_variable = efi_thunk_get_variable;
        efi.get_next_variable = efi_thunk_get_next_variable;
        efi.set_variable = efi_thunk_set_variable;
+       efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
        efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
        efi.reset_system = efi_thunk_reset_system;
        efi.query_variable_info = efi_thunk_query_variable_info;
+       efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
        efi.update_capsule = efi_thunk_update_capsule;
        efi.query_capsule_caps = efi_thunk_query_capsule_caps;
 }
index 36c1f8b9f7e0c2df3dec1828e9f786f9d6c1ecfb..844d31cb8a0c7eae1dcb37ed48fa373564e83f22 100644 (file)
@@ -105,12 +105,11 @@ early_param("efi_no_storage_paranoia", setup_storage_paranoia);
 */
 void efi_delete_dummy_variable(void)
 {
-       efi.set_variable((efi_char16_t *)efi_dummy_name,
-                        &EFI_DUMMY_GUID,
-                        EFI_VARIABLE_NON_VOLATILE |
-                        EFI_VARIABLE_BOOTSERVICE_ACCESS |
-                        EFI_VARIABLE_RUNTIME_ACCESS,
-                        0, NULL);
+       efi.set_variable_nonblocking((efi_char16_t *)efi_dummy_name,
+                                    &EFI_DUMMY_GUID,
+                                    EFI_VARIABLE_NON_VOLATILE |
+                                    EFI_VARIABLE_BOOTSERVICE_ACCESS |
+                                    EFI_VARIABLE_RUNTIME_ACCESS, 0, NULL);
 }
 
 /*
@@ -249,7 +248,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
        int num_entries;
        void *new;
 
-       if (efi_mem_desc_lookup(addr, &md)) {
+       if (efi_mem_desc_lookup(addr, &md) ||
+           md.type != EFI_BOOT_SERVICES_DATA) {
                pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr);
                return;
        }
index 7436b2d27fa38513602207c6b4bc9213c97af436..a390c6d4f72df976e0880150207fef26acc3db97 100644 (file)
@@ -298,7 +298,8 @@ static void reset_bdev(struct zram *zram)
        zram->backing_dev = NULL;
        zram->old_block_size = 0;
        zram->bdev = NULL;
-
+       zram->disk->queue->backing_dev_info->capabilities |=
+                               BDI_CAP_SYNCHRONOUS_IO;
        kvfree(zram->bitmap);
        zram->bitmap = NULL;
 }
@@ -400,6 +401,18 @@ static ssize_t backing_dev_store(struct device *dev,
        zram->backing_dev = backing_dev;
        zram->bitmap = bitmap;
        zram->nr_pages = nr_pages;
+       /*
+        * With the writeback feature, zram does asynchronous IO, so it is
+        * no longer a synchronous device; clear the synchronous io flag.
+        * Otherwise, the upper layer (e.g., swap) could wait for IO
+        * completion rather than submit-and-return, which makes the system
+        * sluggish.
+        * Furthermore, when the IO function returns (e.g., swap_readpage),
+        * the upper layer expects the IO to be done and may free the page,
+        * while the IO is in fact still in flight, which can ultimately
+        * cause a use-after-free once the IO really completes.
+        */
+       zram->disk->queue->backing_dev_info->capabilities &=
+                       ~BDI_CAP_SYNCHRONOUS_IO;
        up_write(&zram->init_lock);
 
        pr_info("setup backing device %s\n", file_name);
index 781a4a33755739438acb6430576f6f2221601f88..d8e159feb573f1a6cdc171bc90b5948b72cc45bd 100644 (file)
@@ -87,6 +87,18 @@ config EFI_RUNTIME_WRAPPERS
 config EFI_ARMSTUB
        bool
 
+config EFI_ARMSTUB_DTB_LOADER
+       bool "Enable the DTB loader"
+       depends on EFI_ARMSTUB
+       help
+         Select this config option to add support for the dtb= command
+         line parameter, allowing a device tree blob to be loaded into
+         memory from the EFI System Partition by the stub.
+
+         The device tree is typically provided by the platform or by
+         the bootloader, so this option is mostly for development
+         purposes only.
+
 config EFI_BOOTLOADER_CONTROL
        tristate "EFI Bootloader Control"
        depends on EFI_VARS
index 3bf0dca378a647f34e5fbc9092df224816e8f3a3..a7902fccdcfa4c5f39e42d6a4561d46a5a2a017b 100644 (file)
@@ -48,8 +48,21 @@ u64 cper_next_record_id(void)
 {
        static atomic64_t seq;
 
-       if (!atomic64_read(&seq))
-               atomic64_set(&seq, ((u64)get_seconds()) << 32);
+       if (!atomic64_read(&seq)) {
+               time64_t time = ktime_get_real_seconds();
+
+               /*
+                * This code is unlikely to still be needed in year 2106,
+                * but just in case, let's use a few more bits for timestamps
+                * after y2038 to be sure they keep increasing monotonically
+                * for the next few hundred years...
+                */
+               if (time < 0x80000000)
+                       atomic64_set(&seq, (ktime_get_real_seconds()) << 32);
+               else
+                       atomic64_set(&seq, 0x8000000000000000ull |
+                                          ktime_get_real_seconds() << 24);
+       }
 
        return atomic64_inc_return(&seq);
 }
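The two seeding layouts above keep record IDs increasing monotonically across the
31-bit timestamp rollover. Worked examples (timestamps illustrative):

/*
 * time < 0x80000000:   seq = time << 32
 *   time = 0x5b70f000 (Aug 2018)  ->  seq = 0x5b70f00000000000
 *   (timestamp in bits 62..32, low 32 bits left for the counter)
 *
 * time >= 0x80000000:  seq = (1ull << 63) | time << 24
 *   time = 0x80000001 (Jan 2038)  ->  seq = 0x8080000001000000
 *   (top bit set, timestamp shifted up by 24, low 24 bits for the counter)
 */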
@@ -459,7 +472,7 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata
                else
                        goto err_section_too_small;
 #if defined(CONFIG_ARM64) || defined(CONFIG_ARM)
-       } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_ARM)) {
+       } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
                struct cper_sec_proc_arm *arm_err = acpi_hest_get_payload(gdata);
 
                printk("%ssection_type: ARM processor error\n", newpfx);
index 232f4915223b519b3ee6580cb08354ac9b0a7016..d8a33a781a5709a69e74bb68a4218fa79f3b4e4e 100644 (file)
@@ -84,6 +84,8 @@ struct mm_struct efi_mm = {
        .mmlist                 = LIST_HEAD_INIT(efi_mm.mmlist),
 };
 
+struct workqueue_struct *efi_rts_wq;
+
 static bool disable_runtime;
 static int __init setup_noefi(char *arg)
 {
@@ -337,6 +339,18 @@ static int __init efisubsys_init(void)
        if (!efi_enabled(EFI_BOOT))
                return 0;
 
+       /*
+        * Since we process only one efi_runtime_service() at a time, an
+        * ordered workqueue (which creates only one execution context)
+        * should suffice for all our needs.
+        */
+       efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
+       if (!efi_rts_wq) {
+               pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
+               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+               return 0;
+       }
+
        /* We register the efi directory at /sys/firmware/efi */
        efi_kobj = kobject_create_and_add("efi", firmware_kobj);
        if (!efi_kobj) {
@@ -388,7 +402,7 @@ subsys_initcall(efisubsys_init);
  * and if so, populate the supplied memory descriptor with the appropriate
  * data.
  */
-int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 {
        efi_memory_desc_t *md;
 
@@ -406,12 +420,6 @@ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
                u64 size;
                u64 end;
 
-               if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
-                   md->type != EFI_BOOT_SERVICES_DATA &&
-                   md->type != EFI_RUNTIME_SERVICES_DATA) {
-                       continue;
-               }
-
                size = md->num_pages << EFI_PAGE_SHIFT;
                end = md->phys_addr + size;
                if (phys_addr >= md->phys_addr && phys_addr < end) {
index 1ab80e06e7c510b0ad730c2de0cbe30b4f826f48..5d06bd247d0731a652424d504293f028d291c1b8 100644 (file)
@@ -250,7 +250,10 @@ void __init efi_esrt_init(void)
                return;
 
        rc = efi_mem_desc_lookup(efi.esrt, &md);
-       if (rc < 0) {
+       if (rc < 0 ||
+           (!(md.attribute & EFI_MEMORY_RUNTIME) &&
+            md.type != EFI_BOOT_SERVICES_DATA &&
+            md.type != EFI_RUNTIME_SERVICES_DATA)) {
                pr_warn("ESRT header is not in the memory map.\n");
                return;
        }
@@ -326,7 +329,8 @@ void __init efi_esrt_init(void)
 
        end = esrt_data + size;
        pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
-       efi_mem_reserve(esrt_data, esrt_data_size);
+       if (md.type == EFI_BOOT_SERVICES_DATA)
+               efi_mem_reserve(esrt_data, esrt_data_size);
 
        pr_debug("esrt-init: loaded.\n");
 }
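Taken together with the efi_mem_desc_lookup() hunk above, the type/attribute filtering
has moved from the lookup into its callers: the lookup now returns whatever descriptor
covers the address, and each caller applies its own policy (quirks.c insists on
EFI_BOOT_SERVICES_DATA; the ESRT code also accepts runtime regions but only reserves
boot-services data). The caller-side shape, as in the quirks.c hunk above:

efi_memory_desc_t md;

if (efi_mem_desc_lookup(addr, &md) ||
    md.type != EFI_BOOT_SERVICES_DATA) {    /* caller-specific policy */
        pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr);
        return;
}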
index 01a9d78ee4154702e89ab8534727dc1ab66998d4..6920033de6d411689719e64226112a19a8c8021d 100644 (file)
 
 static u64 virtmap_base = EFI_RT_VIRTUAL_BASE;
 
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
-                            void *__image, void **__fh)
-{
-       efi_file_io_interface_t *io;
-       efi_loaded_image_t *image = __image;
-       efi_file_handle_t *fh;
-       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
-       efi_status_t status;
-       void *handle = (void *)(unsigned long)image->device_handle;
-
-       status = sys_table_arg->boottime->handle_protocol(handle,
-                                &fs_proto, (void **)&io);
-       if (status != EFI_SUCCESS) {
-               efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
-               return status;
-       }
-
-       status = io->open_volume(io, &fh);
-       if (status != EFI_SUCCESS)
-               efi_printk(sys_table_arg, "Failed to open volume\n");
-
-       *__fh = fh;
-       return status;
-}
-
 void efi_char16_printk(efi_system_table_t *sys_table_arg,
                              efi_char16_t *str)
 {
@@ -202,9 +177,10 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
         * 'dtb=' unless UEFI Secure Boot is disabled.  We assume that secure
         * boot is enabled if we can't determine its state.
         */
-       if (secure_boot != efi_secureboot_mode_disabled &&
-           strstr(cmdline_ptr, "dtb=")) {
-               pr_efi(sys_table, "Ignoring DTB from command line.\n");
+       if (!IS_ENABLED(CONFIG_EFI_ARMSTUB_DTB_LOADER) ||
+            secure_boot != efi_secureboot_mode_disabled) {
+               if (strstr(cmdline_ptr, "dtb="))
+                       pr_efi(sys_table, "Ignoring DTB from command line.\n");
        } else {
                status = handle_cmdline_files(sys_table, image, cmdline_ptr,
                                              "dtb=",
index 50a9cab5a8340e542e2f8d12172bd4d500934d91..e94975f4655bdd50d1db4b4a8be728d266d7d02b 100644 (file)
@@ -413,6 +413,34 @@ static efi_status_t efi_file_close(void *handle)
        return efi_call_proto(efi_file_handle, close, handle);
 }
 
+static efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg,
+                                   efi_loaded_image_t *image,
+                                   efi_file_handle_t **__fh)
+{
+       efi_file_io_interface_t *io;
+       efi_file_handle_t *fh;
+       efi_guid_t fs_proto = EFI_FILE_SYSTEM_GUID;
+       efi_status_t status;
+       void *handle = (void *)(unsigned long)efi_table_attr(efi_loaded_image,
+                                                            device_handle,
+                                                            image);
+
+       status = efi_call_early(handle_protocol, handle,
+                               &fs_proto, (void **)&io);
+       if (status != EFI_SUCCESS) {
+               efi_printk(sys_table_arg, "Failed to handle fs_proto\n");
+               return status;
+       }
+
+       status = efi_call_proto(efi_file_io_interface, open_volume, io, &fh);
+       if (status != EFI_SUCCESS)
+               efi_printk(sys_table_arg, "Failed to open volume\n");
+       else
+               *__fh = fh;
+
+       return status;
+}
+
 /*
  * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
  * option, e.g. efi=nochunk.
@@ -563,8 +591,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
 
                /* Only open the volume once. */
                if (!i) {
-                       status = efi_open_volume(sys_table_arg, image,
-                                                (void **)&fh);
+                       status = efi_open_volume(sys_table_arg, image, &fh);
                        if (status != EFI_SUCCESS)
                                goto free_files;
                }
index f59564b72ddcdc0fae2c438f71c0c3d414f4a627..32799cf039ef1562afc8f124ab7eb9d64342083c 100644 (file)
@@ -36,9 +36,6 @@ extern int __pure is_quiet(void);
 
 void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
 
-efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
-                            void **__fh);
-
 unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
 
 efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
index ae54870b27886b016b0924fb85004b2fc4323700..aa66cbf23512af3c6e9195cd5b3471533a8f8e42 100644 (file)
@@ -1,6 +1,15 @@
 /*
  * runtime-wrappers.c - Runtime Services function call wrappers
  *
+ * Implementation summary:
+ * -----------------------
+ * 1. When a user/kernel thread requests the execution of an
+ * efi_runtime_service(), the request is enqueued as work on efi_rts_wq.
+ * 2. The caller thread waits until the work is finished, because it
+ * depends on the return status and side effects of the
+ * efi_runtime_service(); get_variable() and get_next_variable(),
+ * for instance.
+ *
  * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
  * Split off from arch/x86/platform/efi/efi.c
@@ -22,6 +31,9 @@
 #include <linux/mutex.h>
 #include <linux/semaphore.h>
 #include <linux/stringify.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
 #include <asm/efi.h>
 
 #define __efi_call_virt(f, args...) \
        __efi_call_virt_pointer(efi.systab->runtime, f, args)
 
+/* efi_runtime_service() function identifiers */
+enum efi_rts_ids {
+       GET_TIME,
+       SET_TIME,
+       GET_WAKEUP_TIME,
+       SET_WAKEUP_TIME,
+       GET_VARIABLE,
+       GET_NEXT_VARIABLE,
+       SET_VARIABLE,
+       QUERY_VARIABLE_INFO,
+       GET_NEXT_HIGH_MONO_COUNT,
+       UPDATE_CAPSULE,
+       QUERY_CAPSULE_CAPS,
+};
+
+/*
+ * efi_runtime_work:   Details of EFI Runtime Service work
+ * @arg<1-5>:          EFI Runtime Service function arguments
+ * @status:            Status of executing EFI Runtime Service
+ * @efi_rts_id:                EFI Runtime Service function identifier
+ * @efi_rts_comp:      Struct used for handling completions
+ */
+struct efi_runtime_work {
+       void *arg1;
+       void *arg2;
+       void *arg3;
+       void *arg4;
+       void *arg5;
+       efi_status_t status;
+       struct work_struct work;
+       enum efi_rts_ids efi_rts_id;
+       struct completion efi_rts_comp;
+};
+
+/*
+ * efi_queue_work:     Queue efi_runtime_service() and wait until it's done
+ * @rts:               efi_runtime_service() function identifier
+ * @rts_arg<1-5>:      efi_runtime_service() function arguments
+ *
+ * Accesses to efi_runtime_services() are serialized by a binary
+ * semaphore (efi_runtime_lock), and the caller waits until the work
+ * is finished; hence _only_ one work item is queued at a time.
+ */
+#define efi_queue_work(_rts, _arg1, _arg2, _arg3, _arg4, _arg5)                \
+({                                                                     \
+       struct efi_runtime_work efi_rts_work;                           \
+       efi_rts_work.status = EFI_ABORTED;                              \
+                                                                       \
+       init_completion(&efi_rts_work.efi_rts_comp);                    \
+       INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts);            \
+       efi_rts_work.arg1 = _arg1;                                      \
+       efi_rts_work.arg2 = _arg2;                                      \
+       efi_rts_work.arg3 = _arg3;                                      \
+       efi_rts_work.arg4 = _arg4;                                      \
+       efi_rts_work.arg5 = _arg5;                                      \
+       efi_rts_work.efi_rts_id = _rts;                                 \
+                                                                       \
+       /*                                                              \
+        * queue_work() returns 0 if the work was already on a queue;   \
+        * _ideally_ this should never happen.                          \
+        */                                                             \
+       if (queue_work(efi_rts_wq, &efi_rts_work.work))                 \
+               wait_for_completion(&efi_rts_work.efi_rts_comp);        \
+       else                                                            \
+               pr_err("Failed to queue work to efi_rts_wq.\n");        \
+                                                                       \
+       efi_rts_work.status;                                            \
+})
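efi_queue_work() is the on-stack-work-plus-completion idiom: the work item lives in
the caller's stack frame, so the caller must sleep on the completion before its frame
can go away. A stripped-down, generic sketch of the same pattern (names hypothetical;
the headers are the <linux/workqueue.h> and <linux/completion.h> added above):

struct my_work {
        struct work_struct work;
        struct completion done;
        int result;
};

static void my_worker(struct work_struct *w)
{
        struct my_work *mw = container_of(w, struct my_work, work);

        mw->result = 42;                /* do the real work here */
        complete(&mw->done);            /* lets the sleeping caller continue */
}

static int run_on_wq(struct workqueue_struct *wq)
{
        struct my_work mw;              /* on-stack: valid only until we return */

        init_completion(&mw.done);
        INIT_WORK_ONSTACK(&mw.work, my_worker);
        queue_work(wq, &mw.work);
        wait_for_completion(&mw.done);  /* must wait before the frame dies */
        destroy_work_on_stack(&mw.work);
        return mw.result;
}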
+
 void efi_call_virt_check_flags(unsigned long flags, const char *call)
 {
        unsigned long cur_flags, mismatch;
@@ -90,13 +172,98 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
  */
 static DEFINE_SEMAPHORE(efi_runtime_lock);
 
+/*
+ * Calls the appropriate efi_runtime_service() with the appropriate
+ * arguments.
+ *
+ * Conventions used by efi_call_rts() to unpack efi_runtime_work:
+ * 1. If an argument was passed as a pointer, cast it from void *
+ * back to its original pointer type.
+ * 2. If an argument was passed by value, cast it from void * to a
+ * pointer to its original type and dereference it.
+ */
+static void efi_call_rts(struct work_struct *work)
+{
+       struct efi_runtime_work *efi_rts_work;
+       void *arg1, *arg2, *arg3, *arg4, *arg5;
+       efi_status_t status = EFI_NOT_FOUND;
+
+       efi_rts_work = container_of(work, struct efi_runtime_work, work);
+       arg1 = efi_rts_work->arg1;
+       arg2 = efi_rts_work->arg2;
+       arg3 = efi_rts_work->arg3;
+       arg4 = efi_rts_work->arg4;
+       arg5 = efi_rts_work->arg5;
+
+       switch (efi_rts_work->efi_rts_id) {
+       case GET_TIME:
+               status = efi_call_virt(get_time, (efi_time_t *)arg1,
+                                      (efi_time_cap_t *)arg2);
+               break;
+       case SET_TIME:
+               status = efi_call_virt(set_time, (efi_time_t *)arg1);
+               break;
+       case GET_WAKEUP_TIME:
+               status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
+                                      (efi_bool_t *)arg2, (efi_time_t *)arg3);
+               break;
+       case SET_WAKEUP_TIME:
+               status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
+                                      (efi_time_t *)arg2);
+               break;
+       case GET_VARIABLE:
+               status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
+                                      (efi_guid_t *)arg2, (u32 *)arg3,
+                                      (unsigned long *)arg4, (void *)arg5);
+               break;
+       case GET_NEXT_VARIABLE:
+               status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
+                                      (efi_char16_t *)arg2,
+                                      (efi_guid_t *)arg3);
+               break;
+       case SET_VARIABLE:
+               status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
+                                      (efi_guid_t *)arg2, *(u32 *)arg3,
+                                      *(unsigned long *)arg4, (void *)arg5);
+               break;
+       case QUERY_VARIABLE_INFO:
+               status = efi_call_virt(query_variable_info, *(u32 *)arg1,
+                                      (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
+               break;
+       case GET_NEXT_HIGH_MONO_COUNT:
+               status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
+               break;
+       case UPDATE_CAPSULE:
+               status = efi_call_virt(update_capsule,
+                                      (efi_capsule_header_t **)arg1,
+                                      *(unsigned long *)arg2,
+                                      *(unsigned long *)arg3);
+               break;
+       case QUERY_CAPSULE_CAPS:
+               status = efi_call_virt(query_capsule_caps,
+                                      (efi_capsule_header_t **)arg1,
+                                      *(unsigned long *)arg2, (u64 *)arg3,
+                                      (int *)arg4);
+               break;
+       default:
+               /*
+                * Ideally, we should never reach here because a caller of this
+                * function should have put the right efi_runtime_service()
+                * function identifier into efi_rts_work->efi_rts_id.
+                */
+               pr_err("Requested executing invalid EFI Runtime Service.\n");
+       }
+       efi_rts_work->status = status;
+       complete(&efi_rts_work->efi_rts_comp);
+}
+
 static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
        efi_status_t status;
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_time, tm, tc);
+       status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -107,7 +274,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(set_time, tm);
+       status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -120,7 +287,8 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
+       status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
+                               NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -131,7 +299,8 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(set_wakeup_time, enabled, tm);
+       status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
+                               NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -146,8 +315,8 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_variable, name, vendor, attr, data_size,
-                              data);
+       status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
+                               data);
        up(&efi_runtime_lock);
        return status;
 }
@@ -160,7 +329,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_next_variable, name_size, name, vendor);
+       status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
+                               NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -175,8 +345,8 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(set_variable, name, vendor, attr, data_size,
-                              data);
+       status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
+                               data);
        up(&efi_runtime_lock);
        return status;
 }
@@ -210,8 +380,8 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(query_variable_info, attr, storage_space,
-                              remaining_space, max_variable_size);
+       status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
+                               remaining_space, max_variable_size, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -242,7 +412,8 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(get_next_high_mono_count, count);
+       status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
+                               NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -272,7 +443,8 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(update_capsule, capsules, count, sg_list);
+       status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
+                               NULL, NULL);
        up(&efi_runtime_lock);
        return status;
 }
@@ -289,8 +461,8 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
 
        if (down_interruptible(&efi_runtime_lock))
                return EFI_ABORTED;
-       status = efi_call_virt(query_capsule_caps, capsules, count, max_size,
-                              reset_type);
+       status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
+                               max_size, reset_type, NULL);
        up(&efi_runtime_lock);
        return status;
 }
index e2232cbcec8bbe6276e8e9d02e8c0f9e36d34030..addd9fecc198360d03a4cfb420e1e2d6ecf90a67 100644 (file)
@@ -25,6 +25,7 @@
 
 struct acpi_gpio_event {
        struct list_head node;
+       struct list_head initial_sync_list;
        acpi_handle handle;
        unsigned int pin;
        unsigned int irq;
@@ -50,6 +51,9 @@ struct acpi_gpio_chip {
        struct list_head events;
 };
 
+static LIST_HEAD(acpi_gpio_initial_sync_list);
+static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+
 static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
 {
        if (!gc->parent)
@@ -85,6 +89,21 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
        return gpiochip_get_desc(chip, pin);
 }
 
+static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
+{
+       mutex_lock(&acpi_gpio_initial_sync_list_lock);
+       list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
+       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
+static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
+{
+       mutex_lock(&acpi_gpio_initial_sync_list_lock);
+       if (!list_empty(&event->initial_sync_list))
+               list_del_init(&event->initial_sync_list);
+       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+}
+
 static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
 {
        struct acpi_gpio_event *event = data;
@@ -136,7 +155,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        irq_handler_t handler = NULL;
        struct gpio_desc *desc;
        unsigned long irqflags;
-       int ret, pin, irq;
+       int ret, pin, irq, value;
 
        if (!acpi_gpio_get_irq_resource(ares, &agpio))
                return AE_OK;
@@ -167,6 +186,8 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
        gpiod_direction_input(desc);
 
+       value = gpiod_get_value(desc);
+
        ret = gpiochip_lock_as_irq(chip, pin);
        if (ret) {
                dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -208,6 +229,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        event->irq = irq;
        event->pin = pin;
        event->desc = desc;
+       INIT_LIST_HEAD(&event->initial_sync_list);
 
        ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
                                   "ACPI:Event", event);
@@ -222,6 +244,18 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
                enable_irq_wake(irq);
 
        list_add_tail(&event->node, &acpi_gpio->events);
+
+       /*
+        * Make sure we trigger the initial state of the IRQ when using RISING
+        * or FALLING.  Note that we run the handlers at late_init; the AML
+        * code may refer to OperationRegions from other (builtin) drivers
+        * which may be probed after us.
+        */
+       if (handler == acpi_gpio_irq_handler &&
+           (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+            ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
+               acpi_gpio_add_to_initial_sync_list(event);
+
        return AE_OK;
 
 fail_free_event:
@@ -294,6 +328,8 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
        list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
                struct gpio_desc *desc;
 
+               acpi_gpio_del_from_initial_sync_list(event);
+
                if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
                        disable_irq_wake(event->irq);
 
@@ -1158,3 +1194,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
 
        return con_id == NULL;
 }
+
+/* Sync the initial state of handlers after all builtin drivers have probed */
+static int acpi_gpio_initial_sync(void)
+{
+       struct acpi_gpio_event *event, *ep;
+
+       mutex_lock(&acpi_gpio_initial_sync_list_lock);
+       list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
+                                initial_sync_list) {
+               acpi_evaluate_object(event->handle, NULL, NULL, NULL);
+               list_del_init(&event->initial_sync_list);
+       }
+       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+       return 0;
+}
+/* We must use _sync so that this runs after the first deferred_probe run */
+late_initcall_sync(acpi_gpio_initial_sync);
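The queueing condition above selects exactly the edge-triggered events whose edge has,
in effect, already happened by the time the IRQ is requested; acpi_gpio_initial_sync()
then replays them by evaluating the same AML event method the interrupt handler would
have run. Illustrative cases:

/*
 * IRQF_TRIGGER_RISING  + line already reads 1  ->  queued for initial sync
 * IRQF_TRIGGER_FALLING + line already reads 0  ->  queued for initial sync
 * IRQF_TRIGGER_RISING  + line reads 0          ->  not queued; the edge
 *                                                  will fire normally
 */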
index 1f41a4f89c08f4c4b2da8cf3f0feb2bd643e9b5e..8a873975cf1252006e91d437893b2aff2791e843 100644 (file)
@@ -191,28 +191,43 @@ static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
        if (priv->len_recv) {
                /* read length byte */
                rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+               /*
+                * We expect at least 2 interrupts for I2C_M_RECV_LEN
+                * transactions. The length is updated during the first
+                * interrupt, and the buffer contents are only copied
+                * during subsequent interrupts. If the interrupts get
+                * merged, we would complete the transaction without copying
+                * the bytes out of the RX fifo. To avoid this, we now drain
+                * the fifo whenever data is available.
+                * The rlen byte has already been drained, so decrement the
+                * total length by one.
+                */
+
+               len--;
                if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) {
                        rlen = 0;       /*abort transfer */
                        priv->msg_buf_remaining = 0;
                        priv->msg_len = 0;
-               } else {
-                       *buf++ = rlen;
-                       if (priv->client_pec)
-                               ++rlen; /* account for error check byte */
-                       /* update remaining bytes and message length */
-                       priv->msg_buf_remaining = rlen;
-                       priv->msg_len = rlen + 1;
+                       xlp9xx_i2c_update_rlen(priv);
+                       return;
                }
+
+               *buf++ = rlen;
+               if (priv->client_pec)
+                       ++rlen; /* account for error check byte */
+               /* update remaining bytes and message length */
+               priv->msg_buf_remaining = rlen;
+               priv->msg_len = rlen + 1;
                xlp9xx_i2c_update_rlen(priv);
                priv->len_recv = false;
-       } else {
-               len = min(priv->msg_buf_remaining, len);
-               for (i = 0; i < len; i++, buf++)
-                       *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
-
-               priv->msg_buf_remaining -= len;
        }
 
+       len = min(priv->msg_buf_remaining, len);
+       for (i = 0; i < len; i++, buf++)
+               *buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+       priv->msg_buf_remaining -= len;
        priv->msg_buf = buf;
 
        if (priv->msg_buf_remaining)
index a4e404aaf64bdb822685519bbf6e8c01d329af45..5c7afdec192c139b4e65003a00d9549e1aea9b67 100644 (file)
@@ -57,8 +57,8 @@ MODULE_LICENSE("GPL v2");
  #define HIL_DATA              0x1
  #define HIL_CMD               0x3
  #define HIL_IRQ               2
- #define hil_readb(p)          readb(p)
- #define hil_writeb(v,p)       writeb((v),(p))
+ #define hil_readb(p)          readb((const volatile void __iomem *)(p))
+ #define hil_writeb(v, p)      writeb((v), (volatile void __iomem *)(p))
 
 #else
 #error "HIL is not supported on this platform"
index e9233db16e039f08c43083eb99cc4bf3a2d744ae..d564d21245c5c3aa7ccafc39dc0964170e1de6e0 100644 (file)
@@ -8,7 +8,7 @@ config ARM_GIC
        bool
        select IRQ_DOMAIN
        select IRQ_DOMAIN_HIERARCHY
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
 
 config ARM_GIC_PM
@@ -34,7 +34,7 @@ config GIC_NON_BANKED
 config ARM_GIC_V3
        bool
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select IRQ_DOMAIN_HIERARCHY
        select PARTITION_PERCPU
        select GENERIC_IRQ_EFFECTIVE_AFF_MASK
@@ -66,7 +66,7 @@ config ARM_NVIC
 config ARM_VIC
        bool
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
 
 config ARM_VIC_NR
        int
@@ -93,14 +93,14 @@ config ATMEL_AIC_IRQ
        bool
        select GENERIC_IRQ_CHIP
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select SPARSE_IRQ
 
 config ATMEL_AIC5_IRQ
        bool
        select GENERIC_IRQ_CHIP
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select SPARSE_IRQ
 
 config I8259
@@ -137,7 +137,7 @@ config DW_APB_ICTL
 config FARADAY_FTINTC010
        bool
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select SPARSE_IRQ
 
 config HISILICON_IRQ_MBIGEN
@@ -162,7 +162,7 @@ config CLPS711X_IRQCHIP
        bool
        depends on ARCH_CLPS711X
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
        select SPARSE_IRQ
        default y
 
@@ -181,7 +181,7 @@ config OMAP_IRQCHIP
 config ORION_IRQCHIP
        bool
        select IRQ_DOMAIN
-       select MULTI_IRQ_HANDLER
+       select GENERIC_IRQ_MULTI_HANDLER
 
 config PIC32_EVIC
        bool
index 4eca5c763766b50824734ae9af88da824badfeb2..606efa64adff5bb58d2f0f40ab4067aa14bb2f48 100644 (file)
@@ -45,6 +45,9 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
         */
        info->scratchpad[0].ul = mc_bus_dev->icid;
        msi_info = msi_get_domain_info(msi_domain->parent);
+
+       /* Allocate at least 32 MSIs, and always as a power of 2 */
+       nvec = max_t(int, 32, roundup_pow_of_two(nvec));
        return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
 }
 
index 25a98de5cfb2831fa61d33fd3be3ef30d3f4e534..8d6d009d1d586f9271c508138bdbc55c064c8f9e 100644 (file)
@@ -66,7 +66,7 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
 {
        struct pci_dev *pdev, *alias_dev;
        struct msi_domain_info *msi_info;
-       int alias_count = 0;
+       int alias_count = 0, minnvec = 1;
 
        if (!dev_is_pci(dev))
                return -EINVAL;
@@ -86,8 +86,18 @@ static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
        /* ITS specific DeviceID, as the core ITS ignores dev. */
        info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev);
 
-       return msi_info->ops->msi_prepare(domain->parent,
-                                         dev, max(nvec, alias_count), info);
+       /*
+        * Always allocate a power of 2, and special case device 0 for
+        * broken systems where the DevID is not wired (and all devices
+        * appear as DevID 0). For that reason, we generously allocate a
+        * minimum of 32 MSIs for DevID 0. If you want more because all
+        * your devices are aliasing to DevID 0, consider fixing your HW.
+        */
+       nvec = max(nvec, alias_count);
+       if (!info->scratchpad[0].ul)
+               minnvec = 32;
+       nvec = max_t(int, minnvec, roundup_pow_of_two(nvec));
+       return msi_info->ops->msi_prepare(domain->parent, dev, nvec, info);
 }
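Worked examples of the new sizing rule (values illustrative):

/*
 * DevID != 0, nvec = 5,  alias_count = 3  ->  max(5, 3) = 5,  roundup -> 8 MSIs
 * DevID != 0, nvec = 33, alias_count = 0  ->  33,             roundup -> 64 MSIs
 * DevID == 0, nvec = 2                    ->  max_t(int, 32, roundup(2)) -> 32 MSIs
 */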
 
 static struct msi_domain_ops its_pci_msi_ops = {
index 8881a053c173edfdb11ad322b9b60f9b61ef57aa..7b8e87b493fe5defd6ebd5968f92c856c8fdcc8c 100644 (file)
@@ -73,6 +73,8 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
        /* ITS specific DeviceID, as the core ITS ignores dev. */
        info->scratchpad[0].ul = dev_id;
 
+       /* Allocate at least 32 MSIs, and always as a power of 2 */
+       nvec = max_t(int, 32, roundup_pow_of_two(nvec));
        return msi_info->ops->msi_prepare(domain->parent,
                                          dev, nvec, info);
 }
index d7842d312d3eacd7d07853caa6a529a1c8080c99..316a57530f6d108ace5345f0e4077e8f20771924 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/dma-iommu.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
 #include <linux/log2.h>
 #include <linux/mm.h>
 #include <linux/msi.h>
@@ -160,7 +162,7 @@ static struct {
 } vpe_proxy;
 
 static LIST_HEAD(its_nodes);
-static DEFINE_SPINLOCK(its_lock);
+static DEFINE_RAW_SPINLOCK(its_lock);
 static struct rdists *gic_rdists;
 static struct irq_domain *its_parent;
 
@@ -1421,112 +1423,176 @@ static struct irq_chip its_irq_chip = {
        .irq_set_vcpu_affinity  = its_irq_set_vcpu_affinity,
 };
 
+
 /*
  * How we allocate LPIs:
  *
- * The GIC has id_bits bits for interrupt identifiers. From there, we
- * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as
- * we allocate LPIs by chunks of 32, we can shift the whole thing by 5
- * bits to the right.
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
  *
- * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
  */
-#define IRQS_PER_CHUNK_SHIFT   5
-#define IRQS_PER_CHUNK         (1UL << IRQS_PER_CHUNK_SHIFT)
-#define ITS_MAX_LPI_NRBITS     16 /* 64K LPIs */
 
-static unsigned long *lpi_bitmap;
-static u32 lpi_chunks;
-static DEFINE_SPINLOCK(lpi_lock);
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+       struct list_head        entry;
+       u32                     base_id;
+       u32                     span;
+};
 
-static int its_lpi_to_chunk(int lpi)
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
 {
-       return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
+       struct lpi_range *range;
+
+       range = kzalloc(sizeof(*range), GFP_KERNEL);
+       if (range) {
+               INIT_LIST_HEAD(&range->entry);
+               range->base_id = base;
+               range->span = span;
+       }
+
+       return range;
 }
 
-static int its_chunk_to_lpi(int chunk)
+static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
 {
-       return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
+       struct lpi_range *ra, *rb;
+
+       ra = container_of(a, struct lpi_range, entry);
+       rb = container_of(b, struct lpi_range, entry);
+
+       return rb->base_id - ra->base_id;
 }
 
-static int __init its_lpi_init(u32 id_bits)
+static void merge_lpi_ranges(void)
 {
-       lpi_chunks = its_lpi_to_chunk(1UL << id_bits);
+       struct lpi_range *range, *tmp;
 
-       lpi_bitmap = kcalloc(BITS_TO_LONGS(lpi_chunks), sizeof(long),
-                            GFP_KERNEL);
-       if (!lpi_bitmap) {
-               lpi_chunks = 0;
-               return -ENOMEM;
+       list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+               if (!list_is_last(&range->entry, &lpi_range_list) &&
+                   (tmp->base_id == (range->base_id + range->span))) {
+                       tmp->base_id = range->base_id;
+                       tmp->span += range->span;
+                       list_del(&range->entry);
+                       kfree(range);
+               }
        }
+}
 
-       pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks);
-       return 0;
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
+{
+       struct lpi_range *range, *tmp;
+       int err = -ENOSPC;
+
+       mutex_lock(&lpi_range_lock);
+
+       list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+               if (range->span >= nr_lpis) {
+                       *base = range->base_id;
+                       range->base_id += nr_lpis;
+                       range->span -= nr_lpis;
+
+                       if (range->span == 0) {
+                               list_del(&range->entry);
+                               kfree(range);
+                       }
+
+                       err = 0;
+                       break;
+               }
+       }
+
+       mutex_unlock(&lpi_range_lock);
+
+       pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+       return err;
 }
 
-static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
+static int free_lpi_range(u32 base, u32 nr_lpis)
 {
-       unsigned long *bitmap = NULL;
-       int chunk_id;
-       int nr_chunks;
-       int i;
+       struct lpi_range *new;
+       int err = 0;
+
+       mutex_lock(&lpi_range_lock);
+
+       new = mk_lpi_range(base, nr_lpis);
+       if (!new) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       list_add(&new->entry, &lpi_range_list);
+       list_sort(NULL, &lpi_range_list, lpi_range_cmp);
+       merge_lpi_ranges();
+out:
+       mutex_unlock(&lpi_range_lock);
+       return err;
+}
+
+static int __init its_lpi_init(u32 id_bits)
+{
+       u32 lpis = (1UL << id_bits) - 8192;
+       u32 numlpis;
+       int err;
+
+       numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+       if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+               lpis = numlpis;
+               pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+                       lpis);
+       }
 
-       nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK);
+       /*
+        * Initializing the allocator is just the same as freeing the
+        * full range of LPIs.
+        */
+       err = free_lpi_range(8192, lpis);
+       pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
+       return err;
+}
 
-       spin_lock(&lpi_lock);
+static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
+{
+       unsigned long *bitmap = NULL;
+       int err = 0;
 
        do {
-               chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks,
-                                                     0, nr_chunks, 0);
-               if (chunk_id < lpi_chunks)
+               err = alloc_lpi_range(nr_irqs, base);
+               if (!err)
                        break;
 
-               nr_chunks--;
-       } while (nr_chunks > 0);
+               nr_irqs /= 2;
+       } while (nr_irqs > 0);
 
-       if (!nr_chunks)
+       if (err)
                goto out;
 
-       bitmap = kcalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK),
-                        sizeof(long),
-                        GFP_ATOMIC);
+       bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
        if (!bitmap)
                goto out;
 
-       for (i = 0; i < nr_chunks; i++)
-               set_bit(chunk_id + i, lpi_bitmap);
-
-       *base = its_chunk_to_lpi(chunk_id);
-       *nr_ids = nr_chunks * IRQS_PER_CHUNK;
+       *nr_ids = nr_irqs;
 
 out:
-       spin_unlock(&lpi_lock);
-
        if (!bitmap)
                *base = *nr_ids = 0;
 
        return bitmap;
 }
 
-static void its_lpi_free_chunks(unsigned long *bitmap, int base, int nr_ids)
+static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
 {
-       int lpi;
-
-       spin_lock(&lpi_lock);
-
-       for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) {
-               int chunk = its_lpi_to_chunk(lpi);
-
-               BUG_ON(chunk > lpi_chunks);
-               if (test_bit(chunk, lpi_bitmap)) {
-                       clear_bit(chunk, lpi_bitmap);
-               } else {
-                       pr_err("Bad LPI chunk %d\n", chunk);
-               }
-       }
-
-       spin_unlock(&lpi_lock);
-
+       WARN_ON(free_lpi_range(base, nr_ids));
        kfree(bitmap);
 }
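
Note: the hunks above replace the old fixed-size chunk bitmap with a first-fit range allocator. alloc_lpi_range() scans a sorted free list for the first range wide enough, carves the request off its front, and unlinks the range once its span reaches zero; free_lpi_range() inserts the returned range, re-sorts the list, and merges adjacent neighbours. A minimal user-space sketch of the carve step, with illustrative names (struct range, alloc_range) rather than the kernel's:

#include <stdio.h>
#include <stdlib.h>

struct range {
	unsigned int base, span;
	struct range *next;
};

/* Carve nr IDs off the front of the first range that can hold them. */
static int alloc_range(struct range **head, unsigned int nr,
		       unsigned int *base)
{
	struct range **pp, *r;

	for (pp = head; (r = *pp) != NULL; pp = &r->next) {
		if (r->span < nr)
			continue;
		*base = r->base;
		r->base += nr;
		r->span -= nr;
		if (r->span == 0) {	/* range fully consumed: unlink it */
			*pp = r->next;
			free(r);
		}
		return 0;
	}
	return -1;			/* no range wide enough */
}

int main(void)
{
	struct range *head = malloc(sizeof(*head));
	unsigned int base;

	if (!head)
		return 1;
	head->base = 8192;		/* LPI INTIDs start at 8192 */
	head->span = 1024;
	head->next = NULL;
	if (!alloc_range(&head, 32, &base))
		printf("allocated [%u:%u]\n", base, base + 31);
	return 0;
}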
 
@@ -1559,7 +1625,7 @@ static int __init its_alloc_lpi_tables(void)
 {
        phys_addr_t paddr;
 
-       lpi_id_bits = min_t(u32, gic_rdists->id_bits, ITS_MAX_LPI_NRBITS);
+       lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
        gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
        if (!gic_rdists->prop_page) {
                pr_err("Failed to allocate PROPBASE\n");
@@ -1997,12 +2063,12 @@ static void its_cpu_init_collections(void)
 {
        struct its_node *its;
 
-       spin_lock(&its_lock);
+       raw_spin_lock(&its_lock);
 
        list_for_each_entry(its, &its_nodes, entry)
                its_cpu_init_collection(its);
 
-       spin_unlock(&its_lock);
+       raw_spin_unlock(&its_lock);
 }
 
 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
@@ -2134,17 +2200,20 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
        if (!its_alloc_device_table(its, dev_id))
                return NULL;
 
+       if (WARN_ON(!is_power_of_2(nvecs)))
+               nvecs = roundup_pow_of_two(nvecs);
+
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        /*
-        * We allocate at least one chunk worth of LPIs bet device,
-        * and thus that many ITEs. The device may require less though.
+        * Even if the device wants a single LPI, the ITT must be
+        * sized as a power of two (and you need at least one bit...).
         */
-       nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
+       nr_ites = max(2, nvecs);
        sz = nr_ites * its->ite_size;
        sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
        itt = kzalloc(sz, GFP_KERNEL);
        if (alloc_lpis) {
-               lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
+               lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
                if (lpi_map)
                        col_map = kcalloc(nr_lpis, sizeof(*col_map),
                                          GFP_KERNEL);
@@ -2379,9 +2448,9 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
        /* If all interrupts have been freed, start mopping the floor */
        if (bitmap_empty(its_dev->event_map.lpi_map,
                         its_dev->event_map.nr_lpis)) {
-               its_lpi_free_chunks(its_dev->event_map.lpi_map,
-                                   its_dev->event_map.lpi_base,
-                                   its_dev->event_map.nr_lpis);
+               its_lpi_free(its_dev->event_map.lpi_map,
+                            its_dev->event_map.lpi_base,
+                            its_dev->event_map.nr_lpis);
                kfree(its_dev->event_map.col_map);
 
                /* Unmap device/itt */
@@ -2780,7 +2849,7 @@ static void its_vpe_irq_domain_free(struct irq_domain *domain,
        }
 
        if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
-               its_lpi_free_chunks(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
+               its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
                its_free_prop_table(vm->vprop_page);
        }
 }
@@ -2795,18 +2864,18 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
 
        BUG_ON(!vm);
 
-       bitmap = its_lpi_alloc_chunks(nr_irqs, &base, &nr_ids);
+       bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
        if (!bitmap)
                return -ENOMEM;
 
        if (nr_ids < nr_irqs) {
-               its_lpi_free_chunks(bitmap, base, nr_ids);
+               its_lpi_free(bitmap, base, nr_ids);
                return -ENOMEM;
        }
 
        vprop_page = its_allocate_prop_table(GFP_KERNEL);
        if (!vprop_page) {
-               its_lpi_free_chunks(bitmap, base, nr_ids);
+               its_lpi_free(bitmap, base, nr_ids);
                return -ENOMEM;
        }
 
@@ -2833,7 +2902,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
                if (i > 0)
                        its_vpe_irq_domain_free(domain, virq, i - 1);
 
-               its_lpi_free_chunks(bitmap, base, nr_ids);
+               its_lpi_free(bitmap, base, nr_ids);
                its_free_prop_table(vprop_page);
        }
 
@@ -3070,7 +3139,7 @@ static int its_save_disable(void)
        struct its_node *its;
        int err = 0;
 
-       spin_lock(&its_lock);
+       raw_spin_lock(&its_lock);
        list_for_each_entry(its, &its_nodes, entry) {
                void __iomem *base;
 
@@ -3102,7 +3171,7 @@ static int its_save_disable(void)
                        writel_relaxed(its->ctlr_save, base + GITS_CTLR);
                }
        }
-       spin_unlock(&its_lock);
+       raw_spin_unlock(&its_lock);
 
        return err;
 }
@@ -3112,7 +3181,7 @@ static void its_restore_enable(void)
        struct its_node *its;
        int ret;
 
-       spin_lock(&its_lock);
+       raw_spin_lock(&its_lock);
        list_for_each_entry(its, &its_nodes, entry) {
                void __iomem *base;
                int i;
@@ -3164,7 +3233,7 @@ static void its_restore_enable(void)
                    GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
                        its_cpu_init_collection(its);
        }
-       spin_unlock(&its_lock);
+       raw_spin_unlock(&its_lock);
 }
 
 static struct syscore_ops its_syscore_ops = {
@@ -3398,9 +3467,9 @@ static int __init its_probe_one(struct resource *res,
        if (err)
                goto out_free_tables;
 
-       spin_lock(&its_lock);
+       raw_spin_lock(&its_lock);
        list_add(&its->entry, &its_nodes);
-       spin_unlock(&its_lock);
+       raw_spin_unlock(&its_lock);
 
        return 0;
 
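Note: the spin_lock -> raw_spin_lock conversions for its_lock matter on PREEMPT_RT, where an ordinary spinlock_t becomes a sleeping lock: this lock is taken from CPU-hotplug and early-boot paths that may not sleep, so it must be a raw_spinlock_t, which always busy-waits. A hedged illustration of the pattern (the lock name is illustrative; the APIs are the real kernel ones):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);	/* name is illustrative */

static void example_critical_section(void)
{
	/* A raw_spinlock_t stays a busy-wait lock even on PREEMPT_RT,
	 * so this section is safe from contexts that cannot sleep. */
	raw_spin_lock(&example_lock);
	/* ... touch state shared with atomic context ... */
	raw_spin_unlock(&example_lock);
}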
index 76ea56d779a15825654cdf13573ac263fc14d717..e214181b77b7720568c072c0b0cb07bc81766565 100644 (file)
@@ -877,7 +877,7 @@ static struct irq_chip gic_eoimode1_chip = {
        .flags                  = IRQCHIP_SET_TYPE_MASKED,
 };
 
-#define GIC_ID_NR              (1U << gic_data.rdists.id_bits)
+#define GIC_ID_NR      (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
 
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hw)
@@ -1091,7 +1091,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
         * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
         */
        typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
-       gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+       gic_data.rdists.gicd_typer = typer;
        gic_irqs = GICD_TYPER_IRQS(typer);
        if (gic_irqs > 1020)
                gic_irqs = 1020;
index fc5953dea509af10b38abf783dac4fbd83efdaaa..2ff08986b536195ca97eab893d31a594abc2490b 100644 (file)
@@ -165,6 +165,7 @@ static int __init intc_1chip_of_init(struct device_node *node,
        return ingenic_intc_of_init(node, 1);
 }
 IRQCHIP_DECLARE(jz4740_intc, "ingenic,jz4740-intc", intc_1chip_of_init);
+IRQCHIP_DECLARE(jz4725b_intc, "ingenic,jz4725b-intc", intc_1chip_of_init);
 
 static int __init intc_2chip_of_init(struct device_node *node,
        struct device_node *parent)
index 3a7e8905a97e93e42fa9b7646a3ada306c18c4be..3df527fcf4e154b44bed2cc3c65787fef503db6c 100644 (file)
@@ -159,6 +159,7 @@ static const struct stm32_exti_bank *stm32mp1_exti_banks[] = {
 };
 
 static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
+       { .exti = 0, .irq_parent = 6 },
        { .exti = 1, .irq_parent = 7 },
        { .exti = 2, .irq_parent = 8 },
        { .exti = 3, .irq_parent = 9 },
index b6d735bf80117e27496e54d29f23ad32c215c2ac..342ae08ec3c29832ae5be0da8d93e59d6441cab1 100644 (file)
@@ -153,9 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
 static void dayna_block_output(struct net_device *dev, int count,
                               const unsigned char *buf, int start_page);
 
-#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
-#define memcpy_toio(a, b, c)   memcpy((void *)(a), (b), (c))
-
 #define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
 
 /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
@@ -239,7 +236,7 @@ static enum mac8390_access mac8390_testio(unsigned long membase)
        unsigned long outdata = 0xA5A0B5B0;
        unsigned long indata =  0x00000000;
        /* Try writing 32 bits */
-       memcpy_toio(membase, &outdata, 4);
+       memcpy_toio((void __iomem *)membase, &outdata, 4);
        /* Now compare them */
        if (memcmp_withio(&outdata, membase, 4) == 0)
                return ACCESS_32;
@@ -711,7 +708,7 @@ static void sane_get_8390_hdr(struct net_device *dev,
                              struct e8390_pkt_hdr *hdr, int ring_page)
 {
        unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
-       memcpy_fromio(hdr, dev->mem_start + hdr_start, 4);
+       memcpy_fromio(hdr, (void __iomem *)dev->mem_start + hdr_start, 4);
        /* Fix endianness */
        hdr->count = swab16(hdr->count);
 }
@@ -725,13 +722,16 @@ static void sane_block_input(struct net_device *dev, int count,
        if (xfer_start + count > ei_status.rmem_end) {
                /* We must wrap the input move. */
                int semi_count = ei_status.rmem_end - xfer_start;
-               memcpy_fromio(skb->data, dev->mem_start + xfer_base,
+               memcpy_fromio(skb->data,
+                             (void __iomem *)dev->mem_start + xfer_base,
                              semi_count);
                count -= semi_count;
-               memcpy_fromio(skb->data + semi_count, ei_status.rmem_start,
-                             count);
+               memcpy_fromio(skb->data + semi_count,
+                             (void __iomem *)ei_status.rmem_start, count);
        } else {
-               memcpy_fromio(skb->data, dev->mem_start + xfer_base, count);
+               memcpy_fromio(skb->data,
+                             (void __iomem *)dev->mem_start + xfer_base,
+                             count);
        }
 }
 
@@ -740,7 +740,7 @@ static void sane_block_output(struct net_device *dev, int count,
 {
        long shmem = (start_page - WD_START_PG)<<8;
 
-       memcpy_toio(dev->mem_start + shmem, buf, count);
+       memcpy_toio((void __iomem *)dev->mem_start + shmem, buf, count);
 }
 
 /* dayna block input/output */
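
Note: this driver previously redefined memcpy_toio()/memcpy_fromio() as plain memcpy() over integer addresses; the hunks above delete those macros and call the real MMIO helpers, which require __iomem-annotated pointers, hence the casts from the unsigned long mem_start. A sketch of the resulting accessor pattern (the wrapper and its arguments are illustrative):

#include <linux/io.h>

static void read_shared_mem(unsigned long mem_start, void *dst,
			    unsigned long offset, size_t len)
{
	/* The window base is kept as an unsigned long, so cast it to
	 * void __iomem * before the MMIO copy helper will accept it. */
	memcpy_fromio(dst, (void __iomem *)mem_start + offset, len);
}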
index 956860a697970ab427be0357d8541e929a85c489..3bdab972420bce0e846b707136268fe155dc3290 100644 (file)
@@ -762,7 +762,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
 
        hw_atl_rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
        hw_atl_rpfl2multicast_flr_en_set(self,
-                                        IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+                                        IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
 
        hw_atl_rpfl2_accept_all_mc_packets_set(self,
                                               IS_FILTER_ENABLED(IFF_ALLMULTI));
index 5d08d2aeb1722cf16976d115634ed3dd283a5b8b..e337da6ba2a4c16973f4728c9fd9564da162942c 100644 (file)
@@ -1083,6 +1083,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
        lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
        lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
                              GFP_KERNEL);
+       if (!lmac->dmacs)
+               return -ENOMEM;
 
        /* Enable lmac */
        bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
index 00fc5f1afb1d0024b6477d6c63408afcd9858ad9..7dddb9e748b8146fbcbb441153ab16669305599e 100644 (file)
@@ -1038,10 +1038,8 @@ static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
-       req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
-               f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
-       req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
-               f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+       memcpy(&req->local_ip, f->fs.val.lip, 4);
+       memcpy(&req->peer_ip, f->fs.val.fip, 4);
        req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                DELACK_V(f->fs.hitcnts) |
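
Note: the memcpy() replacement above is an endianness fix. lip[]/fip[] already hold the address in network byte order, and assembling the bytes with shifts builds an integer whose in-memory layout matches the wire only on little-endian hosts. The distinction in a small standalone sketch:

#include <stdint.h>
#include <string.h>

/* b[] holds the address in network (big-endian wire) order. */
static uint32_t ip_wire_bits(const uint8_t b[4])
{
	uint32_t ip;

	/* Broken on big-endian hosts:
	 *   ip = b[0] | b[1] << 8 | b[2] << 16 | b[3] << 24;
	 * builds a host integer whose memory layout matches the wire
	 * only when the CPU is little-endian. */
	memcpy(&ip, b, 4);	/* byte layout preserved on any CPU */
	return ip;
}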
index eb9eb7aa953ae5560db4746569a02f177bcddf13..405236cf0b0434c4d0e5a9c67264156702f0b6d9 100644 (file)
@@ -858,8 +858,6 @@ struct mlx5e_profile {
                mlx5e_fp_handle_rx_cqe handle_rx_cqe;
                mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
        } rx_handlers;
-       void    (*netdev_registered_init)(struct mlx5e_priv *priv);
-       void    (*netdev_registered_remove)(struct mlx5e_priv *priv);
        int     max_tc;
 };
 
index e33afa8d241798a93df91f8df8b2625bfb912a20..722998d685646ebd2502dd25479a5d51127ac35c 100644 (file)
@@ -443,16 +443,12 @@ static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
        bool is_new;
        int err;
 
-       if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
-               return -EINVAL;
-
-       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
-               return -EINVAL;
-
-       if (!MLX5_DSCP_SUPPORTED(priv->mdev))
-               return -EINVAL;
+       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
+           !MLX5_DSCP_SUPPORTED(priv->mdev))
+               return -EOPNOTSUPP;
 
-       if (app->protocol >= MLX5E_MAX_DSCP)
+       if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+           (app->protocol >= MLX5E_MAX_DSCP))
                return -EINVAL;
 
        /* Save the old entry info */
@@ -500,16 +496,12 @@ static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
        struct mlx5e_priv *priv = netdev_priv(dev);
        int err;
 
-       if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
-               return -EINVAL;
-
-       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
-               return -EINVAL;
-
-       if (!MLX5_DSCP_SUPPORTED(priv->mdev))
-               return -EINVAL;
+       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager) ||
+           !MLX5_DSCP_SUPPORTED(priv->mdev))
+               return -EOPNOTSUPP;
 
-       if (app->protocol >= MLX5E_MAX_DSCP)
+       if ((app->selector != IEEE_8021QAZ_APP_SEL_DSCP) ||
+           (app->protocol >= MLX5E_MAX_DSCP))
                return -EINVAL;
 
        /* Skip if no dscp app entry */
@@ -1146,7 +1138,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
 {
        int err;
 
-       err =  mlx5_set_trust_state(priv->mdev, trust_state);
+       err = mlx5_set_trust_state(priv->mdev, trust_state);
        if (err)
                return err;
        priv->dcbx_dp.trust_state = trust_state;
index 3a2c4e548226e2e66e867ffbb9b6370ae767be78..dfbcda0d0e0808f859abc826162ba027cc937f52 100644 (file)
@@ -1970,15 +1970,15 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
 {
        struct mlx5_core_dev *fmdev, *pmdev;
-       u16 func_id, peer_id;
+       u64 fsystem_guid, psystem_guid;
 
        fmdev = priv->mdev;
        pmdev = peer_priv->mdev;
 
-       func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
-       peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
+       mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
+       mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);
 
-       return (func_id == peer_id);
+       return (fsystem_guid == psystem_guid);
 }
 
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
index 358edab9e72eeee18b9c17d74e66f2de92d5cc87..3e34cb8ac1d3e66f58196c67595a7baa3624e597 100644 (file)
@@ -2086,14 +2086,16 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
                int i;
 
                for (i = 0; i < cpsw->data.slaves; i++) {
-                       if (vid == cpsw->slaves[i].port_vlan)
-                               return -EINVAL;
+                       if (vid == cpsw->slaves[i].port_vlan) {
+                               ret = -EINVAL;
+                               goto err;
+                       }
                }
        }
 
        dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
        ret = cpsw_add_vlan_ale_entry(priv, vid);
-
+err:
        pm_runtime_put(cpsw->dev);
        return ret;
 }
@@ -2119,22 +2121,17 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 
                for (i = 0; i < cpsw->data.slaves; i++) {
                        if (vid == cpsw->slaves[i].port_vlan)
-                               return -EINVAL;
+                               goto err;
                }
        }
 
        dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
        ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
-       if (ret != 0)
-               return ret;
-
-       ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
-                                HOST_PORT_NUM, ALE_VLAN, vid);
-       if (ret != 0)
-               return ret;
-
-       ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
-                                0, ALE_VLAN, vid);
+       ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+                                 HOST_PORT_NUM, ALE_VLAN, vid);
+       ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+                                 0, ALE_VLAN, vid);
+err:
        pm_runtime_put(cpsw->dev);
        return ret;
 }
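
Note: one caveat with the ret |= teardown above: OR-combining two different negative errnos (say -EINVAL | -ENOENT) yields a value that is nonzero but not a meaningful error code, so callers can only treat the result as a boolean failure flag. A sketch of an alternative that keeps the first real error (illustrative helper, not part of this patch):

static int combine_errors(int ret, int err)
{
	return ret ? ret : err;		/* keep the earliest failure */
}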
index 93dc05c194d381b51336e95d552180a6997a5084..5766225a4ce117957cdd77fe2a9d4ad4abc8f150 100644 (file)
@@ -394,7 +394,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
 
        idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
        if (idx < 0)
-               return -EINVAL;
+               return -ENOENT;
 
        cpsw_ale_read(ale, idx, ale_entry);
 
index 2d8812dd1534ae5ad2a9b6a6725861b2da3473d9..9dd2ca62d84af79b82fb59111d471b09658cfe57 100644 (file)
@@ -894,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                                  struct sk_buff *skb,
                                  struct sk_buff_head *list)
 {
-       struct skb_shared_info *shinfo = skb_shinfo(skb);
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *nskb;
 
@@ -903,15 +902,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                        RING_GET_RESPONSE(&queue->rx, ++cons);
                skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-               if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+               if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
                        unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
                        BUG_ON(pull_to <= skb_headlen(skb));
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
-               BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+               BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 
-               skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                               skb_frag_page(nfrag),
                                rx->offset, rx->status, PAGE_SIZE);
 
                skb_shinfo(nskb)->nr_frags = 0;
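
Note: the xennet_fill_frags() change drops the cached shinfo pointer because __pskb_pull_tail() may reallocate the skb's data area, leaving any previously derived pointer dangling; re-evaluating skb_shinfo(skb) after each call is the fix. The same rule in a generic user-space sketch (error handling elided):

#include <stdlib.h>

struct buf { char *data; size_t len; };

static void grow(struct buf *b, size_t extra)
{
	b->data = realloc(b->data, b->len + extra);	/* may move */
	b->len += extra;
}

static void safe_use(struct buf *b)
{
	/* WRONG: char *p = b->data; grow(b, 64); p[0] = 1;
	 * p may dangle once grow() reallocates the buffer. */
	grow(b, 64);
	b->data[0] = 1;		/* re-read the pointer after the call */
}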
index a59b6c4bb5b89444139ae3907deb73c9947147d0..ad3d17c42e23ccef936430210b36e9e649034e01 100644 (file)
@@ -5,6 +5,7 @@
 // Copyright (C) 2017 Finn Thain
 
 #include <linux/device.h>
+#include <linux/dma-mapping.h>
 #include <linux/list.h>
 #include <linux/nubus.h>
 #include <linux/seq_file.h>
@@ -93,6 +94,8 @@ int nubus_device_register(struct nubus_board *board)
        board->dev.release = nubus_device_release;
        board->dev.bus = &nubus_bus_type;
        dev_set_name(&board->dev, "slot.%X", board->slot);
+       board->dev.dma_mask = &board->dev.coherent_dma_mask;
+       dma_set_mask(&board->dev, DMA_BIT_MASK(32));
        return device_register(&board->dev);
 }
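
Note: pointing dev.dma_mask at coherent_dma_mask and setting a 32-bit mask is the usual pattern for a bus driver whose child devices carry no mask of their own; without it the DMA API rejects mapping requests outright. A hedged sketch of the pattern (the wrapper is illustrative; the helpers are the real kernel API):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int register_child(struct device *dev)
{
	dev->dma_mask = &dev->coherent_dma_mask;  /* share one mask word */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;			  /* mask not supported */
	return device_register(dev);
}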
 
index ea23c8dffc252af3d0209167e99118c98575d848..ffec695e0bfb2309c8c2b658c42f964dd5b5a996 100644 (file)
@@ -754,9 +754,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
        case ELS_LOGO:
                if (fip->mode == FIP_MODE_VN2VN) {
                        if (fip->state != FIP_ST_VNMP_UP)
-                               return -EINVAL;
+                               goto drop;
                        if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
-                               return -EINVAL;
+                               goto drop;
                } else {
                        if (fip->state != FIP_ST_ENABLED)
                                return 0;
@@ -799,9 +799,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
        fip->send(fip, skb);
        return -EINPROGRESS;
 drop:
-       kfree_skb(skb);
        LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
                        op, ntoh24(fh->fh_d_id));
+       kfree_skb(skb);
        return -EINVAL;
 }
 EXPORT_SYMBOL(fcoe_ctlr_els_send);
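
Note: the reordering in the drop: path above fixes a use-after-free: fh points into the skb's data, so the debug print must run before kfree_skb() releases that memory. The general rule in miniature:

#include <stdio.h>
#include <stdlib.h>

static void report_and_drop(char *buf)
{
	printf("dropping %c\n", buf[0]);	/* use the buffer first... */
	free(buf);				/* ...then release it */
}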
index 31d31aad3de1d3fd0f2ff58d2141cabddec474bf..89b1f1af2fd456c38e45e0a905af9a76385c1d89 100644 (file)
@@ -2164,6 +2164,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
                FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
                             fc_rport_state(rdata));
 
+               rdata->flags &= ~FC_RP_STARTED;
                fc_rport_enter_delete(rdata, RPORT_EV_STOP);
                mutex_unlock(&rdata->rp_mutex);
                kref_put(&rdata->kref, fc_rport_destroy);
index 569392d0d4c9e478b8e7d1fc745cb7e741005ff3..e44c91edf92d0fae3e73dd38f2b65961a07e5ed9 100644 (file)
@@ -3343,11 +3343,10 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
                                        spinlock_t *writeq_lock)
 {
        unsigned long flags;
-       __u64 data_out = b;
 
        spin_lock_irqsave(writeq_lock, flags);
-       writel((u32)(data_out), addr);
-       writel((u32)(data_out >> 32), (addr + 4));
+       __raw_writel((u32)(b), addr);
+       __raw_writel((u32)(b >> 32), (addr + 4));
        mmiowb();
        spin_unlock_irqrestore(writeq_lock, flags);
 }
@@ -3367,7 +3366,8 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
 static inline void
 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
 {
-       writeq(b, addr);
+       __raw_writeq(b, addr);
+       mmiowb();
 }
 #else
 static inline void
@@ -5268,7 +5268,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
 
        /* send message 32-bits at a time */
        for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
-               writel((u32)(request[i]), &ioc->chip->Doorbell);
+               writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
                if ((_base_wait_for_doorbell_ack(ioc, 5)))
                        failed = 1;
        }
@@ -5289,7 +5289,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
        }
 
        /* read the first two 16-bits, it gives the total length of the reply */
-       reply[0] = (u16)(readl(&ioc->chip->Doorbell)
+       reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
            & MPI2_DOORBELL_DATA_MASK);
        writel(0, &ioc->chip->HostInterruptStatus);
        if ((_base_wait_for_doorbell_int(ioc, 5))) {
@@ -5298,7 +5298,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
                        ioc->name, __LINE__);
                return -EFAULT;
        }
-       reply[1] = (u16)(readl(&ioc->chip->Doorbell)
+       reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
            & MPI2_DOORBELL_DATA_MASK);
        writel(0, &ioc->chip->HostInterruptStatus);
 
@@ -5312,7 +5312,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
                if (i >=  reply_bytes/2) /* overflow case */
                        readl(&ioc->chip->Doorbell);
                else
-                       reply[i] = (u16)(readl(&ioc->chip->Doorbell)
+                       reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
                            & MPI2_DOORBELL_DATA_MASK);
                writel(0, &ioc->chip->HostInterruptStatus);
        }
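
Note: two things change in this file. The doorbell writes switch from writel()/writeq() to the __raw_ accessors, which store the value as-is, skipping the implicit swap to little-endian (presumably for big-endian correctness) but also carrying no ordering guarantee, so an explicit mmiowb() keeps the MMIO write ordered ahead of the lock release; and the doorbell reads are swapped back to CPU endianness with le16_to_cpu()/cpu_to_le32(). A condensed sketch of the write side (illustrative wrapper over the real accessors):

#include <linux/io.h>
#include <linux/spinlock.h>

static void doorbell_writeq(u64 val, void __iomem *addr, spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	__raw_writeq(val, addr);	/* no implicit swap, no barrier */
	mmiowb();			/* order MMIO before the unlock */
	spin_unlock_irqrestore(lock, flags);
}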
index 091ec1207beae23040f17c0c6b1d32d8ca30192c..cff83b9457f7fe95bb39784996fedc06b21969ee 100644 (file)
@@ -888,7 +888,7 @@ static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
        ipv6_en = !!(block->generic.ctrl_flags &
                     NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
 
-       snprintf(tgt->iscsi_name, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+       snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
                 block->target[index].target_name.byte);
 
        tgt->ipv6_en = ipv6_en;
index a91cca52b5d5be5a262270ae6b0cbefe5a72a8bf..dd93a22fe84368fc827441983bcb64a0807be3ce 100644 (file)
@@ -2130,34 +2130,11 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
        req_cnt = 1;
        handle = 0;
 
-       if (!sp)
-               goto skip_cmd_array;
-
-       /* Check for room in outstanding command list. */
-       handle = req->current_outstanding_cmd;
-       for (index = 1; index < req->num_outstanding_cmds; index++) {
-               handle++;
-               if (handle == req->num_outstanding_cmds)
-                       handle = 1;
-               if (!req->outstanding_cmds[handle])
-                       break;
-       }
-       if (index == req->num_outstanding_cmds) {
-               ql_log(ql_log_warn, vha, 0x700b,
-                   "No room on outstanding cmd array.\n");
-               goto queuing_error;
-       }
-
-       /* Prep command array. */
-       req->current_outstanding_cmd = handle;
-       req->outstanding_cmds[handle] = sp;
-       sp->handle = handle;
-
-       /* Adjust entry-counts as needed. */
-       if (sp->type != SRB_SCSI_CMD)
+       if (sp && (sp->type != SRB_SCSI_CMD)) {
+               /* Adjust entry-counts as needed. */
                req_cnt = sp->iocbs;
+       }
 
-skip_cmd_array:
        /* Check for room on request queue. */
        if (req->cnt < req_cnt + 2) {
                if (qpair->use_shadow_reg)
@@ -2183,6 +2160,28 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
        if (req->cnt < req_cnt + 2)
                goto queuing_error;
 
+       if (sp) {
+               /* Check for room in outstanding command list. */
+               handle = req->current_outstanding_cmd;
+               for (index = 1; index < req->num_outstanding_cmds; index++) {
+                       handle++;
+                       if (handle == req->num_outstanding_cmds)
+                               handle = 1;
+                       if (!req->outstanding_cmds[handle])
+                               break;
+               }
+               if (index == req->num_outstanding_cmds) {
+                       ql_log(ql_log_warn, vha, 0x700b,
+                           "No room on outstanding cmd array.\n");
+                       goto queuing_error;
+               }
+
+               /* Prep command array. */
+               req->current_outstanding_cmd = handle;
+               req->outstanding_cmds[handle] = sp;
+               sp->handle = handle;
+       }
+
        /* Prep packet */
        req->cnt -= req_cnt;
        pkt = req->ring_ptr;
@@ -2195,6 +2194,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
                pkt->handle = handle;
        }
 
+       return pkt;
+
 queuing_error:
        qpair->tgt_counters.num_alloc_iocb_failed++;
        return pkt;
index 3f3cb72e0c0cdab6a76ea8c4057229f76924899c..d0389b20574d0f778e2bfd95b07e80458970dbd5 100644 (file)
@@ -523,18 +523,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
 static int sr_block_open(struct block_device *bdev, fmode_t mode)
 {
        struct scsi_cd *cd;
+       struct scsi_device *sdev;
        int ret = -ENXIO;
 
+       cd = scsi_cd_get(bdev->bd_disk);
+       if (!cd)
+               goto out;
+
+       sdev = cd->device;
+       scsi_autopm_get_device(sdev);
        check_disk_change(bdev);
 
        mutex_lock(&sr_mutex);
-       cd = scsi_cd_get(bdev->bd_disk);
-       if (cd) {
-               ret = cdrom_open(&cd->cdi, bdev, mode);
-               if (ret)
-                       scsi_cd_put(cd);
-       }
+       ret = cdrom_open(&cd->cdi, bdev, mode);
        mutex_unlock(&sr_mutex);
+
+       scsi_autopm_put_device(sdev);
+       if (ret)
+               scsi_cd_put(cd);
+
+out:
        return ret;
 }
 
@@ -562,6 +570,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        if (ret)
                goto out;
 
+       scsi_autopm_get_device(sdev);
+
        /*
         * Send SCSI addressing ioctls directly to mid level, send other
         * ioctls to cdrom/block level.
@@ -570,15 +580,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        case SCSI_IOCTL_GET_IDLUN:
        case SCSI_IOCTL_GET_BUS_NUMBER:
                ret = scsi_ioctl(sdev, cmd, argp);
-               goto out;
+               goto put;
        }
 
        ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
        if (ret != -ENOSYS)
-               goto out;
+               goto put;
 
        ret = scsi_ioctl(sdev, cmd, argp);
 
+put:
+       scsi_autopm_put_device(sdev);
+
 out:
        mutex_unlock(&sr_mutex);
        return ret;
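
Note: the sr changes bracket both the open and ioctl paths with scsi_autopm_get_device()/scsi_autopm_put_device(), taking a runtime-PM reference so the drive cannot autosuspend mid-operation and dropping it on every exit path (note the new put: label). The pattern in isolation (issue_command() is an illustrative stand-in):

#include <scsi/scsi_device.h>

static int issue_command(struct scsi_device *sdev);	/* illustrative */

static int do_device_op(struct scsi_device *sdev)
{
	int ret;

	scsi_autopm_get_device(sdev);	/* resume and pin the device */
	ret = issue_command(sdev);
	scsi_autopm_put_device(sdev);	/* allow autosuspend again */
	return ret;
}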
index 777e5f1e52d10968d5f23e0e316db05b8209511d..0cd947f78b5bfdfa013ce4909afdc8f894b6146b 100644 (file)
@@ -561,9 +561,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
            (btstat == BTSTAT_SUCCESS ||
             btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
             btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
-               cmd->result = (DID_OK << 16) | sdstat;
-               if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
-                       cmd->result |= (DRIVER_SENSE << 24);
+               if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
+                       cmd->result = (DID_RESET << 16);
+               } else {
+                       cmd->result = (DID_OK << 16) | sdstat;
+                       if (sdstat == SAM_STAT_CHECK_CONDITION &&
+                           cmd->sense_buffer)
+                               cmd->result |= (DRIVER_SENSE << 24);
+               }
        } else
                switch (btstat) {
                case BTSTAT_SUCCESS:
index a502f1af4a213607adec4aa28fa6ae8eb9ce0389..ed3114556fdaf96eb130832c54a80cc4c41973b7 100644 (file)
@@ -1560,9 +1560,12 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
        d->iotlb = niotlb;
 
        for (i = 0; i < d->nvqs; ++i) {
-               mutex_lock(&d->vqs[i]->mutex);
-               d->vqs[i]->iotlb = niotlb;
-               mutex_unlock(&d->vqs[i]->mutex);
+               struct vhost_virtqueue *vq = d->vqs[i];
+
+               mutex_lock(&vq->mutex);
+               vq->iotlb = niotlb;
+               __vhost_vq_meta_reset(vq);
+               mutex_unlock(&vq->mutex);
        }
 
        vhost_umem_clean(oiotlb);
index 46a4484e3da79196b32c06cfbd3d0f092d03147c..c6f78d27947b9dae5f00b148c325198956910095 100644 (file)
@@ -20,7 +20,7 @@
 #include <drm/drm_connector.h>  /* For DRM_MODE_PANEL_ORIENTATION_* */
 
 static bool request_mem_succeeded = false;
-static bool nowc = false;
+static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
 
 static struct fb_var_screeninfo efifb_defined = {
        .activate               = FB_ACTIVATE_NOW,
@@ -68,8 +68,12 @@ static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
 
 static void efifb_destroy(struct fb_info *info)
 {
-       if (info->screen_base)
-               iounmap(info->screen_base);
+       if (info->screen_base) {
+               if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+                       iounmap(info->screen_base);
+               else
+                       memunmap(info->screen_base);
+       }
        if (request_mem_succeeded)
                release_mem_region(info->apertures->ranges[0].base,
                                   info->apertures->ranges[0].size);
@@ -104,7 +108,7 @@ static int efifb_setup(char *options)
                        else if (!strncmp(this_opt, "width:", 6))
                                screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
                        else if (!strcmp(this_opt, "nowc"))
-                               nowc = true;
+                               mem_flags &= ~EFI_MEMORY_WC;
                }
        }
 
@@ -164,6 +168,7 @@ static int efifb_probe(struct platform_device *dev)
        unsigned int size_remap;
        unsigned int size_total;
        char *option = NULL;
+       efi_memory_desc_t md;
 
        if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
                return -ENODEV;
@@ -272,12 +277,35 @@ static int efifb_probe(struct platform_device *dev)
        info->apertures->ranges[0].base = efifb_fix.smem_start;
        info->apertures->ranges[0].size = size_remap;
 
-       if (nowc)
-               info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len);
-       else
-               info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
+       if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
+               if ((efifb_fix.smem_start + efifb_fix.smem_len) >
+                   (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
+                       pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
+                              efifb_fix.smem_start);
+                       err = -EIO;
+                       goto err_release_fb;
+               }
+               /*
+                * If the UEFI memory map covers the efifb region, we may only
+                * remap it using the attributes the memory map prescribes.
+                */
+               mem_flags |= EFI_MEMORY_WT | EFI_MEMORY_WB;
+               mem_flags &= md.attribute;
+       }
+       if (mem_flags & EFI_MEMORY_WC)
+               info->screen_base = ioremap_wc(efifb_fix.smem_start,
+                                              efifb_fix.smem_len);
+       else if (mem_flags & EFI_MEMORY_UC)
+               info->screen_base = ioremap(efifb_fix.smem_start,
+                                           efifb_fix.smem_len);
+       else if (mem_flags & EFI_MEMORY_WT)
+               info->screen_base = memremap(efifb_fix.smem_start,
+                                            efifb_fix.smem_len, MEMREMAP_WT);
+       else if (mem_flags & EFI_MEMORY_WB)
+               info->screen_base = memremap(efifb_fix.smem_start,
+                                            efifb_fix.smem_len, MEMREMAP_WB);
        if (!info->screen_base) {
-               pr_err("efifb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n",
+               pr_err("efifb: abort, cannot remap video memory 0x%x @ 0x%lx\n",
                        efifb_fix.smem_len, efifb_fix.smem_start);
                err = -EIO;
                goto err_release_fb;
@@ -371,7 +399,10 @@ static int efifb_probe(struct platform_device *dev)
 err_groups:
        sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
 err_unmap:
-       iounmap(info->screen_base);
+       if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
+               iounmap(info->screen_base);
+       else
+               memunmap(info->screen_base);
 err_release_fb:
        framebuffer_release(info);
 err_release_mem:
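
Note: efifb now consults the UEFI memory map before mapping the framebuffer. If the map covers the region, only attributes the firmware advertises may be used, preferring write-combining, then uncached, then write-through, then write-back; and because ioremap*() pairs with iounmap() while memremap() pairs with memunmap(), the same mem_flags test reappears in the teardown paths. A condensed sketch of the selection (an illustrative wrapper over the real APIs; the __force casts just unify the return type):

#include <linux/efi.h>
#include <linux/io.h>

static void *map_framebuffer(phys_addr_t base, size_t len, u64 attr)
{
	if (attr & EFI_MEMORY_WC)		/* preferred for a FB */
		return (void __force *)ioremap_wc(base, len);
	if (attr & EFI_MEMORY_UC)
		return (void __force *)ioremap(base, len);
	if (attr & EFI_MEMORY_WT)
		return memremap(base, len, MEMREMAP_WT);
	if (attr & EFI_MEMORY_WB)
		return memremap(base, len, MEMREMAP_WB);
	return NULL;				/* no usable attribute */
}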
index 0e8e5de3c48a56e24a70b0185932644ba92b93b0..ceb7b491d1b9c32844b40caeab21207eabeddc68 100644 (file)
@@ -358,14 +358,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
        __releases(dentry->d_inode->i_lock)
 {
        struct inode *inode = dentry->d_inode;
-       bool hashed = !d_unhashed(dentry);
 
-       if (hashed)
-               raw_write_seqcount_begin(&dentry->d_seq);
+       raw_write_seqcount_begin(&dentry->d_seq);
        __d_clear_type_and_inode(dentry);
        hlist_del_init(&dentry->d_u.d_alias);
-       if (hashed)
-               raw_write_seqcount_end(&dentry->d_seq);
+       raw_write_seqcount_end(&dentry->d_seq);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&inode->i_lock);
        if (!inode->i_nlink)
@@ -1932,10 +1929,12 @@ struct dentry *d_make_root(struct inode *root_inode)
 
        if (root_inode) {
                res = d_alloc_anon(root_inode->i_sb);
-               if (res)
+               if (res) {
+                       res->d_flags |= DCACHE_RCUACCESS;
                        d_instantiate(res, root_inode);
-               else
+               } else {
                        iput(root_inode);
+               }
        }
        return res;
 }
index 71fccccf317e8849580457680d6bd6ac3376c170..8c6ab6c95727ef219a9cb02845f1bb3e7046ad4d 100644 (file)
@@ -86,7 +86,9 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
        /* length of the variable name itself: remove GUID and separator */
        namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
 
-       uuid_le_to_bin(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+       err = guid_parse(dentry->d_name.name + namelen + 1, &var->var.VendorGuid);
+       if (err)
+               goto out;
 
        if (efivar_variable_is_removable(var->var.VendorGuid,
                                         dentry->d_name.name, namelen))
index 8ddd14806799db5d701ffd1eee41b650dfba3313..bd2f4c68506afb79023c4423abebe392397c53ea 100644 (file)
@@ -659,12 +659,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
                return 0;
        mnt = real_mount(bastard);
        mnt_add_count(mnt, 1);
+       smp_mb();                       // see mntput_no_expire()
        if (likely(!read_seqretry(&mount_lock, seq)))
                return 0;
        if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
                mnt_add_count(mnt, -1);
                return 1;
        }
+       lock_mount_hash();
+       if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+               mnt_add_count(mnt, -1);
+               unlock_mount_hash();
+               return 1;
+       }
+       unlock_mount_hash();
+       /* caller will mntput() */
        return -1;
 }
 
@@ -1195,12 +1204,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
 static void mntput_no_expire(struct mount *mnt)
 {
        rcu_read_lock();
-       mnt_add_count(mnt, -1);
-       if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
+       if (likely(READ_ONCE(mnt->mnt_ns))) {
+               /*
+                * Since we don't do lock_mount_hash() here,
+                * ->mnt_ns can change under us.  However, if it's
+                * non-NULL, then there's a reference that won't
+                * be dropped until after an RCU delay done after
+                * turning ->mnt_ns NULL.  So if we observe it
+                * non-NULL under rcu_read_lock(), the reference
+                * we are dropping is not the final one.
+                */
+               mnt_add_count(mnt, -1);
                rcu_read_unlock();
                return;
        }
        lock_mount_hash();
+       /*
+        * make sure that if __legitimize_mnt() has not seen us grab
+        * mount_lock, we'll see their refcount increment here.
+        */
+       smp_mb();
+       mnt_add_count(mnt, -1);
        if (mnt_get_count(mnt)) {
                rcu_read_unlock();
                unlock_mount_hash();
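
Note: the two smp_mb() calls added here pair across __legitimize_mnt() and mntput_no_expire(): either the lockless legitimizer observes the seqcount bump and backs off, or the final mntput observes the raised refcount and skips freeing; without the pairing both sides could miss the other's write. A user-space C11 model of the pairing (not kernel code; seq stands in for mount_lock's seqcount):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int count = 1;	/* models the mount refcount */
static atomic_uint seq;		/* models mount_lock's seqcount */

static bool legitimize(unsigned int seen_seq)
{
	atomic_fetch_add_explicit(&count, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_load_explicit(&seq, memory_order_relaxed) != seen_seq) {
		atomic_fetch_sub_explicit(&count, 1, memory_order_relaxed);
		return false;	/* a writer got in; fall back to locking */
	}
	return true;
}

static bool final_put_may_free(void)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_relaxed); /* lock_mount_hash() */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	atomic_fetch_sub_explicit(&count, 1, memory_order_relaxed);
	return atomic_load_explicit(&count, memory_order_relaxed) == 0;
}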
index a97a63eef59f629bc54e0c7bc6be4d6c4b966bcf..3233fbe23594e8b6cc163fa9d2960677574fa32f 100644 (file)
@@ -30,7 +30,7 @@ struct cpu {
 };
 
 extern void boot_cpu_init(void);
-extern void boot_cpu_state_init(void);
+extern void boot_cpu_hotplug_init(void);
 extern void cpu_init(void);
 extern void trap_init(void);
 
index 56add823f1909e28c9e46775f12303c48096a13a..401e4b254e30b06a9b19e826bd289c893c0cfe86 100644 (file)
@@ -894,6 +894,16 @@ typedef struct _efi_file_handle {
        void *flush;
 } efi_file_handle_t;
 
+typedef struct {
+       u64 revision;
+       u32 open_volume;
+} efi_file_io_interface_32_t;
+
+typedef struct {
+       u64 revision;
+       u64 open_volume;
+} efi_file_io_interface_64_t;
+
 typedef struct _efi_file_io_interface {
        u64 revision;
        int (*open_volume)(struct _efi_file_io_interface *,
@@ -988,14 +998,12 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
 extern void efi_gettimeofday (struct timespec64 *ts);
 extern void efi_enter_virtual_mode (void);     /* switch EFI to virtual mode, if possible */
 #ifdef CONFIG_X86
-extern void efi_late_init(void);
 extern void efi_free_boot_services(void);
 extern efi_status_t efi_query_variable_store(u32 attributes,
                                             unsigned long size,
                                             bool nonblocking);
 extern void efi_find_mirror(void);
 #else
-static inline void efi_late_init(void) {}
 static inline void efi_free_boot_services(void) {}
 
 static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1651,4 +1659,7 @@ struct linux_efi_tpm_eventlog {
 
 extern int efi_tpm_eventlog_init(void);
 
+/* Workqueue to queue EFI Runtime Services */
+extern struct workqueue_struct *efi_rts_wq;
+
 #endif /* _LINUX_EFI_H */
index cbb872c1b607cec198f2bf374d8bd06690601a81..9d2ea3e907d0fc018d8f7e13d2912537c83c5e70 100644 (file)
@@ -73,6 +73,7 @@
 #define GICD_TYPER_MBIS                        (1U << 16)
 
 #define GICD_TYPER_ID_BITS(typer)      ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_NUM_LPIS(typer)     ((((typer) >> 11) & 0x1f) + 1)
 #define GICD_TYPER_IRQS(typer)         ((((typer) & 0x1f) + 1) * 32)
 
 #define GICD_IROUTER_SPI_MODE_ONE      (0U << 31)
@@ -576,8 +577,8 @@ struct rdists {
                phys_addr_t     phys_base;
        } __percpu              *rdist;
        struct page             *prop_page;
-       int                     id_bits;
        u64                     flags;
+       u32                     gicd_typer;
        bool                    has_vlpis;
        bool                    has_direct_lpi;
 };
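
Note: GICD_TYPER_NUM_LPIS() extracts bits [15:11] of GICD_TYPER; per the GICv3 architecture the field encodes the supported LPI count as 2^(num_LPIs + 1), which is why the macro adds one and the ITS code earlier in this merge shifts 1UL by the result. A worked example:

#include <stdio.h>

#define GICD_TYPER_NUM_LPIS(typer)	((((typer) >> 11) & 0x1f) + 1)

int main(void)
{
	unsigned int typer = 0x0f << 11;	/* num_LPIs field = 15 */

	/* 2^(15 + 1) = 65536 LPI INTIDs advertised */
	printf("%lu LPIs\n", 1UL << GICD_TYPER_NUM_LPIS(typer));
	return 0;
}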
index 36df6ccbc874b6655fa1ac64ebdcaff150d26215..4786c2235b98124919afda4c7203d59c10200702 100644 (file)
@@ -396,7 +396,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  * @member:    the name of the list_head within the struct.
  *
  * Continue to iterate over list of given type, continuing after
- * the current position.
+ * the current position which must have been in the list when the RCU read
+ * lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_from_rcu() except
+ * this starts after the given position and that one starts at the given
+ * position.
  */
 #define list_for_each_entry_continue_rcu(pos, head, member)            \
        for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
@@ -411,6 +420,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
  *
  * Iterate over the tail of a list starting from a given position,
  * which must have been in the list when the RCU read lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_continue_rcu() except
+ * this starts from the given position and that one starts from the position
+ * after the given position.
  */
 #define list_for_each_entry_from_rcu(pos, head, member)                        \
        for (; &(pos)->member != (head);                                        \
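
Note: the expanded kernel-doc spells out the contract shared by both iterators: the starting node must be guaranteed alive and on the list, typically via a reference taken in an earlier walk; the only difference is that _continue_rcu begins after the given position while _from_rcu begins at it. A usage sketch for the _from_rcu form (struct item and find_from() are illustrative):

#include <linux/rculist.h>

struct item {
	struct list_head link;
	int key;
};

/* Resume scanning at @pos, which the caller has pinned (e.g. via a
 * refcount) so it is guaranteed to still be on the list. */
static struct item *find_from(struct item *pos, struct list_head *head,
			      int key)
{
	struct item *found = NULL;

	rcu_read_lock();
	list_for_each_entry_from_rcu(pos, head, link) {
		if (pos->key == key) {
			found = pos;	/* caller must keep it pinned */
			break;
		}
	}
	rcu_read_unlock();
	return found;
}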
index 65163aa0bb04ffa7e0b147be32d70965b84589d7..75e5b393cf4408bca6c7dad1a55ff5f585015570 100644 (file)
@@ -64,7 +64,6 @@ void rcu_barrier_tasks(void);
 
 void __rcu_read_lock(void);
 void __rcu_read_unlock(void);
-void rcu_read_unlock_special(struct task_struct *t);
 void synchronize_rcu(void);
 
 /*
@@ -159,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
        } while (0)
 
 /*
- * Note a voluntary context switch for RCU-tasks benefit.  This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks' benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
        do { \
                if (READ_ONCE((t)->rcu_tasks_holdout)) \
                        WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -171,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
 #define rcu_note_voluntary_context_switch(t) \
        do { \
                rcu_all_qs(); \
-               rcu_note_voluntary_context_switch_lite(t); \
+               rcu_tasks_qs(t); \
        } while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t)      do { } while (0)
+#define rcu_tasks_qs(t)        do { } while (0)
 #define rcu_note_voluntary_context_switch(t)           rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
 #define synchronize_rcu_tasks synchronize_sched
@@ -195,8 +194,8 @@ static inline void exit_tasks_rcu_finish(void) { }
  */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-       if (!cond_resched()) \
-               rcu_note_voluntary_context_switch_lite(current); \
+       rcu_tasks_qs(current); \
+       cond_resched(); \
 } while (0)
 
 /*
@@ -567,8 +566,8 @@ static inline void rcu_preempt_sleep_check(void) { }
  * This is simply an identity function, but it documents where a pointer
  * is handed off from RCU to some other synchronization mechanism, for
  * example, reference counting or locking.  In C11, it would map to
- * kill_dependency().  It could be used as follows:
- * ``
+ * kill_dependency().  It could be used as follows::
+ *
  *     rcu_read_lock();
  *     p = rcu_dereference(gp);
  *     long_lived = is_long_lived(p);
@@ -579,7 +578,6 @@ static inline void rcu_preempt_sleep_check(void) { }
  *                     p = rcu_pointer_handoff(p);
  *     }
  *     rcu_read_unlock();
- *``
  */
 #define rcu_pointer_handoff(p) (p)
 
index 7b3c82e8a625dcdb421bd0f2ac97316e3fd1d4be..8d9a0ea8f0b5be65dd78e9c662204dc112b9388f 100644 (file)
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 #define rcu_note_context_switch(preempt) \
        do { \
                rcu_sched_qs(); \
-               rcu_note_voluntary_context_switch_lite(current); \
+               rcu_tasks_qs(current); \
        } while (0)
 
 static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
index 91494d7e8e41276def7f4f28768f516ca78fd8cc..3e72a291c40142d45ae4ee9722239ac8ad2896b0 100644 (file)
@@ -195,6 +195,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
        return retval;
 }
 
+/* Used by tracing, cannot be traced and cannot invoke lockdep. */
+static inline notrace int
+srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+{
+       int retval;
+
+       retval = __srcu_read_lock(sp);
+       return retval;
+}
+
 /**
  * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
  * @sp: srcu_struct in which to unregister the old reader.
@@ -209,6 +219,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
        __srcu_read_unlock(sp, idx);
 }
 
+/* Used by tracing, cannot be traced and cannot call lockdep. */
+static inline notrace void
+srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+{
+       __srcu_read_unlock(sp, idx);
+}
+
 /**
  * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
  *
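
Note: the _notrace variants added above exist for callers inside the tracing machinery itself: they skip the lockdep annotations (and are not themselves traceable), so a tracepoint callback can enter an SRCU read-side critical section without recursing into tracing or lockdep. A hedged usage sketch (the callback and SRCU domain are illustrative; the SRCU calls are the ones added above):

#include <linux/srcu.h>

DEFINE_SRCU(trace_srcu);		/* illustrative domain */

static notrace void my_trace_callback(void *data)
{
	int idx;

	idx = srcu_read_lock_notrace(&trace_srcu);
	/* read SRCU-protected state without recursing into tracing */
	srcu_read_unlock_notrace(&trace_srcu, idx);
}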
index 66272862070b1c7610e8f8587ccb38fa627076a4..61dfd93b6ee4d6f47f2609319e01c6192620b3fb 100644 (file)
@@ -64,6 +64,8 @@ struct torture_random_state {
        long trs_count;
 };
 #define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
+       DEFINE_PER_CPU(struct torture_random_state, name)
 unsigned long torture_random(struct torture_random_state *trsp);
 
 /* Task shuffler, which causes CPUs to occasionally go idle. */
@@ -79,7 +81,7 @@ void stutter_wait(const char *title);
 int torture_stutter_init(int s);
 
 /* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v);
+bool torture_init_begin(char *ttype, int v);
 void torture_init_end(void);
 bool torture_cleanup_begin(void);
 void torture_cleanup_end(void);
index 9324ac2d9ff2db234cd5f3b8bf9f7e1a2bc60e52..43913ae79f644dd9a8dfaea06e81df9d9005d520 100644 (file)
@@ -64,7 +64,8 @@ struct vsock_sock {
        struct list_head pending_links;
        struct list_head accept_queue;
        bool rejected;
-       struct delayed_work dwork;
+       struct delayed_work connect_work;
+       struct delayed_work pending_work;
        struct delayed_work close_work;
        bool close_work_scheduled;
        u32 peer_shutdown;
@@ -77,7 +78,6 @@ struct vsock_sock {
 
 s64 vsock_stream_has_data(struct vsock_sock *vsk);
 s64 vsock_stream_has_space(struct vsock_sock *vsk);
-void vsock_pending_work(struct work_struct *work);
 struct sock *__vsock_create(struct net *net,
                            struct socket *sock,
                            struct sock *parent,
index dc35f25eb679d4651b59a20c785005aa0cbb4d0f..890a87318014d528a78a3a9631ebdb75bf037a4b 100644 (file)
@@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap)
        refcount_inc(&sap->refcnt);
 }
 
+static inline bool llc_sap_hold_safe(struct llc_sap *sap)
+{
+       return refcount_inc_not_zero(&sap->refcnt);
+}
+
 void llc_sap_close(struct llc_sap *sap);
 
 static inline void llc_sap_put(struct llc_sap *sap)
index 5936aac357ab0d8eb4425d4c789ef687805f9258..a8d07feff6a0c0c26b0927448641e1ddaed78d4f 100644 (file)
@@ -52,6 +52,7 @@ TRACE_EVENT(rcu_utilization,
  *     "cpuqs": CPU passes through a quiescent state.
  *     "cpuonl": CPU comes online.
  *     "cpuofl": CPU goes offline.
+ *     "cpuofl-bgp": CPU goes offline while blocking a grace period.
  *     "reqwait": GP kthread sleeps waiting for grace-period request.
  *     "reqwaitsig": GP kthread awakened by signal from reqwait state.
  *     "fqswait": GP kthread waiting until time to force quiescent states.
@@ -63,24 +64,24 @@ TRACE_EVENT(rcu_utilization,
  */
 TRACE_EVENT(rcu_grace_period,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
+       TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
 
-       TP_ARGS(rcuname, gpnum, gpevent),
+       TP_ARGS(rcuname, gp_seq, gpevent),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(const char *, gpevent)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->gpevent = gpevent;
        ),
 
        TP_printk("%s %lu %s",
-                 __entry->rcuname, __entry->gpnum, __entry->gpevent)
+                 __entry->rcuname, __entry->gp_seq, __entry->gpevent)
 );
 
 /*
@@ -90,8 +91,8 @@ TRACE_EVENT(rcu_grace_period,
  *
  * "Startleaf": Request a grace period based on leaf-node data.
  * "Prestarted": Someone beat us to the request
- * "Startedleaf": Leaf-node start proved sufficient.
- * "Startedleafroot": Leaf-node start proved sufficient after checking root.
+ * "Startedleaf": Leaf node marked for future GP.
+ * "Startedleafroot": All nodes from leaf to root marked for future GP.
  * "Startedroot": Requested a nocb grace period based on root-node data.
  * "NoGPkthread": The RCU grace-period kthread has not yet started.
  * "StartWait": Start waiting for the requested grace period.
@@ -102,17 +103,16 @@ TRACE_EVENT(rcu_grace_period,
  */
 TRACE_EVENT(rcu_future_grace_period,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed,
-                unsigned long c, u8 level, int grplo, int grphi,
+       TP_PROTO(const char *rcuname, unsigned long gp_seq,
+                unsigned long gp_seq_req, u8 level, int grplo, int grphi,
                 const char *gpevent),
 
-       TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent),
+       TP_ARGS(rcuname, gp_seq, gp_seq_req, level, grplo, grphi, gpevent),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
-               __field(unsigned long, completed)
-               __field(unsigned long, c)
+               __field(unsigned long, gp_seq)
+               __field(unsigned long, gp_seq_req)
                __field(u8, level)
                __field(int, grplo)
                __field(int, grphi)
@@ -121,19 +121,17 @@ TRACE_EVENT(rcu_future_grace_period,
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
-               __entry->completed = completed;
-               __entry->c = c;
+               __entry->gp_seq = gp_seq;
+               __entry->gp_seq_req = gp_seq_req;
                __entry->level = level;
                __entry->grplo = grplo;
                __entry->grphi = grphi;
                __entry->gpevent = gpevent;
        ),
 
-       TP_printk("%s %lu %lu %lu %u %d %d %s",
-                 __entry->rcuname, __entry->gpnum, __entry->completed,
-                 __entry->c, __entry->level, __entry->grplo, __entry->grphi,
-                 __entry->gpevent)
+       TP_printk("%s %lu %lu %u %d %d %s",
+                 __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level,
+                 __entry->grplo, __entry->grphi, __entry->gpevent)
 );
 
 /*
@@ -145,14 +143,14 @@ TRACE_EVENT(rcu_future_grace_period,
  */
 TRACE_EVENT(rcu_grace_period_init,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
+       TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
                 int grplo, int grphi, unsigned long qsmask),
 
-       TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
+       TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(u8, level)
                __field(int, grplo)
                __field(int, grphi)
@@ -161,7 +159,7 @@ TRACE_EVENT(rcu_grace_period_init,
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->level = level;
                __entry->grplo = grplo;
                __entry->grphi = grphi;
@@ -169,7 +167,7 @@ TRACE_EVENT(rcu_grace_period_init,
        ),
 
        TP_printk("%s %lu %u %d %d %lx",
-                 __entry->rcuname, __entry->gpnum, __entry->level,
+                 __entry->rcuname, __entry->gp_seq, __entry->level,
                  __entry->grplo, __entry->grphi, __entry->qsmask)
 );
 
@@ -301,24 +299,24 @@ TRACE_EVENT(rcu_nocb_wake,
  */
 TRACE_EVENT(rcu_preempt_task,
 
-       TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
+       TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
 
-       TP_ARGS(rcuname, pid, gpnum),
+       TP_ARGS(rcuname, pid, gp_seq),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(int, pid)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->pid = pid;
        ),
 
        TP_printk("%s %lu %d",
-                 __entry->rcuname, __entry->gpnum, __entry->pid)
+                 __entry->rcuname, __entry->gp_seq, __entry->pid)
 );
 
 /*
@@ -328,23 +326,23 @@ TRACE_EVENT(rcu_preempt_task,
  */
 TRACE_EVENT(rcu_unlock_preempted_task,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
+       TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
 
-       TP_ARGS(rcuname, gpnum, pid),
+       TP_ARGS(rcuname, gp_seq, pid),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(int, pid)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->pid = pid;
        ),
 
-       TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
+       TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
 );
 
 /*
@@ -357,15 +355,15 @@ TRACE_EVENT(rcu_unlock_preempted_task,
  */
 TRACE_EVENT(rcu_quiescent_state_report,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum,
+       TP_PROTO(const char *rcuname, unsigned long gp_seq,
                 unsigned long mask, unsigned long qsmask,
                 u8 level, int grplo, int grphi, int gp_tasks),
 
-       TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
+       TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(unsigned long, mask)
                __field(unsigned long, qsmask)
                __field(u8, level)
@@ -376,7 +374,7 @@ TRACE_EVENT(rcu_quiescent_state_report,
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->mask = mask;
                __entry->qsmask = qsmask;
                __entry->level = level;
@@ -386,41 +384,41 @@ TRACE_EVENT(rcu_quiescent_state_report,
        ),
 
        TP_printk("%s %lu %lx>%lx %u %d %d %u",
-                 __entry->rcuname, __entry->gpnum,
+                 __entry->rcuname, __entry->gp_seq,
                  __entry->mask, __entry->qsmask, __entry->level,
                  __entry->grplo, __entry->grphi, __entry->gp_tasks)
 );
 
 /*
  * Tracepoint for quiescent states detected by force_quiescent_state().
- * These trace events include the type of RCU, the grace-period number that
- * was blocked by the CPU, the CPU itself, and the type of quiescent state,
- * which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, "kick"
- * when kicking a CPU that has been in dyntick-idle mode for too long, or
- * "rqc" if the CPU got a quiescent state via its rcu_qs_ctr.
+ * These trace events include the type of RCU, the grace-period number
+ * that was blocked by the CPU, the CPU itself, and the type of quiescent
+ * state, which can be "dti" for dyntick-idle mode, "kick" when kicking
+ * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the
+ * CPU got a quiescent state via its rcu_qs_ctr.
  */
 TRACE_EVENT(rcu_fqs,
 
-       TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
+       TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
 
-       TP_ARGS(rcuname, gpnum, cpu, qsevent),
+       TP_ARGS(rcuname, gp_seq, cpu, qsevent),
 
        TP_STRUCT__entry(
                __field(const char *, rcuname)
-               __field(unsigned long, gpnum)
+               __field(unsigned long, gp_seq)
                __field(int, cpu)
                __field(const char *, qsevent)
        ),
 
        TP_fast_assign(
                __entry->rcuname = rcuname;
-               __entry->gpnum = gpnum;
+               __entry->gp_seq = gp_seq;
                __entry->cpu = cpu;
                __entry->qsevent = qsevent;
        ),
 
        TP_printk("%s %lu %d %s",
-                 __entry->rcuname, __entry->gpnum,
+                 __entry->rcuname, __entry->gp_seq,
                  __entry->cpu, __entry->qsevent)
 );
 
@@ -753,23 +751,23 @@ TRACE_EVENT(rcu_barrier,
 
 #else /* #ifdef CONFIG_RCU_TRACE */
 
-#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
+#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
+#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
                                      level, grplo, grphi, event) \
                                      do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
                                    qsmask) do { } while (0)
 #define trace_rcu_exp_grace_period(rcuname, gp_seq, gpevent) \
        do { } while (0)
 #define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
        do { } while (0)
 #define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
-#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
-#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
+#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
                                         grplo, grphi, gp_tasks) do { } \
        while (0)
-#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
+#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
 #define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
 #define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
 #define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
index 3b4ada11ed521a1bb25ead3ca461640ba1026923..5e13c544bbf456e0af9baa0471f9229ac6ae3c7e 100644 (file)
@@ -561,8 +561,8 @@ asmlinkage __visible void __init start_kernel(void)
        setup_command_line(command_line);
        setup_nr_cpu_ids();
        setup_per_cpu_areas();
-       boot_cpu_state_init();
        smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+       boot_cpu_hotplug_init();
 
        build_all_zonelists(NULL);
        page_alloc_init();
index e0918d180f08e6e25c55310963b0a1c4cf7441e7..46f5f29605d474cff41ab302dd9898349addfa3a 100644 (file)
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-                            struct xdp_bulk_queue *bq);
+                            struct xdp_bulk_queue *bq, bool in_napi_ctx);
 
 static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
 {
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
                struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
 
                /* No concurrent bq_enqueue can run at this point */
-               bq_flush_to_queue(rcpu, bq);
+               bq_flush_to_queue(rcpu, bq, false);
        }
        free_percpu(rcpu->bulkq);
        /* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-                            struct xdp_bulk_queue *bq)
+                            struct xdp_bulk_queue *bq, bool in_napi_ctx)
 {
        unsigned int processed = 0, drops = 0;
        const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
                err = __ptr_ring_produce(q, xdpf);
                if (err) {
                        drops++;
-                       xdp_return_frame_rx_napi(xdpf);
+                       if (likely(in_napi_ctx))
+                               xdp_return_frame_rx_napi(xdpf);
+                       else
+                               xdp_return_frame(xdpf);
                }
                processed++;
        }
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
        struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
        if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-               bq_flush_to_queue(rcpu, bq);
+               bq_flush_to_queue(rcpu, bq, true);
 
        /* Notice, xdp_buff/page MUST be queued here, long enough for
         * the driver code invoking us to finish, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)
 
                /* Flush all frames in bulkq to real queue */
                bq = this_cpu_ptr(rcpu->bulkq);
-               bq_flush_to_queue(rcpu, bq);
+               bq_flush_to_queue(rcpu, bq, true);
 
                /* If already running, costs spin_lock_irqsave + smp_mb */
                wake_up_process(rcpu->kthread);
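
The new in_napi_ctx flag exists because xdp_return_frame_rx_napi() takes a
return path that is only safe while running under NAPI protection, whereas
__cpu_map_entry_free() above runs from an RCU callback at map teardown. A
distilled sketch of the rule the flag encodes (an illustration, not the
kernel code itself):

        if (in_napi_ctx)
                xdp_return_frame_rx_napi(xdpf); /* RX/NAPI softirq path */
        else
                xdp_return_frame(xdpf);         /* e.g. RCU teardown callback */
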
index d361fc1e3bf35fd54d485e72c2e258171e3394d5..750d45edae7989e612bc6d57a217dc594b8856bc 100644 (file)
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-                      struct xdp_bulk_queue *bq, u32 flags)
+                      struct xdp_bulk_queue *bq, u32 flags,
+                      bool in_napi_ctx)
 {
        struct net_device *dev = obj->dev;
        int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
                struct xdp_frame *xdpf = bq->q[i];
 
                /* RX path under NAPI protection, can return frames faster */
-               xdp_return_frame_rx_napi(xdpf);
+               if (likely(in_napi_ctx))
+                       xdp_return_frame_rx_napi(xdpf);
+               else
+                       xdp_return_frame(xdpf);
                drops++;
        }
        goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
                __clear_bit(bit, bitmap);
 
                bq = this_cpu_ptr(dev->bulkq);
-               bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+               bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
        }
 }
 
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
        struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
        if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-               bq_xmit_all(obj, bq, 0);
+               bq_xmit_all(obj, bq, 0, true);
 
        /* Ingress dev_rx will be the same for all xdp_frame's in
         * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
                        __clear_bit(dev->bit, bitmap);
 
                        bq = per_cpu_ptr(dev->bulkq, cpu);
-                       bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+                       bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
                }
        }
 }
index 98fb7938beea9dd18a255ad77ebe797b01660dea..c4d75c52b4fc13d28c53cae1f9d8084b0f8e41e5 100644 (file)
@@ -1048,12 +1048,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
        while (msg_data_left(msg)) {
-               struct sk_msg_buff *m;
+               struct sk_msg_buff *m = NULL;
                bool enospc = false;
                int copy;
 
                if (sk->sk_err) {
-                       err = sk->sk_err;
+                       err = -sk->sk_err;
                        goto out_err;
                }
 
@@ -1116,8 +1116,11 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
                err = sk_stream_wait_memory(sk, &timeo);
-               if (err)
+               if (err) {
+                       if (m && m != psock->cork)
+                               free_start_sg(sk, m);
                        goto out_err;
+               }
        }
 out_err:
        if (err < 0)
index 0db8938fbb236e58284927040e19998718ae74e7..2f8f338e77cf5920b4beecc28d84a62214b788e5 100644 (file)
@@ -2010,7 +2010,7 @@ void __init boot_cpu_init(void)
 /*
  * Must be called _AFTER_ setting up the per_cpu areas
  */
-void __init boot_cpu_state_init(void)
+void __init boot_cpu_hotplug_init(void)
 {
        per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
 }
index c6766f326072d54322d460cb54afe225b6754ac5..5f3e2baefca92e45442b9aa4a2b1399bbf0ce21e 100644 (file)
@@ -134,7 +134,6 @@ config GENERIC_IRQ_DEBUGFS
 endmenu
 
 config GENERIC_IRQ_MULTI_HANDLER
-       depends on !MULTI_IRQ_HANDLER
        bool
        help
           Allow specifying the low-level IRQ handler at run time.
index afc7f902d74a3eff5d1f8a5e407159c5d28625f7..578d0e5f1b5b65a147aaf7fff037761ff091f4f2 100644 (file)
@@ -443,6 +443,7 @@ static void free_desc(unsigned int irq)
         * We free the descriptor, masks and stat fields via RCU. That
         * allows demultiplex interrupts to do rcu based management of
         * the child interrupts.
+        * This also allows us to use rcu in kstat_irqs_usr().
         */
        call_rcu(&desc->rcu, delayed_free_desc);
 }
@@ -928,17 +929,17 @@ unsigned int kstat_irqs(unsigned int irq)
  * kstat_irqs_usr - Get the statistics for an interrupt
  * @irq:       The interrupt number
  *
- * Returns the sum of interrupt counts on all cpus since boot for
- * @irq. Contrary to kstat_irqs() this can be called from any
- * preemptible context. It's protected against concurrent removal of
- * an interrupt descriptor when sparse irqs are enabled.
+ * Returns the sum of interrupt counts on all cpus since boot for @irq.
+ * Unlike kstat_irqs(), this can be called from any context.
+ * It uses RCU, since a concurrent removal of an interrupt descriptor must
+ * observe an RCU grace period before delayed_free_desc()/irq_kobj_release().
  */
 unsigned int kstat_irqs_usr(unsigned int irq)
 {
        unsigned int sum;
 
-       irq_lock_sparse();
+       rcu_read_lock();
        sum = kstat_irqs(irq);
-       irq_unlock_sparse();
+       rcu_read_unlock();
        return sum;
 }
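
Together with the free_desc() change above, this is the classic RCU
read/reclaim pairing: readers bracket their access with rcu_read_lock()/
rcu_read_unlock(), and the update side defers the actual free through
call_rcu(). A minimal sketch of the idiom (type and function names here are
illustrative, not the kernel's):

        #include <linux/rcupdate.h>
        #include <linux/slab.h>

        struct foo {
                struct rcu_head rcu;
                unsigned int stat;
        };

        /* Update side: runs only after all pre-existing readers finish. */
        static void foo_free_rcu(struct rcu_head *head)
        {
                kfree(container_of(head, struct foo, rcu));
        }

        /* Reader side: safe from any context, no locks needed. */
        static unsigned int foo_read_stat(struct foo *f)
        {
                unsigned int v;

                rcu_read_lock();
                v = READ_ONCE(f->stat); /* f cannot be freed in here */
                rcu_read_unlock();
                return v;
        }

        /* Removal defers reclamation past a grace period. */
        static void foo_remove(struct foo *f)
        {
                call_rcu(&f->rcu, foo_free_rcu);
        }
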
index 9a8b7ba9aa88de8bf89eff972c0fef32347e649f..fb86146037a745e85a862771f437866d9ab31707 100644 (file)
@@ -790,9 +790,19 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
 
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
-       set_current_state(TASK_INTERRUPTIBLE);
+       for (;;) {
+               set_current_state(TASK_INTERRUPTIBLE);
 
-       while (!kthread_should_stop()) {
+               if (kthread_should_stop()) {
+                       /* may need to run one last time */
+                       if (test_and_clear_bit(IRQTF_RUNTHREAD,
+                                              &action->thread_flags)) {
+                               __set_current_state(TASK_RUNNING);
+                               return 0;
+                       }
+                       __set_current_state(TASK_RUNNING);
+                       return -1;
+               }
 
                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
@@ -800,10 +810,7 @@ static int irq_wait_for_interrupt(struct irqaction *action)
                        return 0;
                }
                schedule();
-               set_current_state(TASK_INTERRUPTIBLE);
        }
-       __set_current_state(TASK_RUNNING);
-       return -1;
 }
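
The rework above also converts the wait loop to the standard kernel sleep
pattern: set the task state before testing the wakeup condition, so a wakeup
arriving between the test and schedule() cannot be lost. The bare idiom, as a
sketch with condition standing in for the wakeup predicate:

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (condition) {
                        /* Wakeup already pending; do not sleep. */
                        __set_current_state(TASK_RUNNING);
                        break;
                }
                schedule();     /* Sleep until wake_up_process(). */
        }
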
 
 /*
@@ -1024,11 +1031,8 @@ static int irq_thread(void *data)
        /*
         * This is the regular exit path. __free_irq() is stopping the
         * thread via kthread_stop() after calling
-        * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
-        * oneshot mask bit can be set. We cannot verify that as we
-        * cannot touch the oneshot mask at this point anymore as
-        * __setup_irq() might have given out currents thread_mask
-        * again.
+        * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
+        * oneshot mask bit can be set.
         */
        task_work_cancel(current, irq_thread_dtor);
        return 0;
@@ -1251,8 +1255,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
        /*
         * Protects against a concurrent __free_irq() call which might wait
-        * for synchronize_irq() to complete without holding the optional
-        * chip bus lock and desc->lock.
+        * for synchronize_hardirq() to complete without holding the optional
+        * chip bus lock and desc->lock. Also protects against handing out
+        * a recycled oneshot thread_mask bit while it's still in use by
+        * its previous owner.
         */
        mutex_lock(&desc->request_mutex);
 
@@ -1571,9 +1577,6 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
 
        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
 
-       if (!desc)
-               return NULL;
-
        mutex_lock(&desc->request_mutex);
        chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
@@ -1620,11 +1623,11 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
        /*
         * Drop bus_lock here so the changes which were done in the chip
         * callbacks above are synced out to the irq chips which hang
-        * behind a slow bus (I2C, SPI) before calling synchronize_irq().
+        * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
         *
         * Aside of that the bus_lock can also be taken from the threaded
         * handler in irq_finalize_oneshot() which results in a deadlock
-        * because synchronize_irq() would wait forever for the thread to
+        * because kthread_stop() would wait forever for the thread to
         * complete, which is blocked on the bus lock.
         *
         * The still held desc->request_mutex() protects against a
@@ -1636,7 +1639,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
        unregister_handler_proc(irq, action);
 
        /* Make sure it's not being used on another CPU: */
-       synchronize_irq(irq);
+       synchronize_hardirq(irq);
 
 #ifdef CONFIG_DEBUG_SHIRQ
        /*
@@ -1645,7 +1648,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
         * is so by doing an extra call to the handler ....
         *
         * ( We do this after actually deregistering it, to make sure that a
-        *   'real' IRQ doesn't run in parallel with our fake. )
+        *   'real' IRQ doesn't run in parallel with our fake. )
         */
        if (action->flags & IRQF_SHARED) {
                local_irq_save(flags);
@@ -1654,6 +1657,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
        }
 #endif
 
+       /*
+        * The action has already been removed above, but the thread writes
+        * its oneshot mask bit when it completes. However, request_mutex is
+        * held across this, which prevents __setup_irq() from handing out
+        * the same bit to a newly requested action.
+        */
        if (action->thread) {
                kthread_stop(action->thread);
                put_task_struct(action->thread);
index 37eda10f5c362fb173f4280fe77fd24ebc8c195f..da9addb8d655719cbd67a59e5d8535670e28f885 100644 (file)
@@ -475,22 +475,24 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_putc(p, '\n');
        }
 
-       irq_lock_sparse();
+       rcu_read_lock();
        desc = irq_to_desc(i);
        if (!desc)
                goto outsparse;
 
-       raw_spin_lock_irqsave(&desc->lock, flags);
-       for_each_online_cpu(j)
-               any_count |= kstat_irqs_cpu(i, j);
-       action = desc->action;
-       if ((!action || irq_desc_is_chained(desc)) && !any_count)
-               goto out;
+       if (desc->kstat_irqs)
+               for_each_online_cpu(j)
+                       any_count |= *per_cpu_ptr(desc->kstat_irqs, j);
+
+       if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
+               goto outsparse;
 
        seq_printf(p, "%*d: ", prec, i);
        for_each_online_cpu(j)
-               seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
+               seq_printf(p, "%10u ", desc->kstat_irqs ?
+                                       *per_cpu_ptr(desc->kstat_irqs, j) : 0);
 
+       raw_spin_lock_irqsave(&desc->lock, flags);
        if (desc->irq_data.chip) {
                if (desc->irq_data.chip->irq_print_chip)
                        desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
@@ -511,6 +513,7 @@ int show_interrupts(struct seq_file *p, void *v)
        if (desc->name)
                seq_printf(p, "-%-8s", desc->name);
 
+       action = desc->action;
        if (action) {
                seq_printf(p, "  %s", action->name);
                while ((action = action->next) != NULL)
@@ -518,10 +521,9 @@ int show_interrupts(struct seq_file *p, void *v)
        }
 
        seq_putc(p, '\n');
-out:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
 outsparse:
-       irq_unlock_sparse();
+       rcu_read_unlock();
        return 0;
 }
 #endif
index 8402b3349dca40a53a82a34900165c1054ae2fff..57bef4fbfb31cb65788caaa2e8ee654378aa3246 100644 (file)
@@ -21,6 +21,9 @@
  *          Davidlohr Bueso <dave@stgolabs.net>
  *     Based on kernel/rcu/torture.c.
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/kthread.h>
@@ -57,7 +60,7 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
 torture_param(int, stat_interval, 60,
             "Number of seconds between stats printk()s");
 torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
-torture_param(bool, verbose, true,
+torture_param(int, verbose, 1,
             "Enable verbose debugging printk()s");
 
 static char *torture_type = "spin_lock";
index 40cea6735c2df564655155597d410035309850bd..4d04683c31b2b20d011a4988e9d0e114878798c5 100644 (file)
@@ -91,7 +91,17 @@ static inline void rcu_seq_end(unsigned long *sp)
        WRITE_ONCE(*sp, rcu_seq_endval(sp));
 }
 
-/* Take a snapshot of the update side's sequence number. */
+/*
+ * rcu_seq_snap - Take a snapshot of the update side's sequence number.
+ *
+ * This function returns the earliest value of the grace-period sequence number
+ * that will indicate that a full grace period has elapsed since the current
+ * time.  Once the grace-period sequence number has reached this value, it will
+ * be safe to invoke all callbacks that have been registered prior to the
+ * current time. This value is the current grace-period number plus two to the
+ * power of the number of low-order bits reserved for state, then rounded up to
+ * the next value in which the state bits are all zero.
+ */
 static inline unsigned long rcu_seq_snap(unsigned long *sp)
 {
        unsigned long s;
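
To make the rounding concrete: in this tree RCU_SEQ_CTR_SHIFT is 2, so
RCU_SEQ_STATE_MASK is 0x3 and each completed grace period advances the
counter by 4. A stand-alone user-space sketch mirroring rcu_seq_snap()'s
arithmetic (an illustration, not the kernel code):

        #include <stdio.h>

        #define RCU_SEQ_CTR_SHIFT  2
        #define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

        /* Round up so that at least one full grace period separates
         * the snapshot from the returned value. */
        static unsigned long seq_snap(unsigned long sp)
        {
                return (sp + 2 * RCU_SEQ_STATE_MASK + 1) &
                       ~RCU_SEQ_STATE_MASK;
        }

        int main(void)
        {
                printf("%lu\n", seq_snap(8)); /* idle at GP #2 -> 12  */
                printf("%lu\n", seq_snap(9)); /* GP #2 running -> 16 */
                return 0;
        }
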
@@ -107,6 +117,15 @@ static inline unsigned long rcu_seq_current(unsigned long *sp)
        return READ_ONCE(*sp);
 }
 
+/*
+ * Given a snapshot from rcu_seq_snap(), determine whether or not the
+ * corresponding update-side operation has started.
+ */
+static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
+{
+       return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
+}
+
 /*
  * Given a snapshot from rcu_seq_snap(), determine whether or not a
  * full update-side operation has occurred.
@@ -116,6 +135,45 @@ static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
        return ULONG_CMP_GE(READ_ONCE(*sp), s);
 }
 
+/*
+ * Has a grace period completed since the time the old gp_seq was collected?
+ */
+static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
+{
+       return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
+}
+
+/*
+ * Has a grace period started since the time the old gp_seq was collected?
+ */
+static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
+{
+       return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
+                           new);
+}
+
+/*
+ * Roughly how many full grace periods have elapsed between the collection
+ * of the two specified grace periods?
+ */
+static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
+{
+       unsigned long rnd_diff;
+
+       if (old == new)
+               return 0;
+       /*
+        * Compute the number of grace periods (still shifted up), plus
+        * one if either new or old is not an exact grace period.
+        */
+       rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
+                  ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
+                  ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
+       if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
+               return 1; /* Definitely no grace period has elapsed. */
+       return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
+}
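
A quick trace of this arithmetic (again with RCU_SEQ_STATE_MASK == 0x3 and
RCU_SEQ_CTR_SHIFT == 2): for old = 9 (a grace period in progress) and
new = 12 (that grace period just ended), rnd_diff = 12 - 12 + 1 = 1, which is
<= 0x3, so the function returns 1; for old = 8 and new = 16 (two full grace
periods later), rnd_diff = 16 - 8 + 0 = 8, giving ((8 - 3 - 1) >> 2) + 2 = 3,
a deliberately rough estimate in keeping with the "Roughly" above.
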
+
 /*
  * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
  * by call_rcu() and rcu callback execution, and are therefore not part of the
@@ -276,6 +334,9 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
 /* Is this rcu_node a leaf? */
 #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)
 
+/* Is this rcu_node the last leaf? */
+#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])
+
 /*
  * Do a full breadth-first scan of the rcu_node structures for the
  * specified rcu_state structure.
@@ -405,8 +466,7 @@ enum rcutorture_type {
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
-                           unsigned long *gpnum, unsigned long *completed);
-void rcutorture_record_test_transition(void);
+                           unsigned long *gp_seq);
 void rcutorture_record_progress(unsigned long vernum);
 void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
@@ -415,15 +475,11 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
                               unsigned long c);
 #else
 static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
-                                         int *flags,
-                                         unsigned long *gpnum,
-                                         unsigned long *completed)
+                                         int *flags, unsigned long *gp_seq)
 {
        *flags = 0;
-       *gpnum = 0;
-       *completed = 0;
+       *gp_seq = 0;
 }
-static inline void rcutorture_record_test_transition(void) { }
 static inline void rcutorture_record_progress(unsigned long vernum) { }
 #ifdef CONFIG_RCU_TRACE
 void do_trace_rcu_torture_read(const char *rcutorturename,
@@ -441,31 +497,26 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
 
 static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
                                           struct srcu_struct *sp, int *flags,
-                                          unsigned long *gpnum,
-                                          unsigned long *completed)
+                                          unsigned long *gp_seq)
 {
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
-       *completed = sp->srcu_idx;
-       *gpnum = *completed;
+       *gp_seq = sp->srcu_idx;
 }
 
 #elif defined(CONFIG_TREE_SRCU)
 
 void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *sp, int *flags,
-                            unsigned long *gpnum, unsigned long *completed);
+                            unsigned long *gp_seq);
 
 #endif
 
 #ifdef CONFIG_TINY_RCU
-static inline unsigned long rcu_batches_started(void) { return 0; }
-static inline unsigned long rcu_batches_started_bh(void) { return 0; }
-static inline unsigned long rcu_batches_started_sched(void) { return 0; }
-static inline unsigned long rcu_batches_completed(void) { return 0; }
-static inline unsigned long rcu_batches_completed_bh(void) { return 0; }
-static inline unsigned long rcu_batches_completed_sched(void) { return 0; }
+static inline unsigned long rcu_get_gp_seq(void) { return 0; }
+static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; }
+static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; }
 static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
 static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
 static inline unsigned long
@@ -474,19 +525,16 @@ static inline void rcu_force_quiescent_state(void) { }
 static inline void rcu_bh_force_quiescent_state(void) { }
 static inline void rcu_sched_force_quiescent_state(void) { }
 static inline void show_rcu_gp_kthreads(void) { }
+static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
 #else /* #ifdef CONFIG_TINY_RCU */
-extern unsigned long rcutorture_testseq;
-extern unsigned long rcutorture_vernum;
-unsigned long rcu_batches_started(void);
-unsigned long rcu_batches_started_bh(void);
-unsigned long rcu_batches_started_sched(void);
-unsigned long rcu_batches_completed(void);
-unsigned long rcu_batches_completed_bh(void);
-unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_get_gp_seq(void);
+unsigned long rcu_bh_get_gp_seq(void);
+unsigned long rcu_sched_get_gp_seq(void);
 unsigned long rcu_exp_batches_completed(void);
 unsigned long rcu_exp_batches_completed_sched(void);
 unsigned long srcu_batches_completed(struct srcu_struct *sp);
 void show_rcu_gp_kthreads(void);
+int rcu_get_gp_kthreads_prio(void);
 void rcu_force_quiescent_state(void);
 void rcu_bh_force_quiescent_state(void);
 void rcu_sched_force_quiescent_state(void);
index e232846516b3b5e8ca22c3ff1aa5b0baea5d5ba3..34244523550e16e498754debe9917b7506c1196f 100644 (file)
@@ -19,6 +19,9 @@
  *
  * Authors: Paul E. McKenney <paulmck@us.ibm.com>
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -88,7 +91,7 @@ torture_param(int, nreaders, -1, "Number of RCU reader threads");
 torture_param(int, nwriters, -1, "Number of RCU updater threads");
 torture_param(bool, shutdown, !IS_ENABLED(MODULE),
              "Shutdown at end of performance tests.");
-torture_param(bool, verbose, true, "Enable verbose debugging printk()s");
+torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
 torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
 
 static char *perf_type = "rcu";
@@ -135,8 +138,8 @@ struct rcu_perf_ops {
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*readunlock)(int idx);
-       unsigned long (*started)(void);
-       unsigned long (*completed)(void);
+       unsigned long (*get_gp_seq)(void);
+       unsigned long (*gp_diff)(unsigned long new, unsigned long old);
        unsigned long (*exp_completed)(void);
        void (*async)(struct rcu_head *head, rcu_callback_t func);
        void (*gp_barrier)(void);
@@ -176,8 +179,8 @@ static struct rcu_perf_ops rcu_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = rcu_perf_read_lock,
        .readunlock     = rcu_perf_read_unlock,
-       .started        = rcu_batches_started,
-       .completed      = rcu_batches_completed,
+       .get_gp_seq     = rcu_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = rcu_exp_batches_completed,
        .async          = call_rcu,
        .gp_barrier     = rcu_barrier,
@@ -206,8 +209,8 @@ static struct rcu_perf_ops rcu_bh_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = rcu_bh_perf_read_lock,
        .readunlock     = rcu_bh_perf_read_unlock,
-       .started        = rcu_batches_started_bh,
-       .completed      = rcu_batches_completed_bh,
+       .get_gp_seq     = rcu_bh_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = rcu_exp_batches_completed_sched,
        .async          = call_rcu_bh,
        .gp_barrier     = rcu_barrier_bh,
@@ -263,8 +266,8 @@ static struct rcu_perf_ops srcu_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = srcu_perf_read_lock,
        .readunlock     = srcu_perf_read_unlock,
-       .started        = NULL,
-       .completed      = srcu_perf_completed,
+       .get_gp_seq     = srcu_perf_completed,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = srcu_perf_completed,
        .async          = srcu_call_rcu,
        .gp_barrier     = srcu_rcu_barrier,
@@ -292,8 +295,8 @@ static struct rcu_perf_ops srcud_ops = {
        .cleanup        = srcu_sync_perf_cleanup,
        .readlock       = srcu_perf_read_lock,
        .readunlock     = srcu_perf_read_unlock,
-       .started        = NULL,
-       .completed      = srcu_perf_completed,
+       .get_gp_seq     = srcu_perf_completed,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = srcu_perf_completed,
        .async          = srcu_call_rcu,
        .gp_barrier     = srcu_rcu_barrier,
@@ -322,8 +325,8 @@ static struct rcu_perf_ops sched_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = sched_perf_read_lock,
        .readunlock     = sched_perf_read_unlock,
-       .started        = rcu_batches_started_sched,
-       .completed      = rcu_batches_completed_sched,
+       .get_gp_seq     = rcu_sched_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .exp_completed  = rcu_exp_batches_completed_sched,
        .async          = call_rcu_sched,
        .gp_barrier     = rcu_barrier_sched,
@@ -350,8 +353,8 @@ static struct rcu_perf_ops tasks_ops = {
        .init           = rcu_sync_perf_init,
        .readlock       = tasks_perf_read_lock,
        .readunlock     = tasks_perf_read_unlock,
-       .started        = rcu_no_completed,
-       .completed      = rcu_no_completed,
+       .get_gp_seq     = rcu_no_completed,
+       .gp_diff        = rcu_seq_diff,
        .async          = call_rcu_tasks,
        .gp_barrier     = rcu_barrier_tasks,
        .sync           = synchronize_rcu_tasks,
@@ -359,9 +362,11 @@ static struct rcu_perf_ops tasks_ops = {
        .name           = "tasks"
 };
 
-static bool __maybe_unused torturing_tasks(void)
+static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
 {
-       return cur_ops == &tasks_ops;
+       if (!cur_ops->gp_diff)
+               return new - old;
+       return cur_ops->gp_diff(new, old);
 }
 
 /*
@@ -444,8 +449,7 @@ rcu_perf_writer(void *arg)
                        b_rcu_perf_writer_started =
                                cur_ops->exp_completed() / 2;
                } else {
-                       b_rcu_perf_writer_started =
-                               cur_ops->completed();
+                       b_rcu_perf_writer_started = cur_ops->get_gp_seq();
                }
        }
 
@@ -502,7 +506,7 @@ rcu_perf_writer(void *arg)
                                                cur_ops->exp_completed() / 2;
                                } else {
                                        b_rcu_perf_writer_finished =
-                                               cur_ops->completed();
+                                               cur_ops->get_gp_seq();
                                }
                                if (shutdown) {
                                        smp_mb(); /* Assign before wake. */
@@ -527,7 +531,7 @@ rcu_perf_writer(void *arg)
        return 0;
 }
 
-static inline void
+static void
 rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
 {
        pr_alert("%s" PERF_FLAG
@@ -582,8 +586,8 @@ rcu_perf_cleanup(void)
                         t_rcu_perf_writer_finished -
                         t_rcu_perf_writer_started,
                         ngps,
-                        b_rcu_perf_writer_finished -
-                        b_rcu_perf_writer_started);
+                        rcuperf_seq_diff(b_rcu_perf_writer_finished,
+                                         b_rcu_perf_writer_started));
                for (i = 0; i < nrealwriters; i++) {
                        if (!writer_durations)
                                break;
@@ -671,12 +675,11 @@ rcu_perf_init(void)
                        break;
        }
        if (i == ARRAY_SIZE(perf_ops)) {
-               pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
-                        perf_type);
+               pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
                pr_alert("rcu-perf types:");
                for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
-                       pr_alert(" %s", perf_ops[i]->name);
-               pr_alert("\n");
+                       pr_cont(" %s", perf_ops[i]->name);
+               pr_cont("\n");
                firsterr = -EINVAL;
                goto unwind;
        }
index 42fcb7f05fac27dc6785fab9955db4f3cc2a8efc..c596c6f1e45717af99da5139d3c5078782deafbe 100644 (file)
@@ -22,6 +22,9 @@
  *
  * See also:  Documentation/RCU/torture.txt
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -52,6 +55,7 @@
 #include <linux/torture.h>
 #include <linux/vmalloc.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/sysctl.h>
 
 #include "rcu.h"
 
@@ -59,6 +63,19 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
 
 
+/* Bits for ->extendables field, extendables param, and related definitions. */
+#define RCUTORTURE_RDR_SHIFT    8      /* Put SRCU index in upper bits. */
+#define RCUTORTURE_RDR_MASK     ((1 << RCUTORTURE_RDR_SHIFT) - 1)
+#define RCUTORTURE_RDR_BH       0x1    /* Extend readers by disabling bh. */
+#define RCUTORTURE_RDR_IRQ      0x2    /*  ... disabling interrupts. */
+#define RCUTORTURE_RDR_PREEMPT  0x4    /*  ... disabling preemption. */
+#define RCUTORTURE_RDR_RCU      0x8    /*  ... entering another RCU reader. */
+#define RCUTORTURE_RDR_NBITS    4      /* Number of bits defined above. */
+#define RCUTORTURE_MAX_EXTEND   (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \
+                                 RCUTORTURE_RDR_PREEMPT)
+#define RCUTORTURE_RDR_MAX_LOOPS 0x7   /* Maximum reader extensions. */
+                                       /* Must be power of two minus one. */
+
 torture_param(int, cbflood_inter_holdoff, HZ,
              "Holdoff between floods (jiffies)");
 torture_param(int, cbflood_intra_holdoff, 1,
@@ -66,6 +83,8 @@ torture_param(int, cbflood_intra_holdoff, 1,
 torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
 torture_param(int, cbflood_n_per_burst, 20000,
              "# callbacks per burst in flood");
+torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
+             "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
 torture_param(int, fqs_duration, 0,
              "Duration of fqs bursts (us), 0 to disable");
 torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
@@ -84,7 +103,7 @@ torture_param(int, object_debug, 0,
             "Enable debug-object double call_rcu() testing");
 torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
 torture_param(int, onoff_interval, 0,
-            "Time between CPU hotplugs (s), 0=disable");
+            "Time between CPU hotplugs (jiffies), 0=disable");
 torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
 torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
 torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
@@ -101,7 +120,7 @@ torture_param(int, test_boost_interval, 7,
             "Interval between boost tests, seconds.");
 torture_param(bool, test_no_idle_hz, true,
             "Test support for tickless idle CPUs");
-torture_param(bool, verbose, true,
+torture_param(int, verbose, 1,
             "Enable verbose debugging printk()s");
 
 static char *torture_type = "rcu";
@@ -148,9 +167,9 @@ static long n_rcu_torture_boost_ktrerror;
 static long n_rcu_torture_boost_rterror;
 static long n_rcu_torture_boost_failure;
 static long n_rcu_torture_boosts;
-static long n_rcu_torture_timers;
+static atomic_long_t n_rcu_torture_timers;
 static long n_barrier_attempts;
-static long n_barrier_successes;
+static long n_barrier_successes; /* did rcu_barrier test succeed? */
 static atomic_long_t n_cbfloods;
 static struct list_head rcu_torture_removed;
 
@@ -261,8 +280,8 @@ struct rcu_torture_ops {
        int (*readlock)(void);
        void (*read_delay)(struct torture_random_state *rrsp);
        void (*readunlock)(int idx);
-       unsigned long (*started)(void);
-       unsigned long (*completed)(void);
+       unsigned long (*get_gp_seq)(void);
+       unsigned long (*gp_diff)(unsigned long new, unsigned long old);
        void (*deferred_free)(struct rcu_torture *p);
        void (*sync)(void);
        void (*exp_sync)(void);
@@ -274,6 +293,8 @@ struct rcu_torture_ops {
        void (*stats)(void);
        int irq_capable;
        int can_boost;
+       int extendables;
+       int ext_irq_conflict;
        const char *name;
 };
 
@@ -302,10 +323,10 @@ static void rcu_read_delay(struct torture_random_state *rrsp)
         * force_quiescent_state. */
 
        if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
-               started = cur_ops->completed();
+               started = cur_ops->get_gp_seq();
                ts = rcu_trace_clock_local();
                mdelay(longdelay_ms);
-               completed = cur_ops->completed();
+               completed = cur_ops->get_gp_seq();
                do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
                                          started, completed);
        }
@@ -397,8 +418,8 @@ static struct rcu_torture_ops rcu_ops = {
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,
        .readunlock     = rcu_torture_read_unlock,
-       .started        = rcu_batches_started,
-       .completed      = rcu_batches_completed,
+       .get_gp_seq     = rcu_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .deferred_free  = rcu_torture_deferred_free,
        .sync           = synchronize_rcu,
        .exp_sync       = synchronize_rcu_expedited,
@@ -439,8 +460,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
        .readlock       = rcu_bh_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_bh_torture_read_unlock,
-       .started        = rcu_batches_started_bh,
-       .completed      = rcu_batches_completed_bh,
+       .get_gp_seq     = rcu_bh_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .deferred_free  = rcu_bh_torture_deferred_free,
        .sync           = synchronize_rcu_bh,
        .exp_sync       = synchronize_rcu_bh_expedited,
@@ -449,6 +470,8 @@ static struct rcu_torture_ops rcu_bh_ops = {
        .fqs            = rcu_bh_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
+       .extendables    = (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ),
+       .ext_irq_conflict = RCUTORTURE_RDR_RCU,
        .name           = "rcu_bh"
 };
 
@@ -483,8 +506,7 @@ static struct rcu_torture_ops rcu_busted_ops = {
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_torture_read_unlock,
-       .started        = rcu_no_completed,
-       .completed      = rcu_no_completed,
+       .get_gp_seq     = rcu_no_completed,
        .deferred_free  = rcu_busted_torture_deferred_free,
        .sync           = synchronize_rcu_busted,
        .exp_sync       = synchronize_rcu_busted,
@@ -572,8 +594,7 @@ static struct rcu_torture_ops srcu_ops = {
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
-       .started        = NULL,
-       .completed      = srcu_torture_completed,
+       .get_gp_seq     = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
        .exp_sync       = srcu_torture_synchronize_expedited,
@@ -610,8 +631,7 @@ static struct rcu_torture_ops srcud_ops = {
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
-       .started        = NULL,
-       .completed      = srcu_torture_completed,
+       .get_gp_seq     = srcu_torture_completed,
        .deferred_free  = srcu_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
        .exp_sync       = srcu_torture_synchronize_expedited,
@@ -622,6 +642,26 @@ static struct rcu_torture_ops srcud_ops = {
        .name           = "srcud"
 };
 
+/* As above, but broken due to inappropriate reader extension. */
+static struct rcu_torture_ops busted_srcud_ops = {
+       .ttype          = SRCU_FLAVOR,
+       .init           = srcu_torture_init,
+       .cleanup        = srcu_torture_cleanup,
+       .readlock       = srcu_torture_read_lock,
+       .read_delay     = rcu_read_delay,
+       .readunlock     = srcu_torture_read_unlock,
+       .get_gp_seq     = srcu_torture_completed,
+       .deferred_free  = srcu_torture_deferred_free,
+       .sync           = srcu_torture_synchronize,
+       .exp_sync       = srcu_torture_synchronize_expedited,
+       .call           = srcu_torture_call,
+       .cb_barrier     = srcu_torture_barrier,
+       .stats          = srcu_torture_stats,
+       .irq_capable    = 1,
+       .extendables    = RCUTORTURE_MAX_EXTEND,
+       .name           = "busted_srcud"
+};
+
 /*
  * Definitions for sched torture testing.
  */
@@ -648,8 +688,8 @@ static struct rcu_torture_ops sched_ops = {
        .readlock       = sched_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = sched_torture_read_unlock,
-       .started        = rcu_batches_started_sched,
-       .completed      = rcu_batches_completed_sched,
+       .get_gp_seq     = rcu_sched_get_gp_seq,
+       .gp_diff        = rcu_seq_diff,
        .deferred_free  = rcu_sched_torture_deferred_free,
        .sync           = synchronize_sched,
        .exp_sync       = synchronize_sched_expedited,
@@ -660,6 +700,7 @@ static struct rcu_torture_ops sched_ops = {
        .fqs            = rcu_sched_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
+       .extendables    = RCUTORTURE_MAX_EXTEND,
        .name           = "sched"
 };
 
@@ -687,8 +728,7 @@ static struct rcu_torture_ops tasks_ops = {
        .readlock       = tasks_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = tasks_torture_read_unlock,
-       .started        = rcu_no_completed,
-       .completed      = rcu_no_completed,
+       .get_gp_seq     = rcu_no_completed,
        .deferred_free  = rcu_tasks_torture_deferred_free,
        .sync           = synchronize_rcu_tasks,
        .exp_sync       = synchronize_rcu_tasks,
@@ -700,6 +740,13 @@ static struct rcu_torture_ops tasks_ops = {
        .name           = "tasks"
 };
 
+static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
+{
+       if (!cur_ops->gp_diff)
+               return new - old;
+       return cur_ops->gp_diff(new, old);
+}
+
 static bool __maybe_unused torturing_tasks(void)
 {
        return cur_ops == &tasks_ops;
@@ -726,6 +773,44 @@ static void rcu_torture_boost_cb(struct rcu_head *head)
        smp_store_release(&rbip->inflight, 0);
 }
 
+static int old_rt_runtime = -1;
+
+static void rcu_torture_disable_rt_throttle(void)
+{
+       /*
+        * Disable RT throttling so that rcutorture's boost threads don't get
+        * throttled. Only possible if rcutorture is built-in; otherwise the
+        * user must do this manually by setting the sched_rt_period_us and
+        * sched_rt_runtime sysctls.
+        */
+       if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
+               return;
+
+       old_rt_runtime = sysctl_sched_rt_runtime;
+       sysctl_sched_rt_runtime = -1;
+}
+
+static void rcu_torture_enable_rt_throttle(void)
+{
+       if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
+               return;
+
+       sysctl_sched_rt_runtime = old_rt_runtime;
+       old_rt_runtime = -1;
+}
+
+static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
+{
+       if (end - start > test_boost_duration * HZ - HZ / 2) {
+               VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
+               n_rcu_torture_boost_failure++;
+
+               return true; /* failed */
+       }
+
+       return false; /* passed */
+}
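
As a worked example of the threshold above: with HZ == 250 and
test_boost_duration == 4 (both values illustrative; the latter is a module
parameter), a boost test fails when end - start exceeds
4 * 250 - 250 / 2 = 875 jiffies, i.e. 3.5 s, leaving half a second of slack
under the test interval.
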
+
 static int rcu_torture_boost(void *arg)
 {
        unsigned long call_rcu_time;
@@ -746,6 +831,21 @@ static int rcu_torture_boost(void *arg)
        init_rcu_head_on_stack(&rbi.rcu);
        /* Each pass through the following loop does one boost-test cycle. */
        do {
+               /* Track whether the test already failed in this interval. */
+               bool failed = false;
+
+               /* Increment n_rcu_torture_boosts once per boost-test */
+               while (!kthread_should_stop()) {
+                       if (mutex_trylock(&boost_mutex)) {
+                               n_rcu_torture_boosts++;
+                               mutex_unlock(&boost_mutex);
+                               break;
+                       }
+                       schedule_timeout_uninterruptible(1);
+               }
+               if (kthread_should_stop())
+                       goto checkwait;
+
                /* Wait for the next test interval. */
                oldstarttime = boost_starttime;
                while (ULONG_CMP_LT(jiffies, oldstarttime)) {
@@ -764,11 +864,10 @@ static int rcu_torture_boost(void *arg)
                                /* RCU core before ->inflight = 1. */
                                smp_store_release(&rbi.inflight, 1);
                                call_rcu(&rbi.rcu, rcu_torture_boost_cb);
-                               if (jiffies - call_rcu_time >
-                                        test_boost_duration * HZ - HZ / 2) {
-                                       VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
-                                       n_rcu_torture_boost_failure++;
-                               }
+                               /* Check if the boost test failed */
+                               failed = failed ||
+                                        rcu_torture_boost_failed(call_rcu_time,
+                                                                jiffies);
                                call_rcu_time = jiffies;
                        }
                        stutter_wait("rcu_torture_boost");
@@ -776,6 +875,14 @@ static int rcu_torture_boost(void *arg)
                                goto checkwait;
                }
 
+               /*
+                * If boosting never happened, then inflight will still be 1;
+                * in that case the boost check was never done in the above
+                * loop, so do one more here.
+                */
+               if (!failed && smp_load_acquire(&rbi.inflight))
+                       rcu_torture_boost_failed(call_rcu_time, jiffies);
+
                /*
                 * Set the start time of the next test interval.
                 * Yes, this is vulnerable to long delays, but such
@@ -788,7 +895,6 @@ static int rcu_torture_boost(void *arg)
                        if (mutex_trylock(&boost_mutex)) {
                                boost_starttime = jiffies +
                                                  test_boost_interval * HZ;
-                               n_rcu_torture_boosts++;
                                mutex_unlock(&boost_mutex);
                                break;
                        }
@@ -1010,7 +1116,7 @@ rcu_torture_writer(void *arg)
                                break;
                        }
                }
-               rcutorture_record_progress(++rcu_torture_current_version);
+               rcu_torture_current_version++;
                /* Cycle through nesting levels of rcu_expedite_gp() calls. */
                if (can_expedite &&
                    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
@@ -1084,27 +1190,133 @@ static void rcu_torture_timer_cb(struct rcu_head *rhp)
 }
 
 /*
- * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
- * incrementing the corresponding element of the pipeline array.  The
- * counter in the element should never be greater than 1, otherwise, the
- * RCU implementation is broken.
+ * Do one extension of an RCU read-side critical section using the
+ * current reader state in readstate (set to zero for initial entry
+ * to extended critical section), set the new state as specified by
+ * newstate (set to zero for final exit from extended critical section),
+ * and random-number-generator state in trsp.  If this is neither the
+ * beginning or end of the critical section and if there was actually a
+ * change, do a ->read_delay().
  */
-static void rcu_torture_timer(struct timer_list *unused)
+static void rcutorture_one_extend(int *readstate, int newstate,
+                                 struct torture_random_state *trsp)
+{
+       int idxnew = -1;
+       int idxold = *readstate;
+       int statesnew = ~*readstate & newstate;
+       int statesold = *readstate & ~newstate;
+
+       WARN_ON_ONCE(idxold < 0);
+       WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
+
+       /* First, put new protection in place to avoid critical-section gap. */
+       if (statesnew & RCUTORTURE_RDR_BH)
+               local_bh_disable();
+       if (statesnew & RCUTORTURE_RDR_IRQ)
+               local_irq_disable();
+       if (statesnew & RCUTORTURE_RDR_PREEMPT)
+               preempt_disable();
+       if (statesnew & RCUTORTURE_RDR_RCU)
+               idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
+
+       /* Next, remove old protection, irq first due to bh conflict. */
+       if (statesold & RCUTORTURE_RDR_IRQ)
+               local_irq_enable();
+       if (statesold & RCUTORTURE_RDR_BH)
+               local_bh_enable();
+       if (statesold & RCUTORTURE_RDR_PREEMPT)
+               preempt_enable();
+       if (statesold & RCUTORTURE_RDR_RCU)
+               cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
+
+       /* Delay if neither beginning nor end and there was a change. */
+       if ((statesnew || statesold) && *readstate && newstate)
+               cur_ops->read_delay(trsp);
+
+       /* Update the reader state. */
+       if (idxnew == -1)
+               idxnew = idxold & ~RCUTORTURE_RDR_MASK;
+       WARN_ON_ONCE(idxnew < 0);
+       WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
+       *readstate = idxnew | newstate;
+       WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
+       WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
+}
+
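To illustrate how a reader state is composed from the RCUTORTURE_RDR_* bits
defined earlier: the low bits record which protections are currently held,
and the bits at and above RCUTORTURE_RDR_SHIFT carry the index returned by
->readlock() (used by SRCU). A small sketch with illustrative values:

        int readstate, idx;

        /* srcu_read_lock() returned index 1; bh is also disabled: */
        readstate = (1 << RCUTORTURE_RDR_SHIFT) |
                    RCUTORTURE_RDR_RCU | RCUTORTURE_RDR_BH; /* == 0x109 */

        /* Recover the index for ->readunlock(): */
        idx = readstate >> RCUTORTURE_RDR_SHIFT;            /* == 1 */
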
+/* Return the biggest extendables mask given current RCU and boot parameters. */
+static int rcutorture_extend_mask_max(void)
+{
+       int mask;
+
+       WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
+       mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
+       mask = mask | RCUTORTURE_RDR_RCU;
+       return mask;
+}
+
+/* Return a random protection state mask, but with at least one bit set. */
+static int
+rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
+{
+       int mask = rcutorture_extend_mask_max();
+       unsigned long randmask1 = torture_random(trsp) >> 8;
+       unsigned long randmask2 = randmask1 >> 1;
+
+       WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
+       /* Half the time lots of bits, half the time only one bit. */
+       if (randmask1 & 0x1)
+               mask = mask & randmask2;
+       else
+               mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
+       if ((mask & RCUTORTURE_RDR_IRQ) &&
+           !(mask & RCUTORTURE_RDR_BH) &&
+           (oldmask & RCUTORTURE_RDR_BH))
+               mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
+       if ((mask & RCUTORTURE_RDR_IRQ) &&
+           !(mask & cur_ops->ext_irq_conflict) &&
+           (oldmask & cur_ops->ext_irq_conflict))
+               mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
+       return mask ?: RCUTORTURE_RDR_RCU;
+}
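The last two fixups deserve a note: local_bh_enable() must not be called with hard irqs disabled, so when the randomly chosen new state keeps IRQ protection but would drop BH protection that the old state held, BH protection is retained. A userspace sketch of that rule, with flag values assumed from the patch's defines:

#include <assert.h>

#define RCUTORTURE_RDR_BH  0x1  /* Assumed values. */
#define RCUTORTURE_RDR_IRQ 0x2

static int fixup_bh_vs_irq(int oldmask, int mask)
{
        if ((mask & RCUTORTURE_RDR_IRQ) && !(mask & RCUTORTURE_RDR_BH) &&
            (oldmask & RCUTORTURE_RDR_BH))
                mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
        return mask;
}

int main(void)
{
        /* bh was off and irqs will be off: bh must stay off. */
        assert(fixup_bh_vs_irq(RCUTORTURE_RDR_BH, RCUTORTURE_RDR_IRQ) ==
               (RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_BH));
        /* No irq involvement: bh may be re-enabled freely. */
        assert(fixup_bh_vs_irq(RCUTORTURE_RDR_BH, 0) == 0);
        return 0;
}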
+
+/*
+ * Do a randomly selected number of extensions of an existing RCU read-side
+ * critical section.
+ */
+static void rcutorture_loop_extend(int *readstate,
+                                  struct torture_random_state *trsp)
+{
+       int i;
+       int mask = rcutorture_extend_mask_max();
+
+       WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
+       if (!((mask - 1) & mask))
+               return;  /* Current RCU flavor not extendable. */
+       i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS;
+       while (i--) {
+               mask = rcutorture_extend_mask(*readstate, trsp);
+               rcutorture_one_extend(readstate, mask, trsp);
+       }
+}
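The !((mask - 1) & mask) test above is the usual at-most-one-bit-set check: when the only available bit is the mandatory RCUTORTURE_RDR_RCU, there is nothing to extend with, so the loop is skipped. A two-line demonstration:

#include <assert.h>

int main(void)
{
        assert(!((0x8 - 1) & 0x8));     /* Single bit set: not extendable. */
        assert(((0xb - 1) & 0xb) != 0); /* Several bits set: extendable. */
        return 0;
}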
+
+/*
+ * Do one read-side critical section, returning false if there was
+ * no data to read.  Can be invoked both from process context and
+ * from a timer handler.
+ */
+static bool rcu_torture_one_read(struct torture_random_state *trsp)
 {
-       int idx;
        unsigned long started;
        unsigned long completed;
-       static DEFINE_TORTURE_RANDOM(rand);
-       static DEFINE_SPINLOCK(rand_lock);
+       int newstate;
        struct rcu_torture *p;
        int pipe_count;
+       int readstate = 0;
        unsigned long long ts;
 
-       idx = cur_ops->readlock();
-       if (cur_ops->started)
-               started = cur_ops->started();
-       else
-               started = cur_ops->completed();
+       newstate = rcutorture_extend_mask(readstate, trsp);
+       rcutorture_one_extend(&readstate, newstate, trsp);
+       started = cur_ops->get_gp_seq();
        ts = rcu_trace_clock_local();
        p = rcu_dereference_check(rcu_torture_current,
                                  rcu_read_lock_bh_held() ||
@@ -1112,39 +1324,50 @@ static void rcu_torture_timer(struct timer_list *unused)
                                  srcu_read_lock_held(srcu_ctlp) ||
                                  torturing_tasks());
        if (p == NULL) {
-               /* Leave because rcu_torture_writer is not yet underway */
-               cur_ops->readunlock(idx);
-               return;
+               /* Wait for rcu_torture_writer to get underway */
+               rcutorture_one_extend(&readstate, 0, trsp);
+               return false;
        }
        if (p->rtort_mbtest == 0)
                atomic_inc(&n_rcu_torture_mberror);
-       spin_lock(&rand_lock);
-       cur_ops->read_delay(&rand);
-       n_rcu_torture_timers++;
-       spin_unlock(&rand_lock);
+       rcutorture_loop_extend(&readstate, trsp);
        preempt_disable();
        pipe_count = p->rtort_pipe_count;
        if (pipe_count > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                pipe_count = RCU_TORTURE_PIPE_LEN;
        }
-       completed = cur_ops->completed();
+       completed = cur_ops->get_gp_seq();
        if (pipe_count > 1) {
-               do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
-                                         started, completed);
+               do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
+                                         ts, started, completed);
                rcu_ftrace_dump(DUMP_ALL);
        }
        __this_cpu_inc(rcu_torture_count[pipe_count]);
-       completed = completed - started;
-       if (cur_ops->started)
-               completed++;
+       completed = rcutorture_seq_diff(completed, started);
        if (completed > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                completed = RCU_TORTURE_PIPE_LEN;
        }
        __this_cpu_inc(rcu_torture_batch[completed]);
        preempt_enable();
-       cur_ops->readunlock(idx);
+       rcutorture_one_extend(&readstate, 0, trsp);
+       WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
+       return true;
+}
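Factoring the read-side pass into rcu_torture_one_read() lets each caller supply its own torture_random_state: the reader kthread keeps one on its stack, while the timer handler, which may fire on any CPU, uses the per-CPU instance defined just below, eliminating the old rand_lock spinlock. A userspace sketch of the same state-threading pattern, with a hypothetical xorshift generator standing in for torture_random():

#include <stdio.h>

struct rnd_state {
        unsigned long s;        /* Hypothetical stand-in for torture_random_state. */
};

static unsigned long next_rand(struct rnd_state *trsp)
{
        trsp->s ^= trsp->s << 13;       /* xorshift64, illustration only. */
        trsp->s ^= trsp->s >> 7;
        trsp->s ^= trsp->s << 17;
        return trsp->s;
}

static int one_read(struct rnd_state *trsp)
{
        return next_rand(trsp) & 1;     /* Caller-owned state, so no locking. */
}

int main(void)
{
        struct rnd_state stack_state = { .s = 88172645463325252UL };

        printf("%d %d\n", one_read(&stack_state), one_read(&stack_state));
        return 0;
}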
+
+static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
+
+/*
+ * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
+ * incrementing the corresponding element of the pipeline array.  The
+ * counter in the element should never be greater than 1, otherwise, the
+ * RCU implementation is broken.
+ */
+static void rcu_torture_timer(struct timer_list *unused)
+{
+       atomic_long_inc(&n_rcu_torture_timers);
+       (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));
 
        /* Test call_rcu() invocation from interrupt handler. */
        if (cur_ops->call) {
@@ -1164,14 +1387,8 @@ static void rcu_torture_timer(struct timer_list *unused)
 static int
 rcu_torture_reader(void *arg)
 {
-       unsigned long started;
-       unsigned long completed;
-       int idx;
        DEFINE_TORTURE_RANDOM(rand);
-       struct rcu_torture *p;
-       int pipe_count;
        struct timer_list t;
-       unsigned long long ts;
 
        VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
        set_user_nice(current, MAX_NICE);
@@ -1183,49 +1400,8 @@ rcu_torture_reader(void *arg)
                        if (!timer_pending(&t))
                                mod_timer(&t, jiffies + 1);
                }
-               idx = cur_ops->readlock();
-               if (cur_ops->started)
-                       started = cur_ops->started();
-               else
-                       started = cur_ops->completed();
-               ts = rcu_trace_clock_local();
-               p = rcu_dereference_check(rcu_torture_current,
-                                         rcu_read_lock_bh_held() ||
-                                         rcu_read_lock_sched_held() ||
-                                         srcu_read_lock_held(srcu_ctlp) ||
-                                         torturing_tasks());
-               if (p == NULL) {
-                       /* Wait for rcu_torture_writer to get underway */
-                       cur_ops->readunlock(idx);
+               if (!rcu_torture_one_read(&rand))
                        schedule_timeout_interruptible(HZ);
-                       continue;
-               }
-               if (p->rtort_mbtest == 0)
-                       atomic_inc(&n_rcu_torture_mberror);
-               cur_ops->read_delay(&rand);
-               preempt_disable();
-               pipe_count = p->rtort_pipe_count;
-               if (pipe_count > RCU_TORTURE_PIPE_LEN) {
-                       /* Should not happen, but... */
-                       pipe_count = RCU_TORTURE_PIPE_LEN;
-               }
-               completed = cur_ops->completed();
-               if (pipe_count > 1) {
-                       do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
-                                                 ts, started, completed);
-                       rcu_ftrace_dump(DUMP_ALL);
-               }
-               __this_cpu_inc(rcu_torture_count[pipe_count]);
-               completed = completed - started;
-               if (cur_ops->started)
-                       completed++;
-               if (completed > RCU_TORTURE_PIPE_LEN) {
-                       /* Should not happen, but... */
-                       completed = RCU_TORTURE_PIPE_LEN;
-               }
-               __this_cpu_inc(rcu_torture_batch[completed]);
-               preempt_enable();
-               cur_ops->readunlock(idx);
                stutter_wait("rcu_torture_reader");
        } while (!torture_must_stop());
        if (irqreader && cur_ops->irq_capable) {
@@ -1282,7 +1458,7 @@ rcu_torture_stats_print(void)
        pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
                n_rcu_torture_boost_failure,
                n_rcu_torture_boosts,
-               n_rcu_torture_timers);
+               atomic_long_read(&n_rcu_torture_timers));
        torture_onoff_stats();
        pr_cont("barrier: %ld/%ld:%ld ",
                n_barrier_successes,
@@ -1324,18 +1500,16 @@ rcu_torture_stats_print(void)
        if (rtcv_snap == rcu_torture_current_version &&
            rcu_torture_current != NULL) {
                int __maybe_unused flags = 0;
-               unsigned long __maybe_unused gpnum = 0;
-               unsigned long __maybe_unused completed = 0;
+               unsigned long __maybe_unused gp_seq = 0;
 
                rcutorture_get_gp_data(cur_ops->ttype,
-                                      &flags, &gpnum, &completed);
+                                      &flags, &gp_seq);
                srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
-                                       &flags, &gpnum, &completed);
+                                       &flags, &gp_seq);
                wtp = READ_ONCE(writer_task);
-               pr_alert("??? Writer stall state %s(%d) g%lu c%lu f%#x ->state %#lx cpu %d\n",
+               pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
                         rcu_torture_writer_state_getname(),
-                        rcu_torture_writer_state,
-                        gpnum, completed, flags,
+                        rcu_torture_writer_state, gp_seq, flags,
                         wtp == NULL ? ~0UL : wtp->state,
                         wtp == NULL ? -1 : (int)task_cpu(wtp));
                if (!splatted && wtp) {
@@ -1365,7 +1539,7 @@ rcu_torture_stats(void *arg)
        return 0;
 }
 
-static inline void
+static void
 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
 {
        pr_alert("%s" TORTURE_FLAG
@@ -1397,6 +1571,7 @@ static int rcutorture_booster_cleanup(unsigned int cpu)
        mutex_lock(&boost_mutex);
        t = boost_tasks[cpu];
        boost_tasks[cpu] = NULL;
+       rcu_torture_enable_rt_throttle();
        mutex_unlock(&boost_mutex);
 
        /* This must be outside of the mutex, otherwise deadlock! */
@@ -1413,6 +1588,7 @@ static int rcutorture_booster_init(unsigned int cpu)
 
        /* Don't allow time recalculation while creating a new task. */
        mutex_lock(&boost_mutex);
+       rcu_torture_disable_rt_throttle();
        VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
        boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
                                                  cpu_to_node(cpu),
@@ -1446,7 +1622,7 @@ static int rcu_torture_stall(void *args)
                VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
        }
        if (!kthread_should_stop()) {
-               stop_at = get_seconds() + stall_cpu;
+               stop_at = ktime_get_seconds() + stall_cpu;
                /* RCU CPU stall is expected behavior in following code. */
                rcu_read_lock();
                if (stall_cpu_irqsoff)
@@ -1455,7 +1631,8 @@ static int rcu_torture_stall(void *args)
                        preempt_disable();
                pr_alert("rcu_torture_stall start on CPU %d.\n",
                         smp_processor_id());
-               while (ULONG_CMP_LT(get_seconds(), stop_at))
+               while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
+                                   stop_at))
                        continue;  /* Induce RCU CPU stall warning. */
                if (stall_cpu_irqsoff)
                        local_irq_enable();
@@ -1546,8 +1723,9 @@ static int rcu_torture_barrier(void *arg)
                               atomic_read(&barrier_cbs_invoked),
                               n_barrier_cbs);
                        WARN_ON_ONCE(1);
+               } else {
+                       n_barrier_successes++;
                }
-               n_barrier_successes++;
                schedule_timeout_interruptible(HZ / 10);
        } while (!torture_must_stop());
        torture_kthread_stopping("rcu_torture_barrier");
@@ -1610,17 +1788,39 @@ static void rcu_torture_barrier_cleanup(void)
        }
 }
 
+static bool rcu_torture_can_boost(void)
+{
+       static int boost_warn_once;
+       int prio;
+
+       if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
+               return false;
+
+       prio = rcu_get_gp_kthreads_prio();
+       if (!prio)
+               return false;
+
+       if (prio < 2) {
+               if (boost_warn_once == 1)
+                       return false;
+
+               pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
+               boost_warn_once = 1;
+               return false;
+       }
+
+       return true;
+}
+
 static enum cpuhp_state rcutor_hp;
 
 static void
 rcu_torture_cleanup(void)
 {
        int flags = 0;
-       unsigned long gpnum = 0;
-       unsigned long completed = 0;
+       unsigned long gp_seq = 0;
        int i;
 
-       rcutorture_record_test_transition();
        if (torture_cleanup_begin()) {
                if (cur_ops->cb_barrier != NULL)
                        cur_ops->cb_barrier();
@@ -1648,17 +1848,15 @@ rcu_torture_cleanup(void)
                fakewriter_tasks = NULL;
        }
 
-       rcutorture_get_gp_data(cur_ops->ttype, &flags, &gpnum, &completed);
-       srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
-                               &flags, &gpnum, &completed);
-       pr_alert("%s:  End-test grace-period state: g%lu c%lu f%#x\n",
-                cur_ops->name, gpnum, completed, flags);
+       rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
+       srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
+       pr_alert("%s:  End-test grace-period state: g%lu f%#x\n",
+                cur_ops->name, gp_seq, flags);
        torture_stop_kthread(rcu_torture_stats, stats_task);
        torture_stop_kthread(rcu_torture_fqs, fqs_task);
        for (i = 0; i < ncbflooders; i++)
                torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
-       if ((test_boost == 1 && cur_ops->can_boost) ||
-           test_boost == 2)
+       if (rcu_torture_can_boost())
                cpuhp_remove_state(rcutor_hp);
 
        /*
@@ -1746,7 +1944,7 @@ rcu_torture_init(void)
        int firsterr = 0;
        static struct rcu_torture_ops *torture_ops[] = {
                &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
-               &sched_ops, &tasks_ops,
+               &busted_srcud_ops, &sched_ops, &tasks_ops,
        };
 
        if (!torture_init_begin(torture_type, verbose))
@@ -1763,8 +1961,8 @@ rcu_torture_init(void)
                         torture_type);
                pr_alert("rcu-torture types:");
                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
-                       pr_alert(" %s", torture_ops[i]->name);
-               pr_alert("\n");
+                       pr_cont(" %s", torture_ops[i]->name);
+               pr_cont("\n");
                firsterr = -EINVAL;
                goto unwind;
        }
@@ -1882,8 +2080,7 @@ rcu_torture_init(void)
                test_boost_interval = 1;
        if (test_boost_duration < 2)
                test_boost_duration = 2;
-       if ((test_boost == 1 && cur_ops->can_boost) ||
-           test_boost == 2) {
+       if (rcu_torture_can_boost()) {
 
                boost_starttime = jiffies + test_boost_interval * HZ;
 
@@ -1897,7 +2094,7 @@ rcu_torture_init(void)
        firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
        if (firsterr)
                goto unwind;
-       firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ);
+       firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
        if (firsterr)
                goto unwind;
        firsterr = rcu_torture_stall_init();
@@ -1926,7 +2123,6 @@ rcu_torture_init(void)
                                goto unwind;
                }
        }
-       rcutorture_record_test_transition();
        torture_init_end();
        return 0;
 
index b4123d7a2cec4f9010178d9b0049c2e43bbf833b..6c9866a854b111b2ee5245ef2b6ca18af994b4c5 100644 (file)
@@ -26,6 +26,8 @@
  *
  */
 
+#define pr_fmt(fmt) "rcu: " fmt
+
 #include <linux/export.h>
 #include <linux/mutex.h>
 #include <linux/percpu.h>
@@ -390,7 +392,8 @@ void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
                }
        if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
            WARN_ON(srcu_readers_active(sp))) {
-               pr_info("%s: Active srcu_struct %p state: %d\n", __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+               pr_info("%s: Active srcu_struct %p state: %d\n",
+                       __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
                return; /* Caller forgot to stop doing call_srcu()? */
        }
        free_percpu(sp->sda);
@@ -641,6 +644,9 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
  * period s.  Losers must either ensure that their desired grace-period
  * number is recorded on at least their leaf srcu_node structure, or they
  * must take steps to invoke their own callbacks.
+ *
+ * Note that this function also does the work of srcu_funnel_exp_start(),
+ * in some cases by directly invoking it.
  */
 static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
                                 unsigned long s, bool do_norm)
@@ -823,17 +829,17 @@ static void srcu_leak_callback(struct rcu_head *rhp)
  * more than one CPU, this means that when "func()" is invoked, each CPU
  * is guaranteed to have executed a full memory barrier since the end of
  * its last corresponding SRCU read-side critical section whose beginning
- * preceded the call to call_rcu().  It also means that each CPU executing
+ * preceded the call to call_srcu().  It also means that each CPU executing
  * an SRCU read-side critical section that continues beyond the start of
- * "func()" must have executed a memory barrier after the call_rcu()
+ * "func()" must have executed a memory barrier after the call_srcu()
  * but before the beginning of that SRCU read-side critical section.
  * Note that these guarantees include CPUs that are offline, idle, or
  * executing in user mode, as well as CPUs that are executing in the kernel.
  *
- * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
  * resulting SRCU callback function "func()", then both CPU A and CPU
  * B are guaranteed to execute a full memory barrier during the time
- * interval between the call to call_rcu() and the invocation of "func()".
+ * interval between the call to call_srcu() and the invocation of "func()".
  * This guarantee applies even if CPU A and CPU B are the same CPU (but
  * again only if the system has more than one CPU).
  *
@@ -1246,13 +1252,12 @@ static void process_srcu(struct work_struct *work)
 
 void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *sp, int *flags,
-                            unsigned long *gpnum, unsigned long *completed)
+                            unsigned long *gp_seq)
 {
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
-       *completed = rcu_seq_ctr(sp->srcu_gp_seq);
-       *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
+       *gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
 }
 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
 
@@ -1263,16 +1268,17 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
        unsigned long s0 = 0, s1 = 0;
 
        idx = sp->srcu_idx & 0x1;
-       pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx);
+       pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
+                tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
        for_each_possible_cpu(cpu) {
                unsigned long l0, l1;
                unsigned long u0, u1;
                long c0, c1;
-               struct srcu_data *counts;
+               struct srcu_data *sdp;
 
-               counts = per_cpu_ptr(sp->sda, cpu);
-               u0 = counts->srcu_unlock_count[!idx];
-               u1 = counts->srcu_unlock_count[idx];
+               sdp = per_cpu_ptr(sp->sda, cpu);
+               u0 = sdp->srcu_unlock_count[!idx];
+               u1 = sdp->srcu_unlock_count[idx];
 
                /*
                 * Make sure that a lock is always counted if the corresponding
@@ -1280,12 +1286,13 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
                 */
                smp_rmb();
 
-               l0 = counts->srcu_lock_count[!idx];
-               l1 = counts->srcu_lock_count[idx];
+               l0 = sdp->srcu_lock_count[!idx];
+               l1 = sdp->srcu_lock_count[idx];
 
                c0 = l0 - u0;
                c1 = l1 - u1;
-               pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
+               pr_cont(" %d(%ld,%ld %1p)",
+                       cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
                s0 += c0;
                s1 += c1;
        }
index a64eee0db39e3642c33e69c55535619e363c0b55..befc9321a89c22cfbf34d3e8a34e56685ff2801a 100644 (file)
@@ -122,10 +122,8 @@ void rcu_check_callbacks(int user)
 {
        if (user)
                rcu_sched_qs();
-       else if (!in_softirq())
+       if (user || !in_softirq())
                rcu_bh_qs();
-       if (user)
-               rcu_note_voluntary_context_switch(current);
 }
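The surviving condition folds the two old branches into one: a user-mode tick is a quiescent state for both flavors, and any tick that did not interrupt softirq processing is a quiescent state for RCU-bh. A compact truth table of the new logic, as a standalone sketch:

#include <stdio.h>

int main(void)
{
        int user, softirq;

        for (user = 0; user <= 1; user++)
                for (softirq = 0; softirq <= 1; softirq++)
                        printf("user=%d in_softirq=%d -> sched_qs=%d bh_qs=%d\n",
                               user, softirq, user, user || !softirq);
        return 0;
}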
 
 /*
index aa7cade1b9f399abcac792e08a97114305a241ad..6930934e8b9f8b20d5961d87aefd7ea2d299822f 100644 (file)
@@ -27,6 +27,9 @@
  * For detailed explanation of Read-Copy Update mechanism see -
  *     Documentation/RCU
  */
+
+#define pr_fmt(fmt) "rcu: " fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -95,13 +98,13 @@ struct rcu_state sname##_state = { \
        .rda = &sname##_data, \
        .call = cr, \
        .gp_state = RCU_GP_IDLE, \
-       .gpnum = 0UL - 300UL, \
-       .completed = 0UL - 300UL, \
+       .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT, \
        .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
        .name = RCU_STATE_NAME(sname), \
        .abbr = sabbr, \
        .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
        .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
+       .ofl_lock = __SPIN_LOCK_UNLOCKED(sname##_state.ofl_lock), \
 }
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
@@ -155,6 +158,9 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
+static void
+rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
+                 struct rcu_node *rnp, unsigned long gps, unsigned long flags);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
@@ -177,6 +183,13 @@ module_param(gp_init_delay, int, 0444);
 static int gp_cleanup_delay;
 module_param(gp_cleanup_delay, int, 0444);
 
+/* Retrieve RCU kthread priority for rcutorture */
+int rcu_get_gp_kthreads_prio(void)
+{
+       return kthread_prio;
+}
+EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
+
 /*
  * Number of grace periods between delays, normalized by the duration of
  * the delay.  The longer the delay, the more the grace periods between
@@ -188,18 +201,6 @@ module_param(gp_cleanup_delay, int, 0444);
  */
 #define PER_RCU_NODE_PERIOD 3  /* Number of grace periods between delays. */
 
-/*
- * Track the rcutorture test sequence number and the update version
- * number within a given test.  The rcutorture_testseq is incremented
- * on every rcutorture module load and unload, so has an odd value
- * when a test is running.  The rcutorture_vernum is set to zero
- * when rcutorture starts and is incremented on each rcutorture update.
- * These variables enable correlating rcutorture output with the
- * RCU tracing information.
- */
-unsigned long rcutorture_testseq;
-unsigned long rcutorture_vernum;
-
 /*
  * Compute the mask of online CPUs for the specified rcu_node structure.
  * This will not be stable unless the rcu_node structure's ->lock is
@@ -218,7 +219,7 @@ unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
  */
 static int rcu_gp_in_progress(struct rcu_state *rsp)
 {
-       return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
+       return rcu_seq_state(rcu_seq_current(&rsp->gp_seq));
 }
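With the move from the ->gpnum/->completed pair to a single ->gp_seq, "grace period in progress" becomes "low state bits of the sequence counter are nonzero". A hedged userspace sketch of the arithmetic, assuming the RCU_SEQ_CTR_SHIFT of 2 and the state mask from kernel/rcu/rcu.h:

#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT  2    /* Assumed, from kernel/rcu/rcu.h. */
#define RCU_SEQ_STATE_MASK ((1UL << RCU_SEQ_CTR_SHIFT) - 1)

static unsigned long seq_ctr(unsigned long s)   { return s >> RCU_SEQ_CTR_SHIFT; }
static unsigned long seq_state(unsigned long s) { return s & RCU_SEQ_STATE_MASK; }

int main(void)
{
        unsigned long gp_seq = 5UL << RCU_SEQ_CTR_SHIFT;        /* GP #5, idle. */

        printf("in progress? %lu\n", seq_state(gp_seq));        /* 0: idle. */
        gp_seq++;                       /* GP start: state becomes nonzero. */
        printf("in progress? %lu\n", seq_state(gp_seq));        /* 1: active. */
        gp_seq = (seq_ctr(gp_seq) + 1) << RCU_SEQ_CTR_SHIFT;    /* GP end. */
        printf("completed ctr: %lu\n", seq_ctr(gp_seq));        /* 6, idle again. */
        return 0;
}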
 
 /*
@@ -233,7 +234,7 @@ void rcu_sched_qs(void)
        if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
                return;
        trace_rcu_grace_period(TPS("rcu_sched"),
-                              __this_cpu_read(rcu_sched_data.gpnum),
+                              __this_cpu_read(rcu_sched_data.gp_seq),
                               TPS("cpuqs"));
        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
        if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
@@ -248,7 +249,7 @@ void rcu_bh_qs(void)
        RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
        if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
                trace_rcu_grace_period(TPS("rcu_bh"),
-                                      __this_cpu_read(rcu_bh_data.gpnum),
+                                      __this_cpu_read(rcu_bh_data.gp_seq),
                                       TPS("cpuqs"));
                __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
        }
@@ -379,20 +380,6 @@ static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
        return snap != rcu_dynticks_snap(rdtp);
 }
 
-/*
- * Do a double-increment of the ->dynticks counter to emulate a
- * momentary idle-CPU quiescent state.
- */
-static void rcu_dynticks_momentary_idle(void)
-{
-       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
-       int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
-                                       &rdtp->dynticks);
-
-       /* It is illegal to call this from idle state. */
-       WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
-}
-
 /*
  * Set the special (bottom) bit of the specified CPU so that it
  * will take special action (such as flushing its TLB) on the
@@ -424,12 +411,17 @@ bool rcu_eqs_special_set(int cpu)
  *
  * We inform the RCU core by emulating a zero-duration dyntick-idle period.
  *
- * The caller must have disabled interrupts.
+ * The caller must have disabled interrupts and must not be idle.
  */
 static void rcu_momentary_dyntick_idle(void)
 {
+       struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       int special;
+
        raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
-       rcu_dynticks_momentary_idle();
+       special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
+       /* It is illegal to call this from idle state. */
+       WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
 }
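Inlining rcu_dynticks_momentary_idle() makes the trick easier to see: ->dynticks counts in units of RCU_DYNTICK_CTRL_CTR, with the low bit reserved for special work, so adding twice that value emulates an idle entry plus exit, and any CPU comparing against an older snapshot observes a quiescent state. A userspace sketch under those assumed constants (values taken from tree.c as I understand them):

#include <stdatomic.h>
#include <stdio.h>

#define RCU_DYNTICK_CTRL_MASK 0x1       /* Assumed: low bit = special action. */
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)

static _Atomic int dynticks = RCU_DYNTICK_CTRL_CTR;     /* Non-idle at start. */

int main(void)
{
        int snap = atomic_load(&dynticks);
        int special = atomic_fetch_add(&dynticks, 2 * RCU_DYNTICK_CTRL_CTR) +
                      2 * RCU_DYNTICK_CTRL_CTR;         /* Post-add value. */

        printf("was non-idle: %d\n", !!(special & RCU_DYNTICK_CTRL_CTR));
        printf("QS seen since snapshot: %d\n", atomic_load(&dynticks) != snap);
        return 0;
}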
 
 /*
@@ -451,7 +443,7 @@ void rcu_note_context_switch(bool preempt)
                rcu_momentary_dyntick_idle();
        this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
        if (!preempt)
-               rcu_note_voluntary_context_switch_lite(current);
+               rcu_tasks_qs(current);
 out:
        trace_rcu_utilization(TPS("End context switch"));
        barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -513,8 +505,38 @@ static ulong jiffies_till_first_fqs = ULONG_MAX;
 static ulong jiffies_till_next_fqs = ULONG_MAX;
 static bool rcu_kick_kthreads;
 
-module_param(jiffies_till_first_fqs, ulong, 0644);
-module_param(jiffies_till_next_fqs, ulong, 0644);
+static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
+{
+       ulong j;
+       int ret = kstrtoul(val, 0, &j);
+
+       if (!ret)
+               WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
+       return ret;
+}
+
+static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
+{
+       ulong j;
+       int ret = kstrtoul(val, 0, &j);
+
+       if (!ret)
+               WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
+       return ret;
+}
+
+static struct kernel_param_ops first_fqs_jiffies_ops = {
+       .set = param_set_first_fqs_jiffies,
+       .get = param_get_ulong,
+};
+
+static struct kernel_param_ops next_fqs_jiffies_ops = {
+       .set = param_set_next_fqs_jiffies,
+       .get = param_get_ulong,
+};
+
+module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
+module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
 module_param(rcu_kick_kthreads, bool, 0644);
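Replacing module_param() with module_param_cb() lets the setters validate at write time: jiffies_till_first_fqs is capped at HZ, and jiffies_till_next_fqs is additionally forced to at least one jiffy. A tiny sketch of that clamping policy, with the HZ value assumed purely for illustration:

#include <stdio.h>

#define HZ 1000 /* Assumed for illustration. */

static unsigned long clamp_first(unsigned long j)
{
        return j > HZ ? HZ : j;                 /* Zero is allowed here. */
}

static unsigned long clamp_next(unsigned long j)
{
        return j > HZ ? HZ : (j ? j : 1);       /* At least one jiffy. */
}

int main(void)
{
        printf("%lu %lu\n", clamp_first(5000), clamp_first(0)); /* 1000 0 */
        printf("%lu %lu\n", clamp_next(5000), clamp_next(0));   /* 1000 1 */
        return 0;
}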
 
 /*
@@ -529,58 +551,31 @@ static void force_quiescent_state(struct rcu_state *rsp);
 static int rcu_pending(void);
 
 /*
- * Return the number of RCU batches started thus far for debug & stats.
+ * Return the number of RCU GPs completed thus far for debug & stats.
  */
-unsigned long rcu_batches_started(void)
+unsigned long rcu_get_gp_seq(void)
 {
-       return rcu_state_p->gpnum;
+       return READ_ONCE(rcu_state_p->gp_seq);
 }
-EXPORT_SYMBOL_GPL(rcu_batches_started);
+EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
 
 /*
- * Return the number of RCU-sched batches started thus far for debug & stats.
+ * Return the number of RCU-sched GPs completed thus far for debug & stats.
  */
-unsigned long rcu_batches_started_sched(void)
+unsigned long rcu_sched_get_gp_seq(void)
 {
-       return rcu_sched_state.gpnum;
+       return READ_ONCE(rcu_sched_state.gp_seq);
 }
-EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
+EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
 
 /*
- * Return the number of RCU BH batches started thus far for debug & stats.
+ * Return the number of RCU-bh GPs completed thus far for debug & stats.
  */
-unsigned long rcu_batches_started_bh(void)
+unsigned long rcu_bh_get_gp_seq(void)
 {
-       return rcu_bh_state.gpnum;
+       return READ_ONCE(rcu_bh_state.gp_seq);
 }
-EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
-
-/*
- * Return the number of RCU batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed(void)
-{
-       return rcu_state_p->completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed);
-
-/*
- * Return the number of RCU-sched batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed_sched(void)
-{
-       return rcu_sched_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
-
-/*
- * Return the number of RCU BH batches completed thus far for debug & stats.
- */
-unsigned long rcu_batches_completed_bh(void)
-{
-       return rcu_bh_state.completed;
-}
-EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
 
 /*
  * Return the number of RCU expedited batches completed thus far for
@@ -636,35 +631,42 @@ EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
  */
 void show_rcu_gp_kthreads(void)
 {
+       int cpu;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
        struct rcu_state *rsp;
 
        for_each_rcu_flavor(rsp) {
                pr_info("%s: wait state: %d ->state: %#lx\n",
                        rsp->name, rsp->gp_state, rsp->gp_kthread->state);
+               rcu_for_each_node_breadth_first(rsp, rnp) {
+                       if (ULONG_CMP_GE(rsp->gp_seq, rnp->gp_seq_needed))
+                               continue;
+                       pr_info("\trcu_node %d:%d ->gp_seq %lu ->gp_seq_needed %lu\n",
+                               rnp->grplo, rnp->grphi, rnp->gp_seq,
+                               rnp->gp_seq_needed);
+                       if (!rcu_is_leaf_node(rnp))
+                               continue;
+                       for_each_leaf_node_possible_cpu(rnp, cpu) {
+                               rdp = per_cpu_ptr(rsp->rda, cpu);
+                               if (rdp->gpwrap ||
+                                   ULONG_CMP_GE(rsp->gp_seq,
+                                                rdp->gp_seq_needed))
+                                       continue;
+                               pr_info("\tcpu %d ->gp_seq_needed %lu\n",
+                                       cpu, rdp->gp_seq_needed);
+                       }
+               }
                /* sched_show_task(rsp->gp_kthread); */
        }
 }
 EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
 
-/*
- * Record the number of times rcutorture tests have been initiated and
- * terminated.  This information allows the debugfs tracing stats to be
- * correlated to the rcutorture messages, even when the rcutorture module
- * is being repeatedly loaded and unloaded.  In other words, we cannot
- * store this state in rcutorture itself.
- */
-void rcutorture_record_test_transition(void)
-{
-       rcutorture_testseq++;
-       rcutorture_vernum = 0;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
-
 /*
  * Send along grace-period-related data for rcutorture diagnostics.
  */
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
-                           unsigned long *gpnum, unsigned long *completed)
+                           unsigned long *gp_seq)
 {
        struct rcu_state *rsp = NULL;
 
@@ -684,22 +686,10 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
        if (rsp == NULL)
                return;
        *flags = READ_ONCE(rsp->gp_flags);
-       *gpnum = READ_ONCE(rsp->gpnum);
-       *completed = READ_ONCE(rsp->completed);
+       *gp_seq = rcu_seq_current(&rsp->gp_seq);
 }
 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 
-/*
- * Record the number of writer passes through the current rcutorture test.
- * This is also used to correlate debugfs tracing stats with the rcutorture
- * messages.
- */
-void rcutorture_record_progress(unsigned long vernum)
-{
-       rcutorture_vernum++;
-}
-EXPORT_SYMBOL_GPL(rcutorture_record_progress);
-
 /*
  * Return the root node of the specified rcu_state structure.
  */
@@ -1059,41 +1049,41 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
- * Is the current CPU online?  Disable preemption to avoid false positives
- * that could otherwise happen due to the current CPU number being sampled,
- * this task being preempted, its old CPU being taken offline, resuming
- * on some other CPU, then determining that its old CPU is now offline.
- * It is OK to use RCU on an offline processor during initial boot, hence
- * the check for rcu_scheduler_fully_active.  Note also that it is OK
- * for a CPU coming online to use RCU for one jiffy prior to marking itself
- * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
- * offline to continue to use RCU for one jiffy after marking itself
- * offline in the cpu_online_mask.  This leniency is necessary given the
- * non-atomic nature of the online and offline processing, for example,
- * the fact that a CPU enters the scheduler after completing the teardown
- * of the CPU.
+ * Is the current CPU online as far as RCU is concerned?
  *
- * This is also why RCU internally marks CPUs online during in the
- * preparation phase and offline after the CPU has been taken down.
+ * Disable preemption to avoid false positives that could otherwise
+ * happen due to the current CPU number being sampled, this task being
+ * preempted, its old CPU being taken offline, resuming on some other CPU,
+ * then determining that its old CPU is now offline.  Because there are
+ * multiple flavors of RCU, and because this function can be called in the
+ * midst of updating the flavors while a given CPU is coming online or going
+ * offline, it is necessary to check all flavors.  If any of the flavors
+ * believes that the given CPU is online, it is considered to be online.
  *
- * Disable checking if in an NMI handler because we cannot safely report
- * errors from NMI handlers anyway.
+ * Disable checking if in an NMI handler because we cannot safely
+ * report errors from NMI handlers anyway.  In addition, it is OK to use
+ * RCU on an offline processor during initial boot, hence the check for
+ * rcu_scheduler_fully_active.
  */
 bool rcu_lockdep_current_cpu_online(void)
 {
        struct rcu_data *rdp;
        struct rcu_node *rnp;
-       bool ret;
+       struct rcu_state *rsp;
 
-       if (in_nmi())
+       if (in_nmi() || !rcu_scheduler_fully_active)
                return true;
        preempt_disable();
-       rdp = this_cpu_ptr(&rcu_sched_data);
-       rnp = rdp->mynode;
-       ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
-             !rcu_scheduler_fully_active;
+       for_each_rcu_flavor(rsp) {
+               rdp = this_cpu_ptr(rsp->rda);
+               rnp = rdp->mynode;
+               if (rdp->grpmask & rcu_rnp_online_cpus(rnp)) {
+                       preempt_enable();
+                       return true;
+               }
+       }
        preempt_enable();
-       return ret;
+       return false;
 }
 EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
 
@@ -1115,17 +1105,18 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 /*
  * We are reporting a quiescent state on behalf of some other CPU, so
  * it is our responsibility to check for and handle potential overflow
- * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
+ * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
  * After all, the CPU might be in deep idle state, and thus executing no
  * code whatsoever.
  */
 static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
 {
        raw_lockdep_assert_held_rcu_node(rnp);
-       if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
+       if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
+                        rnp->gp_seq))
                WRITE_ONCE(rdp->gpwrap, true);
-       if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
-               rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
+       if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
+               rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
 }
 
 /*
@@ -1137,7 +1128,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
        rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
        if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
                rcu_gpnum_ovf(rdp->mynode, rdp);
                return 1;
        }
@@ -1159,7 +1150,7 @@ static void rcu_iw_handler(struct irq_work *iwp)
        rnp = rdp->mynode;
        raw_spin_lock_rcu_node(rnp);
        if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
-               rdp->rcu_iw_gpnum = rnp->gpnum;
+               rdp->rcu_iw_gp_seq = rnp->gp_seq;
                rdp->rcu_iw_pending = false;
        }
        raw_spin_unlock_rcu_node(rnp);
@@ -1187,7 +1178,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
         * of the current RCU grace period.
         */
        if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
+               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("dti"));
                rdp->dynticks_fqs++;
                rcu_gpnum_ovf(rnp, rdp);
                return 1;
@@ -1203,8 +1194,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
        ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
        if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
            READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
-           READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
+           rcu_seq_current(&rdp->gp_seq) == rnp->gp_seq && !rdp->gpwrap) {
+               trace_rcu_fqs(rdp->rsp->name, rdp->gp_seq, rdp->cpu, TPS("rqc"));
                rcu_gpnum_ovf(rnp, rdp);
                return 1;
        } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
@@ -1212,12 +1203,25 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
                smp_store_release(ruqp, true);
        }
 
-       /* Check for the CPU being offline. */
-       if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
-               trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
-               rdp->offline_fqs++;
-               rcu_gpnum_ovf(rnp, rdp);
-               return 1;
+       /* If waiting too long on an offline CPU, complain. */
+       if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp)) &&
+           time_after(jiffies, rdp->rsp->gp_start + HZ)) {
+               bool onl;
+               struct rcu_node *rnp1;
+
+               WARN_ON(1);  /* Offline CPUs are supposed to report QS! */
+               pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
+                       __func__, rnp->grplo, rnp->grphi, rnp->level,
+                       (long)rnp->gp_seq, (long)rnp->completedqs);
+               for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
+                       pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
+                               __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
+               onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+               pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
+                       __func__, rdp->cpu, ".o"[onl],
+                       (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+                       (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+               return 1; /* Break things loose after complaining. */
        }
 
        /*
@@ -1256,11 +1260,11 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
        if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
                resched_cpu(rdp->cpu);
                if (IS_ENABLED(CONFIG_IRQ_WORK) &&
-                   !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
+                   !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
                    (rnp->ffmask & rdp->grpmask)) {
                        init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
                        rdp->rcu_iw_pending = true;
-                       rdp->rcu_iw_gpnum = rnp->gpnum;
+                       rdp->rcu_iw_gp_seq = rnp->gp_seq;
                        irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
                }
        }
@@ -1274,9 +1278,9 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
        unsigned long j1;
 
        rsp->gp_start = j;
-       smp_wmb(); /* Record start time before stall time. */
        j1 = rcu_jiffies_till_stall_check();
-       WRITE_ONCE(rsp->jiffies_stall, j + j1);
+       /* Record ->gp_start before ->jiffies_stall. */
+       smp_store_release(&rsp->jiffies_stall, j + j1); /* ^^^ */
        rsp->jiffies_resched = j + j1 / 2;
        rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
 }
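The smp_store_release() replaces the earlier smp_wmb()/WRITE_ONCE() pair: the release store publishes ->jiffies_stall only after the ->gp_start write is visible, pairing with the reader ordering in check_cpu_stall() below (which uses READ_ONCE()/smp_rmb(); the C11 analogue sketched here is an acquire load). A minimal sketch of the pattern:

#include <stdatomic.h>
#include <stdio.h>

static unsigned long gp_start;                  /* Plain data. */
static _Atomic unsigned long jiffies_stall;     /* Publication point. */

static void writer(void)
{
        gp_start = 42;
        /* Release: gp_start is visible before the new stall time. */
        atomic_store_explicit(&jiffies_stall, 1042, memory_order_release);
}

static void reader(void)
{
        if (atomic_load_explicit(&jiffies_stall, memory_order_acquire) == 1042)
                printf("gp_start = %lu\n", gp_start);   /* Guaranteed 42. */
}

int main(void)
{
        writer();
        reader();
        return 0;
}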
@@ -1302,9 +1306,9 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
        j = jiffies;
        gpa = READ_ONCE(rsp->gp_activity);
        if (j - gpa > 2 * HZ) {
-               pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
+               pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
                       rsp->name, j - gpa,
-                      rsp->gpnum, rsp->completed,
+                      (long)rcu_seq_current(&rsp->gp_seq),
                       rsp->gp_flags,
                       gp_state_getname(rsp->gp_state), rsp->gp_state,
                       rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
@@ -1359,16 +1363,15 @@ static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
        }
 }
 
-static inline void panic_on_rcu_stall(void)
+static void panic_on_rcu_stall(void)
 {
        if (sysctl_panic_on_rcu_stall)
                panic("RCU Stall\n");
 }
 
-static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
+static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gp_seq)
 {
        int cpu;
-       long delta;
        unsigned long flags;
        unsigned long gpa;
        unsigned long j;
@@ -1381,25 +1384,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        if (rcu_cpu_stall_suppress)
                return;
 
-       /* Only let one CPU complain about others per time interval. */
-
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       delta = jiffies - READ_ONCE(rsp->jiffies_stall);
-       if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-               return;
-       }
-       WRITE_ONCE(rsp->jiffies_stall,
-                  jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
        /*
         * OK, time to rat on our buddy...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
         * RCU CPU stall warnings.
         */
-       pr_err("INFO: %s detected stalls on CPUs/tasks:",
-              rsp->name);
+       pr_err("INFO: %s detected stalls on CPUs/tasks:", rsp->name);
        print_cpu_stall_info_begin();
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -1418,17 +1408,16 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        for_each_possible_cpu(cpu)
                totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
                                                            cpu)->cblist);
-       pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
+       pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
               smp_processor_id(), (long)(jiffies - rsp->gp_start),
-              (long)rsp->gpnum, (long)rsp->completed, totqlen);
+              (long)rcu_seq_current(&rsp->gp_seq), totqlen);
        if (ndetected) {
                rcu_dump_cpu_stacks(rsp);
 
                /* Complain about tasks blocking the grace period. */
                rcu_print_detail_task_stall(rsp);
        } else {
-               if (READ_ONCE(rsp->gpnum) != gpnum ||
-                   READ_ONCE(rsp->completed) == gpnum) {
+               if (rcu_seq_current(&rsp->gp_seq) != gp_seq) {
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
@@ -1441,6 +1430,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
                        sched_show_task(current);
                }
        }
+       /* Rewrite if needed in case of slow consoles. */
+       if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
+               WRITE_ONCE(rsp->jiffies_stall,
+                          jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 
        rcu_check_gp_kthread_starvation(rsp);
 
@@ -1476,15 +1469,16 @@ static void print_cpu_stall(struct rcu_state *rsp)
        for_each_possible_cpu(cpu)
                totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
                                                            cpu)->cblist);
-       pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
+       pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
                jiffies - rsp->gp_start,
-               (long)rsp->gpnum, (long)rsp->completed, totqlen);
+               (long)rcu_seq_current(&rsp->gp_seq), totqlen);
 
        rcu_check_gp_kthread_starvation(rsp);
 
        rcu_dump_cpu_stacks(rsp);
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       /* Rewrite if needed in case of slow consoles. */
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
                WRITE_ONCE(rsp->jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
@@ -1504,10 +1498,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
 
 static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-       unsigned long completed;
-       unsigned long gpnum;
+       unsigned long gs1;
+       unsigned long gs2;
        unsigned long gps;
        unsigned long j;
+       unsigned long jn;
        unsigned long js;
        struct rcu_node *rnp;
 
@@ -1520,43 +1515,46 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
        /*
         * Lots of memory barriers to reject false positives.
         *
-        * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
-        * then rsp->gp_start, and finally rsp->completed.  These values
-        * are updated in the opposite order with memory barriers (or
-        * equivalent) during grace-period initialization and cleanup.
-        * Now, a false positive can occur if we get an new value of
-        * rsp->gp_start and a old value of rsp->jiffies_stall.  But given
-        * the memory barriers, the only way that this can happen is if one
-        * grace period ends and another starts between these two fetches.
-        * Detect this by comparing rsp->completed with the previous fetch
-        * from rsp->gpnum.
+        * The idea is to pick up rsp->gp_seq, then rsp->jiffies_stall,
+        * then rsp->gp_start, and finally another copy of rsp->gp_seq.
+        * These values are updated in the opposite order with memory
+        * barriers (or equivalent) during grace-period initialization
+        * and cleanup.  Now, a false positive can occur if we get a new
+        * value of rsp->gp_start and an old value of rsp->jiffies_stall.
+        * But given the memory barriers, the only way that this can happen
+        * is if one grace period ends and another starts between these
+        * two fetches.  This is detected by comparing the second fetch
+        * of rsp->gp_seq with the previous fetch from rsp->gp_seq.
         *
         * Given this check, comparisons of jiffies, rsp->jiffies_stall,
         * and rsp->gp_start suffice to forestall false positives.
         */
-       gpnum = READ_ONCE(rsp->gpnum);
-       smp_rmb(); /* Pick up ->gpnum first... */
+       gs1 = READ_ONCE(rsp->gp_seq);
+       smp_rmb(); /* Pick up ->gp_seq first... */
        js = READ_ONCE(rsp->jiffies_stall);
        smp_rmb(); /* ...then ->jiffies_stall before the rest... */
        gps = READ_ONCE(rsp->gp_start);
-       smp_rmb(); /* ...and finally ->gp_start before ->completed. */
-       completed = READ_ONCE(rsp->completed);
-       if (ULONG_CMP_GE(completed, gpnum) ||
+       smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
+       gs2 = READ_ONCE(rsp->gp_seq);
+       if (gs1 != gs2 ||
            ULONG_CMP_LT(j, js) ||
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
+       jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
        if (rcu_gp_in_progress(rsp) &&
-           (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
+           (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
+           cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(rsp);
 
        } else if (rcu_gp_in_progress(rsp) &&
-                  ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
+                  ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
+                  cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
                /* They had a few time units to dump stack, so complain. */
-               print_other_cpu_stall(rsp, gpnum);
+               print_other_cpu_stall(rsp, gs2);
        }
 }
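The new cmpxchg() calls gate the reporting: many CPUs may notice the same stall, but only the one that successfully advances ->jiffies_stall gets to print, replacing the lock-based "only let one CPU complain" logic removed from print_other_cpu_stall(). A userspace sketch of the pattern using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long jiffies_stall = 100;

/* Succeeds for exactly one caller per stall interval. */
static int try_claim_report(unsigned long js, unsigned long jn)
{
        return atomic_compare_exchange_strong(&jiffies_stall, &js, jn);
}

int main(void)
{
        printf("first caller reports: %d\n", try_claim_report(100, 400));  /* 1 */
        printf("second caller reports: %d\n", try_claim_report(100, 400)); /* 0 */
        return 0;
}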
 
@@ -1577,123 +1575,99 @@ void rcu_cpu_stall_reset(void)
                WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
 }
 
-/*
- * Determine the value that ->completed will have at the end of the
- * next subsequent grace period.  This is used to tag callbacks so that
- * a CPU can invoke callbacks in a timely fashion even if that CPU has
- * been dyntick-idle for an extended period with callbacks under the
- * influence of RCU_FAST_NO_HZ.
- *
- * The caller must hold rnp->lock with interrupts disabled.
- */
-static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
-                                      struct rcu_node *rnp)
-{
-       raw_lockdep_assert_held_rcu_node(rnp);
-
-       /*
-        * If RCU is idle, we just wait for the next grace period.
-        * But we can only be sure that RCU is idle if we are looking
-        * at the root rcu_node structure -- otherwise, a new grace
-        * period might have started, but just not yet gotten around
-        * to initializing the current non-root rcu_node structure.
-        */
-       if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
-               return rnp->completed + 1;
-
-       /*
-        * If the current rcu_node structure believes that RCU is
-        * idle, and if the rcu_state structure does not yet reflect
-        * the start of a new grace period, then the next grace period
-        * will suffice.  The memory barrier is needed to accurately
-        * sample the rsp->gpnum, and pairs with the second lock
-        * acquisition in rcu_gp_init(), which is augmented with
-        * smp_mb__after_unlock_lock() for this purpose.
-        */
-       if (rnp->gpnum == rnp->completed) {
-               smp_mb(); /* See above block comment. */
-               if (READ_ONCE(rsp->gpnum) == rnp->completed)
-                       return rnp->completed + 1;
-       }
-
-       /*
-        * Otherwise, wait for a possible partial grace period and
-        * then the subsequent full grace period.
-        */
-       return rnp->completed + 2;
-}
-
 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
 static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-                             unsigned long c, const char *s)
+                             unsigned long gp_seq_req, const char *s)
 {
-       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
-                                     rnp->completed, c, rnp->level,
-                                     rnp->grplo, rnp->grphi, s);
+       trace_rcu_future_grace_period(rdp->rsp->name, rnp->gp_seq, gp_seq_req,
+                                     rnp->level, rnp->grplo, rnp->grphi, s);
 }
 
 /*
+ * rcu_start_this_gp - Request the start of a particular grace period
+ * @rnp_start: The leaf node of the CPU from which to start.
+ * @rdp: The rcu_data corresponding to the CPU from which to start.
+ * @gp_seq_req: The gp_seq of the grace period to start.
+ *
  * Start the specified grace period, as needed to handle newly arrived
  * callbacks.  The required future grace periods are recorded in each
- * rcu_node structure's ->need_future_gp[] field.  Returns true if there
+ * rcu_node structure's ->gp_seq_needed field.  Returns true if there
  * is reason to awaken the grace-period kthread.
  *
  * The caller must hold the specified rcu_node structure's ->lock, which
  * is why the caller is responsible for waking the grace-period kthread.
+ *
+ * Returns true if the GP thread needs to be awakened, else false.
  */
-static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-                             unsigned long c)
+static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
+                             unsigned long gp_seq_req)
 {
        bool ret = false;
        struct rcu_state *rsp = rdp->rsp;
-       struct rcu_node *rnp_root;
+       struct rcu_node *rnp;
 
        /*
         * Use funnel locking to either acquire the root rcu_node
         * structure's lock or bail out if the need for this grace period
-        * has already been recorded -- or has already started.  If there
-        * is already a grace period in progress in a non-leaf node, no
-        * recording is needed because the end of the grace period will
-        * scan the leaf rcu_node structures.  Note that rnp->lock must
-        * not be released.
+        * has already been recorded -- or if that grace period has in
+        * fact already started.  If there is already a grace period in
+        * progress in a non-leaf node, no recording is needed because the
+        * end of the grace period will scan the leaf rcu_node structures.
+        * Note that rnp_start->lock must not be released.
         */
-       raw_lockdep_assert_held_rcu_node(rnp);
-       trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
-       for (rnp_root = rnp; 1; rnp_root = rnp_root->parent) {
-               if (rnp_root != rnp)
-                       raw_spin_lock_rcu_node(rnp_root);
-               WARN_ON_ONCE(ULONG_CMP_LT(rnp_root->gpnum +
-                                         need_future_gp_mask(), c));
-               if (need_future_gp_element(rnp_root, c) ||
-                   ULONG_CMP_GE(rnp_root->gpnum, c) ||
-                   (rnp != rnp_root &&
-                    rnp_root->gpnum != rnp_root->completed)) {
-                       trace_rcu_this_gp(rnp_root, rdp, c, TPS("Prestarted"));
+       raw_lockdep_assert_held_rcu_node(rnp_start);
+       trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
+       for (rnp = rnp_start; 1; rnp = rnp->parent) {
+               if (rnp != rnp_start)
+                       raw_spin_lock_rcu_node(rnp);
+               if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
+                   rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
+                   (rnp != rnp_start &&
+                    rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
+                       trace_rcu_this_gp(rnp, rdp, gp_seq_req,
+                                         TPS("Prestarted"));
                        goto unlock_out;
                }
-               need_future_gp_element(rnp_root, c) = true;
-               if (rnp_root != rnp && rnp_root->parent != NULL)
-                       raw_spin_unlock_rcu_node(rnp_root);
-               if (!rnp_root->parent)
+               rnp->gp_seq_needed = gp_seq_req;
+               if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
+                       /*
+                        * We just marked the leaf or internal node, and a
+                        * grace period is in progress, which means that
+                        * rcu_gp_cleanup() will see the marking.  Bail to
+                        * reduce contention.
+                        */
+                       trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
+                                         TPS("Startedleaf"));
+                       goto unlock_out;
+               }
+               if (rnp != rnp_start && rnp->parent != NULL)
+                       raw_spin_unlock_rcu_node(rnp);
+               if (!rnp->parent)
                        break;  /* At root, and perhaps also leaf. */
        }
 
        /* If GP already in progress, just leave, otherwise start one. */
-       if (rnp_root->gpnum != rnp_root->completed) {
-               trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedleafroot"));
+       if (rcu_gp_in_progress(rsp)) {
+               trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
                goto unlock_out;
        }
-       trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedroot"));
+       trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
        WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
+       rsp->gp_req_activity = jiffies;
        if (!rsp->gp_kthread) {
-               trace_rcu_this_gp(rnp_root, rdp, c, TPS("NoGPkthread"));
+               trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
                goto unlock_out;
        }
-       trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), TPS("newreq"));
+       trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq), TPS("newreq"));
        ret = true;  /* Caller must wake GP kthread. */
 unlock_out:
-       if (rnp != rnp_root)
-               raw_spin_unlock_rcu_node(rnp_root);
+       /* Push furthest requested GP to leaf node and rcu_data structure. */
+       if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
+               rnp_start->gp_seq_needed = rnp->gp_seq_needed;
+               rdp->gp_seq_needed = rnp->gp_seq_needed;
+       }
+       if (rnp != rnp_start)
+               raw_spin_unlock_rcu_node(rnp);
        return ret;
 }
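
The funnel-locking rewrite above leans entirely on the new ->gp_seq encoding: the low two bits carry the grace-period state (zero when idle) and the remaining bits carry the grace-period counter. A minimal user-space model of that encoding, assuming the kernel's two-bit state field; this is an illustrative sketch, not kernel/rcu/rcu.h itself:

#include <assert.h>
#include <stdio.h>

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1UL << RCU_SEQ_CTR_SHIFT) - 1)

static unsigned long seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;		/* Nonzero: GP in progress. */
}

static void seq_start(unsigned long *sp)
{
	*sp += 1;				/* Idle -> in progress. */
}

static void seq_end(unsigned long *sp)
{
	*sp = (*sp | RCU_SEQ_STATE_MASK) + 1;	/* Next counter, state idle. */
}

static unsigned long seq_snap(unsigned long *sp)
{
	/* Smallest value that implies a full GP elapsed after this call. */
	return (*sp + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
}

int main(void)
{
	unsigned long gp_seq = 0, snap;

	snap = seq_snap(&gp_seq);		/* Requests grace period #1. */
	seq_start(&gp_seq);
	seq_end(&gp_seq);
	assert(seq_state(gp_seq) == 0 && gp_seq >= snap);
	printf("gp_seq=%lu snap=%lu\n", gp_seq, snap);
	return 0;
}
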
 
@@ -1703,13 +1677,13 @@ static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
  */
 static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
-       unsigned long c = rnp->completed;
        bool needmore;
        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
-       need_future_gp_element(rnp, c) = false;
-       needmore = need_any_future_gp(rnp);
-       trace_rcu_this_gp(rnp, rdp, c,
+       needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
+       if (!needmore)
+               rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
+       trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
                          needmore ? TPS("CleanupMore") : TPS("Cleanup"));
        return needmore;
 }
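
The ULONG_CMP_LT() test above (and throughout this patch) is wrap-safe: it orders two unsigned values by their modular difference, so requests stay correctly ordered even when the sequence counter wraps. A small stand-alone demonstration using the macros as defined in the kernel's RCU headers:

#include <limits.h>
#include <stdio.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long old = ULONG_MAX - 1;	/* About to wrap. */
	unsigned long new = old + 4;		/* Wrapped past zero. */

	/* Naive "new > old" is false after the wrap; the modular compare
	 * still orders the two values correctly. */
	printf("naive=%d wrap-safe=%d\n", new > old, ULONG_CMP_LT(old, new));
	return 0;
}
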
@@ -1731,21 +1705,21 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 }
 
 /*
- * If there is room, assign a ->completed number to any callbacks on
- * this CPU that have not already been assigned.  Also accelerate any
- * callbacks that were previously assigned a ->completed number that has
- * since proven to be too conservative, which can happen if callbacks get
- * assigned a ->completed number while RCU is idle, but with reference to
- * a non-root rcu_node structure.  This function is idempotent, so it does
- * not hurt to call it repeatedly.  Returns an flag saying that we should
- * awaken the RCU grace-period kthread.
+ * If there is room, assign a ->gp_seq number to any callbacks on this
+ * CPU that have not already been assigned.  Also accelerate any callbacks
+ * that were previously assigned a ->gp_seq number that has since proven
+ * to be too conservative, which can happen if callbacks get assigned a
+ * ->gp_seq number while RCU is idle, but with reference to a non-root
+ * rcu_node structure.  This function is idempotent, so it does not hurt
+ * to call it repeatedly.  Returns a flag saying that we should awaken
+ * the RCU grace-period kthread.
  *
  * The caller must hold rnp->lock with interrupts disabled.
  */
 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
                               struct rcu_data *rdp)
 {
-       unsigned long c;
+       unsigned long gp_seq_req;
        bool ret = false;
 
        raw_lockdep_assert_held_rcu_node(rnp);
@@ -1764,22 +1738,50 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
         * accelerating callback invocation to an earlier grace-period
         * number.
         */
-       c = rcu_cbs_completed(rsp, rnp);
-       if (rcu_segcblist_accelerate(&rdp->cblist, c))
-               ret = rcu_start_this_gp(rnp, rdp, c);
+       gp_seq_req = rcu_seq_snap(&rsp->gp_seq);
+       if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
+               ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
 
        /* Trace depending on how much we were able to accelerate. */
        if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
-               trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
+               trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccWaitCB"));
        else
-               trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
+               trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("AccReadyCB"));
        return ret;
 }
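
For readers unfamiliar with acceleration: each queued callback is associated with the gp_seq value after which it is safe to invoke, and acceleration tightens an over-conservative tag down to the current rcu_seq_snap() value. A toy single-callback model of the idea (the kernel's real structure is the segmented rcu_segcblist, not shown here):

#include <stdbool.h>
#include <stdio.h>

struct cb {
	unsigned long gp_seq_tag;	/* GP after which invocation is safe. */
	bool queued;
};

static void accelerate(struct cb *cb, unsigned long snap)
{
	if (cb->queued && cb->gp_seq_tag > snap)
		cb->gp_seq_tag = snap;	/* An earlier GP turns out to suffice. */
}

static bool advance(struct cb *cb, unsigned long gp_seq)
{
	if (cb->queued && gp_seq >= cb->gp_seq_tag) {
		cb->queued = false;	/* Move to the "done" list. */
		return true;
	}
	return false;
}

int main(void)
{
	struct cb cb = { .gp_seq_tag = 12, .queued = true };

	accelerate(&cb, 8);	/* Conservative tag tightened to 8. */
	printf("ready=%d\n", advance(&cb, 8));
	return 0;
}
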
 
+/*
+ * Similar to rcu_accelerate_cbs(), but does not require that the leaf
+ * rcu_node structure's ->lock be held.  It consults the cached value
+ * of ->gp_seq_needed in the rcu_data structure, and if that indicates
+ * that a new grace-period request should be made, invokes rcu_accelerate_cbs()
+ * while holding the leaf rcu_node structure's ->lock.
+ */
+static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
+                                       struct rcu_node *rnp,
+                                       struct rcu_data *rdp)
+{
+       unsigned long c;
+       bool needwake;
+
+       lockdep_assert_irqs_disabled();
+       c = rcu_seq_snap(&rsp->gp_seq);
+       if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+               /* Old request still live, so mark recent callbacks. */
+               (void)rcu_segcblist_accelerate(&rdp->cblist, c);
+               return;
+       }
+       raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+       needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+       raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+       if (needwake)
+               rcu_gp_kthread_wake(rsp);
+}
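+
+The new unlocked variant is a classic check-then-lock fast path: the cached
+->gp_seq_needed is consulted without the rcu_node lock, and the lock is taken
+only when a new request must actually be recorded.  A hedged pthread sketch of
+the same shape (all names here are illustrative, not kernel APIs):
+
+#include <pthread.h>
+#include <stdatomic.h>
+
+static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
+static atomic_ulong cached_needed;	/* Plays the role of rdp->gp_seq_needed. */
+
+void request_if_needed(unsigned long want)
+{
+	/* Fast path: an older request already covers this one.
+	 * (Counter-wrap handling is omitted for brevity.) */
+	if (atomic_load(&cached_needed) >= want)
+		return;
+	pthread_mutex_lock(&node_lock);		/* Slow path: record it. */
+	if (atomic_load(&cached_needed) < want)
+		atomic_store(&cached_needed, want);
+	pthread_mutex_unlock(&node_lock);
+}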
+
 /*
  * Move any callbacks whose grace period has completed to the
  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
- * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
+ * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
  * sublist.  This function is idempotent, so it does not hurt to
  * invoke it repeatedly.  As long as it is not invoked -too- often...
  * Returns true if the RCU grace-period kthread needs to be awakened.
@@ -1796,10 +1798,10 @@ static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
                return false;
 
        /*
-        * Find all callbacks whose ->completed numbers indicate that they
+        * Find all callbacks whose ->gp_seq numbers indicate that they
         * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
         */
-       rcu_segcblist_advance(&rdp->cblist, rnp->completed);
+       rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
 
        /* Classify any remaining callbacks. */
        return rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1819,39 +1821,38 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 
        raw_lockdep_assert_held_rcu_node(rnp);
 
-       /* Handle the ends of any preceding grace periods first. */
-       if (rdp->completed == rnp->completed &&
-           !unlikely(READ_ONCE(rdp->gpwrap))) {
-
-               /* No grace period end, so just accelerate recent callbacks. */
-               ret = rcu_accelerate_cbs(rsp, rnp, rdp);
+       if (rdp->gp_seq == rnp->gp_seq)
+               return false; /* Nothing to do. */
 
+       /* Handle the ends of any preceding grace periods first. */
+       if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
+           unlikely(READ_ONCE(rdp->gpwrap))) {
+               ret = rcu_advance_cbs(rsp, rnp, rdp); /* Advance callbacks. */
+               trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuend"));
        } else {
-
-               /* Advance callbacks. */
-               ret = rcu_advance_cbs(rsp, rnp, rdp);
-
-               /* Remember that we saw this grace-period completion. */
-               rdp->completed = rnp->completed;
-               trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
+               ret = rcu_accelerate_cbs(rsp, rnp, rdp); /* Recent callbacks. */
        }
 
-       if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
+       /* Now handle the beginnings of any new-to-this-CPU grace periods. */
+       if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
+           unlikely(READ_ONCE(rdp->gpwrap))) {
                /*
                 * If the current grace period is waiting for this CPU,
                 * set up to detect a quiescent state, otherwise don't
                 * go looking for one.
                 */
-               rdp->gpnum = rnp->gpnum;
-               trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
+               trace_rcu_grace_period(rsp->name, rnp->gp_seq, TPS("cpustart"));
                need_gp = !!(rnp->qsmask & rdp->grpmask);
                rdp->cpu_no_qs.b.norm = need_gp;
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
                rdp->core_needs_qs = need_gp;
                zero_cpu_stall_ticks(rdp);
-               WRITE_ONCE(rdp->gpwrap, false);
-               rcu_gpnum_ovf(rnp, rdp);
        }
+       rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
+       if (ULONG_CMP_GE(rnp->gp_seq_needed, rdp->gp_seq_needed) || rdp->gpwrap)
+               rdp->gp_seq_needed = rnp->gp_seq_needed;
+       WRITE_ONCE(rdp->gpwrap, false);
+       rcu_gpnum_ovf(rnp, rdp);
        return ret;
 }
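
__note_gp_changes() now distinguishes "a grace period ended" from "a new one started" purely from the two gp_seq values. A sketch of those two predicates, modeled on the rcu_seq_completed_gp()/rcu_seq_new_gp() helpers and assuming the two-bit state encoding shown earlier:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define RCU_SEQ_STATE_MASK	0x3UL
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

/* Has some grace period completed since "old" was captured? */
static bool seq_completed_gp(unsigned long old, unsigned long cur)
{
	return ULONG_CMP_LT(old, cur & ~RCU_SEQ_STATE_MASK);
}

/* Has a grace period begun that "old" knew nothing about? */
static bool seq_new_gp(unsigned long old, unsigned long cur)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    cur);
}

int main(void)
{
	/* old = idle after GP #1 (4); cur = GP #3 in progress (9). */
	printf("completed=%d new=%d\n",
	       seq_completed_gp(4, 9), seq_new_gp(4, 9));
	return 0;
}
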
 
@@ -1863,8 +1864,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 
        local_irq_save(flags);
        rnp = rdp->mynode;
-       if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
-            rdp->completed == READ_ONCE(rnp->completed) &&
+       if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
             !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
            !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
                local_irq_restore(flags);
@@ -1879,7 +1879,8 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 static void rcu_gp_slow(struct rcu_state *rsp, int delay)
 {
        if (delay > 0 &&
-           !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
+           !(rcu_seq_ctr(rsp->gp_seq) %
+             (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
                schedule_timeout_uninterruptible(delay);
 }
 
@@ -1888,7 +1889,9 @@ static void rcu_gp_slow(struct rcu_state *rsp, int delay)
  */
 static bool rcu_gp_init(struct rcu_state *rsp)
 {
+       unsigned long flags;
        unsigned long oldmask;
+       unsigned long mask;
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
@@ -1912,9 +1915,9 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 
        /* Advance to a new grace period and initialize state. */
        record_gp_stall_check_time(rsp);
-       /* Record GP times before starting GP, hence smp_store_release(). */
-       smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
-       trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
+       /* Record GP times before starting GP, hence rcu_seq_start(). */
+       rcu_seq_start(&rsp->gp_seq);
+       trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("start"));
        raw_spin_unlock_irq_rcu_node(rnp);
 
        /*
@@ -1923,13 +1926,15 @@ static bool rcu_gp_init(struct rcu_state *rsp)
         * for subsequent online CPUs, and that quiescent-state forcing
         * will handle subsequent offline CPUs.
         */
+       rsp->gp_state = RCU_GP_ONOFF;
        rcu_for_each_leaf_node(rsp, rnp) {
-               rcu_gp_slow(rsp, gp_preinit_delay);
+               spin_lock(&rsp->ofl_lock);
                raw_spin_lock_irq_rcu_node(rnp);
                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
                    !rnp->wait_blkd_tasks) {
                        /* Nothing to do on this leaf rcu_node structure. */
                        raw_spin_unlock_irq_rcu_node(rnp);
+                       spin_unlock(&rsp->ofl_lock);
                        continue;
                }
 
@@ -1939,12 +1944,14 @@ static bool rcu_gp_init(struct rcu_state *rsp)
 
                /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
                if (!oldmask != !rnp->qsmaskinit) {
-                       if (!oldmask) /* First online CPU for this rcu_node. */
-                               rcu_init_new_rnp(rnp);
-                       else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
-                               rnp->wait_blkd_tasks = true;
-                       else /* Last offline CPU and can propagate. */
+                       if (!oldmask) { /* First online CPU for rcu_node. */
+                               if (!rnp->wait_blkd_tasks) /* Ever offline? */
+                                       rcu_init_new_rnp(rnp);
+                       } else if (rcu_preempt_has_tasks(rnp)) {
+                               rnp->wait_blkd_tasks = true; /* blocked tasks */
+                       } else { /* Last offline CPU and can propagate. */
                                rcu_cleanup_dead_rnp(rnp);
+                       }
                }
 
                /*
@@ -1953,18 +1960,19 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                 * still offline, propagate up the rcu_node tree and
                 * clear ->wait_blkd_tasks.  Otherwise, if one of this
                 * rcu_node structure's CPUs has since come back online,
-                * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
-                * checks for this, so just call it unconditionally).
+                * simply clear ->wait_blkd_tasks.
                 */
                if (rnp->wait_blkd_tasks &&
-                   (!rcu_preempt_has_tasks(rnp) ||
-                    rnp->qsmaskinit)) {
+                   (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
                        rnp->wait_blkd_tasks = false;
-                       rcu_cleanup_dead_rnp(rnp);
+                       if (!rnp->qsmaskinit)
+                               rcu_cleanup_dead_rnp(rnp);
                }
 
                raw_spin_unlock_irq_rcu_node(rnp);
+               spin_unlock(&rsp->ofl_lock);
        }
+       rcu_gp_slow(rsp, gp_preinit_delay); /* Races with CPU hotplug. */
 
        /*
         * Set the quiescent-state-needed bits in all the rcu_node
@@ -1978,22 +1986,27 @@ static bool rcu_gp_init(struct rcu_state *rsp)
         * The grace period cannot complete until the initialization
         * process finishes, because this kthread handles both.
         */
+       rsp->gp_state = RCU_GP_INIT;
        rcu_for_each_node_breadth_first(rsp, rnp) {
                rcu_gp_slow(rsp, gp_init_delay);
-               raw_spin_lock_irq_rcu_node(rnp);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
                rdp = this_cpu_ptr(rsp->rda);
-               rcu_preempt_check_blocked_tasks(rnp);
+               rcu_preempt_check_blocked_tasks(rsp, rnp);
                rnp->qsmask = rnp->qsmaskinit;
-               WRITE_ONCE(rnp->gpnum, rsp->gpnum);
-               if (WARN_ON_ONCE(rnp->completed != rsp->completed))
-                       WRITE_ONCE(rnp->completed, rsp->completed);
+               WRITE_ONCE(rnp->gp_seq, rsp->gp_seq);
                if (rnp == rdp->mynode)
                        (void)__note_gp_changes(rsp, rnp, rdp);
                rcu_preempt_boost_start_gp(rnp);
-               trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
+               trace_rcu_grace_period_init(rsp->name, rnp->gp_seq,
                                            rnp->level, rnp->grplo,
                                            rnp->grphi, rnp->qsmask);
-               raw_spin_unlock_irq_rcu_node(rnp);
+               /* Quiescent states for tasks on any now-offline CPUs. */
+               mask = rnp->qsmask & ~rnp->qsmaskinitnext;
+               rnp->rcu_gp_init_mask = mask;
+               if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
+                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               else
+                       raw_spin_unlock_irq_rcu_node(rnp);
                cond_resched_tasks_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
        }
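
Note the new locking discipline in the loop above: rsp->ofl_lock is acquired outside each leaf's rcu_node lock, so grace-period initialization and CPU hotplug serialize on the outer lock in a consistent order. A pthread sketch of that ordering (illustrative names only, not the kernel API):

#include <pthread.h>

static pthread_mutex_t ofl_lock = PTHREAD_MUTEX_INITIALIZER;	/* rsp->ofl_lock */
static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;	/* rnp->lock */

void gp_init_leaf(void)				/* Grace-period kthread side. */
{
	pthread_mutex_lock(&ofl_lock);		/* Fence out hotplug first... */
	pthread_mutex_lock(&node_lock);		/* ...then take the node lock. */
	/* ... sample ->qsmaskinit vs ->qsmaskinitnext ... */
	pthread_mutex_unlock(&node_lock);
	pthread_mutex_unlock(&ofl_lock);
}

void cpu_offline(void)				/* Hotplug side, same order. */
{
	pthread_mutex_lock(&ofl_lock);
	pthread_mutex_lock(&node_lock);
	/* ... clear this CPU's bit in ->qsmaskinitnext ... */
	pthread_mutex_unlock(&node_lock);
	pthread_mutex_unlock(&ofl_lock);
}
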
@@ -2053,6 +2066,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 {
        unsigned long gp_duration;
        bool needgp = false;
+       unsigned long new_gp_seq;
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
        struct swait_queue_head *sq;
@@ -2074,19 +2088,22 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        raw_spin_unlock_irq_rcu_node(rnp);
 
        /*
-        * Propagate new ->completed value to rcu_node structures so
-        * that other CPUs don't have to wait until the start of the next
-        * grace period to process their callbacks.  This also avoids
-        * some nasty RCU grace-period initialization races by forcing
-        * the end of the current grace period to be completely recorded in
-        * all of the rcu_node structures before the beginning of the next
-        * grace period is recorded in any of the rcu_node structures.
+        * Propagate new ->gp_seq value to rcu_node structures so that
+        * other CPUs don't have to wait until the start of the next grace
+        * period to process their callbacks.  This also avoids some nasty
+        * RCU grace-period initialization races by forcing the end of
+        * the current grace period to be completely recorded in all of
+        * the rcu_node structures before the beginning of the next grace
+        * period is recorded in any of the rcu_node structures.
         */
+       new_gp_seq = rsp->gp_seq;
+       rcu_seq_end(&new_gp_seq);
        rcu_for_each_node_breadth_first(rsp, rnp) {
                raw_spin_lock_irq_rcu_node(rnp);
-               WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
+               if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+                       dump_blkd_tasks(rsp, rnp, 10);
                WARN_ON_ONCE(rnp->qsmask);
-               WRITE_ONCE(rnp->completed, rsp->gpnum);
+               WRITE_ONCE(rnp->gp_seq, new_gp_seq);
                rdp = this_cpu_ptr(rsp->rda);
                if (rnp == rdp->mynode)
                        needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
@@ -2100,26 +2117,28 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                rcu_gp_slow(rsp, gp_cleanup_delay);
        }
        rnp = rcu_get_root(rsp);
-       raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
+       raw_spin_lock_irq_rcu_node(rnp); /* GP before rsp->gp_seq update. */
 
        /* Declare grace period done. */
-       WRITE_ONCE(rsp->completed, rsp->gpnum);
-       trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
+       rcu_seq_end(&rsp->gp_seq);
+       trace_rcu_grace_period(rsp->name, rsp->gp_seq, TPS("end"));
        rsp->gp_state = RCU_GP_IDLE;
        /* Check for GP requests since above loop. */
        rdp = this_cpu_ptr(rsp->rda);
-       if (need_any_future_gp(rnp)) {
-               trace_rcu_this_gp(rnp, rdp, rsp->completed - 1,
+       if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
+               trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
                                  TPS("CleanupMore"));
                needgp = true;
        }
        /* Advance CBs to reduce false positives below. */
        if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
                WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
-               trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
+               rsp->gp_req_activity = jiffies;
+               trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gp_seq),
                                       TPS("newreq"));
+       } else {
+               WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
        }
-       WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
        raw_spin_unlock_irq_rcu_node(rnp);
 }
 
@@ -2141,7 +2160,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                /* Handle grace-period start. */
                for (;;) {
                        trace_rcu_grace_period(rsp->name,
-                                              READ_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gp_seq),
                                               TPS("reqwait"));
                        rsp->gp_state = RCU_GP_WAIT_GPS;
                        swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
@@ -2154,17 +2173,13 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        WRITE_ONCE(rsp->gp_activity, jiffies);
                        WARN_ON(signal_pending(current));
                        trace_rcu_grace_period(rsp->name,
-                                              READ_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gp_seq),
                                               TPS("reqwaitsig"));
                }
 
                /* Handle quiescent-state forcing. */
                first_gp_fqs = true;
                j = jiffies_till_first_fqs;
-               if (j > HZ) {
-                       j = HZ;
-                       jiffies_till_first_fqs = HZ;
-               }
                ret = 0;
                for (;;) {
                        if (!ret) {
@@ -2173,7 +2188,7 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                           jiffies + 3 * j);
                        }
                        trace_rcu_grace_period(rsp->name,
-                                              READ_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gp_seq),
                                               TPS("fqswait"));
                        rsp->gp_state = RCU_GP_WAIT_FQS;
                        ret = swait_event_idle_timeout(rsp->gp_wq,
@@ -2188,31 +2203,24 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
                            (gf & RCU_GP_FLAG_FQS)) {
                                trace_rcu_grace_period(rsp->name,
-                                                      READ_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gp_seq),
                                                       TPS("fqsstart"));
                                rcu_gp_fqs(rsp, first_gp_fqs);
                                first_gp_fqs = false;
                                trace_rcu_grace_period(rsp->name,
-                                                      READ_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gp_seq),
                                                       TPS("fqsend"));
                                cond_resched_tasks_rcu_qs();
                                WRITE_ONCE(rsp->gp_activity, jiffies);
                                ret = 0; /* Force full wait till next FQS. */
                                j = jiffies_till_next_fqs;
-                               if (j > HZ) {
-                                       j = HZ;
-                                       jiffies_till_next_fqs = HZ;
-                               } else if (j < 1) {
-                                       j = 1;
-                                       jiffies_till_next_fqs = 1;
-                               }
                        } else {
                                /* Deal with stray signal. */
                                cond_resched_tasks_rcu_qs();
                                WRITE_ONCE(rsp->gp_activity, jiffies);
                                WARN_ON(signal_pending(current));
                                trace_rcu_grace_period(rsp->name,
-                                                      READ_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gp_seq),
                                                       TPS("fqswaitsig"));
                                ret = 1; /* Keep old FQS timing. */
                                j = jiffies;
@@ -2256,8 +2264,12 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  * must be represented by the same rcu_node structure (which need not be a
  * leaf rcu_node structure, though it often will be).  The gps parameter
  * is the grace-period snapshot, which means that the quiescent states
- * are valid only if rnp->gpnum is equal to gps.  That structure's lock
+ * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
  * must be held upon entry, and it is released before return.
+ *
+ * As a special case, if mask is zero, the bit-already-cleared check is
+ * disabled.  This allows propagating quiescent state due to resumed tasks
+ * during grace-period initialization.
  */
 static void
 rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
@@ -2271,7 +2283,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 
        /* Walk up the rcu_node hierarchy. */
        for (;;) {
-               if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
+               if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
 
                        /*
                         * Our bit has already been cleared, or the
@@ -2284,7 +2296,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
                             rcu_preempt_blocked_readers_cgp(rnp));
                rnp->qsmask &= ~mask;
-               trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
+               trace_rcu_quiescent_state_report(rsp->name, rnp->gp_seq,
                                                 mask, rnp->qsmask, rnp->level,
                                                 rnp->grplo, rnp->grphi,
                                                 !!rnp->gp_tasks);
@@ -2294,6 +2306,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        return;
                }
+               rnp->completedqs = rnp->gp_seq;
                mask = rnp->grpmask;
                if (rnp->parent == NULL) {
 
@@ -2323,8 +2336,9 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
  * irqs disabled, and this lock is released upon return, but irqs remain
  * disabled.
  */
-static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
-                                     struct rcu_node *rnp, unsigned long flags)
+static void __maybe_unused
+rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
+                         struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
 {
        unsigned long gps;
@@ -2332,12 +2346,15 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
        struct rcu_node *rnp_p;
 
        raw_lockdep_assert_held_rcu_node(rnp);
-       if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
-           rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
+       if (WARN_ON_ONCE(rcu_state_p == &rcu_sched_state) ||
+           WARN_ON_ONCE(rsp != rcu_state_p) ||
+           WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
+           rnp->qsmask != 0) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;  /* Still need more quiescent states! */
        }
 
+       rnp->completedqs = rnp->gp_seq;
        rnp_p = rnp->parent;
        if (rnp_p == NULL) {
                /*
@@ -2348,8 +2365,8 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
                return;
        }
 
-       /* Report up the rest of the hierarchy, tracking current ->gpnum. */
-       gps = rnp->gpnum;
+       /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
+       gps = rnp->gp_seq;
        mask = rnp->grpmask;
        raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
        raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
@@ -2370,8 +2387,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
 
        rnp = rdp->mynode;
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
-           rnp->completed == rnp->gpnum || rdp->gpwrap) {
+       if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
+           rdp->gpwrap) {
 
                /*
                 * The grace period in which this quiescent state was
@@ -2396,7 +2413,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                 */
                needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
 
-               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
                /* ^^^ Released rnp->lock */
                if (needwake)
                        rcu_gp_kthread_wake(rsp);
@@ -2441,17 +2458,16 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
  */
 static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 {
-       RCU_TRACE(unsigned long mask;)
+       RCU_TRACE(bool blkd;)
        RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
        RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
 
        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
                return;
 
-       RCU_TRACE(mask = rdp->grpmask;)
-       trace_rcu_grace_period(rsp->name,
-                              rnp->gpnum + 1 - !!(rnp->qsmask & mask),
-                              TPS("cpuofl"));
+       RCU_TRACE(blkd = !!(rnp->qsmask & rdp->grpmask);)
+       trace_rcu_grace_period(rsp->name, rnp->gp_seq,
+                              blkd ? TPS("cpuofl") : TPS("cpuofl-bgp"));
 }
 
 /*
@@ -2463,7 +2479,7 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
  * This function therefore goes up the tree of rcu_node structures,
  * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
  * the leaf rcu_node structure's ->qsmaskinit field has already been
- * updated
+ * updated.
  *
  * This function does check that the specified rcu_node structure has
  * all CPUs offline and no blocked tasks, so it is OK to invoke it
@@ -2476,9 +2492,10 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
        long mask;
        struct rcu_node *rnp = rnp_leaf;
 
-       raw_lockdep_assert_held_rcu_node(rnp);
+       raw_lockdep_assert_held_rcu_node(rnp_leaf);
        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
-           rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
+           WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
+           WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
                return;
        for (;;) {
                mask = rnp->grpmask;
@@ -2487,7 +2504,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
                        break;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                rnp->qsmaskinit &= ~mask;
-               rnp->qsmask &= ~mask;
+               /* Between grace periods, so better already be zero! */
+               WARN_ON_ONCE(rnp->qsmask);
                if (rnp->qsmaskinit) {
                        raw_spin_unlock_rcu_node(rnp);
                        /* irqs remain disabled. */
@@ -2630,6 +2648,7 @@ void rcu_check_callbacks(int user)
 
                rcu_sched_qs();
                rcu_bh_qs();
+               rcu_note_voluntary_context_switch(current);
 
        } else if (!in_softirq()) {
 
@@ -2645,8 +2664,7 @@ void rcu_check_callbacks(int user)
        rcu_preempt_check_callbacks();
        if (rcu_pending())
                invoke_rcu_core();
-       if (user)
-               rcu_note_voluntary_context_switch(current);
+
        trace_rcu_utilization(TPS("End scheduler-tick"));
 }
 
@@ -2681,17 +2699,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
                                /* rcu_initiate_boost() releases rnp->lock */
                                continue;
                        }
-                       if (rnp->parent &&
-                           (rnp->parent->qsmask & rnp->grpmask)) {
-                               /*
-                                * Race between grace-period
-                                * initialization and task exiting RCU
-                                * read-side critical section: Report.
-                                */
-                               rcu_report_unblock_qs_rnp(rsp, rnp, flags);
-                               /* rcu_report_unblock_qs_rnp() rlses ->lock */
-                               continue;
-                       }
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+                       continue;
                }
                for_each_leaf_node_possible_cpu(rnp, cpu) {
                        unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
@@ -2701,8 +2710,8 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
                        }
                }
                if (mask != 0) {
-                       /* Idle/offline CPUs, report (releases rnp->lock. */
-                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
+                       /* Idle/offline CPUs, report (releases rnp->lock). */
+                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
                } else {
                        /* Nothing to do here, so just drop the lock. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2746,6 +2755,65 @@ static void force_quiescent_state(struct rcu_state *rsp)
        rcu_gp_kthread_wake(rsp);
 }
 
+/*
+ * This function checks for grace-period requests that fail to motivate
+ * RCU to come out of its idle mode.
+ */
+static void
+rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
+                        struct rcu_data *rdp)
+{
+       const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
+       unsigned long flags;
+       unsigned long j;
+       struct rcu_node *rnp_root = rcu_get_root(rsp);
+       static atomic_t warned = ATOMIC_INIT(0);
+
+       if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress(rsp) ||
+           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed))
+               return;
+       j = jiffies; /* Expensive access, and in common case don't get here. */
+       if (time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+           time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+           atomic_read(&warned))
+               return;
+
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       j = jiffies;
+       if (rcu_gp_in_progress(rsp) ||
+           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+           time_before(j, READ_ONCE(rsp->gp_req_activity) + gpssdelay) ||
+           time_before(j, READ_ONCE(rsp->gp_activity) + gpssdelay) ||
+           atomic_read(&warned)) {
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               return;
+       }
+       /* Hold onto the leaf lock to make others see warned==1. */
+
+       if (rnp_root != rnp)
+               raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
+       j = jiffies;
+       if (rcu_gp_in_progress(rsp) ||
+           ULONG_CMP_GE(rnp_root->gp_seq, rnp_root->gp_seq_needed) ||
+           time_before(j, rsp->gp_req_activity + gpssdelay) ||
+           time_before(j, rsp->gp_activity + gpssdelay) ||
+           atomic_xchg(&warned, 1)) {
+               raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               return;
+       }
+       pr_alert("%s: g%ld->%ld gar:%lu ga:%lu f%#x gs:%d %s->state:%#lx\n",
+                __func__, (long)READ_ONCE(rsp->gp_seq),
+                (long)READ_ONCE(rnp_root->gp_seq_needed),
+                j - rsp->gp_req_activity, j - rsp->gp_activity,
+                rsp->gp_flags, rsp->gp_state, rsp->name,
+                rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL);
+       WARN_ON(1);
+       if (rnp_root != rnp)
+               raw_spin_unlock_rcu_node(rnp_root);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+}
+
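+
+The "warned" flag above relies on atomic_xchg() so that at most one task emits
+the diagnostic.  The same warn-once idiom in stand-alone C11 form, for
+illustration:
+
+#include <stdatomic.h>
+#include <stdio.h>
+
+static atomic_int warned;
+
+void warn_once(const char *what)
+{
+	/* The first caller swaps in 1 and warns; everyone later sees 1. */
+	if (atomic_exchange(&warned, 1))
+		return;
+	fprintf(stderr, "WARNING: %s\n", what);
+}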
 /*
  * This does the RCU core processing work for the specified rcu_state
  * and rcu_data structures.  This may be called only from the CPU to
@@ -2755,9 +2823,8 @@ static void
 __rcu_process_callbacks(struct rcu_state *rsp)
 {
        unsigned long flags;
-       bool needwake;
        struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
-       struct rcu_node *rnp;
+       struct rcu_node *rnp = rdp->mynode;
 
        WARN_ON_ONCE(!rdp->beenonline);
 
@@ -2768,18 +2835,13 @@ __rcu_process_callbacks(struct rcu_state *rsp)
        if (!rcu_gp_in_progress(rsp) &&
            rcu_segcblist_is_enabled(&rdp->cblist)) {
                local_irq_save(flags);
-               if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) {
-                       local_irq_restore(flags);
-               } else {
-                       rnp = rdp->mynode;
-                       raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
-                       needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-                       if (needwake)
-                               rcu_gp_kthread_wake(rsp);
-               }
+               if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
+                       rcu_accelerate_cbs_unlocked(rsp, rnp, rdp);
+               local_irq_restore(flags);
        }
 
+       rcu_check_gp_start_stall(rsp, rnp, rdp);
+
        /* If there are callbacks ready, invoke them. */
        if (rcu_segcblist_ready_cbs(&rdp->cblist))
                invoke_rcu_callbacks(rsp, rdp);
@@ -2833,8 +2895,6 @@ static void invoke_rcu_core(void)
 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
                            struct rcu_head *head, unsigned long flags)
 {
-       bool needwake;
-
        /*
         * If called from an extended quiescent state, invoke the RCU
         * core in order to force a re-evaluation of RCU's idleness.
@@ -2861,13 +2921,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 
                /* Start a new grace period if one not already started. */
                if (!rcu_gp_in_progress(rsp)) {
-                       struct rcu_node *rnp = rdp->mynode;
-
-                       raw_spin_lock_rcu_node(rnp);
-                       needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-                       raw_spin_unlock_rcu_node(rnp);
-                       if (needwake)
-                               rcu_gp_kthread_wake(rsp);
+                       rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
                } else {
                        /* Give the grace period a kick. */
                        rdp->blimit = LONG_MAX;
@@ -3037,7 +3091,7 @@ EXPORT_SYMBOL_GPL(kfree_call_rcu);
  * when there was in fact only one the whole time, as this just adds
  * some overhead: RCU still operates correctly.
  */
-static inline int rcu_blocking_is_gp(void)
+static int rcu_blocking_is_gp(void)
 {
        int ret;
 
@@ -3136,16 +3190,10 @@ unsigned long get_state_synchronize_rcu(void)
 {
        /*
         * Any prior manipulation of RCU-protected data must happen
-        * before the load from ->gpnum.
+        * before the load from ->gp_seq.
         */
        smp_mb();  /* ^^^ */
-
-       /*
-        * Make sure this load happens before the purportedly
-        * time-consuming work between get_state_synchronize_rcu()
-        * and cond_synchronize_rcu().
-        */
-       return smp_load_acquire(&rcu_state_p->gpnum);
+       return rcu_seq_snap(&rcu_state_p->gp_seq);
 }
 EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
 
@@ -3165,15 +3213,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
  */
 void cond_synchronize_rcu(unsigned long oldstate)
 {
-       unsigned long newstate;
-
-       /*
-        * Ensure that this load happens before any RCU-destructive
-        * actions the caller might carry out after we return.
-        */
-       newstate = smp_load_acquire(&rcu_state_p->completed);
-       if (ULONG_CMP_GE(oldstate, newstate))
+       if (!rcu_seq_done(&rcu_state_p->gp_seq, oldstate))
                synchronize_rcu();
+       else
+               smp_mb(); /* Ensure GP ends before subsequent accesses. */
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
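
The get_state_synchronize_rcu()/cond_synchronize_rcu() pair now snapshots and polls the same gp_seq counter. Typical usage, sketched as caller-side code (do_other_work() is a hypothetical placeholder); the _sched variants below follow the identical pattern:

	unsigned long oldstate;

	oldstate = get_state_synchronize_rcu();	/* Snapshot a future GP. */
	do_other_work();			/* Hypothetical unrelated work. */
	cond_synchronize_rcu(oldstate);		/* Blocks only if no full grace
						 * period elapsed meanwhile. */
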
 
@@ -3188,16 +3231,10 @@ unsigned long get_state_synchronize_sched(void)
 {
        /*
         * Any prior manipulation of RCU-protected data must happen
-        * before the load from ->gpnum.
+        * before the load from ->gp_seq.
         */
        smp_mb();  /* ^^^ */
-
-       /*
-        * Make sure this load happens before the purportedly
-        * time-consuming work between get_state_synchronize_sched()
-        * and cond_synchronize_sched().
-        */
-       return smp_load_acquire(&rcu_sched_state.gpnum);
+       return rcu_seq_snap(&rcu_sched_state.gp_seq);
 }
 EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
 
@@ -3217,15 +3254,10 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
  */
 void cond_synchronize_sched(unsigned long oldstate)
 {
-       unsigned long newstate;
-
-       /*
-        * Ensure that this load happens before any RCU-destructive
-        * actions the caller might carry out after we return.
-        */
-       newstate = smp_load_acquire(&rcu_sched_state.completed);
-       if (ULONG_CMP_GE(oldstate, newstate))
+       if (!rcu_seq_done(&rcu_sched_state.gp_seq, oldstate))
                synchronize_sched();
+       else
+               smp_mb(); /* Ensure GP ends before subsequent accesses. */
 }
 EXPORT_SYMBOL_GPL(cond_synchronize_sched);
 
@@ -3261,12 +3293,8 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
            !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
                return 1;
 
-       /* Has another RCU grace period completed?  */
-       if (READ_ONCE(rnp->completed) != rdp->completed) /* outside lock */
-               return 1;
-
-       /* Has a new RCU grace period started? */
-       if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
+       /* Have RCU grace period completed or started?  */
+       if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
            unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
                return 1;
 
@@ -3298,7 +3326,7 @@ static int rcu_pending(void)
  * non-NULL, store an indication of whether all callbacks are lazy.
  * (If there are no callbacks, all of them are deemed to be lazy.)
  */
-static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
+static bool rcu_cpu_has_callbacks(bool *all_lazy)
 {
        bool al = true;
        bool hc = false;
@@ -3484,17 +3512,22 @@ EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
 {
        long mask;
+       long oldmask;
        struct rcu_node *rnp = rnp_leaf;
 
-       raw_lockdep_assert_held_rcu_node(rnp);
+       raw_lockdep_assert_held_rcu_node(rnp_leaf);
+       WARN_ON_ONCE(rnp->wait_blkd_tasks);
        for (;;) {
                mask = rnp->grpmask;
                rnp = rnp->parent;
                if (rnp == NULL)
                        return;
                raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
+               oldmask = rnp->qsmaskinit;
                rnp->qsmaskinit |= mask;
                raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
+               if (oldmask)
+                       return;
        }
 }
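
The early return added above encodes an invariant: if a parent's ->qsmaskinit was already nonzero before this child's bit was ORed in, every ancestor must already be marked, so the walk can stop. The same upward propagation over a toy tree (a sketch, not kernel code):

struct tnode {
	struct tnode *parent;
	unsigned long grpmask;		/* This node's bit within its parent. */
	unsigned long qsmaskinit;
};

static void propagate_online(struct tnode *leaf)
{
	unsigned long mask = leaf->grpmask;

	for (struct tnode *n = leaf->parent; n; n = n->parent) {
		unsigned long old = n->qsmaskinit;

		n->qsmaskinit |= mask;
		if (old)	/* Ancestors are necessarily marked already. */
			return;
		mask = n->grpmask;
	}
}
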
 
@@ -3511,6 +3544,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
        WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
+       rdp->rcu_ofl_gp_seq = rsp->gp_seq;
+       rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
+       rdp->rcu_onl_gp_seq = rsp->gp_seq;
+       rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
        rdp->cpu = cpu;
        rdp->rsp = rsp;
        rcu_boot_init_nocb_percpu_data(rdp);
@@ -3518,9 +3555,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 
 /*
  * Initialize a CPU's per-CPU RCU data.  Note that only one online or
- * offline event can be happening at a given time.  Note also that we
- * can accept some slop in the rsp->completed access due to the fact
- * that this CPU cannot possibly have any RCU callbacks in flight yet.
+ * offline event can be happening at a given time.  Note also that we can
+ * accept some slop in the rsp->gp_seq access due to the fact that this
+ * CPU cannot possibly have any RCU callbacks in flight yet.
  */
 static void
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
@@ -3549,14 +3586,14 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rnp = rdp->mynode;
        raw_spin_lock_rcu_node(rnp);            /* irqs already disabled. */
        rdp->beenonline = true;  /* We have now been online. */
-       rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
-       rdp->completed = rnp->completed;
+       rdp->gp_seq = rnp->gp_seq;
+       rdp->gp_seq_needed = rnp->gp_seq;
        rdp->cpu_no_qs.b.norm = true;
        rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
        rdp->core_needs_qs = false;
        rdp->rcu_iw_pending = false;
-       rdp->rcu_iw_gpnum = rnp->gpnum - 1;
-       trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
+       rdp->rcu_iw_gp_seq = rnp->gp_seq - 1;
+       trace_rcu_grace_period(rsp->name, rdp->gp_seq, TPS("cpuonl"));
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
@@ -3705,7 +3742,15 @@ void rcu_cpu_starting(unsigned int cpu)
                nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
                /* Allow lockless access for expedited grace periods. */
                smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
+               rdp->rcu_onl_gp_seq = READ_ONCE(rsp->gp_seq);
+               rdp->rcu_onl_gp_flags = READ_ONCE(rsp->gp_flags);
+               if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
+                       /* Report QS -after- changing ->qsmaskinitnext! */
+                       rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               } else {
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               }
        }
        smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
 }
@@ -3713,7 +3758,7 @@ void rcu_cpu_starting(unsigned int cpu)
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
- * function.  We now remove it from the rcu_node tree's ->qsmaskinit
+ * function.  We now remove it from the rcu_node tree's ->qsmaskinitnext
  * bit masks.
  */
 static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
@@ -3725,9 +3770,18 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
 
        /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
        mask = rdp->grpmask;
+       spin_lock(&rsp->ofl_lock);
        raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
+       rdp->rcu_ofl_gp_seq = READ_ONCE(rsp->gp_seq);
+       rdp->rcu_ofl_gp_flags = READ_ONCE(rsp->gp_flags);
+       if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
+               /* Report quiescent state -before- changing ->qsmaskinitnext! */
+               rcu_report_qs_rnp(mask, rsp, rnp, rnp->gp_seq, flags);
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       }
        rnp->qsmaskinitnext &= ~mask;
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+       spin_unlock(&rsp->ofl_lock);
 }
 
 /*
@@ -3839,12 +3893,16 @@ static int __init rcu_spawn_gp_kthread(void)
        struct task_struct *t;
 
        /* Force priority into range. */
-       if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
+       if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
+           && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
+               kthread_prio = 2;
+       else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
                kthread_prio = 1;
        else if (kthread_prio < 0)
                kthread_prio = 0;
        else if (kthread_prio > 99)
                kthread_prio = 99;
+
        if (kthread_prio != kthread_prio_in)
                pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
                         kthread_prio, kthread_prio_in);
@@ -3928,8 +3986,9 @@ static void __init rcu_init_one(struct rcu_state *rsp)
                        raw_spin_lock_init(&rnp->fqslock);
                        lockdep_set_class_and_name(&rnp->fqslock,
                                                   &rcu_fqs_class[i], fqs[i]);
-                       rnp->gpnum = rsp->gpnum;
-                       rnp->completed = rsp->completed;
+                       rnp->gp_seq = rsp->gp_seq;
+                       rnp->gp_seq_needed = rsp->gp_seq;
+                       rnp->completedqs = rsp->gp_seq;
                        rnp->qsmask = 0;
                        rnp->qsmaskinit = 0;
                        rnp->grplo = j * cpustride;
@@ -3997,7 +4056,7 @@ static void __init rcu_init_geometry(void)
        if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
            nr_cpu_ids == NR_CPUS)
                return;
-       pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
+       pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
                rcu_fanout_leaf, nr_cpu_ids);
 
        /*
index 78e051dffc5bf6aab3e56b8d5966996269eaf1a3..4e74df768c579d9fe356081b0f421bdbab731275 100644 (file)
@@ -81,18 +81,16 @@ struct rcu_node {
        raw_spinlock_t __private lock;  /* Root rcu_node's lock protects */
                                        /*  some rcu_state fields as well as */
                                        /*  following. */
-       unsigned long gpnum;    /* Current grace period for this node. */
-                               /*  This will either be equal to or one */
-                               /*  behind the root rcu_node's gpnum. */
-       unsigned long completed; /* Last GP completed for this node. */
-                               /*  This will either be equal to or one */
-                               /*  behind the root rcu_node's gpnum. */
+       unsigned long gp_seq;   /* Track rsp->rcu_gp_seq. */
+       unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */
+       unsigned long completedqs; /* All QSes done for this node. */
        unsigned long qsmask;   /* CPUs or groups that need to switch in */
                                /*  order for current grace period to proceed.*/
                                /*  In leaf rcu_node, each bit corresponds to */
                                /*  an rcu_data structure, otherwise, each */
                                /*  bit corresponds to a child rcu_node */
                                /*  structure. */
+       unsigned long rcu_gp_init_mask; /* Mask of offline CPUs at GP init. */
        unsigned long qsmaskinit;
                                /* Per-GP initial value for qsmask. */
                                /*  Initialized from ->qsmaskinitnext at the */
@@ -158,7 +156,6 @@ struct rcu_node {
        struct swait_queue_head nocb_gp_wq[2];
                                /* Place for rcu_nocb_kthread() to wait GP. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-       u8 need_future_gp[4];   /* Counts of upcoming GP requests. */
        raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
 
        spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
@@ -168,22 +165,6 @@ struct rcu_node {
        bool exp_need_flush;    /* Need to flush workitem? */
 } ____cacheline_internodealigned_in_smp;
 
-/* Accessors for ->need_future_gp[] array. */
-#define need_future_gp_mask() \
-       (ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1)
-#define need_future_gp_element(rnp, c) \
-       ((rnp)->need_future_gp[(c) & need_future_gp_mask()])
-#define need_any_future_gp(rnp)                                                \
-({                                                                     \
-       int __i;                                                        \
-       bool __nonzero = false;                                         \
-                                                                       \
-       for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++)   \
-               __nonzero = __nonzero ||                                \
-                           READ_ONCE((rnp)->need_future_gp[__i]);      \
-       __nonzero;                                                      \
-})
-
 /*
  * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
  * are indexed relative to this interval rather than the global CPU ID space.
@@ -206,16 +187,14 @@ union rcu_noqs {
 /* Per-CPU data for read-copy update. */
 struct rcu_data {
        /* 1) quiescent-state and grace-period handling : */
-       unsigned long   completed;      /* Track rsp->completed gp number */
-                                       /*  in order to detect GP end. */
-       unsigned long   gpnum;          /* Highest gp number that this CPU */
-                                       /*  is aware of having started. */
+       unsigned long   gp_seq;         /* Track rsp->rcu_gp_seq counter. */
+       unsigned long   gp_seq_needed;  /* Track rsp->rcu_gp_seq_needed ctr. */
        unsigned long   rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
                                        /*  for rcu_all_qs() invocations. */
        union rcu_noqs  cpu_no_qs;      /* No QSes yet for this CPU. */
        bool            core_needs_qs;  /* Core waits for quiesc state. */
        bool            beenonline;     /* CPU online at least once. */
-       bool            gpwrap;         /* Possible gpnum/completed wrap. */
+       bool            gpwrap;         /* Possible ->gp_seq wrap. */
        struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
        unsigned long grpmask;          /* Mask to apply to leaf qsmask. */
        unsigned long   ticks_this_gp;  /* The number of scheduling-clock */
@@ -239,7 +218,6 @@ struct rcu_data {
 
        /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
        unsigned long dynticks_fqs;     /* Kicked due to dynticks idle. */
-       unsigned long offline_fqs;      /* Kicked due to being offline. */
        unsigned long cond_resched_completed;
                                        /* Grace period that needs help */
                                        /*  from cond_resched(). */
@@ -278,12 +256,16 @@ struct rcu_data {
                                        /* Leader CPU takes GP-end wakeups. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-       /* 7) RCU CPU stall data. */
+       /* 7) Diagnostic data, including RCU CPU stall warnings. */
        unsigned int softirq_snap;      /* Snapshot of softirq activity. */
        /* ->rcu_iw* fields protected by leaf rcu_node ->lock. */
        struct irq_work rcu_iw;         /* Check for non-irq activity. */
        bool rcu_iw_pending;            /* Is ->rcu_iw pending? */
-       unsigned long rcu_iw_gpnum;     /* ->gpnum associated with ->rcu_iw. */
+       unsigned long rcu_iw_gp_seq;    /* ->gp_seq associated with ->rcu_iw. */
+       unsigned long rcu_ofl_gp_seq;   /* ->gp_seq at last offline. */
+       short rcu_ofl_gp_flags;         /* ->gp_flags at last offline. */
+       unsigned long rcu_onl_gp_seq;   /* ->gp_seq at last online. */
+       short rcu_onl_gp_flags;         /* ->gp_flags at last online. */
 
        int cpu;
        struct rcu_state *rsp;
@@ -340,8 +322,7 @@ struct rcu_state {
 
        u8      boost ____cacheline_internodealigned_in_smp;
                                                /* Subject to priority boost. */
-       unsigned long gpnum;                    /* Current gp number. */
-       unsigned long completed;                /* # of last completed gp. */
+       unsigned long gp_seq;                   /* Grace-period sequence #. */
        struct task_struct *gp_kthread;         /* Task for grace periods. */
        struct swait_queue_head gp_wq;          /* Where GP task waits. */
        short gp_flags;                         /* Commands for GP task. */
@@ -373,6 +354,8 @@ struct rcu_state {
                                                /*  but in jiffies. */
        unsigned long gp_activity;              /* Time of last GP kthread */
                                                /*  activity in jiffies. */
+       unsigned long gp_req_activity;          /* Time of last GP request */
+                                               /*  in jiffies. */
        unsigned long jiffies_stall;            /* Time at which to check */
                                                /*  for CPU stalls. */
        unsigned long jiffies_resched;          /* Time at which to resched */
@@ -384,6 +367,10 @@ struct rcu_state {
        const char *name;                       /* Name of structure. */
        char abbr;                              /* Abbreviated name. */
        struct list_head flavors;               /* List of RCU flavors. */
+
+       spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
+                                               /* Synchronize offline with */
+                                               /*  GP pre-initialization. */
 };
 
 /* Values for rcu_state structure's gp_flags field. */
@@ -394,16 +381,20 @@ struct rcu_state {
 #define RCU_GP_IDLE     0      /* Initial state and no GP in progress. */
 #define RCU_GP_WAIT_GPS  1     /* Wait for grace-period start. */
 #define RCU_GP_DONE_GPS  2     /* Wait done for grace-period start. */
-#define RCU_GP_WAIT_FQS  3     /* Wait for force-quiescent-state time. */
-#define RCU_GP_DOING_FQS 4     /* Wait done for force-quiescent-state time. */
-#define RCU_GP_CLEANUP   5     /* Grace-period cleanup started. */
-#define RCU_GP_CLEANED   6     /* Grace-period cleanup complete. */
+#define RCU_GP_ONOFF     3     /* Grace-period initialization hotplug. */
+#define RCU_GP_INIT      4     /* Grace-period initialization. */
+#define RCU_GP_WAIT_FQS  5     /* Wait for force-quiescent-state time. */
+#define RCU_GP_DOING_FQS 6     /* Wait done for force-quiescent-state time. */
+#define RCU_GP_CLEANUP   7     /* Grace-period cleanup started. */
+#define RCU_GP_CLEANED   8     /* Grace-period cleanup complete. */
 
 #ifndef RCU_TREE_NONCORE
 static const char * const gp_state_names[] = {
        "RCU_GP_IDLE",
        "RCU_GP_WAIT_GPS",
        "RCU_GP_DONE_GPS",
+       "RCU_GP_ONOFF",
+       "RCU_GP_INIT",
        "RCU_GP_WAIT_FQS",
        "RCU_GP_DOING_FQS",
        "RCU_GP_CLEANUP",
@@ -449,10 +440,13 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
+static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
+                                           struct rcu_node *rnp);
 static void rcu_preempt_check_callbacks(void);
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
 static void __init __rcu_init_preempt(void);
+static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
+                           int ncheck);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static void invoke_rcu_callbacks_kthread(void);
@@ -489,7 +483,6 @@ static void __init rcu_spawn_nocb_kthreads(void);
 #ifdef CONFIG_RCU_NOCB_CPU
 static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
 static void rcu_bind_gp_kthread(void);
 static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
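
The ->gpnum/->completed pairs above collapse into a single ->gp_seq counter whose low-order bits encode the grace-period phase and whose upper bits count grace periods, which is what rcu_seq_ctr() and rcu_seq_snap() manipulate elsewhere in this series. A minimal userspace model of that encoding, patterned on the kernel's rcu_seq_*() helpers (the constants and snapshot arithmetic here are illustrative assumptions, not the kernel source):

/* Userspace model of a gp_seq-style counter: bottom 2 bits are the
 * phase (0 = idle, nonzero = in progress), upper bits count GPs.
 */
#include <assert.h>
#include <stdio.h>

#define SEQ_STATE_MASK 0x3UL
#define SEQ_CTR_SHIFT  2

static unsigned long seq_ctr(unsigned long s) { return s >> SEQ_CTR_SHIFT; }
static unsigned long seq_state(unsigned long s) { return s & SEQ_STATE_MASK; }

/* Start a grace period: idle -> in progress. */
static void seq_start(unsigned long *sp) { (*sp)++; assert(seq_state(*sp)); }

/* End a grace period: round up to the next idle value. */
static void seq_end(unsigned long *sp)
{
        *sp = (*sp | SEQ_STATE_MASK) + 1;
        assert(!seq_state(*sp));
}

/* Snapshot: the counter value a full grace period from now reaches. */
static unsigned long seq_snap(unsigned long s)
{
        return (s + 2 * SEQ_STATE_MASK + 1) & ~SEQ_STATE_MASK;
}

/* Has a full grace period elapsed since snapshot s? */
static int seq_done(unsigned long cur, unsigned long s)
{
        return (long)(cur - s) >= 0; /* wrap-safe, like ULONG_CMP_GE() */
}

int main(void)
{
        unsigned long gp_seq = 0, snap = seq_snap(gp_seq);

        seq_start(&gp_seq);
        printf("in GP %lu, done=%d\n", seq_ctr(gp_seq), seq_done(gp_seq, snap));
        seq_end(&gp_seq);
        printf("after GP, done=%d\n", seq_done(gp_seq, snap));
        return 0;
}
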
index d40708e8c5d6e1f234ad508aa2cf60afa3e4e83c..b3df3b770afb1c054c0fe298350d45a7d8c57bf7 100644 (file)
@@ -472,6 +472,7 @@ static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
 static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                                     smp_call_func_t func)
 {
+       int cpu;
        struct rcu_node *rnp;
 
        trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
@@ -486,13 +487,20 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                rnp->rew.rew_func = func;
                rnp->rew.rew_rsp = rsp;
                if (!READ_ONCE(rcu_par_gp_wq) ||
-                   rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
-                       /* No workqueues yet. */
+                   rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
+                   rcu_is_last_leaf_node(rsp, rnp)) {
+                       /* No workqueues yet or last leaf, do direct call. */
                        sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
                        continue;
                }
                INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
-               queue_work_on(rnp->grplo, rcu_par_gp_wq, &rnp->rew.rew_work);
+               preempt_disable();
+               cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
+               /* If all offline, queue the work on an unbound CPU. */
+               if (unlikely(cpu > rnp->grphi))
+                       cpu = WORK_CPU_UNBOUND;
+               queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
+               preempt_enable();
                rnp->exp_need_flush = true;
        }
 
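
The loop above no longer queues blindly on rnp->grplo, which may be offline; it picks the first online CPU covered by the leaf node and falls back to an unbound workqueue when the node's whole CPU range is offline. A standalone sketch of that selection policy (the plain bitmask below is a stand-in for cpu_online_mask, and pick_cpu() approximates cpumask_next()):

/* Sketch: pick the first online CPU in [lo, hi], else "unbound". */
#include <stdio.h>

#define WORK_CPU_UNBOUND (-1)

static int pick_cpu(unsigned long online_mask, int lo, int hi)
{
        int cpu;

        /* cpumask_next(lo - 1, mask) == first set bit >= lo */
        for (cpu = lo; cpu < (int)(8 * sizeof(online_mask)); cpu++)
                if (online_mask & (1UL << cpu))
                        break;
        /* If every CPU in the node's range is offline, go unbound. */
        return (cpu > hi) ? WORK_CPU_UNBOUND : cpu;
}

int main(void)
{
        /* CPUs 2 and 5 online; one node covers 0-3, another covers 4. */
        printf("%d\n", pick_cpu(0x24UL, 0, 3)); /* -> 2 */
        printf("%d\n", pick_cpu(0x24UL, 4, 4)); /* -> WORK_CPU_UNBOUND */
        return 0;
}
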
index 7fd12039e512664626590e0a188260403d4578cc..c1b17f5b9361ad49692a2bd27dee6b72bff3800e 100644 (file)
@@ -74,8 +74,8 @@ static void __init rcu_bootup_announce_oddness(void)
                pr_info("\tRCU event tracing is enabled.\n");
        if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
            (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
-               pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
-                      RCU_FANOUT);
+               pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d.\n",
+                       RCU_FANOUT);
        if (rcu_fanout_exact)
                pr_info("\tHierarchical RCU autobalancing is disabled.\n");
        if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
@@ -88,11 +88,13 @@ static void __init rcu_bootup_announce_oddness(void)
                pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
                        RCU_FANOUT_LEAF);
        if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
-               pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
+               pr_info("\tBoot-time adjustment of leaf fanout to %d.\n",
+                       rcu_fanout_leaf);
        if (nr_cpu_ids != NR_CPUS)
                pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%u.\n", NR_CPUS, nr_cpu_ids);
 #ifdef CONFIG_RCU_BOOST
-       pr_info("\tRCU priority boosting: priority %d delay %d ms.\n", kthread_prio, CONFIG_RCU_BOOST_DELAY);
+       pr_info("\tRCU priority boosting: priority %d delay %d ms.\n",
+               kthread_prio, CONFIG_RCU_BOOST_DELAY);
 #endif
        if (blimit != DEFAULT_RCU_BLIMIT)
                pr_info("\tBoot-time adjustment of callback invocation limit to %ld.\n", blimit);
@@ -127,6 +129,7 @@ static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
 
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                               bool wake);
+static void rcu_read_unlock_special(struct task_struct *t);
 
 /*
  * Tell them what RCU they are running.
@@ -183,6 +186,9 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
        raw_lockdep_assert_held_rcu_node(rnp);
        WARN_ON_ONCE(rdp->mynode != rnp);
        WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
+       /* RCU better not be waiting on newly onlined CPUs! */
+       WARN_ON_ONCE(rnp->qsmaskinitnext & ~rnp->qsmaskinit & rnp->qsmask &
+                    rdp->grpmask);
 
        /*
         * Decide where to queue the newly blocked task.  In theory,
@@ -260,8 +266,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
         * ->exp_tasks pointers, respectively, to reference the newly
         * blocked tasks.
         */
-       if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
+       if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
                rnp->gp_tasks = &t->rcu_node_entry;
+               WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq);
+       }
        if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
                rnp->exp_tasks = &t->rcu_node_entry;
        WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
@@ -286,20 +294,24 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 }
 
 /*
- * Record a preemptible-RCU quiescent state for the specified CPU.  Note
- * that this just means that the task currently running on the CPU is
- * not in a quiescent state.  There might be any number of tasks blocked
- * while in an RCU read-side critical section.
+ * Record a preemptible-RCU quiescent state for the specified CPU.
+ * Note that this does not necessarily mean that the task currently running
+ * on the CPU is in a quiescent state:  Instead, it means that the current
+ * grace period need not wait on any RCU read-side critical section that
+ * starts later on this CPU.  It also means that if the current task is
+ * in an RCU read-side critical section, it has already added itself to
+ * some leaf rcu_node structure's ->blkd_tasks list.  In addition to the
+ * current task, there might be any number of other tasks blocked while
+ * in an RCU read-side critical section.
  *
- * As with the other rcu_*_qs() functions, callers to this function
- * must disable preemption.
+ * Callers to this function must disable preemption.
  */
 static void rcu_preempt_qs(void)
 {
        RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_qs() invoked with preemption enabled!!!\n");
        if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) {
                trace_rcu_grace_period(TPS("rcu_preempt"),
-                                      __this_cpu_read(rcu_data_p->gpnum),
+                                      __this_cpu_read(rcu_data_p->gp_seq),
                                       TPS("cpuqs"));
                __this_cpu_write(rcu_data_p->cpu_no_qs.b.norm, false);
                barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
@@ -348,8 +360,8 @@ static void rcu_preempt_note_context_switch(bool preempt)
                trace_rcu_preempt_task(rdp->rsp->name,
                                       t->pid,
                                       (rnp->qsmask & rdp->grpmask)
-                                      ? rnp->gpnum
-                                      : rnp->gpnum + 1);
+                                      ? rnp->gp_seq
+                                      : rcu_seq_snap(&rnp->gp_seq));
                rcu_preempt_ctxt_queue(rnp, rdp);
        } else if (t->rcu_read_lock_nesting < 0 &&
                   t->rcu_read_unlock_special.s) {
@@ -456,7 +468,7 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-void rcu_read_unlock_special(struct task_struct *t)
+static void rcu_read_unlock_special(struct task_struct *t)
 {
        bool empty_exp;
        bool empty_norm;
@@ -535,13 +547,15 @@ void rcu_read_unlock_special(struct task_struct *t)
                WARN_ON_ONCE(rnp != t->rcu_blocked_node);
                WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
                empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
+               WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
+                            (!empty_norm || rnp->qsmask));
                empty_exp = sync_rcu_preempt_exp_done(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
                t->rcu_blocked_node = NULL;
                trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
-                                               rnp->gpnum, t->pid);
+                                               rnp->gp_seq, t->pid);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp->gp_tasks = np;
                if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -562,7 +576,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                empty_exp_now = sync_rcu_preempt_exp_done(rnp);
                if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
                        trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
-                                                        rnp->gpnum,
+                                                        rnp->gp_seq,
                                                         0, rnp->qsmask,
                                                         rnp->level,
                                                         rnp->grplo,
@@ -686,24 +700,27 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * Check that the list of blocked tasks for the newly completed grace
  * period is in fact empty.  It is a serious bug to complete a grace
  * period that still has RCU readers blocked!  This function must be
- * invoked -before- updating this rnp's ->gpnum, and the rnp's ->lock
+ * invoked -before- updating this rnp's ->gp_seq, and the rnp's ->lock
  * must be held by the caller.
  *
  * Also, if there are blocked tasks on the list, they automatically
  * block the newly created grace period, so set up ->gp_tasks accordingly.
  */
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
 {
        struct task_struct *t;
 
        RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
-       WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
-       if (rcu_preempt_has_tasks(rnp)) {
+       if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+               dump_blkd_tasks(rsp, rnp, 10);
+       if (rcu_preempt_has_tasks(rnp) &&
+           (rnp->qsmaskinit || rnp->wait_blkd_tasks)) {
                rnp->gp_tasks = rnp->blkd_tasks.next;
                t = container_of(rnp->gp_tasks, struct task_struct,
                                 rcu_node_entry);
                trace_rcu_unlock_preempted_task(TPS("rcu_preempt-GPS"),
-                                               rnp->gpnum, t->pid);
+                                               rnp->gp_seq, t->pid);
        }
        WARN_ON_ONCE(rnp->qsmask);
 }
@@ -717,6 +734,7 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
  */
 static void rcu_preempt_check_callbacks(void)
 {
+       struct rcu_state *rsp = &rcu_preempt_state;
        struct task_struct *t = current;
 
        if (t->rcu_read_lock_nesting == 0) {
@@ -725,7 +743,9 @@ static void rcu_preempt_check_callbacks(void)
        }
        if (t->rcu_read_lock_nesting > 0 &&
            __this_cpu_read(rcu_data_p->core_needs_qs) &&
-           __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
+           __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm) &&
+           !t->rcu_read_unlock_special.b.need_qs &&
+           time_after(jiffies, rsp->gp_start + HZ))
                t->rcu_read_unlock_special.b.need_qs = true;
 }
 
@@ -841,6 +861,47 @@ void exit_rcu(void)
        __rcu_read_unlock();
 }
 
+/*
+ * Dump the blocked-tasks state, but limit the list dump to the
+ * specified number of elements.
+ */
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+{
+       int cpu;
+       int i;
+       struct list_head *lhp;
+       bool onl;
+       struct rcu_data *rdp;
+       struct rcu_node *rnp1;
+
+       raw_lockdep_assert_held_rcu_node(rnp);
+       pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
+               __func__, rnp->grplo, rnp->grphi, rnp->level,
+               (long)rnp->gp_seq, (long)rnp->completedqs);
+       for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
+               pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx\n",
+                       __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext);
+       pr_info("%s: ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p\n",
+               __func__, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks);
+       pr_info("%s: ->blkd_tasks", __func__);
+       i = 0;
+       list_for_each(lhp, &rnp->blkd_tasks) {
+               pr_cont(" %p", lhp);
+               if (++i >= 10)
+                       break;
+       }
+       pr_cont("\n");
+       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
+               rdp = per_cpu_ptr(rsp->rda, cpu);
+               onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
+               pr_info("\t%d: %c online: %ld(%d) offline: %ld(%d)\n",
+                       cpu, ".o"[onl],
+                       (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
+                       (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
+       }
+}
+
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
 static struct rcu_state *const rcu_state_p = &rcu_sched_state;
@@ -911,7 +972,8 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp)
  * so there is no need to check for blocked tasks.  So check only for
  * bogus qsmask values.
  */
-static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
+static void
+rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
 {
        WARN_ON_ONCE(rnp->qsmask);
 }
@@ -949,6 +1011,15 @@ void exit_rcu(void)
 {
 }
 
+/*
+ * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
+ */
+static void
+dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp, int ncheck)
+{
+       WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
+}
+
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_BOOST
@@ -1433,7 +1504,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
                 * completed since we last checked and there are
                 * callbacks not yet ready to invoke.
                 */
-               if ((rdp->completed != rnp->completed ||
+               if ((rcu_seq_completed_gp(rdp->gp_seq,
+                                         rcu_seq_current(&rnp->gp_seq)) ||
                     unlikely(READ_ONCE(rdp->gpwrap))) &&
                    rcu_segcblist_pend_cbs(&rdp->cblist))
                        note_gp_changes(rsp, rdp);
@@ -1720,16 +1792,16 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
         */
        touch_nmi_watchdog();
 
-       if (rsp->gpnum == rdp->gpnum) {
+       ticks_value = rcu_seq_ctr(rsp->gp_seq - rdp->gp_seq);
+       if (ticks_value) {
+               ticks_title = "GPs behind";
+       } else {
                ticks_title = "ticks this GP";
                ticks_value = rdp->ticks_this_gp;
-       } else {
-               ticks_title = "GPs behind";
-               ticks_value = rsp->gpnum - rdp->gpnum;
        }
        print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
-       delta = rdp->mynode->gpnum - rdp->rcu_iw_gpnum;
-       pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%ld softirq=%u/%u fqs=%ld %s\n",
+       delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
+       pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
               cpu,
               "O."[!!cpu_online(cpu)],
               "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
@@ -1817,7 +1889,7 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
 
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
 {
-       return &rnp->nocb_gp_wq[rnp->completed & 0x1];
+       return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
 }
 
 static void rcu_init_one_nocb(struct rcu_node *rnp)
@@ -2069,12 +2141,17 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
        bool needwake;
        struct rcu_node *rnp = rdp->mynode;
 
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       c = rcu_cbs_completed(rdp->rsp, rnp);
-       needwake = rcu_start_this_gp(rnp, rdp, c);
-       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       if (needwake)
-               rcu_gp_kthread_wake(rdp->rsp);
+       local_irq_save(flags);
+       c = rcu_seq_snap(&rdp->rsp->gp_seq);
+       if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+               local_irq_restore(flags);
+       } else {
+               raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+               needwake = rcu_start_this_gp(rnp, rdp, c);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               if (needwake)
+                       rcu_gp_kthread_wake(rdp->rsp);
+       }
 
        /*
         * Wait for the grace period.  Do so interruptibly to avoid messing
@@ -2083,8 +2160,8 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
        trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
        for (;;) {
                swait_event_interruptible(
-                       rnp->nocb_gp_wq[c & 0x1],
-                       (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
+                       rnp->nocb_gp_wq[rcu_seq_ctr(c) & 0x1],
+                       (d = rcu_seq_done(&rnp->gp_seq, c)));
                if (likely(d))
                        break;
                WARN_ON(signal_pending(current));
@@ -2568,23 +2645,6 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 
-/*
- * An adaptive-ticks CPU can potentially execute in kernel mode for an
- * arbitrarily long period of time with the scheduling-clock tick turned
- * off.  RCU will be paying attention to this CPU because it is in the
- * kernel, but the CPU cannot be guaranteed to be executing the RCU state
- * machine because the scheduling-clock tick has been disabled.  Therefore,
- * if an adaptive-ticks CPU is failing to respond to the current grace
- * period and has not be idle from an RCU perspective, kick it.
- */
-static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
-{
-#ifdef CONFIG_NO_HZ_FULL
-       if (tick_nohz_full_cpu(cpu))
-               smp_send_reschedule(cpu);
-#endif /* #ifdef CONFIG_NO_HZ_FULL */
-}
-
 /*
  * Is this CPU a NO_HZ_FULL CPU that should ignore RCU so that the
  * grace-period kthread will do force_quiescent_state() processing?
@@ -2610,8 +2670,6 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
  */
 static void rcu_bind_gp_kthread(void)
 {
-       int __maybe_unused cpu;
-
        if (!tick_nohz_full_enabled())
                return;
        housekeeping_affine(current, HK_FLAG_RCU);
index 4c230a60ece44e1cf2fd65b954898fa3c6f52454..39cb23d22109386562683c57931df279257a9e85 100644 (file)
@@ -507,14 +507,15 @@ early_initcall(check_cpu_stall_init);
 #ifdef CONFIG_TASKS_RCU
 
 /*
- * Simple variant of RCU whose quiescent states are voluntary context switch,
- * user-space execution, and idle.  As such, grace periods can take one good
- * long time.  There are no read-side primitives similar to rcu_read_lock()
- * and rcu_read_unlock() because this implementation is intended to get
- * the system into a safe state for some of the manipulations involved in
- * tracing and the like.  Finally, this implementation does not support
- * high call_rcu_tasks() rates from multiple CPUs.  If this is required,
- * per-CPU callback lists will be needed.
+ * Simple variant of RCU whose quiescent states are voluntary context
+ * switch, cond_resched_rcu_qs(), user-space execution, and idle.
+ * As such, grace periods can take one good long time.  There are no
+ * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
+ * because this implementation is intended to get the system into a safe
+ * state for some of the manipulations involved in tracing and the like.
+ * Finally, this implementation does not support high call_rcu_tasks()
+ * rates from multiple CPUs.  If this is required, per-CPU callback lists
+ * will be needed.
  */
 
 /* Global list of callbacks and associated lock. */
@@ -542,11 +543,11 @@ static struct task_struct *rcu_tasks_kthread_ptr;
  * period elapses, in other words after all currently executing RCU
  * read-side critical sections have completed. call_rcu_tasks() assumes
  * that the read-side critical sections end at a voluntary context
- * switch (not a preemption!), entry into idle, or transition to usermode
- * execution.  As such, there are no read-side primitives analogous to
- * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
- * to determine that all tasks have passed through a safe state, not so
- * much for data-strcuture synchronization.
+ * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
+ * or transition to usermode execution.  As such, there are no read-side
+ * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
+ * this primitive is intended to determine that all tasks have passed
+ * through a safe state, not so much for data-structure synchronization.
  *
  * See the description of call_rcu() for more detailed information on
  * memory ordering guarantees.
@@ -667,6 +668,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
        struct rcu_head *list;
        struct rcu_head *next;
        LIST_HEAD(rcu_tasks_holdouts);
+       int fract;
 
        /* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
        housekeeping_affine(current, HK_FLAG_RCU);
@@ -748,13 +750,25 @@ static int __noreturn rcu_tasks_kthread(void *arg)
                 * holdouts.  When the list is empty, we are done.
                 */
                lastreport = jiffies;
-               while (!list_empty(&rcu_tasks_holdouts)) {
+
+               /* Start off with HZ/10 wait and slowly back off to 1 HZ wait */
+               fract = 10;
+
+               for (;;) {
                        bool firstreport;
                        bool needreport;
                        int rtst;
                        struct task_struct *t1;
 
-                       schedule_timeout_interruptible(HZ);
+                       if (list_empty(&rcu_tasks_holdouts))
+                               break;
+
+                       /* Slowly back off waiting for holdouts */
+                       schedule_timeout_interruptible(HZ/fract);
+
+                       if (fract > 1)
+                               fract--;
+
                        rtst = READ_ONCE(rcu_task_stall_timeout);
                        needreport = rtst > 0 &&
                                     time_after(jiffies, lastreport + rtst);
@@ -800,6 +814,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
                        list = next;
                        cond_resched();
                }
+               /* Paranoid sleep to keep this from entering a tight loop */
                schedule_timeout_uninterruptible(HZ/10);
        }
 }
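
The holdout loop above replaces a fixed one-second wait with a decaying poll: HZ/10 on the first pass, then HZ/9, HZ/8, and so on, settling at a steady HZ once fract reaches 1. A toy model of the resulting wait schedule (HZ is set to 100 here purely for illustration):

/* Model of the holdout-poll backoff: waits of HZ/10, HZ/9, ... HZ/1. */
#include <stdio.h>

#define HZ 100 /* illustrative; the real value is the kernel tick rate */

int main(void)
{
        int fract = 10; /* start at HZ/10, back off toward HZ */
        int i;

        for (i = 0; i < 12; i++) {
                printf("wait %d jiffies\n", HZ / fract);
                if (fract > 1)
                        fract--; /* never sleep longer than 1 second */
        }
        return 0;
}
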
index 3de1efbecd6a3cad4c47b1e09e649969f5ea3b0d..1ac24a826589353396f02c98150049d134e6e7dd 100644 (file)
@@ -20,6 +20,9 @@
  * Author: Paul E. McKenney <paulmck@us.ibm.com>
  *     Based on kernel/rcu/torture.c.
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -53,7 +56,7 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
 
 static char *torture_type;
-static bool verbose;
+static int verbose;
 
 /* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
 #define FULLSTOP_DONTSTOP 0    /* Normal operation. */
@@ -98,7 +101,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
        if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
                return false;
 
-       if (verbose)
+       if (verbose > 1)
                pr_alert("%s" TORTURE_FLAG
                         "torture_onoff task: offlining %d\n",
                         torture_type, cpu);
@@ -111,7 +114,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
                                 "torture_onoff task: offline %d failed: errno %d\n",
                                 torture_type, cpu, ret);
        } else {
-               if (verbose)
+               if (verbose > 1)
                        pr_alert("%s" TORTURE_FLAG
                                 "torture_onoff task: offlined %d\n",
                                 torture_type, cpu);
@@ -147,7 +150,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
        if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
                return false;
 
-       if (verbose)
+       if (verbose > 1)
                pr_alert("%s" TORTURE_FLAG
                         "torture_onoff task: onlining %d\n",
                         torture_type, cpu);
@@ -160,7 +163,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
                                 "torture_onoff task: online %d failed: errno %d\n",
                                 torture_type, cpu, ret);
        } else {
-               if (verbose)
+               if (verbose > 1)
                        pr_alert("%s" TORTURE_FLAG
                                 "torture_onoff task: onlined %d\n",
                                 torture_type, cpu);
@@ -647,7 +650,7 @@ static void torture_stutter_cleanup(void)
  * The runnable parameter points to a flag that controls whether or not
  * the test is currently runnable.  If there is no such flag, pass in NULL.
  */
-bool torture_init_begin(char *ttype, bool v)
+bool torture_init_begin(char *ttype, int v)
 {
        mutex_lock(&fullstop_mutex);
        if (torture_type != NULL) {
index 19d42ea75ec225d385734e5212680df0ef9609c6..98fa559ebd808698ecc3774bf88c5bdea1988f27 100644 (file)
@@ -1,9 +1,6 @@
 config ARCH_HAS_UBSAN_SANITIZE_ALL
        bool
 
-config ARCH_WANTS_UBSAN_NO_NULL
-       def_bool n
-
 config UBSAN
        bool "Undefined behaviour sanity checker"
        help
@@ -39,14 +36,6 @@ config UBSAN_ALIGNMENT
          Enabling this option on architectures that support unaligned
          accesses may produce a lot of false positives.
 
-config UBSAN_NULL
-       bool "Enable checking of null pointers"
-       depends on UBSAN
-       default y if !ARCH_WANTS_UBSAN_NO_NULL
-       help
-         This option enables detection of memory accesses via a
-         null pointer.
-
 config TEST_UBSAN
        tristate "Module for testing for undefined behavior detection"
        depends on m && UBSAN
index 994be4805ceca93dd417baf2289eff8f5446c4f5..70935ed9112599c3517829c0f6dea0de6d2435e4 100644 (file)
@@ -360,9 +360,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
 
        limit++;
        if (is_on_stack)
-               pr_warn("object is on stack, but not annotated\n");
+               pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
+                        task_stack_page(current));
        else
-               pr_warn("object is not on stack, but annotated\n");
+               pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
+                        task_stack_page(current));
+
        WARN_ON(1);
 }
 
@@ -1185,8 +1188,7 @@ void __init debug_objects_mem_init(void)
 
        if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
-               if (obj_cache)
-                       kmem_cache_destroy(obj_cache);
+               kmem_cache_destroy(obj_cache);
                pr_warn("out of memory.\n");
        } else
                debug_objects_selftest();
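
The reworked warning prints both the object address and the current task's stack page so the mismatch is visible at a glance. The underlying on-stack test is a simple range check against the task stack; a rough userspace approximation (STACK_SIZE and the alignment trick below stand in for task_stack_page()/THREAD_SIZE and are assumptions of this sketch):

/* Userspace approximation of "is this address on my stack?". */
#include <stdio.h>
#include <stdint.h>

#define STACK_SIZE (16 * 1024) /* stand-in for THREAD_SIZE */

static int is_on_stack(const void *stack_base, const void *addr)
{
        uintptr_t base = (uintptr_t)stack_base;
        uintptr_t p = (uintptr_t)addr;

        return p >= base && p < base + STACK_SIZE;
}

int main(void)
{
        char probe;          /* lives on this thread's stack */
        static char global;  /* lives in .bss, not on the stack */
        /* Approximate the stack page by aligning a known stack address. */
        const void *base = (const void *)
                ((uintptr_t)&probe & ~(uintptr_t)(STACK_SIZE - 1));

        printf("probe  on stack: %d\n", is_on_stack(base, &probe));
        printf("global on stack: %d\n", is_on_stack(base, &global));
        return 0;
}
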
index dab1511294add14ba1290ded1dcd65408e274a4c..c5e87a3a82ba340254ff66b446c9b853fc211dde 100644 (file)
@@ -4395,6 +4395,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
                return -EINVAL;
 
        maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+       if (!maddr)
+               return -ENOMEM;
+
        if (write)
                memcpy_toio(maddr + offset, buf, len);
        else
index 2b75df469220a8a5bf3ffa09aa6616eef0a9002d..842a9c7c73a3f6ef7568e105d38c276547ea29ef 100644 (file)
@@ -229,14 +229,16 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
        struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
        u32 cwnd = hc->tx_cwnd, restart_cwnd,
            iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
+       s32 delta = now - hc->tx_lsndtime;
 
        hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
 
        /* don't reduce cwnd below the initial window (IW) */
        restart_cwnd = min(cwnd, iwnd);
-       cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
-       hc->tx_cwnd = max(cwnd, restart_cwnd);
 
+       while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
+               cwnd >>= 1;
+       hc->tx_cwnd = max(cwnd, restart_cwnd);
        hc->tx_cwnd_stamp = now;
        hc->tx_cwnd_used  = 0;
 
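
The old code shifted cwnd by (now - tx_lsndtime) / tx_rto in one step, so a long idle period could yield a shift count beyond the width of cwnd, which is undefined behaviour; the loop above instead halves once per elapsed RTO and stops as soon as the restart window is reached. A compact model of the corrected computation:

/* Model of the cwnd restart: halve once per elapsed RTO, but never
 * drop below the restart window.  Avoids an unbounded shift count.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t cwnd_restart(uint32_t cwnd, uint32_t restart_cwnd,
                             int32_t delta, int32_t rto)
{
        while ((delta -= rto) >= 0 && cwnd > restart_cwnd)
                cwnd >>= 1;
        return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}

int main(void)
{
        /* 40 packets, restart window 4, idle for 10 RTOs. */
        printf("%u\n", cwnd_restart(40, 4, 10 * 100, 100)); /* -> 4 */
        /* Idle for 2 RTOs only: 40 -> 20 -> 10. */
        printf("%u\n", cwnd_restart(40, 4, 2 * 100, 100));  /* -> 10 */
        return 0;
}
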
index 732369c80644a5eb056a1abd2d7de609082ed059..9864bcd3d317f49f3c676718f5b3c302923cc713 100644 (file)
@@ -639,7 +639,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
        int ret;
 
        /* Port's PHY and MAC both need to be EEE capable */
-       if (!dev->phydev)
+       if (!dev->phydev && !dp->pl)
                return -ENODEV;
 
        if (!ds->ops->set_mac_eee)
@@ -659,7 +659,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
        int ret;
 
        /* Port's PHY and MAC both need to be EEE capable */
-       if (!dev->phydev)
+       if (!dev->phydev && !dp->pl)
                return -ENODEV;
 
        if (!ds->ops->get_mac_eee)
index 00e138a44cbba2e7c03cb003f5823b42e18a923a..1cc9650af9fbc0bf3f49d2bd4258f9b3d1e5790c 100644 (file)
@@ -1133,12 +1133,8 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
                max_headroom += 8;
                mtu -= 8;
        }
-       if (skb->protocol == htons(ETH_P_IPV6)) {
-               if (mtu < IPV6_MIN_MTU)
-                       mtu = IPV6_MIN_MTU;
-       } else if (mtu < 576) {
-               mtu = 576;
-       }
+       mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
+                      IPV6_MIN_MTU : IPV4_MIN_MTU);
 
        skb_dst_update_pmtu(skb, mtu);
        if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
index ec18b3ce8b6d8fc84c9bdb828448b9da88b3ed48..7208c16302f61adc15636f6a332ff0c02325cfcf 100644 (file)
@@ -978,10 +978,6 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
        rt->rt6i_flags &= ~RTF_EXPIRES;
        rcu_assign_pointer(rt->from, from);
        dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
-       if (from->fib6_metrics != &dst_default_metrics) {
-               rt->dst._metrics |= DST_METRICS_REFCOUNTED;
-               refcount_inc(&from->fib6_metrics->refcnt);
-       }
 }
 
 /* Caller must already hold reference to @ort */
index 89041260784c0871195556074f7138eee01edb33..260b3dc1b4a2ab4545982b0b36478d6a6cf0b71f 100644 (file)
@@ -73,8 +73,8 @@ struct llc_sap *llc_sap_find(unsigned char sap_value)
 
        rcu_read_lock_bh();
        sap = __llc_sap_find(sap_value);
-       if (sap)
-               llc_sap_hold(sap);
+       if (!sap || !llc_sap_hold_safe(sap))
+               sap = NULL;
        rcu_read_unlock_bh();
        return sap;
 }
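
The fix closes a narrow use-after-free window: between finding the SAP under rcu_read_lock_bh() and taking a reference, the last reference may already have been dropped. llc_sap_hold_safe() takes the reference only if the count is still nonzero; a generic C11-atomics sketch of that get-if-nonzero pattern (the kernel version wraps refcount_inc_not_zero()):

/* Get-if-nonzero: the safe way to take a reference to an object found
 * via a lockless lookup.
 */
#include <stdatomic.h>
#include <stdio.h>

struct obj { atomic_int refcnt; };

static int get_obj_maybe(struct obj *o)
{
        int old = atomic_load(&o->refcnt);

        /* Only increment while the count is visibly nonzero. */
        while (old != 0)
                if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
                        return 1; /* got a reference */
        return 0; /* object already dying: treat the lookup as a miss */
}

int main(void)
{
        struct obj live = { 1 }, dying = { 0 };

        printf("live:  %d\n", get_obj_maybe(&live));  /* 1 */
        printf("dying: %d\n", get_obj_maybe(&dying)); /* 0 */
        return 0;
}
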
index 9b27d0cd766d560fdb67ee2e3bbfc415963db8c6..e6445d8f3f57fee1c39de269724294b4455d8013 100644 (file)
@@ -4226,6 +4226,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        }
 
        if (req->tp_block_nr) {
+               unsigned int min_frame_size;
+
                /* Sanity tests and some calculations */
                err = -EBUSY;
                if (unlikely(rb->pg_vec))
@@ -4248,12 +4250,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        goto out;
                if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
                        goto out;
+               min_frame_size = po->tp_hdrlen + po->tp_reserve;
                if (po->tp_version >= TPACKET_V3 &&
-                   req->tp_block_size <=
-                   BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
+                   req->tp_block_size <
+                   BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
                        goto out;
-               if (unlikely(req->tp_frame_size < po->tp_hdrlen +
-                                       po->tp_reserve))
+               if (unlikely(req->tp_frame_size < min_frame_size))
                        goto out;
                if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
                        goto out;
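
Both tests now derive from a single min_frame_size = tp_hdrlen + tp_reserve, and the TPACKET_V3 block-size comparison becomes strict, so a block must hold its private area plus at least one minimum-sized frame rather than merely a bare tpacket3_hdr. A simplified model of the two checks (the alignment applied by BLK_PLUS_PRIV() is omitted here):

/* Model of the ring-size sanity checks: a block must hold its private
 * area plus at least one minimum-sized frame.
 */
#include <stdio.h>
#include <stdint.h>

static int ring_params_ok(uint64_t block_size, uint64_t frame_size,
                          uint64_t priv_size, uint64_t hdrlen,
                          uint64_t reserve)
{
        uint64_t min_frame_size = hdrlen + reserve;

        if (block_size < priv_size + min_frame_size)
                return 0; /* block can't fit priv area + one frame */
        if (frame_size < min_frame_size)
                return 0; /* frames too small for header + reserve */
        return 1;
}

int main(void)
{
        printf("%d\n", ring_params_ok(4096, 2048, 512, 48, 0)); /* ok */
        printf("%d\n", ring_params_ok(512, 2048, 512, 48, 0));  /* too small */
        return 0;
}
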
index 5fb7d3254d9e290106dda6566ed393eb6a2c3696..707630ab47133406f76cdd8bf76fe792bf2fa41f 100644 (file)
@@ -104,9 +104,9 @@ struct rxrpc_net {
 
 #define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */
        u8                      peer_keepalive_cursor;
-       ktime_t                 peer_keepalive_base;
-       struct hlist_head       peer_keepalive[RXRPC_KEEPALIVE_TIME + 1];
-       struct hlist_head       peer_keepalive_new;
+       time64_t                peer_keepalive_base;
+       struct list_head        peer_keepalive[32];
+       struct list_head        peer_keepalive_new;
        struct timer_list       peer_keepalive_timer;
        struct work_struct      peer_keepalive_work;
 };
@@ -295,7 +295,7 @@ struct rxrpc_peer {
        struct hlist_head       error_targets;  /* targets for net error distribution */
        struct work_struct      error_distributor;
        struct rb_root          service_conns;  /* Service connections */
-       struct hlist_node       keepalive_link; /* Link in net->peer_keepalive[] */
+       struct list_head        keepalive_link; /* Link in net->peer_keepalive[] */
        time64_t                last_tx_at;     /* Last time packet sent here */
        seqlock_t               service_conn_lock;
        spinlock_t              lock;           /* access lock */
index 8229a52c2acd79f69883e27e966fa4df43ca93c3..3fde001fcc392d9db372c4fa60a80ddf60e55d9a 100644 (file)
@@ -136,7 +136,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        }
 
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
                                    rxrpc_tx_fail_call_final_resend);
@@ -245,7 +245,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
                return -EAGAIN;
        }
 
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
 
        _leave(" = 0");
        return 0;
index 5d6a773db973180c646e61ddea9598dca6ff453a..417d80867c4f4a013465b3f9c22be04e49082b2f 100644 (file)
@@ -85,12 +85,12 @@ static __net_init int rxrpc_init_net(struct net *net)
        hash_init(rxnet->peer_hash);
        spin_lock_init(&rxnet->peer_hash_lock);
        for (i = 0; i < ARRAY_SIZE(rxnet->peer_keepalive); i++)
-               INIT_HLIST_HEAD(&rxnet->peer_keepalive[i]);
-       INIT_HLIST_HEAD(&rxnet->peer_keepalive_new);
+               INIT_LIST_HEAD(&rxnet->peer_keepalive[i]);
+       INIT_LIST_HEAD(&rxnet->peer_keepalive_new);
        timer_setup(&rxnet->peer_keepalive_timer,
                    rxrpc_peer_keepalive_timeout, 0);
        INIT_WORK(&rxnet->peer_keepalive_work, rxrpc_peer_keepalive_worker);
-       rxnet->peer_keepalive_base = ktime_add(ktime_get_real(), NSEC_PER_SEC);
+       rxnet->peer_keepalive_base = ktime_get_seconds();
 
        ret = -ENOMEM;
        rxnet->proc_net = proc_net_mkdir(net, "rxrpc", net->proc_net);
index f03de1c59ba37678f36f3a5c0778f3f3f9274757..4774c8f5634d95c647d1fbdc938dece00bc38ea2 100644 (file)
@@ -209,7 +209,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        now = ktime_get_real();
        if (ping)
                call->ping_time = now;
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
                                    rxrpc_tx_fail_call_ack);
@@ -296,7 +296,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
 
        ret = kernel_sendmsg(conn->params.local->socket,
                             &msg, iov, 1, sizeof(pkt));
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
                                    rxrpc_tx_fail_call_abort);
@@ -391,7 +391,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
         *     message and update the peer record
         */
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
 
        up_read(&conn->params.local->defrag_sem);
        if (ret < 0)
@@ -457,7 +457,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                if (ret == 0) {
                        ret = kernel_sendmsg(conn->params.local->socket, &msg,
                                             iov, 2, len);
-                       conn->params.peer->last_tx_at = ktime_get_real();
+                       conn->params.peer->last_tx_at = ktime_get_seconds();
 
                        opt = IP_PMTUDISC_DO;
                        kernel_setsockopt(conn->params.local->socket, SOL_IP,
@@ -475,7 +475,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                if (ret == 0) {
                        ret = kernel_sendmsg(conn->params.local->socket, &msg,
                                             iov, 2, len);
-                       conn->params.peer->last_tx_at = ktime_get_real();
+                       conn->params.peer->last_tx_at = ktime_get_seconds();
 
                        opt = IPV6_PMTUDISC_DO;
                        kernel_setsockopt(conn->params.local->socket,
@@ -599,6 +599,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
                trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
                                    rxrpc_tx_fail_version_keepalive);
 
-       peer->last_tx_at = ktime_get_real();
+       peer->last_tx_at = ktime_get_seconds();
        _leave("");
 }
index 0ed8b651cec293e121e40cf05282bddc8c3f1171..4f9da2f51c694c3f93d3883476057377664b80e7 100644 (file)
@@ -350,97 +350,117 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
 }
 
 /*
- * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ * Perform keep-alive pings.
  */
-void rxrpc_peer_keepalive_worker(struct work_struct *work)
+static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+                                         struct list_head *collector,
+                                         time64_t base,
+                                         u8 cursor)
 {
-       struct rxrpc_net *rxnet =
-               container_of(work, struct rxrpc_net, peer_keepalive_work);
        struct rxrpc_peer *peer;
-       unsigned long delay;
-       ktime_t base, now = ktime_get_real();
-       s64 diff;
-       u8 cursor, slot;
+       const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+       time64_t keepalive_at;
+       int slot;
 
-       base = rxnet->peer_keepalive_base;
-       cursor = rxnet->peer_keepalive_cursor;
+       spin_lock_bh(&rxnet->peer_hash_lock);
 
-       _enter("%u,%lld", cursor, ktime_sub(now, base));
+       while (!list_empty(collector)) {
+               peer = list_entry(collector->next,
+                                 struct rxrpc_peer, keepalive_link);
 
-next_bucket:
-       diff = ktime_to_ns(ktime_sub(now, base));
-       if (diff < 0)
-               goto resched;
+               list_del_init(&peer->keepalive_link);
+               if (!rxrpc_get_peer_maybe(peer))
+                       continue;
 
-       _debug("at %u", cursor);
-       spin_lock_bh(&rxnet->peer_hash_lock);
-next_peer:
-       if (!rxnet->live) {
                spin_unlock_bh(&rxnet->peer_hash_lock);
-               goto out;
-       }
 
-       /* Everything in the bucket at the cursor is processed this second; the
-        * bucket at cursor + 1 goes now + 1s and so on...
-        */
-       if (hlist_empty(&rxnet->peer_keepalive[cursor])) {
-               if (hlist_empty(&rxnet->peer_keepalive_new)) {
-                       spin_unlock_bh(&rxnet->peer_hash_lock);
-                       goto emptied_bucket;
+               keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
+               slot = keepalive_at - base;
+               _debug("%02x peer %u t=%d {%pISp}",
+                      cursor, peer->debug_id, slot, &peer->srx.transport);
+
+               if (keepalive_at <= base ||
+                   keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
+                       rxrpc_send_keepalive(peer);
+                       slot = RXRPC_KEEPALIVE_TIME;
                }
 
-               hlist_move_list(&rxnet->peer_keepalive_new,
-                               &rxnet->peer_keepalive[cursor]);
+               /* A transmission to this peer occurred since last we examined
+                * it so put it into the appropriate future bucket.
+                */
+               slot += cursor;
+               slot &= mask;
+               spin_lock_bh(&rxnet->peer_hash_lock);
+               list_add_tail(&peer->keepalive_link,
+                             &rxnet->peer_keepalive[slot & mask]);
+               rxrpc_put_peer(peer);
        }
 
-       peer = hlist_entry(rxnet->peer_keepalive[cursor].first,
-                          struct rxrpc_peer, keepalive_link);
-       hlist_del_init(&peer->keepalive_link);
-       if (!rxrpc_get_peer_maybe(peer))
-               goto next_peer;
-
        spin_unlock_bh(&rxnet->peer_hash_lock);
+}
 
-       _debug("peer %u {%pISp}", peer->debug_id, &peer->srx.transport);
+/*
+ * Perform keep-alive pings with VERSION packets to keep any NAT alive.
+ */
+void rxrpc_peer_keepalive_worker(struct work_struct *work)
+{
+       struct rxrpc_net *rxnet =
+               container_of(work, struct rxrpc_net, peer_keepalive_work);
+       const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
+       time64_t base, now, delay;
+       u8 cursor, stop;
+       LIST_HEAD(collector);
 
-recalc:
-       diff = ktime_divns(ktime_sub(peer->last_tx_at, base), NSEC_PER_SEC);
-       if (diff < -30 || diff > 30)
-               goto send; /* LSW of 64-bit time probably wrapped on 32-bit */
-       diff += RXRPC_KEEPALIVE_TIME - 1;
-       if (diff < 0)
-               goto send;
+       now = ktime_get_seconds();
+       base = rxnet->peer_keepalive_base;
+       cursor = rxnet->peer_keepalive_cursor;
+       _enter("%lld,%u", base - now, cursor);
 
-       slot = (diff > RXRPC_KEEPALIVE_TIME - 1) ? RXRPC_KEEPALIVE_TIME - 1 : diff;
-       if (slot == 0)
-               goto send;
+       if (!rxnet->live)
+               return;
 
-       /* A transmission to this peer occurred since last we examined it so
-        * put it into the appropriate future bucket.
+       /* Remove to a temporary list all the peers that are currently lodged
+        * in expired buckets plus all new peers.
+        *
+        * Everything in the bucket at the cursor is processed this
+        * second; the bucket at cursor + 1 goes at now + 1s and so
+        * on...
         */
-       slot = (slot + cursor) % ARRAY_SIZE(rxnet->peer_keepalive);
        spin_lock_bh(&rxnet->peer_hash_lock);
-       hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive[slot]);
-       rxrpc_put_peer(peer);
-       goto next_peer;
-
-send:
-       rxrpc_send_keepalive(peer);
-       now = ktime_get_real();
-       goto recalc;
+       list_splice_init(&rxnet->peer_keepalive_new, &collector);
+
+       stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
+       while (base <= now && (s8)(cursor - stop) < 0) {
+               list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
+                                     &collector);
+               base++;
+               cursor++;
+       }
 
-emptied_bucket:
-       cursor++;
-       if (cursor >= ARRAY_SIZE(rxnet->peer_keepalive))
-               cursor = 0;
-       base = ktime_add_ns(base, NSEC_PER_SEC);
-       goto next_bucket;
+       base = now;
+       spin_unlock_bh(&rxnet->peer_hash_lock);
 
-resched:
        rxnet->peer_keepalive_base = base;
        rxnet->peer_keepalive_cursor = cursor;
-       delay = nsecs_to_jiffies(-diff) + 1;
-       timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
-out:
+       rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
+       ASSERT(list_empty(&collector));
+
+       /* Schedule the timer for the next occupied timeslot. */
+       cursor = rxnet->peer_keepalive_cursor;
+       stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
+       for (; (s8)(cursor - stop) < 0; cursor++) {
+               if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
+                       break;
+               base++;
+       }
+
+       now = ktime_get_seconds();
+       delay = base - now;
+       if (delay < 1)
+               delay = 1;
+       delay *= HZ;
+       if (rxnet->live)
+               timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);
+
        _leave("");
 }
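
The rework turns the keepalive machinery into a conventional timer wheel: an array of per-second buckets indexed by a cursor, with a peer due at keepalive_at parked (keepalive_at - base) buckets ahead, and overdue peers pinged immediately and rearmed a full period out. A toy model of the bucket arithmetic (the constants mirror the 32-entry array and RXRPC_KEEPALIVE_TIME above):

/* Toy model of the keepalive timer wheel's bucket arithmetic. */
#include <stdio.h>

#define NR_BUCKETS 32 /* must be a power of two so & works as modulo */
#define KEEPALIVE_TIME 20

static unsigned int bucket_for(long long base, unsigned int cursor,
                               long long keepalive_at)
{
        long long slot = keepalive_at - base;

        /* Already due (or absurdly far out): ping now, rearm a period out. */
        if (keepalive_at <= base || keepalive_at > base + KEEPALIVE_TIME)
                slot = KEEPALIVE_TIME;
        return (cursor + (unsigned int)slot) & (NR_BUCKETS - 1);
}

int main(void)
{
        long long base = 1000;

        /* Due in 5s: lands 5 buckets ahead of the cursor. */
        printf("%u\n", bucket_for(base, 3, base + 5)); /* -> 8 */
        /* Already overdue: ping immediately, park a full period ahead. */
        printf("%u\n", bucket_for(base, 3, base - 2)); /* -> 23 */
        return 0;
}
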
index 1b7e8107b3ae8a144bc72d01f6e1277368b3cbad..24ec7cdcf332577180a0f59116d4fed140350395 100644 (file)
@@ -322,7 +322,7 @@ struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
        if (!peer) {
                peer = prealloc;
                hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
-               hlist_add_head(&peer->keepalive_link, &rxnet->peer_keepalive_new);
+               list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
        }
 
        spin_unlock(&rxnet->peer_hash_lock);
@@ -367,8 +367,8 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
                if (!peer) {
                        hash_add_rcu(rxnet->peer_hash,
                                     &candidate->hash_link, hash_key);
-                       hlist_add_head(&candidate->keepalive_link,
-                                      &rxnet->peer_keepalive_new);
+                       list_add_tail(&candidate->keepalive_link,
+                                     &rxnet->peer_keepalive_new);
                }
 
                spin_unlock_bh(&rxnet->peer_hash_lock);
@@ -441,7 +441,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
 
        spin_lock_bh(&rxnet->peer_hash_lock);
        hash_del_rcu(&peer->hash_link);
-       hlist_del_init(&peer->keepalive_link);
+       list_del_init(&peer->keepalive_link);
        spin_unlock_bh(&rxnet->peer_hash_lock);
 
        kfree_rcu(peer, rcu);
index 278ac0807a60a8664bbb825ccd06737a595d8631..47cb019c521a8ca01ead3d47e25a0474ae6282b1 100644 (file)
@@ -669,7 +669,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
                return -EAGAIN;
        }
 
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        _leave(" = 0");
        return 0;
 }
@@ -725,7 +725,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
                return -EAGAIN;
        }
 
-       conn->params.peer->last_tx_at = ktime_get_real();
+       conn->params.peer->last_tx_at = ktime_get_seconds();
        _leave(" = 0");
        return 0;
 }
index 05e4ffe5aabde6baa711b1396484cf037fbccaee..e7de5f282722d5dd41b77affc6bea0a490e3969d 100644 (file)
@@ -1122,6 +1122,8 @@ static void smc_tcp_listen_work(struct work_struct *work)
                sock_hold(lsk); /* sock_put in smc_listen_work */
                INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
                smc_copy_sock_settings_to_smc(new_smc);
+               new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
+               new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
                sock_hold(&new_smc->sk); /* sock_put in passive closing */
                if (!schedule_work(&new_smc->smc_listen_work))
                        sock_put(&new_smc->sk);
@@ -1397,8 +1399,7 @@ static int smc_shutdown(struct socket *sock, int how)
        lock_sock(sk);
 
        rc = -ENOTCONN;
-       if ((sk->sk_state != SMC_LISTEN) &&
-           (sk->sk_state != SMC_ACTIVE) &&
+       if ((sk->sk_state != SMC_ACTIVE) &&
            (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
            (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
            (sk->sk_state != SMC_APPCLOSEWAIT1) &&
@@ -1521,12 +1522,16 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
 
        smc = smc_sk(sock->sk);
        conn = &smc->conn;
+       lock_sock(&smc->sk);
        if (smc->use_fallback) {
-               if (!smc->clcsock)
+               if (!smc->clcsock) {
+                       release_sock(&smc->sk);
                        return -EBADF;
-               return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+               }
+               answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+               release_sock(&smc->sk);
+               return answ;
        }
-       lock_sock(&smc->sk);
        switch (cmd) {
        case SIOCINQ: /* same as FIONREAD */
                if (smc->sk.sk_state == SMC_LISTEN) {
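The ioctl change moves lock_sock() above the use_fallback test and adds release_sock() on each early return, so the fallback decision and the clcsock dereference happen under one consistent view of the socket. The shape of the pattern, condensed with hypothetical names:

	static int my_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
	{
		struct my_sock *ms = my_sk(sock->sk);	/* hypothetical */
		int answ = -ENOIOCTLCMD;

		lock_sock(&ms->sk);		/* lock before reading state */
		if (ms->use_fallback) {
			if (!ms->fallback_sock) {
				release_sock(&ms->sk);	/* unlock on error */
				return -EBADF;
			}
			answ = ms->fallback_sock->ops->ioctl(ms->fallback_sock,
							     cmd, arg);
			release_sock(&ms->sk);	/* unlock before returning */
			return answ;
		}
		/* ... locked native handling ... */
		release_sock(&ms->sk);
		return answ;
	}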
index a7f6964c3a4b725a7cd06411dc1b5f3d48df778d..62199cf5a56c04db99af54dad9fc2564df8d6b05 100644 (file)
@@ -123,15 +123,13 @@ void tipc_net_finalize(struct net *net, u32 addr)
 {
        struct tipc_net *tn = tipc_net(net);
 
-       spin_lock_bh(&tn->node_list_lock);
-       if (!tipc_own_addr(net)) {
+       if (!cmpxchg(&tn->node_addr, 0, addr)) {
                tipc_set_node_addr(net, addr);
                tipc_named_reinit(net);
                tipc_sk_reinit(net);
                tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
                                     TIPC_CLUSTER_SCOPE, 0, addr);
        }
-       spin_unlock_bh(&tn->node_list_lock);
 }
 
 void tipc_net_stop(struct net *net)
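Dropping node_list_lock works because cmpxchg() makes the "first caller wins" test atomic on its own: cmpxchg(&tn->node_addr, 0, addr) returns the old value, which is zero for exactly one caller, and only that caller runs the one-time initialisation. A minimal sketch of the idiom:

	/* One-shot initialisation guarded by cmpxchg(). */
	static u32 node_addr;

	static void finalize_addr(u32 addr)
	{
		if (!cmpxchg(&node_addr, 0, addr)) {
			/* old value was 0: we won the race, init once */
			one_time_setup(addr);	/* hypothetical helper */
		}
	}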
index c1076c19b8580688ff041f71aee0d05ce0906030..ab27a2872935774d41fb1f2c2f9341eb67c8cc0a 100644 (file)
@@ -451,14 +451,14 @@ static int vsock_send_shutdown(struct sock *sk, int mode)
        return transport->shutdown(vsock_sk(sk), mode);
 }
 
-void vsock_pending_work(struct work_struct *work)
+static void vsock_pending_work(struct work_struct *work)
 {
        struct sock *sk;
        struct sock *listener;
        struct vsock_sock *vsk;
        bool cleanup;
 
-       vsk = container_of(work, struct vsock_sock, dwork.work);
+       vsk = container_of(work, struct vsock_sock, pending_work.work);
        sk = sk_vsock(vsk);
        listener = vsk->listener;
        cleanup = true;
@@ -498,7 +498,6 @@ void vsock_pending_work(struct work_struct *work)
        sock_put(sk);
        sock_put(listener);
 }
-EXPORT_SYMBOL_GPL(vsock_pending_work);
 
 /**** SOCKET OPERATIONS ****/
 
@@ -597,6 +596,8 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
        return retval;
 }
 
+static void vsock_connect_timeout(struct work_struct *work);
+
 struct sock *__vsock_create(struct net *net,
                            struct socket *sock,
                            struct sock *parent,
@@ -638,6 +639,8 @@ struct sock *__vsock_create(struct net *net,
        vsk->sent_request = false;
        vsk->ignore_connecting_rst = false;
        vsk->peer_shutdown = 0;
+       INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
+       INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);
 
        psk = parent ? vsock_sk(parent) : NULL;
        if (parent) {
@@ -1117,7 +1120,7 @@ static void vsock_connect_timeout(struct work_struct *work)
        struct vsock_sock *vsk;
        int cancel = 0;
 
-       vsk = container_of(work, struct vsock_sock, dwork.work);
+       vsk = container_of(work, struct vsock_sock, connect_work.work);
        sk = sk_vsock(vsk);
 
        lock_sock(sk);
@@ -1221,9 +1224,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
                         * timeout fires.
                         */
                        sock_hold(sk);
-                       INIT_DELAYED_WORK(&vsk->dwork,
-                                         vsock_connect_timeout);
-                       schedule_delayed_work(&vsk->dwork, timeout);
+                       schedule_delayed_work(&vsk->connect_work, timeout);
 
                        /* Skip ahead to preserve error code set above. */
                        goto out_wait;
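The vsock fix replaces the shared vsk->dwork with two dedicated delayed-work items, initialised exactly once in __vsock_create(); each handler then recovers its socket with container_of() on its own field. Re-running INIT_DELAYED_WORK() at every schedule site, as the old code did, can corrupt a work item that is still queued. Condensed sketch, struct name hypothetical:

	struct my_vsock {
		struct delayed_work connect_work;	/* connect timeout */
		struct delayed_work pending_work;	/* pending cleanup */
	};

	static void connect_timeout_fn(struct work_struct *work)
	{
		struct my_vsock *vsk =
			container_of(work, struct my_vsock, connect_work.work);
		/* ... handle the timeout ... */
	}

	/* at creation time, exactly once: */
	INIT_DELAYED_WORK(&vsk->connect_work, connect_timeout_fn);
	INIT_DELAYED_WORK(&vsk->pending_work, pending_work_fn);

	/* each call site afterwards only schedules: */
	schedule_delayed_work(&vsk->connect_work, timeout);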
index a7a73ffe675b2a9e829a375e90fc3d055386dcf8..cb332adb84cdcadc006de6d7a8668111babd2f53 100644 (file)
@@ -1094,8 +1094,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
        vpending->listener = sk;
        sock_hold(sk);
        sock_hold(pending);
-       INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
-       schedule_delayed_work(&vpending->dwork, HZ);
+       schedule_delayed_work(&vpending->pending_work, HZ);
 
 out:
        return err;
index 303e9e7161f3169ebcad88974a0d549f01e7db30..4938dcbaecbfbb7414a824dc5b9f033ff72643e1 100644 (file)
@@ -14,7 +14,7 @@
 #include <uapi/linux/bpf.h>
 #include "bpf_helpers.h"
 
-#define MAX_CPUS 12 /* WARNING - sync with _user.c */
+#define MAX_CPUS 64 /* WARNING - sync with _user.c */
 
 /* Special map type that can XDP_REDIRECT frames to another CPU */
 struct bpf_map_def SEC("maps") cpu_map = {
index f6efaefd485b15929b04bf23346ef21934342d52..4b4d78fffe30427d26ee28a338ff6749dccb7448 100644 (file)
@@ -19,7 +19,7 @@ static const char *__doc__ =
 #include <arpa/inet.h>
 #include <linux/if_link.h>
 
-#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
+#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
 
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
         * procedure.
         */
        create_cpu_entry(1,  1024, 0, false);
-       create_cpu_entry(1,   128, 0, false);
+       create_cpu_entry(1,     8, 0, false);
        create_cpu_entry(1, 16000, 0, false);
 }
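Both halves of the xdp_redirect_cpu sample raise MAX_CPUS from 12 to 64, and the matching "WARNING - sync with" comments show how easily the two copies could drift. One way to remove the duplication, offered here only as a sketch and not what the sample actually does, is a shared header:

	/* xdp_redirect_cpu_common.h -- hypothetical shared header so the
	 * _kern.c and _user.c definitions of MAX_CPUS cannot diverge.
	 */
	#ifndef XDP_REDIRECT_CPU_COMMON_H
	#define XDP_REDIRECT_CPU_COMMON_H

	#define MAX_CPUS 64

	#endif /* XDP_REDIRECT_CPU_COMMON_H */

The stress_cpumap() change is independent: shrinking the middle queue from 128 to 8 entries makes the shrink/grow cycle exercise a much tighter bottleneck.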
 
index b593b36ccff86910644c63346af8e497346ebab4..38b2b4818e8ebfbe9d19deb0aebef5fdbcc19d28 100644 (file)
@@ -14,10 +14,6 @@ ifdef CONFIG_UBSAN_ALIGNMENT
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
 endif
 
-ifdef CONFIG_UBSAN_NULL
-      CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
-endif
-
       # -fsanitize=* options make GCC less smart than usual and
       # increase the number of 'maybe-uninitialized' false positives
       CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)
index 2d270c560df3982edc105fdb944d738be3c03024..c36a3a76986a5ee17ad51a876af10ddb06492fba 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: LGPL-2.1
 /* Copyright (c) 2018 Facebook */
 
 #include <stdlib.h>
index e2a09a155f84faf7031b93faf37003060ecbd455..caac3a404dc54221b46b2c4feb373801c311bddf 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: LGPL-2.1 */
 /* Copyright (c) 2018 Facebook */
 
 #ifndef __BPF_BTF_H
index 9e78df207919366fbdd048f7645568ac701ad5f5..0c7d9e556b47d0a8129586a634a423e4ad1af067 100644 (file)
@@ -354,7 +354,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
                while (s->bytes_recvd < total_bytes) {
                        if (txmsg_cork) {
                                timeout.tv_sec = 0;
-                               timeout.tv_usec = 1000;
+                               timeout.tv_usec = 300000;
                        } else {
                                timeout.tv_sec = 1;
                                timeout.tv_usec = 0;
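Raising the corked-receive timeout from 1 ms to 300 ms gives the kernel a realistic window to flush a cork before msg_loop() gives up. The timeval feeds an ordinary select() loop; a self-contained userspace sketch of that shape, with a hypothetical fd:

	#include <sys/select.h>

	/* Wait up to 300 ms for fd to become readable; >0 means ready. */
	static int wait_readable_300ms(int fd)
	{
		struct timeval timeout = { .tv_sec = 0, .tv_usec = 300000 };
		fd_set rfds;

		FD_ZERO(&rfds);
		FD_SET(fd, &rfds);
		return select(fd + 1, &rfds, NULL, NULL, &timeout);
	}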
index c15f270e121d61af0e80b19f8a69d47fe68e88de..65541c21a5444abbad5face676e223c0934de39e 100755 (executable)
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Usage: configinit.sh config-spec-file [ build output dir ]
+# Usage: configinit.sh config-spec-file build-output-dir results-dir
 #
 # Create a .config file from the spec file.  Run from the kernel source tree.
 # Exits with 0 if all went well, with 1 if all went well but the config
@@ -40,20 +40,18 @@ mkdir $T
 
 c=$1
 buildloc=$2
+resdir=$3
 builddir=
-if test -n $buildloc
+if echo $buildloc | grep -q '^O='
 then
-       if echo $buildloc | grep -q '^O='
+       builddir=`echo $buildloc | sed -e 's/^O=//'`
+       if test ! -d $builddir
        then
-               builddir=`echo $buildloc | sed -e 's/^O=//'`
-               if test ! -d $builddir
-               then
-                       mkdir $builddir
-               fi
-       else
-               echo Bad build directory: \"$buildloc\"
-               exit 2
+               mkdir $builddir
        fi
+else
+       echo Bad build directory: \"$buildloc\"
+       exit 2
 fi
 
 sed -e 's/^\(CONFIG[0-9A-Z_]*\)=.*$/grep -v "^# \1" |/' < $c > $T/u.sh
@@ -61,12 +59,12 @@ sed -e 's/^\(CONFIG[0-9A-Z_]*=\).*$/grep -v \1 |/' < $c >> $T/u.sh
 grep '^grep' < $T/u.sh > $T/upd.sh
 echo "cat - $c" >> $T/upd.sh
 make mrproper
-make $buildloc distclean > $builddir/Make.distclean 2>&1
-make $buildloc $TORTURE_DEFCONFIG > $builddir/Make.defconfig.out 2>&1
+make $buildloc distclean > $resdir/Make.distclean 2>&1
+make $buildloc $TORTURE_DEFCONFIG > $resdir/Make.defconfig.out 2>&1
 mv $builddir/.config $builddir/.config.sav
 sh $T/upd.sh < $builddir/.config.sav > $builddir/.config
 cp $builddir/.config $builddir/.config.new
-yes '' | make $buildloc oldconfig > $builddir/Make.oldconfig.out 2> $builddir/Make.oldconfig.err
+yes '' | make $buildloc oldconfig > $resdir/Make.oldconfig.out 2> $resdir/Make.oldconfig.err
 
 # verify new config matches specification.
 configcheck.sh $builddir/.config $c
index 34d126734cde9f1dd3109077e5810d491aa88562..9115fcdb5617cdcb7f72dbcf23bdd523f32528e6 100755 (executable)
@@ -2,7 +2,7 @@
 #
 # Build a kvm-ready Linux kernel from the tree in the current directory.
 #
-# Usage: kvm-build.sh config-template build-dir
+# Usage: kvm-build.sh config-template build-dir resdir
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -29,6 +29,7 @@ then
        exit 1
 fi
 builddir=${2}
+resdir=${3}
 
 T=${TMPDIR-/tmp}/test-linux.sh.$$
 trap 'rm -rf $T' 0
@@ -41,19 +42,19 @@ CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_CONSOLE=y
 ___EOF___
 
-configinit.sh $T/config O=$builddir
+configinit.sh $T/config O=$builddir $resdir
 retval=$?
 if test $retval -gt 1
 then
        exit 2
 fi
 ncpus=`cpus2use.sh`
-make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $builddir/Make.out 2>&1
+make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $resdir/Make.out 2>&1
 retval=$?
-if test $retval -ne 0 || grep "rcu[^/]*": < $builddir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $builddir/Make.out
+if test $retval -ne 0 || grep "rcu[^/]*": < $resdir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $resdir/Make.out
 then
        echo Kernel build error
-       egrep "Stop|Error|error:|warning:" < $builddir/Make.out
+       egrep "Stop|Error|error:|warning:" < $resdir/Make.out
        echo Run aborted.
        exit 3
 fi
index 477ecb1293ab273ba60094c911031603740c44c3..0fa8a61ccb7b254baa29ea8fdf30b0dd28da2246 100755 (executable)
@@ -70,4 +70,5 @@ else
        else
                print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
        fi
+       echo $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i > $i/console.log.rcu.diags
 fi
index c27e97824163e3c48a911455ab295d0d6a4dc00d..c9bab57a77ebafe98d01809f19508c7d589ccf97 100755 (executable)
@@ -39,6 +39,7 @@ do
                        head -1 $resdir/log
                fi
                TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
+               rm -f $i/console.log.*.diags
                kvm-recheck-${TORTURE_SUITE}.sh $i
                if test -f "$i/console.log"
                then
index c5b0f94341d9a686d16b9eb52a3ecc6e5d990fc0..f7247ee00514d97b368e91338721fd8090b8a98c 100755 (executable)
@@ -98,14 +98,15 @@ then
        ln -s $base_resdir/.config $resdir  # for kvm-recheck.sh
        # Arch-independent indicator
        touch $resdir/builtkernel
-elif kvm-build.sh $T/Kc2 $builddir
+elif kvm-build.sh $T/Kc2 $builddir $resdir
 then
        # Had to build a kernel for this test.
        QEMU="`identify_qemu $builddir/vmlinux`"
        BOOT_IMAGE="`identify_boot_image $QEMU`"
-       cp $builddir/Make*.out $resdir
        cp $builddir/vmlinux $resdir
        cp $builddir/.config $resdir
+       cp $builddir/Module.symvers $resdir > /dev/null || :
+       cp $builddir/System.map $resdir > /dev/null || :
        if test -n "$BOOT_IMAGE"
        then
                cp $builddir/$BOOT_IMAGE $resdir
index 56610dbbdf73ffc6e390015a538df89eb6096830..5a7a62d76a50b91234b82fb1455ffc0d66f637a6 100755 (executable)
@@ -347,7 +347,7 @@ function dump(first, pastlast, batchnum)
        print "needqemurun="
        jn=1
        for (j = first; j < pastlast; j++) {
-               builddir=KVM "/b" jn
+               builddir=KVM "/b1"
                cpusr[jn] = cpus[j];
                if (cfrep[cf[j]] == "") {
                        cfr[jn] = cf[j];
index 17293436f5518008e2619c3dec09217eecd7f4d9..84933f6aed77818d7e368f171ad544141578493f 100755 (executable)
@@ -163,6 +163,13 @@ then
        print_warning Summary: $summary
        cat $T.diags >> $file.diags
 fi
+for i in $file.*.diags
+do
+       if test -f "$i"
+       then
+               cat $i >> $file.diags
+       fi
+done
 if ! test -s $file.diags
 then
        rm -f $file.diags
index 5d2cc0bd50a0963efb0348d264114f60b30d91a8..5c3213cc3ad707feabd47dd691a42cecd5003b4a 100644 (file)
@@ -1,5 +1,5 @@
-rcutorture.onoff_interval=1 rcutorture.onoff_holdoff=30
-rcutree.gp_preinit_delay=3
+rcutorture.onoff_interval=200 rcutorture.onoff_holdoff=30
+rcutree.gp_preinit_delay=12
 rcutree.gp_init_delay=3
 rcutree.gp_cleanup_delay=3
 rcutree.kthread_prio=2
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot
deleted file mode 100644 (file)
index 883149b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-rcutree.rcu_fanout_exact=1
index 24ec910419576879ed48a75e448cebc57c13aece..7bab8246392bb21f982e3803f77860717513a084 100644 (file)
@@ -39,7 +39,7 @@ rcutorture_param_onoff () {
        if ! bootparam_hotplug_cpu "$1" && configfrag_hotplug_cpu "$2"
        then
                echo CPU-hotplug kernel, adding rcutorture onoff. 1>&2
-               echo rcutorture.onoff_interval=3 rcutorture.onoff_holdoff=30
+               echo rcutorture.onoff_interval=1000 rcutorture.onoff_holdoff=30
        fi
 }