diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index 36e1f82faed15cb8e3bbfb3b7705bcdf29b5da71..1a1c1047d2edecaba7691d8c6f9fd3851c3c434a 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -38,8 +38,8 @@ static int init_srcu_struct_fields(struct srcu_struct *sp)
        sp->srcu_lock_nesting[0] = 0;
        sp->srcu_lock_nesting[1] = 0;
        init_swait_queue_head(&sp->srcu_wq);
-       sp->srcu_gp_seq = 0;
-       rcu_segcblist_init(&sp->srcu_cblist);
+       sp->srcu_cb_head = NULL;
+       sp->srcu_cb_tail = &sp->srcu_cb_head;
        sp->srcu_gp_running = false;
        sp->srcu_gp_waiting = false;
        sp->srcu_idx = 0;
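
The two fields added above replace the segmented callback list with a plain singly linked list that keeps a tail pointer: ->srcu_cb_head points at the oldest pending callback (or is NULL), and ->srcu_cb_tail always points at the location holding the newest callback's ->next pointer, which is &->srcu_cb_head while the list is empty. A minimal sketch of that invariant, using an illustrative wrapper struct rather than the kernel's struct srcu_struct:

struct cb_list {
        struct rcu_head *head;          /* Oldest callback, or NULL when empty. */
        struct rcu_head **tail;         /* &head when empty, else &newest->next. */
};

static void cb_list_init(struct cb_list *l)
{
        l->head = NULL;
        l->tail = &l->head;             /* Same pair of stores as init_srcu_struct_fields() above. */
}

Keeping the tail as a pointer-to-pointer means enqueue never needs a special case for the empty list.
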
@@ -88,31 +88,16 @@ void cleanup_srcu_struct(struct srcu_struct *sp)
 {
        WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]);
        flush_work(&sp->srcu_work);
-       WARN_ON(rcu_seq_state(sp->srcu_gp_seq));
        WARN_ON(sp->srcu_gp_running);
        WARN_ON(sp->srcu_gp_waiting);
-       WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist));
+       WARN_ON(sp->srcu_cb_head);
+       WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail);
 }
 EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
-/*
- * Counts the new reader in the appropriate per-CPU element of the
- * srcu_struct.  Must be called from process context.
- * Returns an index that must be passed to the matching srcu_read_unlock().
- */
-int __srcu_read_lock(struct srcu_struct *sp)
-{
-       int idx;
-
-       idx = READ_ONCE(sp->srcu_idx);
-       WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1);
-       return idx;
-}
-EXPORT_SYMBOL_GPL(__srcu_read_lock);
-
 /*
  * Removes the count for the old reader from the appropriate element of
- * the srcu_struct.  Must be called from process context.
+ * the srcu_struct.
  */
 void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 {
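
For context, __srcu_read_lock() (deleted here) and __srcu_read_unlock() are the internals behind the srcu_read_lock()/srcu_read_unlock() wrappers. A typical reader, which the WARN_ON()s in cleanup_srcu_struct() above would flag if it were still outstanding at cleanup time, looks roughly like the following; my_srcu, gp, and struct foo are placeholders rather than anything in this file:

        int idx;
        struct foo *p;

        idx = srcu_read_lock(&my_srcu);         /* Bumps ->srcu_lock_nesting[->srcu_idx]. */
        p = srcu_dereference(gp, &my_srcu);
        /* ... read-side use of p ... */
        srcu_read_unlock(&my_srcu, idx);        /* May wake a waiting srcu_drive_gp(). */
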
@@ -132,52 +117,44 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 void srcu_drive_gp(struct work_struct *wp)
 {
        int idx;
-       struct rcu_cblist ready_cbs;
-       struct srcu_struct *sp;
+       struct rcu_head *lh;
        struct rcu_head *rhp;
+       struct srcu_struct *sp;
 
        sp = container_of(wp, struct srcu_struct, srcu_work);
-       if (sp->srcu_gp_running || rcu_segcblist_empty(&sp->srcu_cblist))
+       if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
                return; /* Already running or nothing to do. */
 
-       /* Tag recently arrived callbacks and wait for readers. */
+       /* Remove recently arrived callbacks and wait for readers. */
        WRITE_ONCE(sp->srcu_gp_running, true);
-       rcu_segcblist_accelerate(&sp->srcu_cblist,
-                                rcu_seq_snap(&sp->srcu_gp_seq));
-       rcu_seq_start(&sp->srcu_gp_seq);
+       local_irq_disable();
+       lh = sp->srcu_cb_head;
+       sp->srcu_cb_head = NULL;
+       sp->srcu_cb_tail = &sp->srcu_cb_head;
+       local_irq_enable();
        idx = sp->srcu_idx;
        WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
        WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
        swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
        WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
-       rcu_seq_end(&sp->srcu_gp_seq);
-
-       /* Update callback list based on GP, and invoke ready callbacks. */
-       rcu_segcblist_advance(&sp->srcu_cblist,
-                             rcu_seq_current(&sp->srcu_gp_seq));
-       if (rcu_segcblist_ready_cbs(&sp->srcu_cblist)) {
-               rcu_cblist_init(&ready_cbs);
-               local_irq_disable();
-               rcu_segcblist_extract_done_cbs(&sp->srcu_cblist, &ready_cbs);
-               local_irq_enable();
-               rhp = rcu_cblist_dequeue(&ready_cbs);
-               for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
-                       local_bh_disable();
-                       rhp->func(rhp);
-                       local_bh_enable();
-               }
-               local_irq_disable();
-               rcu_segcblist_insert_count(&sp->srcu_cblist, &ready_cbs);
-               local_irq_enable();
+
+       /* Invoke the callbacks we removed above. */
+       while (lh) {
+               rhp = lh;
+               lh = lh->next;
+               local_bh_disable();
+               rhp->func(rhp);
+               local_bh_enable();
        }
-       WRITE_ONCE(sp->srcu_gp_running, false);
 
        /*
-        * If more callbacks, reschedule ourselves.  This can race with
-        * a call_srcu() at interrupt level, but the ->srcu_gp_running
-        * checks will straighten that out.
+        * Enable rescheduling, and if there are more callbacks,
+        * reschedule ourselves.  This can race with a call_srcu()
+        * at interrupt level, but the ->srcu_gp_running checks will
+        * straighten that out.
         */
-       if (!rcu_segcblist_empty(&sp->srcu_cblist))
+       WRITE_ONCE(sp->srcu_gp_running, false);
+       if (READ_ONCE(sp->srcu_cb_head))
                schedule_work(&sp->srcu_work);
 }
 EXPORT_SYMBOL_GPL(srcu_drive_gp);
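
The handoff around swait_event() above pairs with __srcu_read_unlock(): the work handler advertises that it is waiting, then sleeps until the reader count for the old index drains to zero, and the last exiting reader wakes it. The unlock side is outside the hunks shown here, so the sketch below is an assumption based on the "/* srcu_read_unlock() wakes! */" comment rather than code taken from this diff:

        /* Grace-period side, as in srcu_drive_gp() above. */
        WRITE_ONCE(sp->srcu_gp_waiting, true);          /* Ask exiting readers to wake us. */
        swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
        WRITE_ONCE(sp->srcu_gp_waiting, false);

        /* Reader-exit side, __srcu_read_unlock() (assumed shape, not shown in this diff). */
        WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] - 1);
        if (!READ_ONCE(sp->srcu_lock_nesting[idx]) && READ_ONCE(sp->srcu_gp_waiting))
                swake_up(&sp->srcu_wq);                 /* Wake the waiter in srcu_drive_gp(). */
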
@@ -186,14 +163,16 @@ EXPORT_SYMBOL_GPL(srcu_drive_gp);
  * Enqueue an SRCU callback on the specified srcu_struct structure,
  * initiating grace-period processing if it is not already running.
  */
-void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
+void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
               rcu_callback_t func)
 {
        unsigned long flags;
 
-       head->func = func;
+       rhp->func = func;
+       rhp->next = NULL;
        local_irq_save(flags);
-       rcu_segcblist_enqueue(&sp->srcu_cblist, head, false);
+       *sp->srcu_cb_tail = rhp;
+       sp->srcu_cb_tail = &rhp->next;
        local_irq_restore(flags);
        if (!READ_ONCE(sp->srcu_gp_running))
                schedule_work(&sp->srcu_work);
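
Taken together, call_srcu() appends in O(1) at *->srcu_cb_tail and srcu_drive_gp() detaches the whole list before invoking it, so neither side walks the list with interrupts disabled. A standalone user-space model of that list discipline (field names mirror the diff, but nothing below is kernel code):

#include <stdio.h>

struct rcu_head {
        struct rcu_head *next;
        void (*func)(struct rcu_head *rhp);
};

static struct rcu_head *cb_head;                /* Oldest pending callback, or NULL. */
static struct rcu_head **cb_tail = &cb_head;    /* ->next of the newest callback, or &cb_head. */

static void cb_enqueue(struct rcu_head *rhp, void (*func)(struct rcu_head *))
{
        rhp->func = func;
        rhp->next = NULL;
        *cb_tail = rhp;                         /* O(1) append; empty list needs no special case. */
        cb_tail = &rhp->next;
}

static void cb_drain(void)
{
        struct rcu_head *lh = cb_head;          /* Detach the whole list... */

        cb_head = NULL;
        cb_tail = &cb_head;                     /* ...restoring the empty-list invariant. */
        while (lh) {
                struct rcu_head *rhp = lh;

                lh = lh->next;
                rhp->func(rhp);                 /* Invoke in enqueue (FIFO) order. */
        }
}

static void say_done(struct rcu_head *rhp)
{
        printf("callback %p invoked\n", (void *)rhp);
}

int main(void)
{
        struct rcu_head a, b;

        cb_enqueue(&a, say_done);
        cb_enqueue(&b, say_done);
        cb_drain();                             /* Invokes a, then b. */
        return 0;
}
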