[linux.git] / kernel/trace/ring_buffer.c
index 4f3247a532598efcd62ec3aba09ec129bedd3904..06e864a334bba16789df45fb0c627721d0d65155 100644
@@ -487,6 +487,10 @@ struct ring_buffer_per_cpu {
        local_t                         dropped_events;
        local_t                         committing;
        local_t                         commits;
+       local_t                         pages_touched;
+       local_t                         pages_read;
+       long                            last_pages_touch;
+       size_t                          shortest_full;
        unsigned long                   read;
        unsigned long                   read_bytes;
        u64                             write_stamp;
@@ -529,6 +533,41 @@ struct ring_buffer_iter {
        u64                             read_stamp;
 };
 
+/**
+ * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
+ * @buffer: The ring_buffer to get the number of pages from
+ * @cpu: The cpu of the ring_buffer to get the number of pages from
+ *
+ * Returns the number of pages used by a per_cpu buffer of the ring buffer.
+ */
+size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
+{
+       return buffer->buffers[cpu]->nr_pages;
+}
+
+/**
+ * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
+ * @buffer: The ring_buffer to get the number of pages from
+ * @cpu: The cpu of the ring_buffer to get the number of pages from
+ *
+ * Returns the number of pages that have content in the ring buffer.
+ */
+size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
+{
+       size_t read;
+       size_t cnt;
+
+       read = local_read(&buffer->buffers[cpu]->pages_read);
+       cnt = local_read(&buffer->buffers[cpu]->pages_touched);
+       /* The reader can read an empty page, but not more than that */
+       if (cnt < read) {
+               WARN_ON_ONCE(read > cnt + 1);
+               return 0;
+       }
+
+       return cnt - read;
+}
+
 /*
  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
  *
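A minimal usage sketch for the two new helpers above (illustrative only, not part of this patch; buffer and cpu are assumed to be in scope):

        size_t nr_pages = ring_buffer_nr_pages(buffer, cpu);
        size_t dirty    = ring_buffer_nr_dirty_pages(buffer, cpu);
        /* How full this per-cpu buffer is, as an integer percentage. */
        size_t percent  = nr_pages ? (dirty * 100) / nr_pages : 0;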
@@ -556,7 +595,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
  * as data is added to any of the @buffer's cpu buffers. Otherwise
  * it will wait for data to be added to a specific cpu buffer.
  */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
+int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
 {
        struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
        DEFINE_WAIT(wait);
@@ -571,7 +610,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
        if (cpu == RING_BUFFER_ALL_CPUS) {
                work = &buffer->irq_work;
                /* Full only makes sense on per cpu reads */
-               full = false;
+               full = 0;
        } else {
                if (!cpumask_test_cpu(cpu, buffer->cpumask))
                        return -ENODEV;
@@ -623,15 +662,22 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
                    !ring_buffer_empty_cpu(buffer, cpu)) {
                        unsigned long flags;
                        bool pagebusy;
+                       size_t nr_pages;
+                       size_t dirty;
 
                        if (!full)
                                break;
 
                        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
                        pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+                       nr_pages = cpu_buffer->nr_pages;
+                       dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
+                       if (!cpu_buffer->shortest_full ||
+                           cpu_buffer->shortest_full < full)
+                               cpu_buffer->shortest_full = full;
                        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
-
-                       if (!pagebusy)
+                       if (!pagebusy &&
+                           (!nr_pages || (dirty * 100) > full * nr_pages))
                                break;
                }
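With this change, full is no longer a boolean but the percentage of the per-cpu buffer that must be dirty before the waiter is woken. A hedged usage sketch (the caller, the error handling and the 50% figure are illustrative assumptions, not taken from this diff):

        /*
         * Sleep until CPU 0's buffer is more than 50% full. Passing 0 keeps
         * the old behaviour of waking on any data, and full is forced to 0
         * for RING_BUFFER_ALL_CPUS, where a per-cpu watermark makes no sense.
         */
        int ret = ring_buffer_wait(buffer, 0, 50);

        if (ret)        /* -ENODEV or -EINTR from ring_buffer_wait() */
                return ret;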
 
@@ -1054,6 +1100,7 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
        old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
        old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
 
+       local_inc(&cpu_buffer->pages_touched);
        /*
         * Just make sure we have seen our old_write and synchronize
         * with any interrupts that come in.
@@ -2586,7 +2633,9 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 static __always_inline void
 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
-       bool pagebusy;
+       size_t nr_pages;
+       size_t dirty;
+       size_t full;
 
        if (buffer->irq_work.waiters_pending) {
                buffer->irq_work.waiters_pending = false;
@@ -2600,14 +2649,27 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
                irq_work_queue(&cpu_buffer->irq_work.work);
        }
 
-       pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
+       if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
+               return;
 
-       if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
-               cpu_buffer->irq_work.wakeup_full = true;
-               cpu_buffer->irq_work.full_waiters_pending = false;
-               /* irq_work_queue() supplies it's own memory barriers */
-               irq_work_queue(&cpu_buffer->irq_work.work);
-       }
+       if (cpu_buffer->reader_page == cpu_buffer->commit_page)
+               return;
+
+       if (!cpu_buffer->irq_work.full_waiters_pending)
+               return;
+
+       cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
+
+       full = cpu_buffer->shortest_full;
+       nr_pages = cpu_buffer->nr_pages;
+       dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
+       if (full && nr_pages && (dirty * 100) <= full * nr_pages)
+               return;
+
+       cpu_buffer->irq_work.wakeup_full = true;
+       cpu_buffer->irq_work.full_waiters_pending = false;
+       /* irq_work_queue() supplies its own memory barriers */
+       irq_work_queue(&cpu_buffer->irq_work.work);
 }
 
 /*
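To make the shared watermark arithmetic concrete, here is a small standalone check (illustrative only, not part of the patch) mirroring the integer comparison that rb_wakeups() and ring_buffer_wait() now use; the 128-page / 25% figures are assumptions:

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdio.h>

        /* Mirrors "(dirty * 100) <= full * nr_pages": no wakeup below the watermark. */
        static bool wake_full_waiters(size_t dirty, size_t nr_pages, size_t full)
        {
                if (full && nr_pages && dirty * 100 <= full * nr_pages)
                        return false;
                return true;
        }

        int main(void)
        {
                /* With 128 pages and a 25% watermark the wakeup fires at the
                 * 33rd dirty page: 32 * 100 <= 3200, but 33 * 100 > 3200. */
                printf("%d %d\n", wake_full_waiters(32, 128, 25),
                       wake_full_waiters(33, 128, 25));
                return 0;
        }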
@@ -3732,13 +3794,15 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
                goto spin;
 
        /*
-        * Yeah! We succeeded in replacing the page.
+        * Yay! We succeeded in replacing the page.
         *
         * Now make the new head point back to the reader page.
         */
        rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
        rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
 
+       local_inc(&cpu_buffer->pages_read);
+
        /* Finally update the reader page to the new head */
        cpu_buffer->reader_page = reader;
        cpu_buffer->reader_page->read = 0;
@@ -4334,6 +4398,10 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
        local_set(&cpu_buffer->entries, 0);
        local_set(&cpu_buffer->committing, 0);
        local_set(&cpu_buffer->commits, 0);
+       local_set(&cpu_buffer->pages_touched, 0);
+       local_set(&cpu_buffer->pages_read, 0);
+       cpu_buffer->last_pages_touch = 0;
+       cpu_buffer->shortest_full = 0;
        cpu_buffer->read = 0;
        cpu_buffer->read_bytes = 0;