unsigned long nmi_dropped;
unsigned long commit_overrun;
unsigned long overrun;
- unsigned long entries;
+ unsigned long read;
+ local_t entries;
u64 write_stamp;
u64 read_stamp;
atomic_t record_disabled;
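The single entries counter is split in two here: entries becomes a writer-side count of committed events and is converted to local_t, while the new read field counts events the reader has consumed. local_t (from <asm/local.h>) provides counter updates that are atomic with respect to interrupts on the same CPU without a lock or a fully atomic operation. A minimal sketch of the local_t operations this patch relies on; the names below are illustrative, not part of the patch:

#include <asm/local.h>	/* local_t, local_inc(), local_read(), local_set() */

/* Illustrative counters in the style of the new fields. */
static local_t demo_written;		/* bumped by the writer on its own CPU */
static unsigned long demo_read;		/* bumped by the serialized reader */

static void demo_on_commit(void)
{
	/* Safe against interrupts on this CPU; no lock needed. */
	local_inc(&demo_written);
}

static void demo_reset(void)
{
	local_set(&demo_written, 0);
	demo_read = 0;
}

static unsigned long demo_outstanding(void)
{
	/* Anyone may sample the writer's counter with local_read(). */
	return local_read(&demo_written) - demo_read;
}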
if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
continue;
cpu_buffer->overrun++;
- cpu_buffer->entries--;
}
}
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
- cpu_buffer->entries++;
+ local_inc(&cpu_buffer->entries);
/* Only process further if we own the commit */
if (!rb_is_commit(cpu_buffer, event))
* The commit is still visible by the reader, so we
* must increment entries.
*/
- cpu_buffer->entries++;
+ local_inc(&cpu_buffer->entries);
out:
/*
* If a write came in and pushed the tail page
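For context, the second fragment above is the discard path: a writer can reserve an event and then throw it away instead of committing it, and, as the comment says, a discarded event that stays visible to the reader must still be counted in entries (the read side balances this via rb_discarded_event(), as seen further down). A hedged sketch of how a writer reaches that path, assuming the reserve/discard API of this kernel generation; the filter helper is made up for the example:

struct ring_buffer_event *event;
int *payload;

/* Reserve room for one integer in the per-CPU buffer. */
event = ring_buffer_lock_reserve(buffer, sizeof(*payload));
if (!event)
	return;

payload = ring_buffer_event_data(event);
*payload = 42;

if (demo_filter_rejects(*payload)) {	/* hypothetical filter */
	/* Discard instead of committing; if the event cannot be
	 * removed it remains visible as a discarded event and
	 * stays counted, per the hunk above. */
	ring_buffer_discard_commit(buffer, event);
} else {
	ring_buffer_unlock_commit(buffer, event);
}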
return 0;
cpu_buffer = buffer->buffers[cpu];
- ret = cpu_buffer->entries;
+ ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
+ - cpu_buffer->read;
return ret;
}
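With the counters split, the number of events still sitting in a per-CPU buffer is derived when it is asked for rather than kept up to date on every overwrite, which is also why the entries-- in the overrun loop near the top of this patch could simply be dropped. A worked sketch of the arithmetic used in this hunk (the helper name is hypothetical):

/*
 * Example: the writer has committed 10 events (entries = 10), 3 of
 * them were overwritten before anyone read them (overrun = 3) and the
 * reader has consumed 2 (read = 2); 10 - 3 - 2 = 5 events remain.
 */
static unsigned long demo_entries_in_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	return (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
		- cpu_buffer->read;
}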
/* if you care about this being correct, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
- entries += cpu_buffer->entries;
+ entries += (local_read(&cpu_buffer->entries) -
+ cpu_buffer->overrun) - cpu_buffer->read;
}
return entries;
if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
|| rb_discarded_event(event))
- cpu_buffer->entries--;
+ cpu_buffer->read++;
rb_update_read_stamp(cpu_buffer, event);
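On the read side, read is advanced only for events that the write side counted: real data events (type_len no greater than RINGBUF_TYPE_DATA_TYPE_LEN_MAX) and events that were discarded after being made visible, which rb_discarded_event() detects, so the two counters stay in step. The same test written as a hypothetical predicate:

/* Hypothetical helper: should this event be charged to cpu_buffer->read? */
static int demo_event_is_counted(struct ring_buffer_event *event)
{
	return event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX ||
		rb_discarded_event(event);
}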
cpu_buffer->nmi_dropped = 0;
cpu_buffer->commit_overrun = 0;
cpu_buffer->overrun = 0;
- cpu_buffer->entries = 0;
+ cpu_buffer->read = 0;
+ local_set(&cpu_buffer->entries, 0);
cpu_buffer->write_stamp = 0;
cpu_buffer->read_stamp = 0;
/* Only count data entries */
if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
continue;
- cpu_buffer->entries--;
+ cpu_buffer->read++;
}
__raw_spin_unlock(&cpu_buffer->lock);
}