};
/*
- * The logbuf_lock protects kmsg buffer, indices, counters. It is also
+ * The printk_logbuf_lock protects kmsg buffer, indices, counters. It is also
* used in interesting ways to provide interlocking in console_unlock();
*/
-static DEFINE_RAW_SPINLOCK(logbuf_lock);
+static DEFINE_RAW_SPINLOCK(printk_logbuf_lock);
#ifdef CONFIG_PRINTK
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static char *printk_log_buf = __printk_log_buf;
static u32 printk_log_buf_len = __PRINTK_LOG_BUF_LEN;
-/* cpu currently holding logbuf_lock */
+/* cpu currently holding printk_logbuf_lock */
static volatile unsigned int logbuf_cpu = UINT_MAX;
/* human readable text of the record */
ret = mutex_lock_interruptible(&user->lock);
if (ret)
return ret;
- raw_spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&printk_logbuf_lock);
while (user->seq == printk_log_next_seq) {
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&printk_logbuf_lock);
goto out;
}
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&printk_logbuf_lock);
ret = wait_event_interruptible(printk_log_wait,
user->seq != printk_log_next_seq);
if (ret)
goto out;
- raw_spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&printk_logbuf_lock);
}
if (user->seq < printk_log_first_seq) {
user->idx = printk_log_first_idx;
user->seq = printk_log_first_seq;
ret = -EPIPE;
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&printk_logbuf_lock);
goto out;
}
user->idx = printk_log_next(user->idx);
user->seq++;
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&printk_logbuf_lock);
if (len > count) {
ret = -EINVAL;
if (offset)
return -ESPIPE;
- raw_spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&printk_logbuf_lock);
switch (whence) {
case SEEK_SET:
/* the first record */
default:
ret = -EINVAL;
}
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&printk_logbuf_lock);
return ret;
}
poll_wait(file, &printk_log_wait, wait);
- raw_spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&printk_logbuf_lock);
if (user->seq < printk_log_next_seq) {
/* return error when data has vanished underneath us */
if (user->seq < printk_log_first_seq)
ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
ret = POLLIN|POLLRDNORM;
}
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&printk_logbuf_lock);
return ret;
}
mutex_init(&user->lock);
- raw_spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&printk_logbuf_lock);
user->idx = printk_log_first_idx;
user->seq = printk_log_first_seq;
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&printk_logbuf_lock);
file->private_data = user;
return 0;
return;
}
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&printk_logbuf_lock, flags);
printk_log_buf_len = new_printk_log_buf_len;
printk_log_buf = new_printk_log_buf;
new_printk_log_buf_len = 0;
free = __PRINTK_LOG_BUF_LEN - printk_log_next_idx;
memcpy(printk_log_buf, __printk_log_buf, __PRINTK_LOG_BUF_LEN);
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&printk_logbuf_lock, flags);
pr_info("printk_log_buf_len: %d\n", printk_log_buf_len);
pr_info("early log buf free: %d(%d%%)\n",
size_t n;
size_t skip;
- raw_spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&printk_logbuf_lock);
if (syslog_seq < printk_log_first_seq) {
/* messages are gone, move to first one */
syslog_seq = printk_log_first_seq;
syslog_partial = 0;
}
if (syslog_seq == printk_log_next_seq) {
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&printk_logbuf_lock);
break;
}
syslog_partial += n;
} else
n = 0;
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&printk_logbuf_lock);
if (!n)
break;
if (!text)
return -ENOMEM;
- raw_spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&printk_logbuf_lock);
if (buf) {
u64 next_seq;
u64 seq;
seq++;
prev = msg->flags;
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&printk_logbuf_lock);
if (copy_to_user(buf + len, text, textlen))
len = -EFAULT;
else
len += textlen;
- raw_spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&printk_logbuf_lock);
if (seq < printk_log_first_seq) {
/* messages are gone, move to next one */
clear_seq = printk_log_next_seq;
clear_idx = printk_log_next_idx;
}
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&printk_logbuf_lock);
kfree(text);
return len;
break;
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
- raw_spin_lock_irq(&logbuf_lock);
+ raw_spin_lock_irq(&printk_logbuf_lock);
if (syslog_seq < printk_log_first_seq) {
/* messages are gone, move to first one */
syslog_seq = printk_log_first_seq;
}
error -= syslog_partial;
}
- raw_spin_unlock_irq(&logbuf_lock);
+ raw_spin_unlock_irq(&printk_logbuf_lock);
break;
/* Size of the log buffer */
case SYSLOG_ACTION_SIZE_BUFFER:
debug_locks_off();
/* If a crash is occurring, make sure we can't deadlock */
- raw_spin_lock_init(&logbuf_lock);
+ raw_spin_lock_init(&printk_logbuf_lock);
/* And make sure that we print immediately */
sema_init(&console_sem, 1);
}
* console_lock held, and 'console_locked' set) if it
* is successful, false otherwise.
*
- * This gets called with the 'logbuf_lock' spinlock held and
+ * This gets called with the 'printk_logbuf_lock' spinlock held and
- * interrupts disabled. It should return with 'lockbuf_lock'
+ * interrupts disabled. It should return with 'printk_logbuf_lock'
* released but interrupts still disabled.
*/
static int console_trylock_for_printk(unsigned int cpu)
- __releases(&logbuf_lock)
+ __releases(&printk_logbuf_lock)
{
int retval = 0, wake = 0;
logbuf_cpu = UINT_MAX;
if (wake)
up(&console_sem);
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock(&printk_logbuf_lock);
return retval;
}
}
lockdep_off();
- raw_spin_lock(&logbuf_lock);
+ raw_spin_lock(&printk_logbuf_lock);
logbuf_cpu = this_cpu;
if (recursion_bug) {
* The release will print out buffers and wake up /dev/kmsg and syslog()
* users.
*
- * The console_trylock_for_printk() function will release 'logbuf_lock'
+ * The console_trylock_for_printk() function will release 'printk_logbuf_lock'
* regardless of whether it actually gets the console semaphore or not.
*/
if (console_trylock_for_printk(this_cpu))
unsigned long flags;
size_t len;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&printk_logbuf_lock, flags);
if (!cont.len)
goto out;
goto out;
len = cont_print_text(text, size);
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock(&printk_logbuf_lock);
stop_critical_timings();
call_console_drivers(cont.level, text, len);
start_critical_timings();
local_irq_restore(flags);
return;
out:
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&printk_logbuf_lock, flags);
}
/**
size_t len;
int level;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&printk_logbuf_lock, flags);
if (seen_seq != printk_log_next_seq) {
wake_klogd = true;
seen_seq = printk_log_next_seq;
console_idx = printk_log_next(console_idx);
console_seq++;
console_prev = msg->flags;
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock(&printk_logbuf_lock);
stop_critical_timings(); /* don't trace print latency */
call_console_drivers(level, text, len);
if (unlikely(exclusive_console))
exclusive_console = NULL;
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock(&printk_logbuf_lock);
up(&console_sem);
* there's a new owner and the console_unlock() from them will do the
* flush, no worries.
*/
- raw_spin_lock(&logbuf_lock);
+ raw_spin_lock(&printk_logbuf_lock);
retry = console_seq != printk_log_next_seq;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&printk_logbuf_lock, flags);
if (retry && console_trylock())
goto again;
* console_unlock(); will print out the buffered messages
* for us.
*/
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&printk_logbuf_lock, flags);
console_seq = syslog_seq;
console_idx = syslog_idx;
console_prev = syslog_prev;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&printk_logbuf_lock, flags);
/*
* We're about to replay the log buffer. Only do this to the
* just-registered console to avoid excessive message spam to
/* initialize iterator with data about the stored records */
dumper->active = true;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&printk_logbuf_lock, flags);
dumper->cur_seq = clear_seq;
dumper->cur_idx = clear_idx;
dumper->next_seq = printk_log_next_seq;
dumper->next_idx = printk_log_next_idx;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&printk_logbuf_lock, flags);
/* invoke dumper which will iterate over records */
dumper->dump(dumper, reason);
unsigned long flags;
bool ret;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&printk_logbuf_lock, flags);
ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&printk_logbuf_lock, flags);
return ret;
}
if (!dumper->active)
goto out;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&printk_logbuf_lock, flags);
if (dumper->cur_seq < printk_log_first_seq) {
/* messages are gone, move to first available one */
dumper->cur_seq = printk_log_first_seq;
/* last entry */
if (dumper->cur_seq >= dumper->next_seq) {
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&printk_logbuf_lock, flags);
goto out;
}
dumper->next_seq = next_seq;
dumper->next_idx = next_idx;
ret = true;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&printk_logbuf_lock, flags);
out:
if (len)
*len = l;
{
unsigned long flags;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
+ raw_spin_lock_irqsave(&printk_logbuf_lock, flags);
kmsg_dump_rewind_nolock(dumper);
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ raw_spin_unlock_irqrestore(&printk_logbuf_lock, flags);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
#endif