struct xlog_ticket *tic;
int need_bytes;
- list_for_each_entry(tic, &log->l_reserveq, t_queue) {
+ list_for_each_entry(tic, &log->l_reserve_head.waiters, t_queue) {
if (tic->t_flags & XLOG_TIC_PERM_RESERV)
need_bytes = tic->t_unit_res * tic->t_cnt;
else
struct xlog_ticket *tic;
int need_bytes;
- list_for_each_entry(tic, &log->l_writeq, t_queue) {
+ list_for_each_entry(tic, &log->l_write_head.waiters, t_queue) {
ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
need_bytes = tic->t_unit_res;
struct xlog_ticket *tic,
int need_bytes)
{
- list_add_tail(&tic->t_queue, &log->l_reserveq);
+ list_add_tail(&tic->t_queue, &log->l_reserve_head.waiters);
do {
if (XLOG_FORCED_SHUTDOWN(log))
goto shutdown;
xlog_grant_push_ail(log, need_bytes);
__set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock(&log->l_grant_reserve_lock);
+ spin_unlock(&log->l_reserve_head.lock);
XFS_STATS_INC(xs_sleep_logspace);
schedule();
trace_xfs_log_grant_wake(log, tic);
- spin_lock(&log->l_grant_reserve_lock);
+ spin_lock(&log->l_reserve_head.lock);
if (XLOG_FORCED_SHUTDOWN(log))
goto shutdown;
- } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
+ } while (xlog_space_left(log, &log->l_reserve_head.grant) < need_bytes);
list_del_init(&tic->t_queue);
return 0;
struct xlog_ticket *tic,
int need_bytes)
{
- list_add_tail(&tic->t_queue, &log->l_writeq);
+ list_add_tail(&tic->t_queue, &log->l_write_head.waiters);
do {
if (XLOG_FORCED_SHUTDOWN(log))
goto shutdown;
xlog_grant_push_ail(log, need_bytes);
__set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock(&log->l_grant_write_lock);
+ spin_unlock(&log->l_write_head.lock);
XFS_STATS_INC(xs_sleep_logspace);
schedule();
trace_xfs_log_regrant_write_wake(log, tic);
- spin_lock(&log->l_grant_write_lock);
+ spin_lock(&log->l_write_head.lock);
if (XLOG_FORCED_SHUTDOWN(log))
goto shutdown;
- } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
+ } while (xlog_space_left(log, &log->l_write_head.grant) < need_bytes);
list_del_init(&tic->t_queue);
return 0;
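
Aside: the two wait loops above are now identical except for which grant head they operate on. A natural follow-up, sketched below rather than taken from this patch (xlog_grant_head_wait is a hypothetical name, and the per-queue tracepoints are dropped for brevity), would pass the head in explicitly:

/*
 * Hypothetical consolidation of xlog_reserveq_wait()/xlog_writeq_wait():
 * sleep on @head until @need_bytes of grant space is available.
 * Must be called with head->lock held; it is dropped while sleeping.
 */
STATIC int
xlog_grant_head_wait(
	struct log		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes)
{
	list_add_tail(&tic->t_queue, &head->waiters);

	do {
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
		xlog_grant_push_ail(log, need_bytes);

		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock(&head->lock);

		XFS_STATS_INC(xs_sleep_logspace);
		schedule();

		spin_lock(&head->lock);
		if (XLOG_FORCED_SHUTDOWN(log))
			goto shutdown;
	} while (xlog_space_left(log, &head->grant) < need_bytes);

	list_del_init(&tic->t_queue);
	return 0;
shutdown:
	list_del_init(&tic->t_queue);
	return XFS_ERROR(EIO);
}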
if (XLOG_FORCED_SHUTDOWN(log))
return;
- if (!list_empty_careful(&log->l_writeq)) {
+ if (!list_empty_careful(&log->l_write_head.waiters)) {
ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
- spin_lock(&log->l_grant_write_lock);
- free_bytes = xlog_space_left(log, &log->l_grant_write_head);
+ spin_lock(&log->l_write_head.lock);
+ free_bytes = xlog_space_left(log, &log->l_write_head.grant);
xlog_writeq_wake(log, &free_bytes);
- spin_unlock(&log->l_grant_write_lock);
+ spin_unlock(&log->l_write_head.lock);
}
- if (!list_empty_careful(&log->l_reserveq)) {
+ if (!list_empty_careful(&log->l_reserve_head.waiters)) {
ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
- spin_lock(&log->l_grant_reserve_lock);
- free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+ spin_lock(&log->l_reserve_head.lock);
+ free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
xlog_reserveq_wake(log, &free_bytes);
- spin_unlock(&log->l_grant_reserve_lock);
+ spin_unlock(&log->l_reserve_head.lock);
}
}
xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
- xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
- xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
- INIT_LIST_HEAD(&log->l_reserveq);
- INIT_LIST_HEAD(&log->l_writeq);
- spin_lock_init(&log->l_grant_reserve_lock);
- spin_lock_init(&log->l_grant_write_lock);
+ xlog_assign_grant_head(&log->l_reserve_head.grant, 1, 0);
+ xlog_assign_grant_head(&log->l_write_head.grant, 1, 0);
+ INIT_LIST_HEAD(&log->l_reserve_head.waiters);
+ INIT_LIST_HEAD(&log->l_write_head.waiters);
+ spin_lock_init(&log->l_reserve_head.lock);
+ spin_lock_init(&log->l_write_head.lock);
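
For reference, the fields initialised above imply a grant head structure along these lines (a sketch of what xfs_log_priv.h would carry; member order and comments are editorial, not taken from this patch):

/* Sketch of the structure implied by the l_reserve_head/l_write_head usage. */
struct xlog_grant_head {
	spinlock_t		lock;		/* protects .waiters */
	struct list_head	waiters;	/* tickets sleeping for space */
	atomic64_t		grant;		/* packed cycle/bytes grant value */
};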
error = EFSCORRUPTED;
if (xfs_sb_version_hassector(&mp->m_sb)) {
ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
- free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+ free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
free_blocks = BTOBBT(free_bytes);
/*
roundoff < BBTOB(1)));
/* move grant heads by roundoff in sync */
- xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff);
- xlog_grant_add_space(log, &log->l_grant_write_head, roundoff);
+ xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
+ xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
/* put cycle number in every block */
xlog_pack_data(log, iclog, roundoff);
* path. Hence any lock will be globally hot if we take it unconditionally on
* every pass.
*
- * As tickets are only ever moved on and off the reserveq under the
- * l_grant_reserve_lock, we only need to take that lock if we are going to add
+ * As tickets are only ever moved on and off l_reserve_head.waiters under
+ * l_reserve_head.lock, we only need to take that lock if we are going to add
* the ticket to the queue and sleep. We can avoid taking the lock if the ticket
- * was never added to the reserveq because the t_queue list head will be empty
+ * was never added to the waiter list because the t_queue list head will be empty
* and we hold the only reference to it so it can safely be checked unlocked.
need_bytes = tic->t_unit_res;
if (tic->t_flags & XFS_LOG_PERM_RESERV)
need_bytes *= tic->t_ocnt;
- free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
- if (!list_empty_careful(&log->l_reserveq)) {
- spin_lock(&log->l_grant_reserve_lock);
+ free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
+ if (!list_empty_careful(&log->l_reserve_head.waiters)) {
+ spin_lock(&log->l_reserve_head.lock);
if (!xlog_reserveq_wake(log, &free_bytes) ||
free_bytes < need_bytes)
error = xlog_reserveq_wait(log, tic, need_bytes);
- spin_unlock(&log->l_grant_reserve_lock);
+ spin_unlock(&log->l_reserve_head.lock);
} else if (free_bytes < need_bytes) {
- spin_lock(&log->l_grant_reserve_lock);
+ spin_lock(&log->l_reserve_head.lock);
error = xlog_reserveq_wait(log, tic, need_bytes);
- spin_unlock(&log->l_grant_reserve_lock);
+ spin_unlock(&log->l_reserve_head.lock);
}
if (error)
return error;
- xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
- xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
+ xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
+ xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
trace_xfs_log_grant_exit(log, tic);
xlog_verify_grant_tail(log);
return 0;
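
The unlocked fast path described in the comment above distils to the following pattern, shown as a minimal sketch with hypothetical helper names (xlog_grant_head_check, xlog_grant_head_wake, and the xlog_grant_head_wait sketched earlier): the waiter list is only ever modified under head->lock, and a freshly initialised ticket's t_queue is empty, so an unlocked list_empty_careful() is enough to decide whether the lock needs taking at all.

/*
 * Hypothetical helper, not part of this patch: decide locklessly whether
 * @tic can proceed, only taking head->lock to wake waiters or sleep.
 */
static int
xlog_grant_head_check(
	struct log		*log,
	struct xlog_grant_head	*head,
	struct xlog_ticket	*tic,
	int			need_bytes)
{
	int	free_bytes = xlog_space_left(log, &head->grant);
	int	error = 0;

	if (!list_empty_careful(&head->waiters)) {
		/* Waiters queued: wake what we can, then queue behind them. */
		spin_lock(&head->lock);
		if (!xlog_grant_head_wake(log, head, &free_bytes) ||
		    free_bytes < need_bytes)
			error = xlog_grant_head_wait(log, head, tic,
						     need_bytes);
		spin_unlock(&head->lock);
	} else if (free_bytes < need_bytes) {
		/* No waiters, but not enough space: sleep until there is. */
		spin_lock(&head->lock);
		error = xlog_grant_head_wait(log, head, tic, need_bytes);
		spin_unlock(&head->lock);
	}
	return error;
}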
* otherwise try to get some space for this transaction.
*/
need_bytes = tic->t_unit_res;
- free_bytes = xlog_space_left(log, &log->l_grant_write_head);
- if (!list_empty_careful(&log->l_writeq)) {
- spin_lock(&log->l_grant_write_lock);
+ free_bytes = xlog_space_left(log, &log->l_write_head.grant);
+ if (!list_empty_careful(&log->l_write_head.waiters)) {
+ spin_lock(&log->l_write_head.lock);
if (!xlog_writeq_wake(log, &free_bytes) ||
free_bytes < need_bytes)
error = xlog_writeq_wait(log, tic, need_bytes);
- spin_unlock(&log->l_grant_write_lock);
+ spin_unlock(&log->l_write_head.lock);
} else if (free_bytes < need_bytes) {
- spin_lock(&log->l_grant_write_lock);
+ spin_lock(&log->l_write_head.lock);
error = xlog_writeq_wait(log, tic, need_bytes);
- spin_unlock(&log->l_grant_write_lock);
+ spin_unlock(&log->l_write_head.lock);
}
if (error)
return error;
- xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
+ xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
trace_xfs_log_regrant_write_exit(log, tic);
xlog_verify_grant_tail(log);
return 0;
if (ticket->t_cnt > 0)
ticket->t_cnt--;
- xlog_grant_sub_space(log, &log->l_grant_reserve_head,
+ xlog_grant_sub_space(log, &log->l_reserve_head.grant,
ticket->t_curr_res);
- xlog_grant_sub_space(log, &log->l_grant_write_head,
+ xlog_grant_sub_space(log, &log->l_write_head.grant,
ticket->t_curr_res);
ticket->t_curr_res = ticket->t_unit_res;
xlog_tic_reset_res(ticket);
if (ticket->t_cnt > 0)
return;
- xlog_grant_add_space(log, &log->l_grant_reserve_head,
+ xlog_grant_add_space(log, &log->l_reserve_head.grant,
ticket->t_unit_res);
trace_xfs_log_regrant_reserve_exit(log, ticket);
bytes += ticket->t_unit_res*ticket->t_cnt;
}
- xlog_grant_sub_space(log, &log->l_grant_reserve_head, bytes);
- xlog_grant_sub_space(log, &log->l_grant_write_head, bytes);
+ xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
+ xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
trace_xfs_log_ungrant_exit(log, ticket);
int tail_cycle, tail_blocks;
int cycle, space;
- xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
+ xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
if (tail_cycle != cycle) {
if (cycle - 1 != tail_cycle &&
* we don't enqueue anything once the SHUTDOWN flag is set, and this
* action is protected by the grant locks.
*/
- spin_lock(&log->l_grant_reserve_lock);
- list_for_each_entry(tic, &log->l_reserveq, t_queue)
+ spin_lock(&log->l_reserve_head.lock);
+ list_for_each_entry(tic, &log->l_reserve_head.waiters, t_queue)
wake_up_process(tic->t_task);
- spin_unlock(&log->l_grant_reserve_lock);
+ spin_unlock(&log->l_reserve_head.lock);
- spin_lock(&log->l_grant_write_lock);
- list_for_each_entry(tic, &log->l_writeq, t_queue)
+ spin_lock(&log->l_write_head.lock);
+ list_for_each_entry(tic, &log->l_write_head.waiters, t_queue)
wake_up_process(tic->t_task);
- spin_unlock(&log->l_grant_write_lock);
+ spin_unlock(&log->l_write_head.lock);
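
Both shutdown wake-ups share the same lock, walk, unlock shape; with the new structure they could be written once (hypothetical helper name, not part of this patch):

/* Wake every ticket sleeping on @head, e.g. on forced shutdown. */
static void
xlog_grant_head_wake_all(
	struct xlog_grant_head	*head)
{
	struct xlog_ticket	*tic;

	spin_lock(&head->lock);
	list_for_each_entry(tic, &head->waiters, t_queue)
		wake_up_process(tic->t_task);
	spin_unlock(&head->lock);
}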
if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
ASSERT(!logerror);