Diffstat (limited to 'fs/xfs/xfs_log_priv.h')
 -rw-r--r--  fs/xfs/xfs_log_priv.h | 708
 1 file changed, 372 insertions(+), 336 deletions(-)
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index b9ea262dd1c2..0fe59f0525aa 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -1,76 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
  * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  */
 #ifndef __XFS_LOG_PRIV_H__
 #define __XFS_LOG_PRIV_H__
 
+#include "xfs_extent_busy.h"	/* for struct xfs_busy_extents */
+
 struct xfs_buf;
 struct xlog;
 struct xlog_ticket;
 struct xfs_mount;
 
 /*
- * Macros, structures, prototypes for internal log manager use.
- */
-
-#define XLOG_MIN_ICLOGS		2
-#define XLOG_MAX_ICLOGS		8
-#define XLOG_HEADER_MAGIC_NUM	0xFEEDbabe	/* Invalid cycle number */
-#define XLOG_VERSION_1		1
-#define XLOG_VERSION_2		2		/* Large IClogs, Log sunit */
-#define XLOG_VERSION_OKBITS	(XLOG_VERSION_1 | XLOG_VERSION_2)
-#define XLOG_MIN_RECORD_BSIZE	(16*1024)	/* eventually 32k */
-#define XLOG_BIG_RECORD_BSIZE	(32*1024)	/* 32k buffers */
-#define XLOG_MAX_RECORD_BSIZE	(256*1024)
-#define XLOG_HEADER_CYCLE_SIZE	(32*1024)	/* cycle data in header */
-#define XLOG_MIN_RECORD_BSHIFT	14		/* 16384 == 1 << 14 */
-#define XLOG_BIG_RECORD_BSHIFT	15		/* 32k == 1 << 15 */
-#define XLOG_MAX_RECORD_BSHIFT	18		/* 256k == 1 << 18 */
-#define XLOG_BTOLSUNIT(log, b)	(((b)+(log)->l_mp->m_sb.sb_logsunit-1) / \
-				 (log)->l_mp->m_sb.sb_logsunit)
-#define XLOG_LSUNITTOB(log, su)	((su) * (log)->l_mp->m_sb.sb_logsunit)
-
-#define XLOG_HEADER_SIZE	512
-
-#define XLOG_REC_SHIFT(log) \
-	BTOBB(1 << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
-	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
-#define XLOG_TOTAL_REC_SHIFT(log) \
-	BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
-	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
-
-static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
-{
-	return ((xfs_lsn_t)cycle << 32) | block;
-}
-
-static inline uint xlog_get_cycle(char *ptr)
-{
-	if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
-		return be32_to_cpu(*((__be32 *)ptr + 1));
-	else
-		return be32_to_cpu(*(__be32 *)ptr);
-}
-
-#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)
-
-#ifdef __KERNEL__
-
-/*
  * get client id from packed copy.
  *
  * this hack is here because the xlog_pack code copies four bytes
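The body of xlog_get_client_id() falls between these two hunks and is not shown in the diff context. Going by the comment, it should reduce to extracting the most significant byte of the packed big-endian word; a minimal sketch:

static inline uint xlog_get_client_id(__be32 i)
{
	return be32_to_cpu(i) >> 24;	/* client id lives in the top byte */
}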
@@ -90,64 +33,42 @@ static inline uint xlog_get_client_id(__be32 i)
 /*
  * In core log state
  */
-#define XLOG_STATE_ACTIVE    0x0001 /* Current IC log being written to */
-#define XLOG_STATE_WANT_SYNC 0x0002 /* Want to sync this iclog; no more writes */
-#define XLOG_STATE_SYNCING   0x0004 /* This IC log is syncing */
-#define XLOG_STATE_DONE_SYNC 0x0008 /* Done syncing to disk */
-#define XLOG_STATE_DO_CALLBACK \
-			     0x0010 /* Process callback functions */
-#define XLOG_STATE_CALLBACK  0x0020 /* Callback functions now */
-#define XLOG_STATE_DIRTY     0x0040 /* Dirty IC log, not ready for ACTIVE status*/
-#define XLOG_STATE_IOERROR   0x0080 /* IO error happened in sync'ing log */
-#define XLOG_STATE_ALL	     0x7FFF /* All possible valid flags */
-#define XLOG_STATE_NOTUSED   0x8000 /* This IC log not being used */
-#endif	/* __KERNEL__ */
+enum xlog_iclog_state {
+	XLOG_STATE_ACTIVE,	/* Current IC log being written to */
+	XLOG_STATE_WANT_SYNC,	/* Want to sync this iclog; no more writes */
+	XLOG_STATE_SYNCING,	/* This IC log is syncing */
+	XLOG_STATE_DONE_SYNC,	/* Done syncing to disk */
+	XLOG_STATE_CALLBACK,	/* Callback functions now */
+	XLOG_STATE_DIRTY,	/* Dirty IC log, not ready for ACTIVE status */
+};
+
+#define XLOG_STATE_STRINGS \
+	{ XLOG_STATE_ACTIVE,	"XLOG_STATE_ACTIVE" }, \
+	{ XLOG_STATE_WANT_SYNC,	"XLOG_STATE_WANT_SYNC" }, \
+	{ XLOG_STATE_SYNCING,	"XLOG_STATE_SYNCING" }, \
+	{ XLOG_STATE_DONE_SYNC,	"XLOG_STATE_DONE_SYNC" }, \
+	{ XLOG_STATE_CALLBACK,	"XLOG_STATE_CALLBACK" }, \
+	{ XLOG_STATE_DIRTY,	"XLOG_STATE_DIRTY" }
 
 /*
- * Flags to log operation header
- *
- * The first write of a new transaction will be preceded with a start
- * record, XLOG_START_TRANS.  Once a transaction is committed, a commit
- * record is written, XLOG_COMMIT_TRANS.  If a single region can not fit into
- * the remainder of the current active in-core log, it is split up into
- * multiple regions.  Each partial region will be marked with a
- * XLOG_CONTINUE_TRANS until the last one, which gets marked with XLOG_END_TRANS.
- *
- */
-#define XLOG_START_TRANS	0x01	/* Start a new transaction */
-#define XLOG_COMMIT_TRANS	0x02	/* Commit this transaction */
-#define XLOG_CONTINUE_TRANS	0x04	/* Cont this trans into new region */
-#define XLOG_WAS_CONT_TRANS	0x08	/* Cont this trans into new region */
-#define XLOG_END_TRANS		0x10	/* End a continued transaction */
-#define XLOG_UNMOUNT_TRANS	0x20	/* Unmount a filesystem transaction */
-
-#ifdef __KERNEL__
-/*
- * Flags to log ticket
+ * In core log flags
  */
-#define XLOG_TIC_INITED		0x1	/* has been initialized */
-#define XLOG_TIC_PERM_RESERV	0x2	/* permanent reservation */
+#define XLOG_ICL_NEED_FLUSH	(1u << 0)	/* iclog needs REQ_PREFLUSH */
+#define XLOG_ICL_NEED_FUA	(1u << 1)	/* iclog needs REQ_FUA */
 
-#define XLOG_TIC_FLAGS \
-	{ XLOG_TIC_INITED,	"XLOG_TIC_INITED" }, \
-	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }
+#define XLOG_ICL_STRINGS \
+	{ XLOG_ICL_NEED_FLUSH,	"XLOG_ICL_NEED_FLUSH" }, \
+	{ XLOG_ICL_NEED_FUA,	"XLOG_ICL_NEED_FUA" }
 
-#endif	/* __KERNEL__ */
-
-#define XLOG_UNMOUNT_TYPE	0x556e	/* Un for Unmount */
 
 /*
- * Flags for log structure
+ * Log ticket flags
  */
-#define XLOG_ACTIVE_RECOVERY	0x2	/* in the middle of recovery */
-#define	XLOG_RECOVERY_NEEDED	0x4	/* log was recovered */
-#define XLOG_IO_ERROR		0x8	/* log hit an I/O error, and being
-					   shutdown */
-#define XLOG_TAIL_WARN		0x10	/* log tail verify warning issued */
+#define XLOG_TIC_PERM_RESERV	(1u << 0)	/* permanent reservation */
 
-typedef __uint32_t xlog_tid_t;
+#define XLOG_TIC_FLAGS \
+	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }
 
-#ifdef __KERNEL__
 /*
  * Below are states for covering allocation transactions.
  * By covering, we mean changing the h_tail_lsn in the last on-disk
@@ -223,111 +144,26 @@ typedef __uint32_t xlog_tid_t;
 
 #define XLOG_COVER_OPS		5
 
-
-/* Ticket reservation region accounting */
-#define XLOG_TIC_LEN_MAX	15
-
-/*
- * Reservation region
- * As would be stored in xfs_log_iovec but without the i_addr which
- * we don't care about.
- */
-typedef struct xlog_res {
-	uint	r_len;	/* region length		:4 */
-	uint	r_type;	/* region's transaction type	:4 */
-} xlog_res_t;
-
-typedef struct xlog_ticket {
-	struct list_head	t_queue;	/* reserve/write queue */
-	struct task_struct	*t_task;	/* task that owns this ticket */
-	xlog_tid_t		t_tid;		/* transaction identifier	 : 4  */
-	atomic_t		t_ref;		/* ticket reference count	 : 4  */
-	int			t_curr_res;	/* current reservation in bytes	 : 4  */
-	int			t_unit_res;	/* unit reservation in bytes	 : 4  */
-	char			t_ocnt;		/* original count		 : 1  */
-	char			t_cnt;		/* current count		 : 1  */
-	char			t_clientid;	/* who does this belong to;	 : 1  */
-	char			t_flags;	/* properties of reservation	 : 1  */
-	uint			t_trans_type;	/* transaction type		 : 4  */
-
-	/* reservation array fields */
-	uint			t_res_num;		/* num in array	 : 4 */
-	uint			t_res_num_ophdrs;	/* num op hdrs	 : 4 */
-	uint			t_res_arr_sum;		/* array sum	 : 4 */
-	uint			t_res_o_flow;		/* sum overflow	 : 4 */
-	xlog_res_t		t_res_arr[XLOG_TIC_LEN_MAX];	/* array of res : 8 * 15 */
-} xlog_ticket_t;
-
-#endif
-
-
-typedef struct xlog_op_header {
-	__be32	   oh_tid;	/* transaction id of operation	:  4 b */
-	__be32	   oh_len;	/* bytes in data region		:  4 b */
-	__u8	   oh_clientid;	/* who sent me this		:  1 b */
-	__u8	   oh_flags;	/*				:  1 b */
-	__u16	   oh_res2;	/* 32 bit align			:  2 b */
-} xlog_op_header_t;
-
-
-/* valid values for h_fmt */
-#define XLOG_FMT_UNKNOWN	0
-#define XLOG_FMT_LINUX_LE	1
-#define XLOG_FMT_LINUX_BE	2
-#define XLOG_FMT_IRIX_BE	3
-
-/* our fmt */
-#ifdef XFS_NATIVE_HOST
-#define XLOG_FMT XLOG_FMT_LINUX_BE
-#else
-#define XLOG_FMT XLOG_FMT_LINUX_LE
-#endif
-
-typedef struct xlog_rec_header {
-	__be32	  h_magicno;	/* log record (LR) identifier		:  4 */
-	__be32	  h_cycle;	/* write cycle of log			:  4 */
-	__be32	  h_version;	/* LR version				:  4 */
-	__be32	  h_len;	/* len in bytes; should be 64-bit aligned: 4 */
-	__be64	  h_lsn;	/* lsn of this LR			:  8 */
-	__be64	  h_tail_lsn;	/* lsn of 1st LR w/ buffers not committed: 8 */
-	__le32	  h_crc;	/* crc of log record			:  4 */
-	__be32	  h_prev_block;	/* block number to previous LR		:  4 */
-	__be32	  h_num_logops;	/* number of log operations in this LR	:  4 */
-	__be32	  h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];
-	/* new fields */
-	__be32	  h_fmt;	/* format of log record			:  4 */
-	uuid_t	  h_fs_uuid;	/* uuid of FS				: 16 */
-	__be32	  h_size;	/* iclog size				:  4 */
-} xlog_rec_header_t;
-
-typedef struct xlog_rec_ext_header {
-	__be32	  xh_cycle;	/* write cycle of log			: 4 */
-	__be32	  xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE];	/* : 256 */
-} xlog_rec_ext_header_t;
-
-#ifdef __KERNEL__
-
-/*
- * Quite misnamed, because this union lays out the actual on-disk log buffer.
- */
-typedef union xlog_in_core2 {
-	xlog_rec_header_t	hic_header;
-	xlog_rec_ext_header_t	hic_xheader;
-	char			hic_sector[XLOG_HEADER_SIZE];
-} xlog_in_core_2_t;
+struct xlog_ticket {
+	struct list_head	t_queue;	/* reserve/write queue */
+	struct task_struct	*t_task;	/* task that owns this ticket */
+	xlog_tid_t		t_tid;		/* transaction identifier */
+	atomic_t		t_ref;		/* ticket reference count */
+	int			t_curr_res;	/* current reservation */
+	int			t_unit_res;	/* unit reservation */
+	char			t_ocnt;		/* original unit count */
+	char			t_cnt;		/* current unit count */
+	uint8_t			t_flags;	/* properties of reservation */
+	int			t_iclog_hdrs;	/* iclog hdrs in t_curr_res */
+};
 
 /*
- * - A log record header is 512 bytes.  There is plenty of room to grow the
- *   xlog_rec_header_t into the reserved space.
- * - ic_data follows, so a write to disk can start at the beginning of
- *   the iclog.
+ * In-core log structure.
+ *
  * - ic_forcewait is used to implement synchronous forcing of the iclog to disk.
  * - ic_next is the pointer to the next iclog in the ring.
- * - ic_bp is a pointer to the buffer used to write this incore log to disk.
  * - ic_log is a pointer back to the global log structure.
- * - ic_callback is a linked list of callback function/argument pairs to be
- *   called after an iclog finishes writing.
- * - ic_size is the full size of the header plus data.
+ * - ic_size is the full size of the log buffer, minus the cycle headers.
  * - ic_offset is the current number of bytes written to in this iclog.
  * - ic_refcnt is bumped when someone is writing to the log.
  * - ic_state is the state of the iclog.
@@ -337,7 +173,7 @@ typedef union xlog_in_core2 {
  * structure cacheline aligned.  The following fields can be contended on
  * by independent processes:
  *
- *	- ic_callback_*
+ *	- ic_callbacks
  *	- ic_refcnt
  *	- fields protected by the global l_icloglock
  *
@@ -345,29 +181,30 @@
  * We'll put all the read-only and l_icloglock fields in the first cacheline,
  * and move everything else out to subsequent cachelines.
  */
-typedef struct xlog_in_core {
+struct xlog_in_core {
 	wait_queue_head_t	ic_force_wait;
 	wait_queue_head_t	ic_write_wait;
 	struct xlog_in_core	*ic_next;
 	struct xlog_in_core	*ic_prev;
-	struct xfs_buf		*ic_bp;
 	struct xlog		*ic_log;
-	int			ic_size;
-	int			ic_offset;
-	int			ic_bwritecnt;
-	unsigned short		ic_state;
-	char			*ic_datap;	/* pointer to iclog data */
-
-	/* Callback structures need their own cacheline */
-	spinlock_t		ic_callback_lock ____cacheline_aligned_in_smp;
-	xfs_log_callback_t	*ic_callback;
-	xfs_log_callback_t	**ic_callback_tail;
+	u32			ic_size;
+	u32			ic_offset;
+	enum xlog_iclog_state	ic_state;
+	unsigned int		ic_flags;
+	void			*ic_datap;	/* pointer to iclog data */
+	struct list_head	ic_callbacks;
 
 	/* reference counts need their own cacheline */
 	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
-	xlog_in_core_2_t	*ic_data;
-#define ic_header	ic_data->hic_header
-} xlog_in_core_t;
+	struct xlog_rec_header	*ic_header;
+#ifdef DEBUG
+	bool			ic_fail_crc : 1;
+#endif
+	struct semaphore	ic_sema;
+	struct work_struct	ic_end_io_work;
+	struct bio		ic_bio;
+	struct bio_vec		ic_bvec[];
+};
 
 /*
  * The CIL context is used to aggregate per-transaction details as well be
@@ -379,16 +216,35 @@ struct xfs_cil;
 
 struct xfs_cil_ctx {
 	struct xfs_cil		*cil;
-	xfs_lsn_t		sequence;	/* chkpt sequence # */
+	xfs_csn_t		sequence;	/* chkpt sequence # */
 	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
 	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
+	struct xlog_in_core	*commit_iclog;
 	struct xlog_ticket	*ticket;	/* chkpt ticket */
-	int			nvecs;		/* number of regions */
-	int			space_used;	/* aggregate size of regions */
-	struct list_head	busy_extents;	/* busy extents in chkpt */
-	struct xfs_log_vec	*lv_chain;	/* logvecs being pushed */
-	xfs_log_callback_t	log_cb;		/* completion callback hook. */
+	atomic_t		space_used;	/* aggregate size of regions */
+	struct xfs_busy_extents	busy_extents;
+	struct list_head	log_items;	/* log items in chkpt */
+	struct list_head	lv_chain;	/* logvecs being pushed */
+	struct list_head	iclog_entry;
 	struct list_head	committing;	/* ctx committing list */
+	struct work_struct	push_work;
+	atomic_t		order_id;
+
+	/*
+	 * CPUs that could have added items to the percpu CIL data.  Access is
+	 * coordinated with xc_ctx_lock.
+	 */
+	struct cpumask		cil_pcpmask;
+};
+
+/*
+ * Per-cpu CIL tracking items
+ */
+struct xlog_cil_pcp {
+	int32_t			space_used;
+	uint32_t		space_reserved;
+	struct list_head	busy_extents;
+	struct list_head	log_items;
+};
 
 /*
@@ -409,16 +265,28 @@ struct xfs_cil_ctx {
  */
 struct xfs_cil {
	struct xlog		*xc_log;
-	struct list_head	xc_cil;
-	spinlock_t		xc_cil_lock;
+	unsigned long		xc_flags;
+	atomic_t		xc_iclog_hdrs;
+	struct workqueue_struct	*xc_push_wq;
+
+	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
 	struct xfs_cil_ctx	*xc_ctx;
-	struct rw_semaphore	xc_ctx_lock;
+
+	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
+	xfs_csn_t		xc_push_seq;
+	bool			xc_push_commit_stable;
 	struct list_head	xc_committing;
 	wait_queue_head_t	xc_commit_wait;
-	xfs_lsn_t		xc_current_sequence;
-	struct work_struct	xc_push_work;
-	xfs_lsn_t		xc_push_seq;
-};
+	wait_queue_head_t	xc_start_wait;
+	xfs_csn_t		xc_current_sequence;
+	wait_queue_head_t	xc_push_wait;	/* background push throttle */
+
+	void __percpu		*xc_pcp;	/* percpu CIL structures */
+} ____cacheline_aligned_in_smp;
+
+/* xc_flags bit values */
+#define XLOG_CIL_EMPTY		1
+#define XLOG_CIL_PCP_SPACE	2
 
 /*
  * The amount of log space we allow the CIL to aggregate is difficult to size.
@@ -461,13 +329,53 @@ struct xfs_cil {
  * tries to keep 25% of the log free, so we need to keep below that limit or we
  * risk running out of free log space to start any new transactions.
  *
- * In order to keep background CIL push efficient, we will set a lower
- * threshold at which background pushing is attempted without blocking current
- * transaction commits. A separate, higher bound defines when CIL pushes are
- * enforced to ensure we stay within our maximum checkpoint size bounds.
- * threshold, yet give us plenty of space for aggregation on large logs.
+ * In order to keep background CIL push efficient, we only need to ensure the
+ * CIL is large enough to maintain sufficient in-memory relogging to avoid
+ * repeated physical writes of frequently modified metadata. If we allow the CIL
+ * to grow to a substantial fraction of the log, then we may be pinning hundreds
+ * of megabytes of metadata in memory until the CIL flushes. This can cause
+ * issues when we are running low on memory - pinned memory cannot be reclaimed,
+ * and the CIL consumes a lot of memory. Hence we need to set an upper physical
+ * size limit for the CIL that limits the maximum amount of memory pinned by the
+ * CIL but does not limit performance by reducing relogging efficiency
+ * significantly.
+ *
+ * As such, the CIL push threshold ends up being the smaller of two thresholds:
+ * - a threshold large enough that it allows CIL to be pushed and progress to be
+ *   made without excessive blocking of incoming transaction commits. This is
+ *   defined to be 12.5% of the log space - half the 25% push threshold of the
+ *   AIL.
+ * - small enough that it doesn't pin excessive amounts of memory but maintains
+ *   close to peak relogging efficiency. This is defined to be 16x the iclog
+ *   buffer window (32MB) as measurements have shown this to be roughly the
+ *   point of diminishing performance increases under highly concurrent
+ *   modification workloads.
+ *
+ * To prevent the CIL from overflowing upper commit size bounds, we introduce a
+ * new threshold at which we block committing transactions until the background
+ * CIL commit commences and switches to a new context. While this is not a hard
+ * limit, it forces the process committing a transaction to the CIL to block and
+ * yield the CPU, giving the CIL push work a chance to be scheduled and start
+ * work. This prevents a process running lots of transactions from overfilling
+ * the CIL because it is not yielding the CPU. We set the blocking limit at
+ * twice the background push space threshold so we keep in line with the AIL
+ * push thresholds.
+ *
+ * Note: this is not a -hard- limit as blocking is applied after the transaction
+ * is inserted into the CIL and the push has been triggered. It is largely a
+ * throttling mechanism that allows the CIL push to be scheduled and run. A hard
+ * limit will be difficult to implement without introducing global serialisation
+ * in the CIL commit fast path, and it's not at all clear that we actually need
+ * such hard limits given the ~7 years we've run without a hard limit before
+ * finding the first situation where a checkpoint size overflow actually
+ * occurred. Hence the simple throttle, and an ASSERT check to tell us that
+ * we've overrun the max size.
  */
-#define XLOG_CIL_SPACE_LIMIT(log)	(log->l_logsize >> 3)
+#define XLOG_CIL_SPACE_LIMIT(log)	\
+	min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)
+
+#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log)	\
+	(XLOG_CIL_SPACE_LIMIT(log) * 2)
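A quick check of the arithmetic (an editorial sketch; XLOG_MAX_ICLOGS = 8 and XLOG_MAX_RECORD_BSHIFT = 18 come from the v2 log limits removed in the first hunk):

/*
 * BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4 = (8 << 18) << 4 = 2MB * 16 = 32MB
 *
 * 2GB log:  XLOG_CIL_SPACE_LIMIT = min(2GB >> 3, 32MB)  = 32MB, blocking at 64MB
 * 64MB log: XLOG_CIL_SPACE_LIMIT = min(64MB >> 3, 32MB) =  8MB, blocking at 16MB
 */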
 
 /*
  * ticket grant locks, queues and accounting have their own cachelines
  *
@@ -490,18 +398,16 @@ struct xlog {
 	struct xfs_mount	*l_mp;		/* mount point */
 	struct xfs_ail		*l_ailp;	/* AIL log is working with */
 	struct xfs_cil		*l_cilp;	/* CIL log is working with */
-	struct xfs_buf		*l_xbuf;	/* extra buffer for log
-						 * wrapping */
 	struct xfs_buftarg	*l_targ;	/* buftarg of log */
+	struct workqueue_struct	*l_ioend_workqueue; /* for I/O completions */
 	struct delayed_work	l_work;		/* background flush work */
-	uint			l_flags;
+	long			l_opstate;	/* operational state */
 	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
 	struct list_head	*l_buf_cancel_table;
+	struct list_head	r_dfops;	/* recovered log intent items */
 	int			l_iclog_hsize;	/* size of iclog header */
-	int			l_iclog_heads;	/* # of iclog header sectors */
 	uint			l_sectBBsize;	/* sector size in BBs (2^n) */
 	int			l_iclog_size;	/* size of log in bytes */
-	int			l_iclog_size_log; /* log power size of log */
 	int			l_iclog_bufs;	/* number of iclog buffers */
 	xfs_daddr_t		l_logBBstart;	/* start block of log */
 	int			l_logsize;	/* size of log in bytes */
@@ -512,7 +418,7 @@ struct xlog {
 						/* waiting for iclog flush */
 	int			l_covered_state;/* state of "covering disk
 						 * log entries" */
-	xlog_in_core_t		*l_iclog;	/* head log queue */
+	struct xlog_in_core	*l_iclog;	/* head log queue */
 	spinlock_t		l_icloglock;	/* grab to change iclog state */
 	int			l_curr_cycle;	/* Cycle number of log writes */
 	int			l_prev_cycle;	/* Cycle number before last
@@ -521,30 +427,63 @@ struct xlog {
 	int			l_prev_block;	/* previous logical log block */
 
 	/*
-	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
-	 * read without needing to hold specific locks. To avoid operations
-	 * contending with other hot objects, place each of them on a separate
-	 * cacheline.
+	 * l_tail_lsn is atomic so it can be set and read without needing to
+	 * hold specific locks.  To avoid operations contending with other hot
+	 * objects, place it on a separate cacheline.
	 */
-	/* lsn of last LR on disk */
-	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
 	/* lsn of 1st LR with unflushed
	 * buffers */
 	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;
 
 	struct xlog_grant_head	l_reserve_head;
 	struct xlog_grant_head	l_write_head;
+	uint64_t		l_tail_space;
 
-	/* The following field are used for debugging; need to hold icloglock */
-#ifdef DEBUG
-	char			*l_iclog_bak[XLOG_MAX_ICLOGS];
-#endif
+	struct xfs_kobj		l_kobj;
+
+	/* log recovery lsn tracking (for buffer submission) */
+	xfs_lsn_t		l_recovery_lsn;
+
+	uint32_t		l_iclog_roundoff;/* padding roundoff */
 };
 
-#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
-	((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))
+/*
+ * Bits for operational state
+ */
+#define XLOG_ACTIVE_RECOVERY	0	/* in the middle of recovery */
+#define	XLOG_RECOVERY_NEEDED	1	/* log was recovered */
+#define XLOG_IO_ERROR		2	/* log hit an I/O error, and being
+					   shutdown */
+#define XLOG_TAIL_WARN		3	/* log tail verify warning issued */
+#define XLOG_SHUTDOWN_STARTED	4	/* xlog_force_shutdown() exclusion */
+
+static inline bool
+xlog_recovery_needed(struct xlog *log)
+{
+	return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
+}
+
+static inline bool
+xlog_in_recovery(struct xlog *log)
+{
+	return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
+}
 
-#define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)
+static inline bool
+xlog_is_shutdown(struct xlog *log)
+{
+	return test_bit(XLOG_IO_ERROR, &log->l_opstate);
+}
+
+/*
+ * Wait until the xlog_force_shutdown() has marked the log as shut down
+ * so xlog_is_shutdown() will always return true.
+ */
+static inline void
+xlog_shutdown_wait(
+	struct xlog	*log)
+{
+	wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
+}
 
 /* common routines */
 extern int
@@ -553,38 +492,28 @@ xlog_recover(
 extern int
 xlog_recover_finish(
 	struct xlog *log);
+extern void
+xlog_recover_cancel(struct xlog *);
 
-extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
-			    char *dp, int size);
+__le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
+		    char *dp, unsigned int hdrsize, unsigned int size);
 
-extern kmem_zone_t *xfs_log_ticket_zone;
-struct xlog_ticket *
-xlog_ticket_alloc(
-	struct xlog	*log,
-	int		unit_bytes,
-	int		count,
-	char		client,
-	bool		permanent,
-	xfs_km_flags_t	alloc_flags);
-
-
-static inline void
-xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
-{
-	*ptr += bytes;
-	*len -= bytes;
-	*off += bytes;
-}
+extern struct kmem_cache *xfs_log_ticket_cache;
+struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
+		int count, bool permanent);
 
 void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
-int
-xlog_write(
-	struct xlog		*log,
-	struct xfs_log_vec	*log_vector,
-	struct xlog_ticket	*tic,
-	xfs_lsn_t		*start_lsn,
-	struct xlog_in_core	**commit_iclog,
-	uint			flags);
+void	xlog_print_trans(struct xfs_trans *);
+int	xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
+		struct list_head *lv_chain, struct xlog_ticket *tic,
+		uint32_t len);
+void	xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
+void	xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
+
+void	xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
+		int eventual_size);
+int	xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
+		struct xlog_ticket *ticket);
 
 /*
  * When we crack an atomic LSN, we sample it first so that the value will not
@@ -612,80 +541,187 @@ xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
 }
 
 /*
- * When we crack the grant head, we sample it first so that the value will not
- * change while we are cracking it into the component values. This means we
- * will always get consistent component values to work from.
+ * Committed Item List interfaces
  */
+int	xlog_cil_init(struct xlog *log);
+void	xlog_cil_init_post_recovery(struct xlog *log);
+void	xlog_cil_destroy(struct xlog *log);
+bool	xlog_cil_empty(struct xlog *log);
+void	xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
+		xfs_csn_t *commit_seq, bool regrant);
+void	xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx,
+		struct xlog_in_core *iclog);
+
+
+/*
+ * CIL force routines
+ */
+void xlog_cil_flush(struct xlog *log);
+xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);
+
 static inline void
-xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
+xlog_cil_force(struct xlog *log)
 {
-	*cycle = val >> 32;
-	*space = val & 0xffffffff;
+	xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
 }
 
+/*
+ * Wrapper function for waiting on a wait queue serialised against wakeups
+ * by a spinlock. This matches the semantics of all the wait queues used in the
+ * log code.
+ */
 static inline void
-xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
+xlog_wait(
+	struct wait_queue_head	*wq,
+	struct spinlock		*lock)
+	__releases(lock)
 {
-	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
-}
+	DECLARE_WAITQUEUE(wait, current);
 
-static inline int64_t
-xlog_assign_grant_head_val(int cycle, int space)
-{
-	return ((int64_t)cycle << 32) | space;
+	add_wait_queue_exclusive(wq, &wait);
+	__set_current_state(TASK_UNINTERRUPTIBLE);
+	spin_unlock(lock);
+	schedule();
+	remove_wait_queue(wq, &wait);
 }
 
-static inline void
-xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
+int xlog_wait_on_iclog(struct xlog_in_core *iclog)
+		__releases(iclog->ic_log->l_icloglock);
+
+/* Calculate the distance between two LSNs in bytes */
+static inline uint64_t
+xlog_lsn_sub(
+	struct xlog	*log,
+	xfs_lsn_t	high,
+	xfs_lsn_t	low)
 {
-	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
+	uint32_t	hi_cycle = CYCLE_LSN(high);
+	uint32_t	hi_block = BLOCK_LSN(high);
+	uint32_t	lo_cycle = CYCLE_LSN(low);
+	uint32_t	lo_block = BLOCK_LSN(low);
+
+	if (hi_cycle == lo_cycle)
+		return BBTOB(hi_block - lo_block);
+	ASSERT((hi_cycle == lo_cycle + 1) || xlog_is_shutdown(log));
+	return (uint64_t)log->l_logsize - BBTOB(lo_block - hi_block);
 }
 
+void	xlog_grant_return_space(struct xlog *log, xfs_lsn_t old_head,
+		xfs_lsn_t new_head);
+
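A worked example of the two xlog_lsn_sub() cases (editorial, not part of the patch):

/*
 * Same cycle:   high = (cycle 4, block 100), low = (cycle 4, block 8)
 *	distance = BBTOB(100 - 8) = 92 * 512 = 47104 bytes
 *
 * Tail wrapped: high = (cycle 5, block 8), low = (cycle 4, block 100)
 *	distance = l_logsize - BBTOB(100 - 8), e.g. 64MB - 47104 bytes
 */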
 /*
- * Committed Item List interfaces
+ * The LSN is valid so long as it is behind the current LSN. If it isn't, this
+ * means that the next log record that includes this metadata could have a
+ * smaller LSN. In turn, this means that the modification in the log would not
+ * replay.
  */
-int
-xlog_cil_init(struct xlog *log);
-void
-xlog_cil_init_post_recovery(struct xlog *log);
-void
-xlog_cil_destroy(struct xlog *log);
+static inline bool
+xlog_valid_lsn(
+	struct xlog	*log,
+	xfs_lsn_t	lsn)
+{
+	int		cur_cycle;
+	int		cur_block;
+	bool		valid = true;
+
+	/*
+	 * First, sample the current lsn without locking to avoid added
+	 * contention from metadata I/O.  The current cycle and block are
+	 * updated (in xlog_state_switch_iclogs()) and read here in a
+	 * particular order to avoid false negatives (e.g., thinking the
+	 * metadata LSN is valid when it is not).
+	 *
+	 * The current block is always rewound before the cycle is bumped in
+	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen
+	 * in a transiently forward state.  Instead, we can see the LSN in a
+	 * transiently behind state if we happen to race with a cycle wrap.
+	 */
+	cur_cycle = READ_ONCE(log->l_curr_cycle);
+	smp_rmb();
+	cur_block = READ_ONCE(log->l_curr_block);
+
+	if ((CYCLE_LSN(lsn) > cur_cycle) ||
+	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
+		/*
+		 * If the metadata LSN appears invalid, it's possible the check
+		 * above raced with a wrap to the next log cycle. Grab the lock
+		 * to check for sure.
+		 */
+		spin_lock(&log->l_icloglock);
+		cur_cycle = log->l_curr_cycle;
+		cur_block = log->l_curr_block;
+		spin_unlock(&log->l_icloglock);
+
+		if ((CYCLE_LSN(lsn) > cur_cycle) ||
+		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
+			valid = false;
+	}
+
+	return valid;
+}
 
 /*
- * CIL force routines
+ * Log vector and shadow buffers can be large, so we need to use kvmalloc() here
+ * to ensure success. Unfortunately, kvmalloc() only allows GFP_KERNEL contexts
+ * to fall back to vmalloc, so we can't actually do anything useful with gfp
+ * flags to control the kmalloc() behaviour within kvmalloc(). Hence kmalloc()
+ * will do direct reclaim and compaction in the slow path, both of which are
+ * horrendously expensive. We just want kmalloc to fail fast and fall back to
+ * vmalloc if it can't get something straight away from the free lists or
+ * buddy allocator. Hence we have to open code kvmalloc ourselves here.
+ *
+ * This assumes that the caller uses memalloc_nofs_save task context here, so
+ * despite the use of GFP_KERNEL here, we are going to be doing GFP_NOFS
+ * allocations. This is actually the only way to make vmalloc() do GFP_NOFS
+ * allocations, so let's just all pretend this is a GFP_KERNEL context
+ * operation....
  */
-xfs_lsn_t
-xlog_cil_force_lsn(
-	struct xlog *log,
-	xfs_lsn_t sequence);
-
-static inline void
-xlog_cil_force(struct xlog *log)
+static inline void *
+xlog_kvmalloc(
+	size_t		buf_size)
 {
-	xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
+	gfp_t		flags = GFP_KERNEL;
+	void		*p;
+
+	flags &= ~__GFP_DIRECT_RECLAIM;
+	flags |= __GFP_NOWARN | __GFP_NORETRY;
+	do {
+		p = kmalloc(buf_size, flags);
+		if (!p)
+			p = vmalloc(buf_size);
+	} while (!p);
+
+	return p;
 }
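A hypothetical caller sketch for the NOFS convention the comment above relies on (memalloc_nofs_save()/memalloc_nofs_restore() are the standard scoped-allocation API; the buffer name is illustrative):

	unsigned int	nofs_flags = memalloc_nofs_save();
	void		*buf = xlog_kvmalloc(buf_size);

	/* the GFP_KERNEL vmalloc fallback above now behaves as GFP_NOFS */
	memalloc_nofs_restore(nofs_flags);
	kvfree(buf);	/* kvfree() handles both kmalloc and vmalloc memory */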
 
 /*
- * Unmount record type is used as a pseudo transaction type for the ticket.
- * It's value must be outside the range of XFS_TRANS_* values.
+ * Given a count of iovecs and space for a log item, compute the space we need
+ * in the log to store that data plus the log headers.
  */
-#define XLOG_UNMOUNT_REC_TYPE	(-1U)
+static inline unsigned int
+xlog_item_space(
+	unsigned int	niovecs,
+	unsigned int	nbytes)
+{
+	nbytes += niovecs * (sizeof(uint64_t) + sizeof(struct xlog_op_header));
+	return round_up(nbytes, sizeof(uint64_t));
+}
 
 /*
- * Wrapper function for waiting on a wait queue serialised against wakeups
- * by a spinlock. This matches the semantics of all the wait queues used in the
- * log code.
+ * Cycles over XLOG_CYCLE_DATA_SIZE overflow into the extended header that was
+ * added for v2 logs.  Addressing for the cycles array there is off by one,
+ * because the first batch of cycles is in the original header.
  */
-static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
+static inline __be32 *xlog_cycle_data(struct xlog_rec_header *rhead, unsigned i)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	if (i >= XLOG_CYCLE_DATA_SIZE) {
+		unsigned	j = i / XLOG_CYCLE_DATA_SIZE;
+		unsigned	k = i % XLOG_CYCLE_DATA_SIZE;
 
-	add_wait_queue_exclusive(wq, &wait);
-	__set_current_state(TASK_UNINTERRUPTIBLE);
-	spin_unlock(lock);
-	schedule();
-	remove_wait_queue(wq, &wait);
+		return &rhead->h_ext[j - 1].xh_cycle_data[k];
+	}
+
+	return &rhead->h_cycle_data[i];
 }
 
-#endif	/* __KERNEL__ */
 #endif	/* __XFS_LOG_PRIV_H__ */
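A worked example of the xlog_cycle_data() addressing above (editorial; it assumes XLOG_CYCLE_DATA_SIZE is the 64 cycle slots of h_cycle_data, i.e. XLOG_HEADER_CYCLE_SIZE / BBSIZE = 32k / 512, and that h_ext[] is the array of extended headers following the record header):

/*
 * xlog_cycle_data(rhead, 10) -> &rhead->h_cycle_data[10]
 * xlog_cycle_data(rhead, 70) -> j = 1, k = 6
 *                            -> &rhead->h_ext[0].xh_cycle_data[6]
 *
 * i.e. extended header 0 carries cycles 64..127, hence the "off by one".
 */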
