Diffstat (limited to 'kernel/printk/printk_ringbuffer.c')
-rw-r--r--   kernel/printk/printk_ringbuffer.c   453
1 file changed, 372 insertions(+), 81 deletions(-)
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c index 2dc4d5a1f1ff..56c8e3d031f4 100644 --- a/kernel/printk/printk_ringbuffer.c +++ b/kernel/printk/printk_ringbuffer.c @@ -1,11 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 +#include <kunit/visibility.h> #include <linux/kernel.h> #include <linux/irqflags.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/bug.h> #include "printk_ringbuffer.h" +#include "internal.h" /** * DOC: printk_ringbuffer overview @@ -303,6 +305,9 @@ * * desc_push_tail:B / desc_reserve:D * set descriptor reusable (state), then push descriptor tail (id) + * + * desc_update_last_finalized:A / desc_last_finalized_seq:A + * store finalized record, then set new highest finalized sequence number */ #define DATA_SIZE(data_ring) _DATA_SIZE((data_ring)->size_bits) @@ -389,25 +394,38 @@ static unsigned int to_blk_size(unsigned int size) * Sanity checker for reserve size. The ringbuffer code assumes that a data * block does not exceed the maximum possible size that could fit within the * ringbuffer. This function provides that basic size check so that the - * assumption is safe. + * assumption is safe. In particular, it guarantees that data_push_tail() will + * never attempt to push the tail beyond the head. */ static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size) { - struct prb_data_block *db = NULL; - + /* Data-less blocks take no space. */ if (size == 0) return true; /* - * Ensure the alignment padded size could possibly fit in the data - * array. The largest possible data block must still leave room for - * at least the ID of the next block. + * If data blocks were allowed to be larger than half the data ring + * size, a wrapping data block could require more space than the full + * ringbuffer. */ - size = to_blk_size(size); - if (size > DATA_SIZE(data_ring) - sizeof(db->id)) - return false; + return to_blk_size(size) <= DATA_SIZE(data_ring) / 2; +} - return true; +/* + * Compare the current and requested logical position and decide + * whether more space is needed. + * + * Return false when @lpos_current is already at or beyond @lpos_target. + * + * Also return false when the difference between the positions is bigger + * than the size of the data buffer. It might happen only when the caller + * raced with another CPU(s) which already made and used the space. + */ +static bool need_more_space(struct prb_data_ring *data_ring, + unsigned long lpos_current, + unsigned long lpos_target) +{ + return lpos_target - lpos_current - 1 < DATA_SIZE(data_ring); } /* Query the state of a descriptor. */ @@ -576,7 +594,7 @@ static bool data_make_reusable(struct printk_ringbuffer *rb, unsigned long id; /* Loop until @lpos_begin has advanced to or beyond @lpos_end. */ - while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) { + while (need_more_space(data_ring, lpos_begin, lpos_end)) { blk = to_block(data_ring, lpos_begin); /* @@ -667,7 +685,7 @@ static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos) * sees the new tail lpos, any descriptor states that transitioned to * the reusable state must already be visible. */ - while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) { + while (need_more_space(data_ring, tail_lpos, lpos)) { /* * Make all descriptors reusable that are associated with * data blocks before @lpos. 
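The need_more_space() helper centralizes the unsigned-distance trick that data_make_reusable() and data_push_tail() previously open-coded. A minimal userspace model of the check (the DATA_SIZE value and the sample positions below are made up for illustration; only the arithmetic mirrors the helper) shows how one comparison covers both the "target already reached" case and the "caller raced too far" case:

/*
 * Userspace model of the lpos distance check used by need_more_space().
 * DATA_SIZE and the sample positions are invented; only the unsigned
 * arithmetic mirrors the kernel helper.
 */
#include <stdio.h>
#include <stdbool.h>

#define DATA_SIZE	64UL	/* stand-in for DATA_SIZE(data_ring), a power of 2 */

static bool need_more_space(unsigned long lpos_current, unsigned long lpos_target)
{
	/*
	 * True only when @lpos_target is 1..DATA_SIZE bytes ahead of
	 * @lpos_current. If @lpos_current already reached or passed
	 * @lpos_target, the subtraction wraps to a huge value and the
	 * comparison fails.
	 */
	return lpos_target - lpos_current - 1 < DATA_SIZE;
}

int main(void)
{
	/* Target is ahead, within one buffer length: space must be made. */
	printf("%d\n", need_more_space(100, 140));			/* 1 */

	/* Current position already at or beyond the target: nothing to do. */
	printf("%d\n", need_more_space(140, 140));			/* 0 */
	printf("%d\n", need_more_space(200, 140));			/* 0 */

	/*
	 * Target more than DATA_SIZE ahead: only possible after racing with
	 * other CPUs that already made and reused the space.
	 */
	printf("%d\n", need_more_space(100, 100 + DATA_SIZE + 1));	/* 0 */
	return 0;
}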
@@ -998,6 +1016,17 @@ static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out) return true; } +static bool is_blk_wrapped(struct prb_data_ring *data_ring, + unsigned long begin_lpos, unsigned long next_lpos) +{ + /* + * Subtract one from next_lpos since it's not actually part of this data + * block. This allows perfectly fitting records to not wrap. + */ + return DATA_WRAPS(data_ring, begin_lpos) != + DATA_WRAPS(data_ring, next_lpos - 1); +} + /* Determine the end of a data block. */ static unsigned long get_next_lpos(struct prb_data_ring *data_ring, unsigned long lpos, unsigned int size) @@ -1009,7 +1038,7 @@ static unsigned long get_next_lpos(struct prb_data_ring *data_ring, next_lpos = lpos + size; /* First check if the data block does not wrap. */ - if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos)) + if (!is_blk_wrapped(data_ring, begin_lpos, next_lpos)) return next_lpos; /* Wrapping data blocks store their data at the beginning. */ @@ -1030,9 +1059,13 @@ static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size, unsigned long next_lpos; if (size == 0) { - /* Specify a data-less block. */ - blk_lpos->begin = NO_LPOS; - blk_lpos->next = NO_LPOS; + /* + * Data blocks are not created for empty lines. Instead, the + * reader will recognize these special lpos values and handle + * it appropriately. + */ + blk_lpos->begin = EMPTY_LINE_LPOS; + blk_lpos->next = EMPTY_LINE_LPOS; return NULL; } @@ -1043,8 +1076,17 @@ static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size, do { next_lpos = get_next_lpos(data_ring, begin_lpos, size); - if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) { - /* Failed to allocate, specify a data-less block. */ + /* + * data_check_size() prevents data block allocation that could + * cause illegal ringbuffer states. But double check that the + * used space will not be bigger than the ring buffer. Wrapped + * messages need to reserve more space, see get_next_lpos(). + * + * Specify a data-less block when the check or the allocation + * fails. + */ + if (WARN_ON_ONCE(next_lpos - begin_lpos > DATA_SIZE(data_ring)) || + !data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) { blk_lpos->begin = FAILED_LPOS; blk_lpos->next = FAILED_LPOS; return NULL; @@ -1073,7 +1115,7 @@ static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size, blk = to_block(data_ring, begin_lpos); blk->id = id; /* LMM(data_alloc:B) */ - if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) { + if (is_blk_wrapped(data_ring, begin_lpos, next_lpos)) { /* Wrapping data blocks store their data at the beginning. */ blk = to_block(data_ring, 0); @@ -1117,14 +1159,21 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size, return NULL; /* Keep track if @blk_lpos was a wrapping data block. */ - wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next)); + wrapped = is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next); size = to_blk_size(size); next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size); - /* If the data block does not increase, there is nothing to do. */ - if (head_lpos - next_lpos < DATA_SIZE(data_ring)) { + /* + * Use the current data block when the size does not increase, i.e. + * when @head_lpos is already able to accommodate the new @next_lpos. + * + * Note that need_more_space() could never return false here because + * the difference between the positions was bigger than the data + * buffer size. 
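A small userspace model of the new wrap test (size_bits and the example positions are invented; the shift stands in for the DATA_WRAPS() macro) illustrates why is_blk_wrapped() looks at next_lpos - 1: a block that ends exactly on a wrap boundary is still treated as non-wrapping:

/*
 * Userspace model of the wrap detection added as is_blk_wrapped().
 * SIZE_BITS and the example positions are invented for the demo.
 */
#include <stdio.h>
#include <stdbool.h>

#define SIZE_BITS	5			/* 32-byte data ring for the demo */
#define DATA_SIZE	(1UL << SIZE_BITS)
#define DATA_WRAPS(lpos)	((lpos) >> SIZE_BITS)

static bool is_blk_wrapped(unsigned long begin_lpos, unsigned long next_lpos)
{
	/*
	 * @next_lpos is the first position *after* the block, so look at
	 * next_lpos - 1: a block ending exactly on the wrap boundary is
	 * not considered wrapped.
	 */
	return DATA_WRAPS(begin_lpos) != DATA_WRAPS(next_lpos - 1);
}

int main(void)
{
	/* 8-byte block starting at lpos 16: fits within the current wrap. */
	printf("%d\n", is_blk_wrapped(16, 24));	/* 0 */

	/* Block ending exactly at the boundary (lpos 32): still not wrapped. */
	printf("%d\n", is_blk_wrapped(24, 32));	/* 0 */

	/* Block crossing the boundary: its data moves to the start of the next wrap. */
	printf("%d\n", is_blk_wrapped(28, 40));	/* 1 */
	return 0;
}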
The data block is reopened and can't get reused. + */ + if (!need_more_space(data_ring, head_lpos, next_lpos)) { if (wrapped) blk = to_block(data_ring, 0); else @@ -1132,8 +1181,18 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size, return &blk->data[0]; } - if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) + /* + * data_check_size() prevents data block reallocation that could + * cause illegal ringbuffer states. But double check that the + * new used space will not be bigger than the ring buffer. Wrapped + * messages need to reserve more space, see get_next_lpos(). + * + * Specify failure when the check or the allocation fails. + */ + if (WARN_ON_ONCE(next_lpos - blk_lpos->begin > DATA_SIZE(data_ring)) || + !data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) { return NULL; + } /* The memory barrier involvement is the same as data_alloc:A. */ if (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &head_lpos, @@ -1143,7 +1202,7 @@ static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size, blk = to_block(data_ring, blk_lpos->begin); - if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) { + if (is_blk_wrapped(data_ring, blk_lpos->begin, next_lpos)) { struct prb_data_block *old_blk = blk; /* Wrapping data blocks store their data at the beginning. */ @@ -1179,7 +1238,7 @@ static unsigned int space_used(struct prb_data_ring *data_ring, if (BLK_DATALESS(blk_lpos)) return 0; - if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) { + if (!is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next)) { /* Data block does not wrap. */ return (DATA_INDEX(data_ring, blk_lpos->next) - DATA_INDEX(data_ring, blk_lpos->begin)); @@ -1210,22 +1269,30 @@ static const char *get_data(struct prb_data_ring *data_ring, /* Data-less data block description. */ if (BLK_DATALESS(blk_lpos)) { - if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) { + /* + * Records that are just empty lines are also valid, even + * though they do not have a data block. For such records + * explicitly return empty string data to signify success. + */ + if (blk_lpos->begin == EMPTY_LINE_LPOS && + blk_lpos->next == EMPTY_LINE_LPOS) { *data_size = 0; return ""; } + + /* Data lost, invalid, or otherwise unavailable. */ return NULL; } - /* Regular data block: @begin less than @next and in same wrap. */ - if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) && - blk_lpos->begin < blk_lpos->next) { + /* Regular data block: @begin and @next in the same wrap. */ + if (!is_blk_wrapped(data_ring, blk_lpos->begin, blk_lpos->next)) { db = to_block(data_ring, blk_lpos->begin); *data_size = blk_lpos->next - blk_lpos->begin; /* Wrapping data block: @begin is one wrap behind @next. */ - } else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) == - DATA_WRAPS(data_ring, blk_lpos->next)) { + } else if (!is_blk_wrapped(data_ring, + blk_lpos->begin + DATA_SIZE(data_ring), + blk_lpos->next)) { db = to_block(data_ring, 0); *data_size = DATA_INDEX(data_ring, blk_lpos->next); @@ -1235,6 +1302,10 @@ static const char *get_data(struct prb_data_ring *data_ring, return NULL; } + /* Sanity check. Data-less blocks were handled earlier. */ + if (WARN_ON_ONCE(!data_check_size(data_ring, *data_size) || !*data_size)) + return NULL; + /* A valid data block will always be aligned to the ID size. 
*/ if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) || WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) { @@ -1442,19 +1513,117 @@ fail_reopen: } /* + * @last_finalized_seq value guarantees that all records up to and including + * this sequence number are finalized and can be read. The only exception are + * too old records which have already been overwritten. + * + * It is also guaranteed that @last_finalized_seq only increases. + * + * Be aware that finalized records following non-finalized records are not + * reported because they are not yet available to the reader. For example, + * a new record stored via printk() will not be available to a printer if + * it follows a record that has not been finalized yet. However, once that + * non-finalized record becomes finalized, @last_finalized_seq will be + * appropriately updated and the full set of finalized records will be + * available to the printer. And since each printk() caller will either + * directly print or trigger deferred printing of all available unprinted + * records, all printk() messages will get printed. + */ +static u64 desc_last_finalized_seq(struct printk_ringbuffer *rb) +{ + struct prb_desc_ring *desc_ring = &rb->desc_ring; + unsigned long ulseq; + + /* + * Guarantee the sequence number is loaded before loading the + * associated record in order to guarantee that the record can be + * seen by this CPU. This pairs with desc_update_last_finalized:A. + */ + ulseq = atomic_long_read_acquire(&desc_ring->last_finalized_seq + ); /* LMM(desc_last_finalized_seq:A) */ + + return __ulseq_to_u64seq(rb, ulseq); +} + +static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq, + struct printk_record *r, unsigned int *line_count); + +/* + * Check if there are records directly following @last_finalized_seq that are + * finalized. If so, update @last_finalized_seq to the latest of these + * records. It is not allowed to skip over records that are not yet finalized. + */ +static void desc_update_last_finalized(struct printk_ringbuffer *rb) +{ + struct prb_desc_ring *desc_ring = &rb->desc_ring; + u64 old_seq = desc_last_finalized_seq(rb); + unsigned long oldval; + unsigned long newval; + u64 finalized_seq; + u64 try_seq; + +try_again: + finalized_seq = old_seq; + try_seq = finalized_seq + 1; + + /* Try to find later finalized records. */ + while (_prb_read_valid(rb, &try_seq, NULL, NULL)) { + finalized_seq = try_seq; + try_seq++; + } + + /* No update needed if no later finalized record was found. */ + if (finalized_seq == old_seq) + return; + + oldval = __u64seq_to_ulseq(old_seq); + newval = __u64seq_to_ulseq(finalized_seq); + + /* + * Set the sequence number of a later finalized record that has been + * seen. + * + * Guarantee the record data is visible to other CPUs before storing + * its sequence number. This pairs with desc_last_finalized_seq:A. + * + * Memory barrier involvement: + * + * If desc_last_finalized_seq:A reads from + * desc_update_last_finalized:A, then desc_read:A reads from + * _prb_commit:B. + * + * Relies on: + * + * RELEASE from _prb_commit:B to desc_update_last_finalized:A + * matching + * ACQUIRE from desc_last_finalized_seq:A to desc_read:A + * + * Note: _prb_commit:B and desc_update_last_finalized:A can be + * different CPUs. However, the desc_update_last_finalized:A + * CPU (which performs the release) must have previously seen + * _prb_commit:B. 
+ */ + if (!atomic_long_try_cmpxchg_release(&desc_ring->last_finalized_seq, + &oldval, newval)) { /* LMM(desc_update_last_finalized:A) */ + old_seq = __ulseq_to_u64seq(rb, oldval); + goto try_again; + } +} + +/* * Attempt to finalize a specified descriptor. If this fails, the descriptor * is either already final or it will finalize itself when the writer commits. */ -static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id) +static void desc_make_final(struct printk_ringbuffer *rb, unsigned long id) { + struct prb_desc_ring *desc_ring = &rb->desc_ring; unsigned long prev_state_val = DESC_SV(id, desc_committed); struct prb_desc *d = to_desc(desc_ring, id); - atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val, - DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */ - - /* Best effort to remember the last finalized @id. */ - atomic_long_set(&desc_ring->last_finalized_id, id); + if (atomic_long_try_cmpxchg_relaxed(&d->state_var, &prev_state_val, + DESC_SV(id, desc_finalized))) { /* LMM(desc_make_final:A) */ + desc_update_last_finalized(rb); + } } /** @@ -1550,7 +1719,7 @@ bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb, * readers. (For seq==0 there is no previous descriptor.) */ if (info->seq > 0) - desc_make_final(desc_ring, DESC_ID(id - 1)); + desc_make_final(rb, DESC_ID(id - 1)); r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id); /* If text data allocation fails, a data-less record is committed. */ @@ -1571,6 +1740,7 @@ fail: memset(r, 0, sizeof(*r)); return false; } +EXPORT_SYMBOL_IF_KUNIT(prb_reserve); /* Commit the data (possibly finalizing it) and restore interrupts. */ static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val) @@ -1643,8 +1813,9 @@ void prb_commit(struct prb_reserved_entry *e) */ head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */ if (head_id != e->id) - desc_make_final(desc_ring, e->id); + desc_make_final(e->rb, e->id); } +EXPORT_SYMBOL_IF_KUNIT(prb_commit); /** * prb_final_commit() - Commit and finalize (previously reserved) data to @@ -1663,12 +1834,9 @@ void prb_commit(struct prb_reserved_entry *e) */ void prb_final_commit(struct prb_reserved_entry *e) { - struct prb_desc_ring *desc_ring = &e->rb->desc_ring; - _prb_commit(e, desc_finalized); - /* Best effort to remember the last finalized @id. */ - atomic_long_set(&desc_ring->last_finalized_id, e->id); + desc_update_last_finalized(e->rb); } /* @@ -1735,7 +1903,7 @@ static bool copy_data(struct prb_data_ring *data_ring, if (!buf || !buf_size) return true; - data_size = min_t(u16, buf_size, len); + data_size = min_t(unsigned int, buf_size, len); memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */ return true; @@ -1832,7 +2000,7 @@ static int prb_read(struct printk_ringbuffer *rb, u64 seq, } /* Get the sequence number of the tail descriptor. */ -static u64 prb_first_seq(struct printk_ringbuffer *rb) +u64 prb_first_seq(struct printk_ringbuffer *rb) { struct prb_desc_ring *desc_ring = &rb->desc_ring; enum desc_state d_state; @@ -1875,12 +2043,123 @@ static u64 prb_first_seq(struct printk_ringbuffer *rb) return seq; } +/** + * prb_next_reserve_seq() - Get the sequence number after the most recently + * reserved record. + * + * @rb: The ringbuffer to get the sequence number from. + * + * This is the public function available to readers to see what sequence + * number will be assigned to the next reserved record. 
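The acquire/release pairing described above is the heart of the last_finalized_seq scheme: the record data must be published before its sequence number, and the sequence number must be read before the data. A stripped-down userspace model using C11 atomics (the record array, sizes and function names are invented, and a plain release store stands in for the kernel's cmpxchg-release loop, which additionally keeps the value from ever moving backwards) shows the same ordering:

/*
 * Userspace model of the publish/consume ordering behind
 * last_finalized_seq, using C11 atomics instead of atomic_long_*().
 */
#include <stdatomic.h>
#include <stdio.h>

#define NRECS	16

static char records[NRECS][32];
static atomic_ulong last_finalized_seq;

/* Writer side: fill in the record, then publish its sequence number. */
static void finalize_record(unsigned long seq, const char *text)
{
	snprintf(records[seq % NRECS], sizeof(records[0]), "%s", text);

	/*
	 * Release: everything written above is visible to any reader that
	 * later acquire-loads a value >= @seq. This mirrors
	 * desc_update_last_finalized:A.
	 */
	atomic_store_explicit(&last_finalized_seq, seq, memory_order_release);
}

/* Reader side: acquire the sequence number, then the data is safe to read. */
static const char *read_latest(unsigned long *seq_out)
{
	/* Mirrors desc_last_finalized_seq:A. */
	unsigned long seq = atomic_load_explicit(&last_finalized_seq,
						 memory_order_acquire);

	*seq_out = seq;
	return records[seq % NRECS];
}

int main(void)
{
	unsigned long seq;
	const char *text;

	finalize_record(1, "hello");
	finalize_record(2, "printk");

	text = read_latest(&seq);
	printf("last finalized seq=%lu text=%s\n", seq, text);
	return 0;
}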
+ * + * Note that depending on the situation, this value can be equal to or + * higher than the sequence number returned by prb_next_seq(). + * + * Context: Any context. + * Return: The sequence number that will be assigned to the next record + * reserved. + */ +u64 prb_next_reserve_seq(struct printk_ringbuffer *rb) +{ + struct prb_desc_ring *desc_ring = &rb->desc_ring; + unsigned long last_finalized_id; + atomic_long_t *state_var; + u64 last_finalized_seq; + unsigned long head_id; + struct prb_desc desc; + unsigned long diff; + struct prb_desc *d; + int err; + + /* + * It may not be possible to read a sequence number for @head_id. + * So the ID of @last_finailzed_seq is used to calculate what the + * sequence number of @head_id will be. + */ + +try_again: + last_finalized_seq = desc_last_finalized_seq(rb); + + /* + * @head_id is loaded after @last_finalized_seq to ensure that + * it points to the record with @last_finalized_seq or newer. + * + * Memory barrier involvement: + * + * If desc_last_finalized_seq:A reads from + * desc_update_last_finalized:A, then + * prb_next_reserve_seq:A reads from desc_reserve:D. + * + * Relies on: + * + * RELEASE from desc_reserve:D to desc_update_last_finalized:A + * matching + * ACQUIRE from desc_last_finalized_seq:A to prb_next_reserve_seq:A + * + * Note: desc_reserve:D and desc_update_last_finalized:A can be + * different CPUs. However, the desc_update_last_finalized:A CPU + * (which performs the release) must have previously seen + * desc_read:C, which implies desc_reserve:D can be seen. + */ + head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_next_reserve_seq:A) */ + + d = to_desc(desc_ring, last_finalized_seq); + state_var = &d->state_var; + + /* Extract the ID, used to specify the descriptor to read. */ + last_finalized_id = DESC_ID(atomic_long_read(state_var)); + + /* Ensure @last_finalized_id is correct. */ + err = desc_read_finalized_seq(desc_ring, last_finalized_id, last_finalized_seq, &desc); + + if (err == -EINVAL) { + if (last_finalized_seq == 0) { + /* + * No record has been finalized or even reserved yet. + * + * The @head_id is initialized such that the first + * increment will yield the first record (seq=0). + * Handle it separately to avoid a negative @diff + * below. + */ + if (head_id == DESC0_ID(desc_ring->count_bits)) + return 0; + + /* + * One or more descriptors are already reserved. Use + * the descriptor ID of the first one (@seq=0) for + * the @diff below. + */ + last_finalized_id = DESC0_ID(desc_ring->count_bits) + 1; + } else { + /* Record must have been overwritten. Try again. */ + goto try_again; + } + } + + /* Diff of known descriptor IDs to compute related sequence numbers. */ + diff = head_id - last_finalized_id; + + /* + * @head_id points to the most recently reserved record, but this + * function returns the sequence number that will be assigned to the + * next (not yet reserved) record. Thus +1 is needed. + */ + return (last_finalized_seq + diff + 1); +} + /* - * Non-blocking read of a record. Updates @seq to the last finalized record - * (which may have no data available). + * Non-blocking read of a record. + * + * On success @seq is updated to the record that was read and (if provided) + * @r and @line_count will contain the read/calculated data. * - * See the description of prb_read_valid() and prb_read_valid_info() - * for details. + * On failure @seq is updated to a record that is not yet available to the + * reader, but it will be the next record available to the reader. 
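The returned value is derived purely from the unsigned difference of two descriptor IDs. A tiny sketch with made-up numbers shows that the subtraction remains correct even when the ID counter has numerically wrapped past zero:

/* Userspace illustration of the ID-difference arithmetic in prb_next_reserve_seq(). */
#include <stdio.h>

int main(void)
{
	unsigned long last_finalized_id = (unsigned long)-2;	/* near the wrap */
	unsigned long head_id = last_finalized_id + 5;		/* 5 newer reservations */
	unsigned long long last_finalized_seq = 41;

	/*
	 * Unsigned subtraction yields the number of records reserved after
	 * the last finalized one, even though @head_id wrapped past 0.
	 */
	unsigned long diff = head_id - last_finalized_id;

	/* +1: the next record to be reserved comes after the current head. */
	printf("next reserve seq = %llu\n", last_finalized_seq + diff + 1);	/* 47 */
	return 0;
}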
+ * + * Note: When the current CPU is in panic, this function will skip over any + * non-existent/non-finalized records in order to allow the panic CPU + * to print any and all records that have been finalized. */ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq, struct printk_record *r, unsigned int *line_count) @@ -1899,12 +2178,35 @@ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq, *seq = tail_seq; } else if (err == -ENOENT) { - /* Record exists, but no data available. Skip. */ + /* Record exists, but the data was lost. Skip. */ (*seq)++; } else { - /* Non-existent/non-finalized record. Must stop. */ - return false; + /* + * Non-existent/non-finalized record. Must stop. + * + * For panic situations it cannot be expected that + * non-finalized records will become finalized. But + * there may be other finalized records beyond that + * need to be printed for a panic situation. If this + * is the panic CPU, skip this + * non-existent/non-finalized record unless non-panic + * CPUs are still running and their debugging is + * explicitly enabled. + * + * Note that new messages printed on panic CPU are + * finalized when we are here. The only exception + * might be the last message without trailing newline. + * But it would have the sequence number returned + * by "prb_next_reserve_seq() - 1". + */ + if (panic_on_this_cpu() && + (!debug_non_panic_cpus || legacy_allow_panic_sync) && + ((*seq + 1) < prb_next_reserve_seq(rb))) { + (*seq)++; + } else { + return false; + } } } @@ -1932,13 +2234,14 @@ static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq, * On success, the reader must check r->info.seq to see which record was * actually read. This allows the reader to detect dropped records. * - * Failure means @seq refers to a not yet written record. + * Failure means @seq refers to a record not yet available to the reader. */ bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq, struct printk_record *r) { return _prb_read_valid(rb, &seq, r, NULL); } +EXPORT_SYMBOL_IF_KUNIT(prb_read_valid); /** * prb_read_valid_info() - Non-blocking read of meta data for a requested @@ -1962,7 +2265,7 @@ bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq, * On success, the reader must check info->seq to see which record meta data * was actually read. This allows the reader to detect dropped records. * - * Failure means @seq refers to a not yet written record. + * Failure means @seq refers to a record not yet available to the reader. */ bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq, struct printk_info *info, unsigned int *line_count) @@ -2008,7 +2311,9 @@ u64 prb_first_valid_seq(struct printk_ringbuffer *rb) * newest sequence number available to readers will be. * * This provides readers a sequence number to jump to if all currently - * available records should be skipped. + * available records should be skipped. It is guaranteed that all records + * previous to the returned value have been finalized and are (or were) + * available to the reader. * * Context: Any context. * Return: The sequence number of the next newest (not yet available) record @@ -2016,34 +2321,19 @@ u64 prb_first_valid_seq(struct printk_ringbuffer *rb) */ u64 prb_next_seq(struct printk_ringbuffer *rb) { - struct prb_desc_ring *desc_ring = &rb->desc_ring; - enum desc_state d_state; - unsigned long id; u64 seq; - /* Check if the cached @id still points to a valid @seq. 
*/ - id = atomic_long_read(&desc_ring->last_finalized_id); - d_state = desc_read(desc_ring, id, NULL, &seq, NULL); + seq = desc_last_finalized_seq(rb); - if (d_state == desc_finalized || d_state == desc_reusable) { - /* - * Begin searching after the last finalized record. - * - * On 0, the search must begin at 0 because of hack#2 - * of the bootstrapping phase it is not known if a - * record at index 0 exists. - */ - if (seq != 0) - seq++; - } else { - /* - * The information about the last finalized sequence number - * has gone. It should happen only when there is a flood of - * new messages and the ringbuffer is rapidly recycled. - * Give up and start from the beginning. - */ - seq = 0; - } + /* + * Begin searching after the last finalized record. + * + * On 0, the search must begin at 0 because of hack#2 + * of the bootstrapping phase it is not known if a + * record at index 0 exists. + */ + if (seq != 0) + seq++; /* * The information about the last finalized @seq might be inaccurate. @@ -2085,7 +2375,7 @@ void prb_init(struct printk_ringbuffer *rb, rb->desc_ring.infos = infos; atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits)); atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits)); - atomic_long_set(&rb->desc_ring.last_finalized_id, DESC0_ID(descbits)); + atomic_long_set(&rb->desc_ring.last_finalized_seq, 0); rb->text_data_ring.size_bits = textbits; rb->text_data_ring.data = text_buf; @@ -2101,6 +2391,7 @@ void prb_init(struct printk_ringbuffer *rb, infos[0].seq = -(u64)_DESCS_COUNT(descbits); infos[_DESCS_COUNT(descbits) - 1].seq = 0; } +EXPORT_SYMBOL_IF_KUNIT(prb_init); /** * prb_record_text_space() - Query the full actual used ringbuffer space for |
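For reference, a reader loop in the style of the sample code documented in printk_ringbuffer.h shows how the relaxed "not yet available to the reader" semantics are consumed in practice; the ringbuffer pointer, the text buffer size and the commented-out consume step are placeholders:

/*
 * Sketch of a reader loop against the API touched by this diff. It follows
 * the sample reader pattern from printk_ringbuffer.h; @rb and the consume
 * step are placeholders.
 */
#include <linux/printk.h>
#include "printk_ringbuffer.h"

static void drain_records(struct printk_ringbuffer *rb, u64 seq)
{
	struct printk_info info;
	struct printk_record r;
	char text[128];		/* arbitrary demo size */

	prb_rec_init_rd(&r, &info, text, sizeof(text));

	while (prb_read_valid(rb, seq, &r)) {
		/*
		 * On success the record actually read may be newer than the
		 * one requested: anything in between was overwritten
		 * (dropped) before it could be read.
		 */
		if (r.info->seq != seq)
			pr_info("dropped %llu records\n", r.info->seq - seq);

		/* consume(r.text_buf, r.info->text_len); */

		seq = r.info->seq + 1;
	}

	/*
	 * Failure only means the record with sequence number @seq is not yet
	 * available to this reader; retry later from the same @seq.
	 */
}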
