Diffstat (limited to 'drivers/md/dm-vdo/data-vio.c')
-rw-r--r-- | drivers/md/dm-vdo/data-vio.c | 54
1 file changed, 14 insertions, 40 deletions
diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c
index 94f6f1ccfb7d..810002747091 100644
--- a/drivers/md/dm-vdo/data-vio.c
+++ b/drivers/md/dm-vdo/data-vio.c
@@ -327,8 +327,9 @@ static u32 __must_check pack_status(struct data_vio_compression_status status)
 
 /**
  * set_data_vio_compression_status() - Set the compression status of a data_vio.
- * @state: The expected current status of the data_vio.
- * @new_state: The status to set.
+ * @data_vio: The data_vio to change.
+ * @status: The expected current status of the data_vio.
+ * @new_status: The status to set.
  *
  * Return: true if the new status was set, false if the data_vio's compression status did not
  *         match the expected state, and so was left unchanged.
@@ -501,6 +502,7 @@ static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lb
 
 	memset(&data_vio->record_name, 0, sizeof(data_vio->record_name));
 	memset(&data_vio->duplicate, 0, sizeof(data_vio->duplicate));
+	vdo_reset_completion(&data_vio->decrement_completion);
 	vdo_reset_completion(completion);
 	completion->error_handler = handle_data_vio_error;
 	set_data_vio_logical_callback(data_vio, attempt_logical_block_lock);
@@ -604,8 +606,7 @@ static void assign_discard_permit(struct limiter *limiter)
 
 static void get_waiters(struct limiter *limiter)
 {
-	bio_list_merge(&limiter->waiters, &limiter->new_waiters);
-	bio_list_init(&limiter->new_waiters);
+	bio_list_merge_init(&limiter->waiters, &limiter->new_waiters);
 }
 
 static inline struct data_vio *get_available_data_vio(struct data_vio_pool *pool)
@@ -836,7 +837,7 @@ static void destroy_data_vio(struct data_vio *data_vio)
  * @vdo: The vdo to which the pool will belong.
  * @pool_size: The number of data_vios in the pool.
  * @discard_limit: The maximum number of data_vios which may be used for discards.
- * @pool: A pointer to hold the newly allocated pool.
+ * @pool_ptr: A pointer to hold the newly allocated pool.
  */
 int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
 		       data_vio_count_t discard_limit, struct data_vio_pool **pool_ptr)
@@ -1074,35 +1075,6 @@ void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios)
 	spin_unlock(&pool->lock);
 }
 
-data_vio_count_t get_data_vio_pool_active_discards(struct data_vio_pool *pool)
-{
-	return READ_ONCE(pool->discard_limiter.busy);
-}
-
-data_vio_count_t get_data_vio_pool_discard_limit(struct data_vio_pool *pool)
-{
-	return READ_ONCE(pool->discard_limiter.limit);
-}
-
-data_vio_count_t get_data_vio_pool_maximum_discards(struct data_vio_pool *pool)
-{
-	return READ_ONCE(pool->discard_limiter.max_busy);
-}
-
-int set_data_vio_pool_discard_limit(struct data_vio_pool *pool, data_vio_count_t limit)
-{
-	if (get_data_vio_pool_request_limit(pool) < limit) {
-		// The discard limit may not be higher than the data_vio limit.
-		return -EINVAL;
-	}
-
-	spin_lock(&pool->lock);
-	pool->discard_limiter.limit = limit;
-	spin_unlock(&pool->lock);
-
-	return VDO_SUCCESS;
-}
-
 data_vio_count_t get_data_vio_pool_active_requests(struct data_vio_pool *pool)
 {
 	return READ_ONCE(pool->limiter.busy);
@@ -1274,12 +1246,14 @@ static void clean_hash_lock(struct vdo_completion *completion)
 static void finish_cleanup(struct data_vio *data_vio)
 {
 	struct vdo_completion *completion = &data_vio->vio.completion;
+	u32 discard_size = min_t(u32, data_vio->remaining_discard,
+				 VDO_BLOCK_SIZE - data_vio->offset);
 
 	VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL,
 			    "complete data_vio has no allocation lock");
 	VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL,
 			    "complete data_vio has no hash lock");
-	if ((data_vio->remaining_discard <= VDO_BLOCK_SIZE) ||
+	if ((data_vio->remaining_discard <= discard_size) ||
 	    (completion->result != VDO_SUCCESS)) {
 		struct data_vio_pool *pool = completion->vdo->data_vio_pool;
 
@@ -1288,12 +1262,12 @@ static void finish_cleanup(struct data_vio *data_vio)
 		return;
 	}
 
-	data_vio->remaining_discard -= min_t(u32, data_vio->remaining_discard,
-					     VDO_BLOCK_SIZE - data_vio->offset);
+	data_vio->remaining_discard -= discard_size;
 	data_vio->is_partial = (data_vio->remaining_discard < VDO_BLOCK_SIZE);
 	data_vio->read = data_vio->is_partial;
 	data_vio->offset = 0;
 	completion->requeue = true;
+	data_vio->first_reference_operation_complete = false;
 	launch_data_vio(data_vio, data_vio->logical.lbn + 1);
 }
 
@@ -1966,7 +1940,8 @@ static void allocate_block(struct vdo_completion *completion)
 		.state = VDO_MAPPING_STATE_UNCOMPRESSED,
 	};
 
-	if (data_vio->fua) {
+	if (data_vio->fua ||
+	    data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) {
 		prepare_for_dedupe(data_vio);
 		return;
 	}
@@ -2043,7 +2018,6 @@ void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
 		return;
 	}
 
-
 	/*
 	 * We don't need to write any data, so skip allocation and just update the block map and
 	 * reference counts (via the journal).
@@ -2052,7 +2026,7 @@ void continue_data_vio_with_block_map_slot(struct vdo_completion *completion)
 	if (data_vio->is_zero)
 		data_vio->new_mapped.state = VDO_MAPPING_STATE_UNCOMPRESSED;
 
-	if (data_vio->remaining_discard > VDO_BLOCK_SIZE) {
+	if (data_vio->remaining_discard > (u32) (VDO_BLOCK_SIZE - data_vio->offset)) {
 		/* This is not the final block of a discard so we can't acknowledge it yet. */
 		update_metadata_for_data_vio_write(data_vio, NULL);
 		return;
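For orientation only, and not part of the commit: the behavioral core of this diff is that finish_cleanup() now computes the size of the current discard chunk once, as min_t(u32, remaining_discard, VDO_BLOCK_SIZE - offset), and reuses that value both to decide whether the final block of the discard has been reached and to shrink remaining_discard before relaunching at the next logical block. The standalone sketch below illustrates that chunking pattern under assumed names (BLOCK_SIZE, struct discard_state, process_one_chunk) that do not exist in the VDO sources.

/*
 * Standalone sketch (not VDO code): a multi-block discard shrinks by one
 * block-aligned chunk per pass, mirroring the discard_size computation that
 * the patch hoists to the top of finish_cleanup().
 */
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 4096u

struct discard_state {
	uint32_t remaining; /* bytes of the discard still to process */
	uint32_t offset;    /* byte offset into the current block */
};

/* Process one block's worth of the discard; return 1 when this was the last chunk. */
static int process_one_chunk(struct discard_state *s)
{
	/* Same shape as: min_t(u32, remaining_discard, VDO_BLOCK_SIZE - offset) */
	uint32_t chunk = s->remaining < (BLOCK_SIZE - s->offset) ?
			 s->remaining : (BLOCK_SIZE - s->offset);

	if (s->remaining <= chunk)
		return 1; /* final block: nothing left after this chunk */

	s->remaining -= chunk;
	s->offset = 0; /* subsequent blocks always start at offset 0 */
	return 0;
}

int main(void)
{
	/* A 10000-byte discard starting 512 bytes into the first block. */
	struct discard_state s = { .remaining = 10000, .offset = 512 };
	int pass = 1;

	while (!process_one_chunk(&s))
		printf("pass %d: %u bytes left\n", pass++, s.remaining);
	printf("pass %d: final block\n", pass);
	return 0;
}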