Diffstat (limited to 'drivers/md/dm-vdo/dedupe.c')
 drivers/md/dm-vdo/dedupe.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
index 117266e1b3ae..3c58b941e067 100644
--- a/drivers/md/dm-vdo/dedupe.c
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -148,11 +148,6 @@
#include "vdo.h"
#include "wait-queue.h"
-struct uds_attribute {
- struct attribute attr;
- const char *(*show_string)(struct hash_zones *hash_zones);
-};
-
#define DEDUPE_QUERY_TIMER_IDLE 0
#define DEDUPE_QUERY_TIMER_RUNNING 1
#define DEDUPE_QUERY_TIMER_FIRED 2
@@ -231,7 +226,7 @@ struct hash_lock {
* A list containing the data VIOs sharing this lock, all having the same record name and
* data block contents, linked by their hash_lock_node fields.
*/
- struct list_head duplicate_ring;
+ struct list_head duplicate_vios;
/* The number of data_vios sharing this lock instance */
data_vio_count_t reference_count;
@@ -348,7 +343,7 @@ static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *l
{
memset(lock, 0, sizeof(*lock));
INIT_LIST_HEAD(&lock->pool_node);
- INIT_LIST_HEAD(&lock->duplicate_ring);
+ INIT_LIST_HEAD(&lock->duplicate_vios);
vdo_waitq_init(&lock->waiters);
list_add_tail(&lock->pool_node, &zone->lock_pool);
}
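
The rename from duplicate_ring to duplicate_vios matches how <linux/list.h> is actually used here: each member embeds a struct list_head and is chained onto a head that the owner initializes and reuses. A minimal sketch of the same pool pattern, with illustrative (non-VDO) names:

    #include <linux/list.h>
    #include <linux/string.h>

    struct example_lock {
            struct list_head pool_node;      /* links the lock into a free pool */
            struct list_head duplicate_vios; /* head of the list of sharing VIOs */
    };

    /* Reset a lock and park it on a free pool, mirroring
     * return_hash_lock_to_pool() above. */
    static void example_return_to_pool(struct list_head *pool,
                                       struct example_lock *lock)
    {
            memset(lock, 0, sizeof(*lock));
            INIT_LIST_HEAD(&lock->pool_node);
            INIT_LIST_HEAD(&lock->duplicate_vios);
            list_add_tail(&lock->pool_node, pool);
    }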
@@ -446,7 +441,7 @@ static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)
VDO_ASSERT_LOG_ONLY(data_vio->hash_zone != NULL,
"must have a hash zone when holding a hash lock");
VDO_ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry),
- "must be on a hash lock ring when holding a hash lock");
+ "must be on a hash lock list when holding a hash lock");
VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 0,
"hash lock reference must be counted");
@@ -469,10 +464,10 @@ static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)
if (new_lock != NULL) {
/*
- * Keep all data_vios sharing the lock on a ring since they can complete in any
+ * Keep all data_vios sharing the lock on a list since they can complete in any
* order and we'll always need a pointer to one to compare data.
*/
- list_move_tail(&data_vio->hash_lock_entry, &new_lock->duplicate_ring);
+ list_move_tail(&data_vio->hash_lock_entry, &new_lock->duplicate_vios);
new_lock->reference_count += 1;
if (new_lock->max_references < new_lock->reference_count)
new_lock->max_references = new_lock->reference_count;
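
list_move_tail() deletes the entry from whatever list it currently sits on and appends it to the new head, so a data_vio can migrate between locks without ever being half-linked. The shape of the idiom, again with illustrative names:

    #include <linux/list.h>

    struct example_vio {
            struct list_head hash_lock_entry;
    };

    /* Move @vio onto @new_head; list_move_tail() is a del + add_tail on
     * a node that must already be initialized or on some list. */
    static void example_share_lock(struct example_vio *vio,
                                   struct list_head *new_head)
    {
            list_move_tail(&vio->hash_lock_entry, new_head);
    }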
@@ -570,7 +565,7 @@ static void wait_on_hash_lock(struct hash_lock *lock, struct data_vio *data_vio)
* @waiter: The data_vio's waiter link.
* @context: Not used.
*/
-static void abort_waiter(struct vdo_waiter *waiter, void *context __always_unused)
+static void abort_waiter(struct vdo_waiter *waiter, void __always_unused *context)
{
write_data_vio(vdo_waiter_as_data_vio(waiter));
}
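
In include/linux/compiler_attributes.h, __always_unused expands to __attribute__((__unused__)). The hunk only moves the attribute from after the parameter name to between the base type and the pointer declarator; both spellings compile with gcc, so this reads as a consistency fix rather than a behavioral one:

    /* From include/linux/compiler_attributes.h: */
    #define __always_unused __attribute__((__unused__))

    /* Placement used by the patch: attribute ahead of the declarator. */
    static void abort_waiter(struct vdo_waiter *waiter,
                             void __always_unused *context);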
@@ -734,6 +729,7 @@ static void process_update_result(struct data_vio *agent)
!change_context_state(context, DEDUPE_CONTEXT_COMPLETE, DEDUPE_CONTEXT_IDLE))
return;
+ agent->dedupe_context = NULL;
release_context(context);
}
@@ -1653,6 +1649,7 @@ static void process_query_result(struct data_vio *agent)
if (change_context_state(context, DEDUPE_CONTEXT_COMPLETE, DEDUPE_CONTEXT_IDLE)) {
agent->is_duplicate = decode_uds_advice(context);
+ agent->dedupe_context = NULL;
release_context(context);
}
}
@@ -1730,7 +1727,7 @@ static void report_bogus_lock_state(struct hash_lock *lock, struct data_vio *dat
/**
* vdo_continue_hash_lock() - Continue the processing state after writing, compressing, or
* deduplicating.
- * @data_vio: The data_vio to continue processing in its hash lock.
+ * @completion: The data_vio completion to continue processing in its hash lock.
*
* Asynchronously continue processing a data_vio in its hash lock after it has finished writing,
* compressing, or deduplicating, so it can share the result with any data_vios waiting in the hash
@@ -1792,10 +1789,10 @@ static bool is_hash_collision(struct hash_lock *lock, struct data_vio *candidate
struct hash_zone *zone;
bool collides;
- if (list_empty(&lock->duplicate_ring))
+ if (list_empty(&lock->duplicate_vios))
return false;
- lock_holder = list_first_entry(&lock->duplicate_ring, struct data_vio,
+ lock_holder = list_first_entry(&lock->duplicate_vios, struct data_vio,
hash_lock_entry);
zone = candidate->hash_zone;
collides = !blocks_equal(lock_holder->vio.data, candidate->vio.data);
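
list_first_entry() is only defined on a non-empty list, which is why the list_empty() check stays in front of it. The guard-then-fetch idiom, sketched with illustrative names:

    #include <linux/list.h>

    struct example_vio {
            struct list_head hash_lock_entry;
    };

    static struct example_vio *example_first_holder(struct list_head *head)
    {
            if (list_empty(head))
                    return NULL;    /* no holder to compare against */

            return list_first_entry(head, struct example_vio,
                                    hash_lock_entry);
    }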
@@ -1818,7 +1815,7 @@ static inline int assert_hash_lock_preconditions(const struct data_vio *data_vio
return result;
result = VDO_ASSERT(list_empty(&data_vio->hash_lock_entry),
- "must not already be a member of a hash lock ring");
+ "must not already be a member of a hash lock list");
if (result != VDO_SUCCESS)
return result;
@@ -1828,7 +1825,7 @@ static inline int assert_hash_lock_preconditions(const struct data_vio *data_vio
/**
* vdo_acquire_hash_lock() - Acquire or share a lock on a record name.
- * @data_vio: The data_vio acquiring a lock on its record name.
+ * @completion: The data_vio completion acquiring a lock on its record name.
*
* Acquire or share a lock on the hash (record name) of the data in a data_vio, updating the
* data_vio to reference the lock. This must only be called in the correct thread for the zone. In
@@ -1945,8 +1942,8 @@ void vdo_release_hash_lock(struct data_vio *data_vio)
"returned hash lock must not be in use with state %s",
get_hash_lock_state_name(lock->state));
VDO_ASSERT_LOG_ONLY(list_empty(&lock->pool_node),
- "hash lock returned to zone must not be in a pool ring");
- VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring),
+ "hash lock returned to zone must not be in a pool list");
+ VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_vios),
"hash lock returned to zone must not reference DataVIOs");
return_hash_lock_to_pool(zone, lock);
@@ -2181,6 +2178,7 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones)
vdo_set_dedupe_index_timeout_interval(vdo_dedupe_index_timeout_interval);
vdo_set_dedupe_index_min_timer_interval(vdo_dedupe_index_min_timer_interval);
+ spin_lock_init(&zones->lock);
/*
* Since we will save up the timeouts that would have been reported but were ratelimited,
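
The added spin_lock_init() puts zones->lock into a known state before any timer or dedupe context can contend on it; taking an uninitialized spinlock is undefined and, with lockdep enabled, can warn at the first acquisition. A minimal sketch of the ordering, with an illustrative structure:

    #include <linux/spinlock.h>

    struct example_zones {
            spinlock_t lock;
    };

    static void example_init(struct example_zones *zones)
    {
            /* Initialize before anything else can reach the lock... */
            spin_lock_init(&zones->lock);

            /* ...after which acquisition is well defined. */
            spin_lock(&zones->lock);
            spin_unlock(&zones->lock);
    }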
@@ -2263,7 +2261,7 @@ static void check_for_drain_complete(struct hash_zone *zone)
if ((atomic_read(&zone->timer_state) == DEDUPE_QUERY_TIMER_IDLE) ||
change_timer_state(zone, DEDUPE_QUERY_TIMER_RUNNING,
DEDUPE_QUERY_TIMER_IDLE)) {
- del_timer_sync(&zone->timer);
+ timer_delete_sync(&zone->timer);
} else {
/*
* There is an in flight time-out, which must get processed before we can continue.
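
timer_delete_sync() is the current name for the old del_timer_sync(); the kernel timer API was renamed and the old spelling kept only as a deprecated wrapper, so this hunk is a mechanical conversion. A self-contained sketch of the setup/teardown pairing (names illustrative):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void example_timeout(struct timer_list *t)
    {
            /* Runs in timer (softirq) context. */
    }

    static struct timer_list example_timer;

    static void example_start(void)
    {
            timer_setup(&example_timer, example_timeout, 0);
            mod_timer(&example_timer, jiffies + HZ);
    }

    static void example_stop(void)
    {
            /* Also waits for a concurrently running handler to finish,
             * just as del_timer_sync() did. */
            timer_delete_sync(&example_timer);
    }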
@@ -2326,6 +2324,7 @@ static void timeout_index_operations_callback(struct vdo_completion *completion)
* send its requestor on its way.
*/
list_del_init(&context->list_entry);
+ context->requestor->dedupe_context = NULL;
continue_data_vio(context->requestor);
timed_out++;
}
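
Together with the two agent->dedupe_context = NULL additions earlier in the patch, this hunk closes the same window: once a context is released or its requestor is sent on its way, the data_vio must not keep a pointer to it, or a later path could dereference a context that has already been recycled. The pattern, with illustrative names:

    struct example_context;

    struct example_vio {
            struct example_context *dedupe_context;
    };

    static void example_release(struct example_vio *vio,
                                void (*release)(struct example_context *))
    {
            struct example_context *context = vio->dedupe_context;

            /* Clear the back-pointer before recycling the context so no
             * later path can reach the stale object. */
            vio->dedupe_context = NULL;
            release(context);
    }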
@@ -2681,7 +2680,8 @@ static void get_index_statistics(struct hash_zones *zones,
/**
* vdo_get_dedupe_statistics() - Tally the statistics from all the hash zones and the UDS index.
- * @hash_zones: The hash zones to query
+ * @zones: The hash zones to query
+ * @stats: A structure to store the statistics
*
* Return: The sum of the hash lock statistics from all hash zones plus the statistics from the UDS
* index
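
Three of the hunks in this patch are kernel-doc repairs: the tags in the comment must name the function's actual parameters (@completion, @zones, @stats), or scripts/kernel-doc emits "Function parameter ... not described" warnings. The expected shape, shown on a made-up function:

    /**
     * example_get_stats() - Tally statistics from all zones.
     * @zones: The hash zones to query.
     * @stats: A structure to store the statistics.
     *
     * Return: VDO_SUCCESS or an error code.
     */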