/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef _LINUX_DM_SPACE_MAP_H
#define _LINUX_DM_SPACE_MAP_H

#include "dm-block-manager.h"

typedef void (*dm_sm_threshold_fn)(void *context);

/*
 * struct dm_space_map keeps a record of how many times each block in a device
 * is referenced.  It needs to be persisted on disk as part of the transaction.
 */
struct dm_space_map {
	void (*destroy)(struct dm_space_map *sm);

	/*
	 * You must commit before allocating the newly added space.
	 */
	int (*extend)(struct dm_space_map *sm, dm_block_t extra_blocks);

	/*
	 * Extensions do not appear in this count until after commit has
	 * been called.
	 */
	int (*get_nr_blocks)(struct dm_space_map *sm, dm_block_t *count);

	/*
	 * Space maps must never allocate a block from the previous
	 * transaction, in case we need to roll back.  This complicates the
	 * semantics of get_nr_free(): it should return the number of blocks
	 * that are available for allocation _now_.  For instance, you may
	 * have blocks with a zero reference count that will not become
	 * available for allocation until after the next commit.
	 */
	int (*get_nr_free)(struct dm_space_map *sm, dm_block_t *count);

	int (*get_count)(struct dm_space_map *sm, dm_block_t b, uint32_t *result);
	int (*count_is_more_than_one)(struct dm_space_map *sm, dm_block_t b,
				      int *result);
	int (*set_count)(struct dm_space_map *sm, dm_block_t b, uint32_t count);

	int (*commit)(struct dm_space_map *sm);

	int (*inc_blocks)(struct dm_space_map *sm, dm_block_t b, dm_block_t e);
	int (*dec_blocks)(struct dm_space_map *sm, dm_block_t b, dm_block_t e);

	/*
	 * new_block allocates a previously unused block and increments its
	 * reference count before returning it.
	 */
	int (*new_block)(struct dm_space_map *sm, dm_block_t *b);

	/*
	 * The root contains all the information needed to re-open the space
	 * map.  Generally this info is small, so squirrel it away in a disk
	 * block along with other metadata.
	 */
	int (*root_size)(struct dm_space_map *sm, size_t *result);
	int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);

	/*
	 * You can register one threshold callback.  It is edge-triggered:
	 * it fires when the free space in the space map drops below the
	 * given threshold.
	 */
	int (*register_threshold_callback)(struct dm_space_map *sm,
					   dm_block_t threshold,
					   dm_sm_threshold_fn fn,
					   void *context);
};
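
/*
 * Illustrative sketch (not part of the original header): growing the space
 * map and making the new blocks usable, via the dm_sm_*() wrappers defined
 * below.  The caller and its error handling are hypothetical.
 *
 *	int grow_space_map(struct dm_space_map *sm, dm_block_t extra_blocks)
 *	{
 *		int r = dm_sm_extend(sm, extra_blocks);
 *		if (r)
 *			return r;
 *
 *		// The extra blocks do not appear in get_nr_blocks() or
 *		// get_nr_free(), and cannot be allocated, until after
 *		// this commit.
 *		return dm_sm_commit(sm);
 *	}
 */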

/*----------------------------------------------------------------*/

static inline void dm_sm_destroy(struct dm_space_map *sm)
{
	sm->destroy(sm);
}

static inline int dm_sm_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
	return sm->extend(sm, extra_blocks);
}

static inline int dm_sm_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
	return sm->get_nr_blocks(sm, count);
}

static inline int dm_sm_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
	return sm->get_nr_free(sm, count);
}

static inline int dm_sm_get_count(struct dm_space_map *sm, dm_block_t b,
				  uint32_t *result)
{
	return sm->get_count(sm, b, result);
}

static inline int dm_sm_count_is_more_than_one(struct dm_space_map *sm,
					       dm_block_t b, int *result)
{
	return sm->count_is_more_than_one(sm, b, result);
}

static inline int dm_sm_set_count(struct dm_space_map *sm, dm_block_t b,
				  uint32_t count)
{
	return sm->set_count(sm, b, count);
}

static inline int dm_sm_commit(struct dm_space_map *sm)
{
	return sm->commit(sm);
}

static inline int dm_sm_inc_blocks(struct dm_space_map *sm, dm_block_t b, dm_block_t e)
{
	return sm->inc_blocks(sm, b, e);
}

static inline int dm_sm_inc_block(struct dm_space_map *sm, dm_block_t b)
{
	return dm_sm_inc_blocks(sm, b, b + 1);
}

static inline int dm_sm_dec_blocks(struct dm_space_map *sm, dm_block_t b, dm_block_t e)
{
	return sm->dec_blocks(sm, b, e);
}

static inline int dm_sm_dec_block(struct dm_space_map *sm, dm_block_t b)
{
	return dm_sm_dec_blocks(sm, b, b + 1);
}
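
/*
 * Note: the inc_blocks/dec_blocks range is half open, [b, e), as the
 * single-block wrappers above show.  A hypothetical caller taking a
 * reference on a run of 'count' blocks starting at 'begin' would do:
 *
 *	r = dm_sm_inc_blocks(sm, begin, begin + count);
 */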

static inline int dm_sm_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	return sm->new_block(sm, b);
}
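
/*
 * Illustrative sketch (hypothetical caller): allocating a fresh block.
 * new_block() hands back a block whose reference count has already been
 * incremented, so no separate dm_sm_inc_block() is needed.
 *
 *	dm_block_t b;
 *	int r = dm_sm_new_block(sm, &b);
 *	if (r)
 *		return r;	// e.g. -ENOSPC when nothing is free
 *
 *	// b is now held with a single reference; drop it with
 *	// dm_sm_dec_block() if it turns out not to be needed.
 */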

static inline int dm_sm_root_size(struct dm_space_map *sm, size_t *result)
{
	return sm->root_size(sm, result);
}

static inline int dm_sm_copy_root(struct dm_space_map *sm, void *copy_to_here_le, size_t len)
{
	return sm->copy_root(sm, copy_to_here_le, len);
}
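
/*
 * Illustrative sketch (hypothetical superblock layout): saving the space map
 * root as part of a commit.  SPACE_MAP_ROOT_SIZE and root_le are assumptions
 * made up for this example; the _le suffix on copy_root's destination
 * indicates the data is little-endian and can be embedded directly in
 * on-disk metadata.
 *
 *	u8 root_le[SPACE_MAP_ROOT_SIZE];
 *	size_t len;
 *	int r;
 *
 *	r = dm_sm_root_size(sm, &len);
 *	if (r)
 *		return r;
 *
 *	// len is expected to fit in the space reserved in the superblock.
 *	r = dm_sm_copy_root(sm, root_le, len);
 *	if (r)
 *		return r;
 *
 *	// root_le can now be written into the superblock as the final
 *	// step of the transaction commit.
 */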

static inline int dm_sm_register_threshold_callback(struct dm_space_map *sm,
						    dm_block_t threshold,
						    dm_sm_threshold_fn fn,
						    void *context)
{
	if (sm->register_threshold_callback)
		return sm->register_threshold_callback(sm, threshold, fn, context);

	return -EINVAL;
}
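
/*
 * Illustrative sketch (hypothetical callback and context): asking to be told
 * when free space drops below a low-water mark.  Implementations that do not
 * support thresholds make this return -EINVAL.
 *
 *	static void low_space_warning(void *context)
 *	{
 *		struct my_metadata *md = context;	// hypothetical type
 *
 *		// e.g. flag the device as nearly full, queue an event, ...
 *	}
 *
 *	r = dm_sm_register_threshold_callback(sm, low_water_blocks,
 *					      low_space_warning, md);
 */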


#endif	/* _LINUX_DM_SPACE_MAP_H */