/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _PCACHE_CACHE_H
#define _PCACHE_CACHE_H
#include "segment.h"
/* Garbage collection thresholds */
#define PCACHE_CACHE_GC_PERCENT_MIN 0 /* Minimum GC percentage */
#define PCACHE_CACHE_GC_PERCENT_MAX 90 /* Maximum GC percentage */
#define PCACHE_CACHE_GC_PERCENT_DEFAULT 70 /* Default GC percentage */
#define PCACHE_CACHE_SUBTREE_SIZE (4 * PCACHE_MB) /* 4MB per cache subtree */
#define PCACHE_CACHE_SUBTREE_SIZE_MASK 0x3FFFFF /* Offset mask within a subtree */
#define PCACHE_CACHE_SUBTREE_SIZE_SHIFT 22 /* log2 of the subtree size */
/* Maximum number of keys per key set */
#define PCACHE_KSET_KEYS_MAX 128
#define PCACHE_CACHE_SEGS_MAX (1024 * 1024) /* maximum cache size for each device is 16T */
#define PCACHE_KSET_ONMEDIA_SIZE_MAX struct_size_t(struct pcache_cache_kset_onmedia, data, PCACHE_KSET_KEYS_MAX)
#define PCACHE_KSET_SIZE (sizeof(struct pcache_cache_kset) + sizeof(struct pcache_cache_key_onmedia) * PCACHE_KSET_KEYS_MAX)
/* Maximum number of keys to clean in one round of clean_work */
#define PCACHE_CLEAN_KEYS_MAX 10
/* Writeback and garbage collection intervals in jiffies */
#define PCACHE_CACHE_WRITEBACK_INTERVAL (5 * HZ)
#define PCACHE_CACHE_GC_INTERVAL (5 * HZ)
/* Macro to get the cache key structure from an rb_node pointer */
#define CACHE_KEY(node) (container_of(node, struct pcache_cache_key, rb_node))
struct pcache_cache_pos_onmedia {
struct pcache_meta_header header;
__u32 cache_seg_id;
__u32 seg_off;
};
/* Offset and size definitions for cache segment control */
#define PCACHE_CACHE_SEG_CTRL_OFF (PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX)
#define PCACHE_CACHE_SEG_CTRL_SIZE (4 * PCACHE_KB)
struct pcache_cache_seg_gen {
struct pcache_meta_header header;
__u64 gen;
};
/* Control structure for cache segments */
struct pcache_cache_seg_ctrl {
struct pcache_cache_seg_gen gen[PCACHE_META_INDEX_MAX];
__u64 res[64];
};
#define PCACHE_CACHE_FLAGS_DATA_CRC BIT(0)
#define PCACHE_CACHE_FLAGS_INIT_DONE BIT(1)
#define PCACHE_CACHE_FLAGS_CACHE_MODE_MASK GENMASK(5, 2)
#define PCACHE_CACHE_MODE_WRITEBACK 0
#define PCACHE_CACHE_MODE_WRITETHROUGH 1
#define PCACHE_CACHE_MODE_WRITEAROUND 2
#define PCACHE_CACHE_MODE_WRITEONLY 3
#define PCACHE_CACHE_FLAGS_GC_PERCENT_MASK GENMASK(12, 6)
struct pcache_cache_info {
struct pcache_meta_header header;
__u32 seg_id;
__u32 n_segs;
__u32 flags;
__u32 reserved;
};
struct pcache_cache_pos {
struct pcache_cache_segment *cache_seg;
u32 seg_off;
};
struct pcache_cache_segment {
struct pcache_cache *cache;
u32 cache_seg_id; /* Index in cache->segments */
struct pcache_segment segment;
atomic_t refs;
struct pcache_segment_info cache_seg_info;
struct mutex info_lock;
u32 info_index;
spinlock_t gen_lock;
u64 gen;
u64 gen_seq;
u32 gen_index;
struct pcache_cache_seg_ctrl *cache_seg_ctrl;
};
/* rbtree for cache entries */
struct pcache_cache_subtree {
struct rb_root root;
spinlock_t tree_lock;
};
struct pcache_cache_tree {
struct pcache_cache *cache;
u32 n_subtrees;
mempool_t key_pool;
struct pcache_cache_subtree *subtrees;
};
extern struct kmem_cache *key_cache;
struct pcache_cache_key {
struct pcache_cache_tree *cache_tree;
struct pcache_cache_subtree *cache_subtree;
struct kref ref;
struct rb_node rb_node;
struct list_head list_node;
u64 off;
u32 len;
u32 flags;
struct pcache_cache_pos cache_pos;
u64 seg_gen;
};
#define PCACHE_CACHE_KEY_FLAGS_EMPTY BIT(0)
#define PCACHE_CACHE_KEY_FLAGS_CLEAN BIT(1)
struct pcache_cache_key_onmedia {
__u64 off;
__u32 len;
__u32 flags;
__u32 cache_seg_id;
__u32 cache_seg_off;
__u64 seg_gen;
__u32 data_crc;
__u32 reserved;
};
struct pcache_cache_kset_onmedia {
	__u32 crc;
	union {
		__u32 key_num;			/* number of keys in a regular kset */
		__u32 next_cache_seg_id;	/* next cache segment, when PCACHE_KSET_FLAGS_LAST is set */
	};
	__u64 magic;
	__u64 flags;
	struct pcache_cache_key_onmedia data[];
};
struct pcache_cache {
struct pcache_backing_dev *backing_dev;
struct pcache_cache_dev *cache_dev;
struct pcache_cache_ctrl *cache_ctrl;
u64 dev_size;
struct pcache_cache_data_head __percpu *data_heads;
spinlock_t key_head_lock;
struct pcache_cache_pos key_head;
u32 n_ksets;
struct pcache_cache_kset *ksets;
struct mutex key_tail_lock;
struct pcache_cache_pos key_tail;
u64 key_tail_seq;
u32 key_tail_index;
struct mutex dirty_tail_lock;
struct pcache_cache_pos dirty_tail;
u64 dirty_tail_seq;
u32 dirty_tail_index;
struct pcache_cache_tree req_key_tree;
struct work_struct clean_work;
struct mutex writeback_lock;
char wb_kset_onmedia_buf[PCACHE_KSET_ONMEDIA_SIZE_MAX];
struct pcache_cache_tree writeback_key_tree;
struct delayed_work writeback_work;
struct {
atomic_t pending;
u32 advance;
int ret;
} writeback_ctx;
char gc_kset_onmedia_buf[PCACHE_KSET_ONMEDIA_SIZE_MAX];
struct delayed_work gc_work;
atomic_t gc_errors;
struct mutex cache_info_lock;
struct pcache_cache_info cache_info;
struct pcache_cache_info *cache_info_addr;
u32 info_index;
u32 n_segs;
unsigned long *seg_map;
u32 last_cache_seg;
bool cache_full;
spinlock_t seg_map_lock;
struct pcache_cache_segment *segments;
};
struct workqueue_struct *cache_get_wq(struct pcache_cache *cache);
struct dm_pcache;
struct pcache_cache_options {
u32 cache_mode:4;
u32 data_crc:1;
};
int pcache_cache_start(struct dm_pcache *pcache);
void pcache_cache_stop(struct dm_pcache *pcache);
struct pcache_cache_ctrl {
/* Updated by gc_thread */
struct pcache_cache_pos_onmedia key_tail_pos[PCACHE_META_INDEX_MAX];
/* Updated by writeback_thread */
struct pcache_cache_pos_onmedia dirty_tail_pos[PCACHE_META_INDEX_MAX];
};
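/*
 * Both tail positions are replicated PCACHE_META_INDEX_MAX times on media.
 * Judging from the cache_pos_encode()/cache_pos_decode() helpers below,
 * updates rotate through the copies with an increasing sequence number, so
 * a torn write corrupts at most one copy and the most recent intact copy
 * can still be decoded on restart. (Inferred from this header, not from
 * the implementation.)
 */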
struct pcache_cache_data_head {
struct pcache_cache_pos head_pos;
};
static inline u16 pcache_cache_get_gc_percent(struct pcache_cache *cache)
{
return FIELD_GET(PCACHE_CACHE_FLAGS_GC_PERCENT_MASK, cache->cache_info.flags);
}
int pcache_cache_set_gc_percent(struct pcache_cache *cache, u8 percent);
/* cache key */
struct pcache_cache_key *cache_key_alloc(struct pcache_cache_tree *cache_tree, gfp_t gfp_mask);
void cache_key_init(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key);
void cache_key_get(struct pcache_cache_key *key);
void cache_key_put(struct pcache_cache_key *key);
int cache_key_append(struct pcache_cache *cache, struct pcache_cache_key *key, bool force_close);
void cache_key_insert(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key, bool fixup);
int cache_key_decode(struct pcache_cache *cache,
struct pcache_cache_key_onmedia *key_onmedia,
struct pcache_cache_key *key);
void cache_pos_advance(struct pcache_cache_pos *pos, u32 len);
#define PCACHE_KSET_FLAGS_LAST BIT(0)
#define PCACHE_KSET_MAGIC 0x676894a64e164f1aULL
struct pcache_cache_kset {
struct pcache_cache *cache;
spinlock_t kset_lock;
struct delayed_work flush_work;
struct pcache_cache_kset_onmedia kset_onmedia;
};
extern struct pcache_cache_kset_onmedia pcache_empty_kset;
#define SUBTREE_WALK_RET_OK 0
#define SUBTREE_WALK_RET_ERR 1
#define SUBTREE_WALK_RET_NEED_KEY 2
#define SUBTREE_WALK_RET_NEED_REQ 3
#define SUBTREE_WALK_RET_RESEARCH 4
struct pcache_cache_subtree_walk_ctx {
struct pcache_cache_tree *cache_tree;
struct rb_node *start_node;
struct pcache_request *pcache_req;
struct pcache_cache_key *key;
u32 req_done;
int ret;
/* pre-allocated key and backing_dev_req */
struct pcache_cache_key *pre_alloc_key;
struct pcache_backing_dev_req *pre_alloc_req;
struct list_head *delete_key_list;
struct list_head *submit_req_list;
/*
 *           |--------|        key_tmp
 * |====|                      key
 */
int (*before)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
struct pcache_cache_subtree_walk_ctx *ctx);
/*
 * |----------|                key_tmp
 *                 |=====|     key
 */
int (*after)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
struct pcache_cache_subtree_walk_ctx *ctx);
/*
 *      |----------------|     key_tmp
 * |===========|               key
 */
int (*overlap_tail)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
struct pcache_cache_subtree_walk_ctx *ctx);
/*
 * |--------|                  key_tmp
 *       |==========|          key
 */
int (*overlap_head)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
struct pcache_cache_subtree_walk_ctx *ctx);
/*
 *     |----|                  key_tmp
 * |==========|                key
 */
int (*overlap_contain)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
struct pcache_cache_subtree_walk_ctx *ctx);
/*
 * |-----------|               key_tmp
 *     |====|                  key
 */
int (*overlap_contained)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
struct pcache_cache_subtree_walk_ctx *ctx);
int (*walk_finally)(struct pcache_cache_subtree_walk_ctx *ctx, int ret);
bool (*walk_done)(struct pcache_cache_subtree_walk_ctx *ctx);
};
int cache_subtree_walk(struct pcache_cache_subtree_walk_ctx *ctx);
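/*
 * Minimal walk setup (illustrative sketch only). Real walkers in the
 * request path also fill pre_alloc_key/pre_alloc_req and install the
 * overlap callbacks they need; my_overlap_head here is a hypothetical
 * callback, and key/subtree are assumed to be in scope:
 *
 *	struct pcache_cache_subtree_walk_ctx walk_ctx = { 0 };
 *	struct rb_node *parent, **newp;
 *	LIST_HEAD(delete_key_list);
 *	int ret;
 *
 *	walk_ctx.cache_tree = cache_tree;
 *	walk_ctx.key = key;
 *	walk_ctx.delete_key_list = &delete_key_list;
 *	walk_ctx.overlap_head = my_overlap_head;
 *	walk_ctx.start_node = cache_subtree_search(subtree, key, &parent,
 *						   &newp, &delete_key_list);
 *	ret = cache_subtree_walk(&walk_ctx);
 */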
struct rb_node *cache_subtree_search(struct pcache_cache_subtree *cache_subtree, struct pcache_cache_key *key,
struct rb_node **parentp, struct rb_node ***newp,
struct list_head *delete_key_list);
int cache_kset_close(struct pcache_cache *cache, struct pcache_cache_kset *kset);
void clean_fn(struct work_struct *work);
void kset_flush_fn(struct work_struct *work);
int cache_replay(struct pcache_cache *cache);
int cache_tree_init(struct pcache_cache *cache, struct pcache_cache_tree *cache_tree, u32 n_subtrees);
void cache_tree_clear(struct pcache_cache_tree *cache_tree);
void cache_tree_exit(struct pcache_cache_tree *cache_tree);
/* cache segments */
struct pcache_cache_segment *get_cache_segment(struct pcache_cache *cache);
int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
bool new_cache);
void cache_seg_get(struct pcache_cache_segment *cache_seg);
void cache_seg_put(struct pcache_cache_segment *cache_seg);
void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id);
/* cache request */
int cache_flush(struct pcache_cache *cache);
void miss_read_end_work_fn(struct work_struct *work);
int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *pcache_req);
/* gc */
void pcache_cache_gc_fn(struct work_struct *work);
/* writeback */
void cache_writeback_exit(struct pcache_cache *cache);
int cache_writeback_init(struct pcache_cache *cache);
void cache_writeback_fn(struct work_struct *work);
/* inline functions */
static inline struct pcache_cache_subtree *get_subtree(struct pcache_cache_tree *cache_tree, u64 off)
{
if (cache_tree->n_subtrees == 1)
return &cache_tree->subtrees[0];
return &cache_tree->subtrees[off >> PCACHE_CACHE_SUBTREE_SIZE_SHIFT];
}
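/*
 * Example: with 4MB subtrees, the subtree index comes from a shift alone.
 * For off = 0xC20000 (12MB + 128KB):
 *
 *	off >> PCACHE_CACHE_SUBTREE_SIZE_SHIFT == 3
 *	off & PCACHE_CACHE_SUBTREE_SIZE_MASK   == 0x20000 (offset within it)
 */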
static inline void *cache_pos_addr(struct pcache_cache_pos *pos)
{
return (pos->cache_seg->segment.data + pos->seg_off);
}
static inline void *get_key_head_addr(struct pcache_cache *cache)
{
return cache_pos_addr(&cache->key_head);
}
static inline u32 get_kset_id(struct pcache_cache *cache, u64 off)
{
u32 kset_id;
div_u64_rem(off >> PCACHE_CACHE_SUBTREE_SIZE_SHIFT, cache->n_ksets, &kset_id);
return kset_id;
}
static inline struct pcache_cache_kset *get_kset(struct pcache_cache *cache, u32 kset_id)
{
return (void *)cache->ksets + PCACHE_KSET_SIZE * kset_id;
}
static inline struct pcache_cache_data_head *get_data_head(struct pcache_cache *cache)
{
return this_cpu_ptr(cache->data_heads);
}
static inline bool cache_key_empty(struct pcache_cache_key *key)
{
return key->flags & PCACHE_CACHE_KEY_FLAGS_EMPTY;
}
static inline bool cache_key_clean(struct pcache_cache_key *key)
{
return key->flags & PCACHE_CACHE_KEY_FLAGS_CLEAN;
}
static inline void cache_pos_copy(struct pcache_cache_pos *dst, struct pcache_cache_pos *src)
{
memcpy(dst, src, sizeof(struct pcache_cache_pos));
}
/**
* cache_seg_is_ctrl_seg - Checks if a cache segment is a cache ctrl segment.
* @cache_seg_id: ID of the cache segment.
*
* Returns true if the cache segment ID corresponds to a cache ctrl segment.
*
* Note: We extend the segment control of the first cache segment
* (cache segment ID 0) to serve as the cache control (pcache_cache_ctrl)
* for the entire PCACHE cache. This function determines whether the given
* cache segment is the one storing the pcache_cache_ctrl information.
*/
static inline bool cache_seg_is_ctrl_seg(u32 cache_seg_id)
{
return (cache_seg_id == 0);
}
/**
* cache_key_cutfront - Cuts a specified length from the front of a cache key.
* @key: Pointer to pcache_cache_key structure.
* @cut_len: Length to cut from the front.
*
* Advances the cache key position by cut_len and adjusts offset and length accordingly.
*/
static inline void cache_key_cutfront(struct pcache_cache_key *key, u32 cut_len)
{
if (key->cache_pos.cache_seg)
cache_pos_advance(&key->cache_pos, cut_len);
key->off += cut_len;
key->len -= cut_len;
}
/**
* cache_key_cutback - Cuts a specified length from the back of a cache key.
* @key: Pointer to pcache_cache_key structure.
* @cut_len: Length to cut from the back.
*
* Reduces the length of the cache key by cut_len.
*/
static inline void cache_key_cutback(struct pcache_cache_key *key, u32 cut_len)
{
key->len -= cut_len;
}
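/*
 * Overlap-trimming sketch (illustrative): when a new key's head overlaps
 * the tail of an existing key_tmp (the overlap_head case in the walk
 * context above), one plausible fixup is to shorten the older key so the
 * newer data wins:
 *
 *	cache_key_cutback(key_tmp,
 *			  cache_key_lend(key_tmp) - cache_key_lstart(key));
 *
 * cache_key_cutfront() serves the mirrored case, discarding the front of
 * a key up to where the other one ends.
 */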
static inline void cache_key_delete(struct pcache_cache_key *key)
{
struct pcache_cache_subtree *cache_subtree;
cache_subtree = key->cache_subtree;
BUG_ON(!cache_subtree);
rb_erase(&key->rb_node, &cache_subtree->root);
key->flags = 0;
cache_key_put(key);
}
static inline bool cache_data_crc_on(struct pcache_cache *cache)
{
return (cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC);
}
static inline u32 cache_mode_get(struct pcache_cache *cache)
{
return FIELD_GET(PCACHE_CACHE_FLAGS_CACHE_MODE_MASK, cache->cache_info.flags);
}
static inline void cache_mode_set(struct pcache_cache *cache, u32 cache_mode)
{
cache->cache_info.flags &= ~PCACHE_CACHE_FLAGS_CACHE_MODE_MASK;
cache->cache_info.flags |= FIELD_PREP(PCACHE_CACHE_FLAGS_CACHE_MODE_MASK, cache_mode);
}
/**
* cache_key_data_crc - Calculates CRC for data in a cache key.
* @key: Pointer to the pcache_cache_key structure.
*
* Returns the CRC-32C checksum of the data at the cache key's position.
*/
static inline u32 cache_key_data_crc(struct pcache_cache_key *key)
{
void *data;
data = cache_pos_addr(&key->cache_pos);
return crc32c(PCACHE_CRC_SEED, data, key->len);
}
static inline u32 cache_kset_crc(struct pcache_cache_kset_onmedia *kset_onmedia)
{
u32 crc_size;
if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST)
crc_size = sizeof(struct pcache_cache_kset_onmedia) - 4;
else
crc_size = struct_size(kset_onmedia, data, kset_onmedia->key_num) - 4;
return crc32c(PCACHE_CRC_SEED, (void *)kset_onmedia + 4, crc_size);
}
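/*
 * The CRC covers everything after the crc field itself: the fixed header
 * plus key_num on-media keys for a regular kset, or the fixed header alone
 * for a PCACHE_KSET_FLAGS_LAST kset. A replay-time check might look like
 * this (sketch; the error handling is hypothetical):
 *
 *	if (kset_onmedia->magic != PCACHE_KSET_MAGIC ||
 *	    kset_onmedia->crc != cache_kset_crc(kset_onmedia))
 *		return -EIO;
 */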
static inline u32 get_kset_onmedia_size(struct pcache_cache_kset_onmedia *kset_onmedia)
{
return struct_size_t(struct pcache_cache_kset_onmedia, data, kset_onmedia->key_num);
}
/**
* cache_seg_remain - Computes remaining space in a cache segment.
* @pos: Pointer to pcache_cache_pos structure.
*
* Returns the amount of remaining space in the segment data starting from
* the current position offset.
*/
static inline u32 cache_seg_remain(struct pcache_cache_pos *pos)
{
struct pcache_cache_segment *cache_seg;
struct pcache_segment *segment;
u32 seg_remain;
cache_seg = pos->cache_seg;
segment = &cache_seg->segment;
seg_remain = segment->data_size - pos->seg_off;
return seg_remain;
}
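/*
 * Typical use (sketch): check whether an append fits in the current
 * segment before writing; if not, the writer would close this segment and
 * switch to a fresh one, e.g. via get_cache_segment():
 *
 *	struct pcache_cache_data_head *head = get_data_head(cache);
 *
 *	if (key->len > cache_seg_remain(&head->head_pos))
 *		... move on to a new cache segment ...
 */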
/**
* cache_key_invalid - Checks if a cache key is invalid.
* @key: Pointer to pcache_cache_key structure.
*
* Returns true if the cache key is invalid due to its generation being
* less than the generation of its segment; otherwise returns false.
*
* When the GC (garbage collection) thread identifies a segment
* as reclaimable, it increments the segment's generation (gen). However,
* it does not immediately remove all related cache keys. When accessing
* such a cache key, this function can be used to determine if the cache
* key has already become invalid.
*/
static inline bool cache_key_invalid(struct pcache_cache_key *key)
{
if (cache_key_empty(key))
return false;
return (key->seg_gen < key->cache_pos.cache_seg->gen);
}
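/*
 * Sketch of how a subtree walker might react to a stale key. The return
 * code convention here is an assumption for illustration, not taken from
 * the implementation:
 *
 *	if (cache_key_invalid(key_tmp)) {
 *		list_add(&key_tmp->list_node, ctx->delete_key_list);
 *		return SUBTREE_WALK_RET_RESEARCH;
 *	}
 */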
/**
* cache_key_lstart - Retrieves the logical start offset of a cache key.
* @key: Pointer to pcache_cache_key structure.
*
* Returns the logical start offset for the cache key.
*/
static inline u64 cache_key_lstart(struct pcache_cache_key *key)
{
return key->off;
}
/**
* cache_key_lend - Retrieves the logical end offset of a cache key.
* @key: Pointer to pcache_cache_key structure.
*
* Returns the logical end offset for the cache key.
*/
static inline u64 cache_key_lend(struct pcache_cache_key *key)
{
return key->off + key->len;
}
static inline void cache_key_copy(struct pcache_cache_key *key_dst, struct pcache_cache_key *key_src)
{
key_dst->off = key_src->off;
key_dst->len = key_src->len;
key_dst->seg_gen = key_src->seg_gen;
key_dst->cache_tree = key_src->cache_tree;
key_dst->cache_subtree = key_src->cache_subtree;
key_dst->flags = key_src->flags;
cache_pos_copy(&key_dst->cache_pos, &key_src->cache_pos);
}
/**
 * cache_pos_onmedia_crc - Calculates the CRC for an on-media cache position.
 * @pos_om: Pointer to pcache_cache_pos_onmedia structure.
 *
 * Returns the CRC-32C checksum of the position, excluding the first 4 bytes
 * (the meta header's crc field).
 */
static inline u32 cache_pos_onmedia_crc(struct pcache_cache_pos_onmedia *pos_om)
{
return pcache_meta_crc(&pos_om->header, sizeof(struct pcache_cache_pos_onmedia));
}
void cache_pos_encode(struct pcache_cache *cache,
struct pcache_cache_pos_onmedia *pos_onmedia,
struct pcache_cache_pos *pos, u64 seq, u32 *index);
int cache_pos_decode(struct pcache_cache *cache,
struct pcache_cache_pos_onmedia *pos_onmedia,
struct pcache_cache_pos *pos, u64 *seq, u32 *index);
static inline void cache_encode_key_tail(struct pcache_cache *cache)
{
cache_pos_encode(cache, cache->cache_ctrl->key_tail_pos,
&cache->key_tail, ++cache->key_tail_seq,
&cache->key_tail_index);
}
static inline int cache_decode_key_tail(struct pcache_cache *cache)
{
return cache_pos_decode(cache, cache->cache_ctrl->key_tail_pos,
&cache->key_tail, &cache->key_tail_seq,
&cache->key_tail_index);
}
static inline void cache_encode_dirty_tail(struct pcache_cache *cache)
{
cache_pos_encode(cache, cache->cache_ctrl->dirty_tail_pos,
&cache->dirty_tail, ++cache->dirty_tail_seq,
&cache->dirty_tail_index);
}
static inline int cache_decode_dirty_tail(struct pcache_cache *cache)
{
return cache_pos_decode(cache, cache->cache_ctrl->dirty_tail_pos,
&cache->dirty_tail, &cache->dirty_tail_seq,
&cache->dirty_tail_index);
}
int pcache_cache_init(void);
void pcache_cache_exit(void);
#endif /* _PCACHE_CACHE_H */