Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h  414
1 file changed, 304 insertions, 110 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 32868fbedc9e..639cae2c158b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -4,13 +4,11 @@
#include <linux/sched.h>
#include <linux/sched/clock.h>
-
-#ifdef CONFIG_BLOCK
-
#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
+#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
@@ -27,6 +25,7 @@
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
+#include <linux/pm.h>
struct module;
struct scsi_ioctl_command;
@@ -43,6 +42,7 @@ struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
+struct blk_keyslot_manager;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -82,8 +82,6 @@ typedef __u32 __bitwise req_flags_t;
/* set for "ide_preempt" requests and also for requests for which the SCSI
"quiesce" state must be ignored. */
#define RQF_PREEMPT ((__force req_flags_t)(1 << 8))
-/* contains copies of user pages */
-#define RQF_COPY_USER ((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error. Ignored by the block layer */
#define RQF_FAILED ((__force req_flags_t)(1 << 10))
/* don't warn about errors */
@@ -223,11 +221,14 @@ struct request {
unsigned short nr_integrity_segments;
#endif
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct bio_crypt_ctx *crypt_ctx;
+ struct blk_ksm_keyslot *crypt_keyslot;
+#endif
+
unsigned short write_hint;
unsigned short ioprio;
- unsigned int extra_len; /* length of alignment and padding */
-
enum mq_rq_state state;
refcount_t ref;
@@ -287,10 +288,7 @@ static inline unsigned short req_get_ioprio(struct request *req)
struct blk_queue_ctx;
-typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-
struct bio_vec;
-typedef int (dma_drain_needed_fn)(struct request *);
enum blk_eh_timer_return {
	BLK_EH_DONE,		/* driver has completed the command */
@@ -310,11 +308,14 @@ enum blk_queue_state {
/*
* Zoned block device models (zoned limit).
+ *
+ * Note: This needs to be ordered from the least to the most severe
+ * restrictions for the inheritance in blk_stack_limits() to work.
*/
enum blk_zoned_model {
- BLK_ZONED_NONE, /* Regular block device */
- BLK_ZONED_HA, /* Host-aware zoned block device */
- BLK_ZONED_HM, /* Host-managed zoned block device */
+ BLK_ZONED_NONE = 0, /* Regular block device */
+ BLK_ZONED_HA, /* Host-aware zoned block device */
+ BLK_ZONED_HM, /* Host-managed zoned block device */
};
struct queue_limits {
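The ordering note above matters for limit stacking: because BLK_ZONED_NONE < BLK_ZONED_HA < BLK_ZONED_HM, the stricter model can be inherited simply by taking the numeric maximum when two sets of limits are combined. A minimal sketch of that inheritance step, assuming blk_stack_limits() reduces to something like the helper below (the helper name is made up for illustration):

	static void stack_zoned_model(struct queue_limits *t,
				      const struct queue_limits *b)
	{
		/* the more restrictive (numerically larger) model wins */
		t->zoned = max(t->zoned, b->zoned);
	}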
@@ -336,6 +337,7 @@ struct queue_limits {
unsigned int max_hw_discard_sectors;
unsigned int max_write_same_sectors;
unsigned int max_write_zeroes_sectors;
+ unsigned int max_zone_append_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
@@ -352,6 +354,8 @@ struct queue_limits {
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
void *data);
+void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model);
+
#ifdef CONFIG_BLK_DEV_ZONED
#define BLK_ALL_ZONES ((unsigned int)-1)
@@ -361,7 +365,8 @@ unsigned int blkdev_nr_zones(struct gendisk *disk);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
sector_t sectors, sector_t nr_sectors,
gfp_t gfp_mask);
-extern int blk_revalidate_disk_zones(struct gendisk *disk);
+int blk_revalidate_disk_zones(struct gendisk *disk,
+ void (*update_driver_data)(struct gendisk *disk));
extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
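blk_revalidate_disk_zones() now takes a callback that runs once the zone bitmaps have been rebuilt, so a zoned driver can refresh its own per-zone bookkeeping as part of the same revalidation. A hedged sketch, with a made-up driver structure and callback name:

	struct mydrv_device {
		unsigned int nr_zones;	/* hypothetical private copy */
	};

	static void mydrv_update_zone_info(struct gendisk *disk)
	{
		struct mydrv_device *dev = disk->private_data;

		dev->nr_zones = blk_queue_nr_zones(disk->queue);
	}

	static int mydrv_revalidate_zones(struct gendisk *disk)
	{
		return blk_revalidate_disk_zones(disk, mydrv_update_zone_info);
	}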
@@ -395,12 +400,11 @@ struct request_queue {
struct request *last_merge;
struct elevator_queue *elevator;
+ struct percpu_ref q_usage_counter;
+
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
- make_request_fn *make_request_fn;
- dma_drain_needed_fn *dma_drain_needed;
-
const struct blk_mq_ops *mq_ops;
/* sw queues */
@@ -460,7 +464,7 @@ struct request_queue {
#ifdef CONFIG_PM
struct device *dev;
- int rpm_status;
+ enum rpm_status rpm_status;
unsigned int nr_pending;
#endif
@@ -469,11 +473,14 @@ struct request_queue {
*/
unsigned long nr_requests; /* Max # of requests */
- unsigned int dma_drain_size;
- void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ /* Inline crypto capabilities */
+ struct blk_keyslot_manager *ksm;
+#endif
+
unsigned int rq_timeout;
int poll_nsec;
@@ -483,6 +490,8 @@ struct request_queue {
struct timer_list timeout;
struct work_struct timeout_work;
+ atomic_t nr_active_requests_shared_sbitmap;
+
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
@@ -515,6 +524,8 @@ struct request_queue {
unsigned int nr_zones;
unsigned long *conv_zones_bitmap;
unsigned long *seq_zones_wlock;
+ unsigned int max_open_zones;
+ unsigned int max_active_zones;
#endif /* CONFIG_BLK_DEV_ZONED */
/*
@@ -523,9 +534,9 @@ struct request_queue {
unsigned int sg_timeout;
unsigned int sg_reserved_size;
int node;
+ struct mutex debugfs_mutex;
#ifdef CONFIG_BLK_DEV_IO_TRACE
struct blk_trace __rcu *blk_trace;
- struct mutex blk_trace_mutex;
#endif
/*
* for flush operations
@@ -563,14 +574,14 @@ struct request_queue {
* percpu_ref_kill() and percpu_ref_reinit().
*/
struct mutex mq_freeze_lock;
- struct percpu_ref q_usage_counter;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
struct bio_set bio_split;
-#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
+
+#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *sched_debugfs_dir;
struct dentry *rqos_debugfs_dir;
#endif
@@ -579,12 +590,11 @@ struct request_queue {
size_t cmd_size;
- struct work_struct release_work;
-
#define BLK_MAX_WRITE_HINTS 5
u64 write_hints[BLK_MAX_WRITE_HINTS];
};
+/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
#define QUEUE_FLAG_DYING 1 /* queue being torn down */
#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
@@ -600,6 +610,7 @@ struct request_queue {
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
+#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
#define QUEUE_FLAG_WC 17 /* Write back caching */
#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
@@ -612,9 +623,12 @@ struct request_queue {
#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26 /* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27 /* record rq->alloc_time_ns */
+#define QUEUE_FLAG_HCTX_ACTIVE 28 /* at least one blk-mq hctx is active */
+#define QUEUE_FLAG_NOWAIT 29 /* device supports NOWAIT */
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_SAME_COMP))
+ (1 << QUEUE_FLAG_SAME_COMP) | \
+ (1 << QUEUE_FLAG_NOWAIT))
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
@@ -628,6 +642,8 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_noxmerges(q) \
test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_stable_writes(q) \
+ test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
@@ -654,6 +670,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
+#define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
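The new NOWAIT and STABLE_WRITES bits are manipulated with the existing blk_queue_flag_set()/blk_queue_flag_clear() helpers and tested through the macros above. A small sketch of how a hypothetical bio-based driver might advertise both properties at queue setup time:

	static void mydrv_init_queue_flags(struct request_queue *q)
	{
		/* device can honour REQ_NOWAIT submissions */
		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
		/* pages must stay untouched until writeback completes */
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
	}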
@@ -682,7 +699,9 @@ static inline bool queue_is_mq(struct request_queue *q)
static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
- return q->limits.zoned;
+ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
+ return q->limits.zoned;
+ return BLK_ZONED_NONE;
}
static inline bool blk_queue_is_zoned(struct request_queue *q)
@@ -724,11 +743,51 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
return true;
return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
}
+
+static inline void blk_queue_max_open_zones(struct request_queue *q,
+ unsigned int max_open_zones)
+{
+ q->max_open_zones = max_open_zones;
+}
+
+static inline unsigned int queue_max_open_zones(const struct request_queue *q)
+{
+ return q->max_open_zones;
+}
+
+static inline void blk_queue_max_active_zones(struct request_queue *q,
+ unsigned int max_active_zones)
+{
+ q->max_active_zones = max_active_zones;
+}
+
+static inline unsigned int queue_max_active_zones(const struct request_queue *q)
+{
+ return q->max_active_zones;
+}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
return 0;
}
+static inline bool blk_queue_zone_is_seq(struct request_queue *q,
+ sector_t sector)
+{
+ return false;
+}
+static inline unsigned int blk_queue_zone_no(struct request_queue *q,
+ sector_t sector)
+{
+ return 0;
+}
+static inline unsigned int queue_max_open_zones(const struct request_queue *q)
+{
+ return 0;
+}
+static inline unsigned int queue_max_active_zones(const struct request_queue *q)
+{
+ return 0;
+}
#endif /* CONFIG_BLK_DEV_ZONED */
static inline bool rq_is_sync(struct request *rq)
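The max_open_zones/max_active_zones pair gives zoned drivers a standard place to publish the device's zone resource limits. A sketch of the setup side, assuming a hypothetical driver built with CONFIG_BLK_DEV_ZONED (the numbers are invented):

	static void mydrv_set_zone_resources(struct request_queue *q)
	{
		/* values reported by the (hypothetical) device */
		blk_queue_max_open_zones(q, 128);
		blk_queue_max_active_zones(q, 256);
	}

Consumers then read the limits back through queue_max_open_zones()/queue_max_active_zones(), or via the bdev_max_open_zones()/bdev_max_active_zones() wrappers added further down.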
@@ -747,6 +806,9 @@ static inline bool rq_mergeable(struct request *rq)
if (req_op(rq) == REQ_OP_WRITE_ZEROES)
return false;
+ if (req_op(rq) == REQ_OP_ZONE_APPEND)
+ return false;
+
if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
return false;
if (rq->rq_flags & RQF_NOMERGE_FLAGS)
@@ -842,8 +904,7 @@ static inline void rq_flush_dcache_pages(struct request *rq)
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
-extern blk_qc_t generic_make_request(struct bio *bio);
-extern blk_qc_t direct_make_request(struct bio *bio);
+blk_qc_t submit_bio_noacct(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
@@ -857,7 +918,7 @@ extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
-extern void blk_queue_split(struct request_queue *, struct bio **);
+extern void blk_queue_split(struct bio **);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
unsigned int, void __user *);
@@ -1014,11 +1075,17 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
static inline unsigned int blk_max_size_offset(struct request_queue *q,
sector_t offset)
{
- if (!q->limits.chunk_sectors)
+ unsigned int chunk_sectors = q->limits.chunk_sectors;
+
+ if (!chunk_sectors)
return q->limits.max_sectors;
- return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
- (offset & (q->limits.chunk_sectors - 1))));
+ if (likely(is_power_of_2(chunk_sectors)))
+ chunk_sectors -= offset & (chunk_sectors - 1);
+ else
+ chunk_sectors -= sector_div(offset, chunk_sectors);
+
+ return min(q->limits.max_sectors, chunk_sectors);
}
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
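blk_max_size_offset() previously assumed chunk_sectors was a power of two; the rewrite keeps the cheap mask for that case and falls back to sector_div() otherwise. A worked example with invented numbers:

	/*
	 * chunk_sectors = 65536 (power of two), offset = 70000:
	 *   70000 & (65536 - 1) = 4464  ->  65536 - 4464 = 61072 sectors left in the chunk
	 * chunk_sectors = 6000 (not a power of two), offset = 70000:
	 *   70000 % 6000 = 4000         ->  6000 - 4000  = 2000 sectors left in the chunk
	 * Either result is then clamped to q->limits.max_sectors.
	 */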
@@ -1060,7 +1127,6 @@ void blk_steal_bios(struct bio_list *list, struct request *rq);
extern bool blk_update_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes);
-extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
/*
@@ -1081,9 +1147,12 @@ extern void blk_queue_max_write_same_sectors(struct request_queue *q,
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
+extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
+ unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
+void blk_queue_update_readahead(struct request_queue *q);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
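Since a zone append must not cross a zone boundary, drivers would typically cap the new max_zone_append_sectors limit at the zone size. A sketch of the setup call, with an invented helper name:

	static void mydrv_set_zone_append_limit(struct request_queue *q,
						unsigned int zone_sectors)
	{
		/* an append can never span a zone, so the zone size is the ceiling */
		blk_queue_max_zone_append_sectors(q, zone_sectors);
	}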
@@ -1093,15 +1162,9 @@ extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
-extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
- sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset);
-extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
-extern int blk_queue_dma_drain(struct request_queue *q,
- dma_drain_needed_fn *dma_drain_needed,
- void *buf, unsigned int size);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
@@ -1138,15 +1201,23 @@ static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
return max_t(unsigned short, rq->nr_phys_segments, 1);
}
-extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist, struct scatterlist **last_sg);
+static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct scatterlist *last_sg = NULL;
+
+ return __blk_rq_map_sg(q, rq, sglist, &last_sg);
+}
extern void blk_dump_rq_flags(struct request *, char *);
-extern long nr_blockdev_pages(void);
bool __must_check blk_get_queue(struct request_queue *);
-struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id);
+struct request_queue *blk_alloc_queue(int node_id);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
+#ifdef CONFIG_BLOCK
/*
* blk_plug permits building a queue of related requests by holding the I/O
* fragments for a short period. This allows merging of sequential requests
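Existing callers keep the familiar three-argument blk_rq_map_sg(); it is now a thin wrapper that discards the last-segment pointer which __blk_rq_map_sg() reports for callers that need it. A sketch of typical use in a hypothetical blk-mq driver, assuming the scatterlist has been sized to queue_max_segments():

	static blk_status_t mydrv_map_request(struct request_queue *q,
					      struct request *rq,
					      struct scatterlist *sgl)
	{
		int nents = blk_rq_map_sg(q, rq, sgl);

		if (nents <= 0)
			return BLK_STS_IOERR;

		/* hand sgl/nents to the hardware, e.g. for DMA mapping */
		return BLK_STS_OK;
	}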
@@ -1164,6 +1235,7 @@ struct blk_plug {
struct list_head cb_list; /* md requires an unplug callback */
unsigned short rq_count;
bool multiple_queues;
+ bool nowait;
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
@@ -1206,7 +1278,47 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
!list_empty(&plug->cb_list));
}
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
+int blkdev_issue_flush(struct block_device *, gfp_t);
+long nr_blockdev_pages(void);
+#else /* CONFIG_BLOCK */
+struct blk_plug {
+};
+
+static inline void blk_start_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_finish_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_flush_plug(struct task_struct *task)
+{
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *task)
+{
+}
+
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+ return false;
+}
+
+static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
+{
+ return 0;
+}
+
+static inline long nr_blockdev_pages(void)
+{
+ return 0;
+}
+#endif /* CONFIG_BLOCK */
+
+extern void blk_io_schedule(void);
+
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
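blkdev_issue_flush() loses its error-sector argument, so callers shrink to just the device and the allocation mask. For illustration (the wrapper name is invented):

	static int mydrv_flush(struct block_device *bdev)
	{
		return blkdev_issue_flush(bdev, GFP_KERNEL);
	}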
@@ -1250,6 +1362,11 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
extern int blk_verify_command(unsigned char *cmd, fmode_t mode);
+static inline bool bdev_is_partition(struct block_device *bdev)
+{
+ return bdev->bd_partno;
+}
+
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
@@ -1293,6 +1410,14 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
return q->limits.max_segment_size;
}
+static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
+{
+
+ const struct queue_limits *l = &q->limits;
+
+ return min(l->max_zone_append_sectors, l->max_sectors);
+}
+
static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
int retval = 512;
@@ -1361,10 +1486,9 @@ static inline int bdev_alignment_offset(struct block_device *bdev)
if (q->limits.misaligned)
return -1;
-
- if (bdev != bdev->bd_contains)
- return bdev->bd_part->alignment_offset;
-
+ if (bdev_is_partition(bdev))
+ return queue_limit_alignment_offset(&q->limits,
+ bdev->bd_part->start_sect);
return q->limits.alignment_offset;
}
@@ -1403,9 +1527,9 @@ static inline int bdev_discard_alignment(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
- if (bdev != bdev->bd_contains)
- return bdev->bd_part->discard_alignment;
-
+ if (bdev_is_partition(bdev))
+ return queue_limit_discard_alignment(&q->limits,
+ bdev->bd_part->start_sect);
return q->limits.discard_alignment;
}
@@ -1458,6 +1582,24 @@ static inline sector_t bdev_zone_sectors(struct block_device *bdev)
return 0;
}
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q)
+ return queue_max_open_zones(q);
+ return 0;
+}
+
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q)
+ return queue_max_active_zones(q);
+ return 0;
+}
+
static inline int queue_dma_alignment(const struct request_queue *q)
{
return q ? q->dma_alignment : 511;
@@ -1483,7 +1625,7 @@ static inline unsigned int blksize_bits(unsigned int size)
static inline unsigned int block_size(struct block_device *bdev)
{
- return bdev->bd_block_size;
+ return 1 << bdev->bd_inode->i_blkbits;
}
int kblockd_schedule_work(struct work_struct *work);
@@ -1530,10 +1672,6 @@ extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
- struct request *);
-extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
- struct bio *);
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
@@ -1551,6 +1689,12 @@ struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
return blk_get_integrity(bdev->bd_disk);
}
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return q->integrity.profile;
+}
+
static inline bool blk_integrity_rq(struct request *rq)
{
return rq->cmd_flags & REQ_INTEGRITY;
@@ -1631,6 +1775,11 @@ static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
return NULL;
}
+static inline bool
+blk_integrity_queue_supports_integrity(struct request_queue *q)
+{
+ return false;
+}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
return 0;
@@ -1650,18 +1799,6 @@ static inline unsigned short queue_max_integrity_segments(const struct request_q
{
return 0;
}
-static inline bool blk_integrity_merge_rq(struct request_queue *rq,
- struct request *r1,
- struct request *r2)
-{
- return true;
-}
-static inline bool blk_integrity_merge_bio(struct request_queue *rq,
- struct request *r,
- struct bio *b)
-{
- return true;
-}
static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
unsigned int sectors)
@@ -1682,7 +1819,27 @@ static inline struct bio_vec *rq_integrity_vec(struct request *rq)
#endif /* CONFIG_BLK_DEV_INTEGRITY */
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+
+bool blk_ksm_register(struct blk_keyslot_manager *ksm, struct request_queue *q);
+
+void blk_ksm_unregister(struct request_queue *q);
+
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+static inline bool blk_ksm_register(struct blk_keyslot_manager *ksm,
+ struct request_queue *q)
+{
+ return true;
+}
+
+static inline void blk_ksm_unregister(struct request_queue *q) { }
+
+#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+
+
struct block_device_operations {
+ blk_qc_t (*submit_bio) (struct bio *bio);
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
@@ -1690,8 +1847,6 @@ struct block_device_operations {
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
- /* ->media_changed() is DEPRECATED, use ->check_events() instead */
- int (*media_changed) (struct gendisk *);
void (*unlock_native_capacity) (struct gendisk *);
int (*revalidate_disk) (struct gendisk *);
int (*getgeo)(struct block_device *, struct hd_geometry *);
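With make_request_fn gone, bio-based drivers now hand their submission path to the block layer through the new ->submit_bio method. A minimal skeleton of a hypothetical driver (names invented):

	static blk_qc_t mydrv_submit_bio(struct bio *bio)
	{
		/* complete immediately; a real driver would remap or queue the bio */
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	static const struct block_device_operations mydrv_fops = {
		.owner		= THIS_MODULE,
		.submit_bio	= mydrv_submit_bio,
	};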
@@ -1719,6 +1874,7 @@ extern int bdev_write_page(struct block_device *, sector_t, struct page *,
#ifdef CONFIG_BLK_DEV_ZONED
bool blk_req_needs_zone_write_lock(struct request *rq);
+bool blk_req_zone_write_trylock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);
@@ -1770,64 +1926,102 @@ static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
}
#endif /* CONFIG_BLK_DEV_ZONED */
-#else /* CONFIG_BLOCK */
-
-struct block_device;
-
-/*
- * stubs for when the block layer is configured out
- */
-#define buffer_heads_over_limit 0
-
-static inline long nr_blockdev_pages(void)
+static inline void blk_wake_io_task(struct task_struct *waiter)
{
- return 0;
+ /*
+ * If we're polling, the task itself is doing the completions. For
+ * that case, we don't need to signal a wakeup, it's enough to just
+ * mark us as RUNNING.
+ */
+ if (waiter == current)
+ __set_current_state(TASK_RUNNING);
+ else
+ wake_up_process(waiter);
}
-struct blk_plug {
-};
+unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
+ unsigned int op);
+void disk_end_io_acct(struct gendisk *disk, unsigned int op,
+ unsigned long start_time);
-static inline void blk_start_plug(struct blk_plug *plug)
-{
-}
+unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
+ struct bio *bio);
+void part_end_io_acct(struct hd_struct *part, struct bio *bio,
+ unsigned long start_time);
-static inline void blk_finish_plug(struct blk_plug *plug)
+/**
+ * bio_start_io_acct - start I/O accounting for bio based drivers
+ * @bio: bio to start account for
+ *
+ * Returns the start time that should be passed back to bio_end_io_acct().
+ */
+static inline unsigned long bio_start_io_acct(struct bio *bio)
{
+ return disk_start_io_acct(bio->bi_disk, bio_sectors(bio), bio_op(bio));
}
-static inline void blk_flush_plug(struct task_struct *task)
+/**
+ * bio_end_io_acct - end I/O accounting for bio based drivers
+ * @bio: bio to end account for
+ * @start: start time returned by bio_start_io_acct()
+ */
+static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
+ return disk_end_io_acct(bio->bi_disk, bio_op(bio), start_time);
}
-static inline void blk_schedule_flush_plug(struct task_struct *task)
-{
-}
+int bdev_read_only(struct block_device *bdev);
+int set_blocksize(struct block_device *bdev, int size);
+const char *bdevname(struct block_device *bdev, char *buffer);
+struct block_device *lookup_bdev(const char *);
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+void blkdev_show(struct seq_file *seqf, off_t offset);
+
+#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
+#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
+#ifdef CONFIG_BLOCK
+#define BLKDEV_MAJOR_MAX 512
+#else
+#define BLKDEV_MAJOR_MAX 0
+#endif
+
+struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+ void *holder);
+struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
+int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
+ void *holder);
+void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
+ void *holder);
+void blkdev_put(struct block_device *bdev, fmode_t mode);
+
+struct block_device *I_BDEV(struct inode *inode);
+struct block_device *bdget_part(struct hd_struct *part);
+struct block_device *bdgrab(struct block_device *bdev);
+void bdput(struct block_device *);
+
+#ifdef CONFIG_BLOCK
+void invalidate_bdev(struct block_device *bdev);
+int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
+ loff_t lend);
+int sync_blockdev(struct block_device *bdev);
+#else
+static inline void invalidate_bdev(struct block_device *bdev)
{
- return false;
}
-
-static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
- sector_t *error_sector)
+static inline int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
+ loff_t lstart, loff_t lend)
{
return 0;
}
-
-#endif /* CONFIG_BLOCK */
-
-static inline void blk_wake_io_task(struct task_struct *waiter)
+static inline int sync_blockdev(struct block_device *bdev)
{
- /*
- * If we're polling, the task itself is doing the completions. For
- * that case, we don't need to signal a wakeup, it's enough to just
- * mark us as RUNNING.
- */
- if (waiter == current)
- __set_current_state(TASK_RUNNING);
- else
- wake_up_process(waiter);
+ return 0;
}
-
#endif
+int fsync_bdev(struct block_device *bdev);
+
+struct super_block *freeze_bdev(struct block_device *bdev);
+int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+
+#endif /* _LINUX_BLKDEV_H */
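The bio_start_io_acct()/bio_end_io_acct() pair added above gives bio-based drivers a simple two-call bracket for per-disk I/O accounting. A sketch of how a hypothetical driver that completes work synchronously might use it; for asynchronous completion the start time would be stashed and bio_end_io_acct() called from the completion path instead:

	static blk_qc_t mydrv_acct_submit_bio(struct bio *bio)
	{
		unsigned long start = bio_start_io_acct(bio);

		/* ... perform the I/O synchronously ... */

		bio_end_io_acct(bio, start);
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}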