Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 2387
1 file changed, 1346 insertions(+), 1041 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2fdb4a451b49..72e34acd439c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,992 +1,1163 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions Copyright (C) 1992 Drew Eckhardt
+ */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H
-#include <linux/sched.h>
-
-#ifdef CONFIG_BLOCK
-
-#include <linux/major.h>
-#include <linux/genhd.h>
+#include <linux/types.h>
+#include <linux/blk_types.h>
+#include <linux/device.h>
#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
-#include <linux/pagemap.h>
-#include <linux/backing-dev.h>
#include <linux/wait.h>
-#include <linux/mempool.h>
#include <linux/bio.h>
-#include <linux/stringify.h>
#include <linux/gfp.h>
-#include <linux/bsg.h>
-#include <linux/smp.h>
+#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
-
-#include <asm/scatterlist.h>
+#include <linux/percpu-refcount.h>
+#include <linux/blkzoned.h>
+#include <linux/sched.h>
+#include <linux/sbitmap.h>
+#include <linux/uuid.h>
+#include <linux/xarray.h>
+#include <linux/file.h>
+#include <linux/lockdep.h>
struct module;
-struct scsi_ioctl_command;
-
struct request_queue;
struct elevator_queue;
-struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
-struct bsg_job;
struct blkcg_gq;
-
-#define BLKDEV_MIN_RQ 4
-#define BLKDEV_MAX_RQ 128 /* Default maximum */
+struct blk_flush_queue;
+struct kiocb;
+struct pr_ops;
+struct rq_qos;
+struct blk_report_zones_args;
+struct blk_queue_stats;
+struct blk_stat_callback;
+struct blk_crypto_profile;
+
+extern const struct device_type disk_type;
+extern const struct device_type part_type;
+extern const struct class block_class;
/*
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 2
-
-struct request;
-typedef void (rq_end_io_fn)(struct request *, int);
+#define BLKCG_MAX_POLS 6
-#define BLK_RL_SYNCFULL (1U << 0)
-#define BLK_RL_ASYNCFULL (1U << 1)
-
-struct request_list {
- struct request_queue *q; /* the queue this rl belongs to */
-#ifdef CONFIG_BLK_CGROUP
- struct blkcg_gq *blkg; /* blkg this request pool belongs to */
-#endif
- /*
- * count[], starved[], and wait[] are indexed by
- * BLK_RW_SYNC/BLK_RW_ASYNC
- */
- int count[2];
- int starved[2];
- mempool_t *rq_pool;
- wait_queue_head_t wait[2];
- unsigned int flags;
-};
+#define DISK_MAX_PARTS 256
+#define DISK_NAME_LEN 32
+#define PARTITION_META_INFO_VOLNAMELTH 64
/*
- * request command types
+ * Enough for the string representation of any kind of UUID plus NULL.
+ * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
*/
-enum rq_cmd_type_bits {
- REQ_TYPE_FS = 1, /* fs request */
- REQ_TYPE_BLOCK_PC, /* scsi command */
- REQ_TYPE_SENSE, /* sense request */
- REQ_TYPE_PM_SUSPEND, /* suspend request */
- REQ_TYPE_PM_RESUME, /* resume request */
- REQ_TYPE_PM_SHUTDOWN, /* shutdown request */
- REQ_TYPE_SPECIAL, /* driver defined type */
- /*
- * for ATA/ATAPI devices. this really doesn't belong here, ide should
- * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
- * private REQ_LB opcodes to differentiate what type of request this is
- */
- REQ_TYPE_ATA_TASKFILE,
- REQ_TYPE_ATA_PC,
-};
+#define PARTITION_META_INFO_UUIDLTH (UUID_STRING_LEN + 1)
-#define BLK_MAX_CDB 16
+struct partition_meta_info {
+ char uuid[PARTITION_META_INFO_UUIDLTH];
+ u8 volname[PARTITION_META_INFO_VOLNAMELTH];
+};
-/*
- * try to put the fields that are referenced together in the same cacheline.
- * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
- * as well!
+/**
+ * DOC: genhd capability flags
+ *
+ * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
+ * removable media. When set, the device remains present even when media is not
+ * inserted. Shall not be set for devices which are removed entirely when the
+ * media is removed.
+ *
+ * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
+ * doesn't appear in sysfs, and can't be opened from userspace or using
+ * blkdev_get*. Used for the underlying components of multipath devices.
+ *
+ * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
+ * scan for partitions from add_disk, and users can't add partitions manually.
+ *
*/
-struct request {
- struct list_head queuelist;
- struct call_single_data csd;
+enum {
+ GENHD_FL_REMOVABLE = 1 << 0,
+ GENHD_FL_HIDDEN = 1 << 1,
+ GENHD_FL_NO_PART = 1 << 2,
+};
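/*
 * Example (not part of this header): a minimal sketch of how a driver for a
 * removable-media device might use the capability flags described above
 * before registration. The example_* name is a hypothetical placeholder.
 */
static int example_register_cardreader(struct gendisk *disk)
{
	/* Removable media; the device manages its own layout, so no partitions. */
	disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
	return add_disk(disk);
}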
- struct request_queue *q;
+enum {
+ DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
+ DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
+};
- unsigned int cmd_flags;
- enum rq_cmd_type_bits cmd_type;
- unsigned long atomic_flags;
+enum {
+ /* Poll even if events_poll_msecs is unset */
+ DISK_EVENT_FLAG_POLL = 1 << 0,
+ /* Forward events to udev */
+ DISK_EVENT_FLAG_UEVENT = 1 << 1,
+ /* Block event polling when open for exclusive write */
+ DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 1 << 2,
+};
- int cpu;
+struct disk_events;
+struct badblocks;
- /* the following two fields are internal, NEVER access directly */
- unsigned int __data_len; /* total data len */
- sector_t __sector; /* sector cursor */
+enum blk_integrity_checksum {
+ BLK_INTEGRITY_CSUM_NONE = 0,
+ BLK_INTEGRITY_CSUM_IP = 1,
+ BLK_INTEGRITY_CSUM_CRC = 2,
+ BLK_INTEGRITY_CSUM_CRC64 = 3,
+} __packed;
- struct bio *bio;
- struct bio *biotail;
+struct blk_integrity {
+ unsigned char flags;
+ enum blk_integrity_checksum csum_type;
+ unsigned char metadata_size;
+ unsigned char pi_offset;
+ unsigned char interval_exp;
+ unsigned char tag_size;
+ unsigned char pi_tuple_size;
+};
- struct hlist_node hash; /* merge hash */
+typedef unsigned int __bitwise blk_mode_t;
+
+/* open for reading */
+#define BLK_OPEN_READ ((__force blk_mode_t)(1 << 0))
+/* open for writing */
+#define BLK_OPEN_WRITE ((__force blk_mode_t)(1 << 1))
+/* open exclusively (vs other exclusive openers) */
+#define BLK_OPEN_EXCL ((__force blk_mode_t)(1 << 2))
+/* opened with O_NDELAY */
+#define BLK_OPEN_NDELAY ((__force blk_mode_t)(1 << 3))
+/* open for "writes" only for ioctls (special hack for floppy.c) */
+#define BLK_OPEN_WRITE_IOCTL ((__force blk_mode_t)(1 << 4))
+/* open is exclusive wrt all other BLK_OPEN_WRITE opens to the device */
+#define BLK_OPEN_RESTRICT_WRITES ((__force blk_mode_t)(1 << 5))
+/* return partition scanning errors */
+#define BLK_OPEN_STRICT_SCAN ((__force blk_mode_t)(1 << 6))
+
+struct gendisk {
/*
- * The rb_node is only used inside the io scheduler, requests
- * are pruned when moved to the dispatch queue. So let the
- * completion_data share space with the rb_node.
+ * major/first_minor/minors should not be set by any new driver; the
+ * block core will take care of allocating them automatically.
*/
- union {
- struct rb_node rb_node; /* sort/lookup */
- void *completion_data;
- };
+ int major;
+ int first_minor;
+ int minors;
+
+ char disk_name[DISK_NAME_LEN]; /* name of major driver */
+
+ unsigned short events; /* supported events */
+ unsigned short event_flags; /* flags related to event processing */
+
+ struct xarray part_tbl;
+ struct block_device *part0;
+
+ const struct block_device_operations *fops;
+ struct request_queue *queue;
+ void *private_data;
+
+ struct bio_set bio_split;
+
+ int flags;
+ unsigned long state;
+#define GD_NEED_PART_SCAN 0
+#define GD_READ_ONLY 1
+#define GD_DEAD 2
+#define GD_NATIVE_CAPACITY 3
+#define GD_ADDED 4
+#define GD_SUPPRESS_PART_SCAN 5
+#define GD_OWNS_QUEUE 6
+#define GD_ZONE_APPEND_USED 7
+
+ struct mutex open_mutex; /* open/close mutex */
+ unsigned open_partitions; /* number of open partitions */
+
+ struct backing_dev_info *bdi;
+ struct kobject queue_kobj; /* the queue/ directory */
+ struct kobject *slave_dir;
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+ struct list_head slave_bdevs;
+#endif
+ struct timer_rand_state *random;
+ struct disk_events *ev;
+#ifdef CONFIG_BLK_DEV_ZONED
/*
- * Three pointers are available for the IO schedulers, if they need
- * more they have to dynamically allocate it. Flush requests are
- * never put on the IO scheduler. So let the flush fields share
- * space with the elevator data.
- */
- union {
- struct {
- struct io_cq *icq;
- void *priv[2];
- } elv;
-
- struct {
- unsigned int seq;
- struct list_head list;
- rq_end_io_fn *saved_end_io;
- } flush;
- };
-
- struct gendisk *rq_disk;
- struct hd_struct *part;
- unsigned long start_time;
-#ifdef CONFIG_BLK_CGROUP
- struct request_list *rl; /* rl this rq is alloced from */
- unsigned long long start_time_ns;
- unsigned long long io_start_time_ns; /* when passed to hardware */
-#endif
- /* Number of scatter-gather DMA addr+len pairs after
- * physical address coalescing is performed.
+ * Zoned block device information. Reads of this information must be
+ * protected with blk_queue_enter() / blk_queue_exit(). Modifying this
+ * information is only allowed while no requests are being processed.
+ * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue().
*/
- unsigned short nr_phys_segments;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
- unsigned short nr_integrity_segments;
+ unsigned int nr_zones;
+ unsigned int zone_capacity;
+ unsigned int last_zone_capacity;
+ u8 __rcu *zones_cond;
+ unsigned int zone_wplugs_hash_bits;
+ atomic_t nr_zone_wplugs;
+ spinlock_t zone_wplugs_lock;
+ mempool_t *zone_wplugs_pool;
+ struct hlist_head *zone_wplugs_hash;
+ struct workqueue_struct *zone_wplugs_wq;
+#endif /* CONFIG_BLK_DEV_ZONED */
+
+#if IS_ENABLED(CONFIG_CDROM)
+ struct cdrom_device_info *cdi;
#endif
-
- unsigned short ioprio;
-
- int ref_count;
-
- void *special; /* opaque pointer available for LLD use */
- char *buffer; /* kaddr of the current segment if available */
-
- int tag;
- int errors;
+ int node_id;
+ struct badblocks *bb;
+ struct lockdep_map lockdep_map;
+ u64 diskseq;
+ blk_mode_t open_mode;
/*
- * when request is used as a packet command carrier
+ * Independent sector access ranges. This is always NULL for
+ * devices that do not have multiple independent access ranges.
*/
- unsigned char __cmd[BLK_MAX_CDB];
- unsigned char *cmd;
- unsigned short cmd_len;
+ struct blk_independent_access_ranges *ia_ranges;
- unsigned int extra_len; /* length of alignment and padding */
- unsigned int sense_len;
- unsigned int resid_len; /* residual count */
- void *sense;
+ struct mutex rqos_state_mutex; /* rqos state change mutex */
+};
- unsigned long deadline;
- struct list_head timeout_list;
- unsigned int timeout;
- int retries;
+/**
+ * disk_openers - returns how many openers there are for a disk
+ * @disk: disk to check
+ *
+ * This returns the number of openers for a disk. Note that this value is only
+ * stable if disk->open_mutex is held.
+ *
+ * Note: Due to a quirk in the block layer open code, each open partition is
+ * only counted once even if there are multiple openers.
+ */
+static inline unsigned int disk_openers(struct gendisk *disk)
+{
+ return atomic_read(&disk->part0->bd_openers);
+}
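/*
 * Example (not part of this header): a sketch of using disk_openers() under
 * disk->open_mutex, since the count is only stable while that mutex is held.
 * The shrink policy shown is purely illustrative.
 */
static int example_try_shrink(struct gendisk *disk, sector_t new_sectors)
{
	int ret = 0;

	mutex_lock(&disk->open_mutex);
	if (disk_openers(disk))
		ret = -EBUSY;	/* refuse while anyone holds the device open */
	else
		set_capacity(disk, new_sectors);
	mutex_unlock(&disk->open_mutex);

	return ret;
}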
- /*
- * completion callback.
- */
- rq_end_io_fn *end_io;
- void *end_io_data;
+/**
+ * disk_has_partscan - return %true if partition scanning is enabled on a disk
+ * @disk: disk to check
+ *
+ * Returns %true if partition scanning is enabled for @disk, or %false if
+ * partition scanning is disabled either permanently or temporarily.
+ */
+static inline bool disk_has_partscan(struct gendisk *disk)
+{
+ return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) &&
+ !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+}
- /* for bidi */
- struct request *next_rq;
-};
+/*
+ * The gendisk is refcounted by the part0 block_device, and the bd_device
+ * therein is also used for device model presentation in sysfs.
+ */
+#define dev_to_disk(device) \
+ (dev_to_bdev(device)->bd_disk)
+#define disk_to_dev(disk) \
+ (&((disk)->part0->bd_device))
-static inline unsigned short req_get_ioprio(struct request *req)
+#if IS_REACHABLE(CONFIG_CDROM)
+#define disk_to_cdi(disk) ((disk)->cdi)
+#else
+#define disk_to_cdi(disk) NULL
+#endif
+
+static inline dev_t disk_devt(struct gendisk *disk)
{
- return req->ioprio;
+ return MKDEV(disk->major, disk->first_minor);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
- * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
- * requests. Some step values could eventually be made generic.
+ * We should strive for 1 << (PAGE_SHIFT + MAX_PAGECACHE_ORDER);
+ * however, we constrain this to what we can validate and test.
*/
-struct request_pm_state
+#define BLK_MAX_BLOCK_SIZE SZ_64K
+#else
+#define BLK_MAX_BLOCK_SIZE PAGE_SIZE
+#endif
+
+/* blk_validate_limits() validates bsize, so drivers don't usually need to */
+static inline int blk_validate_block_size(unsigned long bsize)
{
- /* PM state machine step value, currently driver specific */
- int pm_step;
- /* requested PM state value (S1, S2, S3, S4, ...) */
- u32 pm_state;
- void* data; /* for driver use */
-};
+ if (bsize < 512 || bsize > BLK_MAX_BLOCK_SIZE || !is_power_of_2(bsize))
+ return -EINVAL;
-#include <linux/elevator.h>
+ return 0;
+}
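/*
 * Example (not part of this header): validating a user-supplied block size
 * before placing it in queue_limits. Names are hypothetical.
 */
static int example_setup_limits(struct queue_limits *lim, unsigned long bsize)
{
	/* Rejects sizes outside 512..BLK_MAX_BLOCK_SIZE or non powers of two. */
	int ret = blk_validate_block_size(bsize);

	if (ret)
		return ret;

	lim->logical_block_size = bsize;
	lim->physical_block_size = bsize;
	return 0;
}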
-typedef void (request_fn_proc) (struct request_queue *q);
-typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
-typedef int (prep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
+static inline bool blk_op_is_passthrough(blk_opf_t op)
+{
+ op &= REQ_OP_MASK;
+ return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
+}
-struct bio_vec;
-struct bvec_merge_data {
- struct block_device *bi_bdev;
- sector_t bi_sector;
- unsigned bi_size;
- unsigned long bi_rw;
-};
-typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
- struct bio_vec *);
-typedef void (softirq_done_fn)(struct request *);
-typedef int (dma_drain_needed_fn)(struct request *);
-typedef int (lld_busy_fn) (struct request_queue *q);
-typedef int (bsg_job_fn) (struct bsg_job *);
-
-enum blk_eh_timer_return {
- BLK_EH_NOT_HANDLED,
- BLK_EH_HANDLED,
- BLK_EH_RESET_TIMER,
-};
+/* flags set by the driver in queue_limits.features */
+typedef unsigned int __bitwise blk_features_t;
-typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
+/* supports a volatile write cache */
+#define BLK_FEAT_WRITE_CACHE ((__force blk_features_t)(1u << 0))
-enum blk_queue_state {
- Queue_down,
- Queue_up,
-};
+/* supports passing on the FUA bit */
+#define BLK_FEAT_FUA ((__force blk_features_t)(1u << 1))
-struct blk_queue_tag {
- struct request **tag_index; /* map of busy tags */
- unsigned long *tag_map; /* bit map of free/busy tags */
- int busy; /* current depth */
- int max_depth; /* what we will send to device */
- int real_max_depth; /* what the array can hold */
- atomic_t refcnt; /* map can be shared */
-};
+/* rotational device (hard drive or floppy) */
+#define BLK_FEAT_ROTATIONAL ((__force blk_features_t)(1u << 2))
+
+/* contributes to the random number pool */
+#define BLK_FEAT_ADD_RANDOM ((__force blk_features_t)(1u << 3))
-#define BLK_SCSI_MAX_CMDS (256)
-#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
+/* do disk/partitions IO accounting */
+#define BLK_FEAT_IO_STAT ((__force blk_features_t)(1u << 4))
+
+/* don't modify data until writeback is done */
+#define BLK_FEAT_STABLE_WRITES ((__force blk_features_t)(1u << 5))
+
+/* always completes in submit context */
+#define BLK_FEAT_SYNCHRONOUS ((__force blk_features_t)(1u << 6))
+
+/* supports REQ_NOWAIT */
+#define BLK_FEAT_NOWAIT ((__force blk_features_t)(1u << 7))
+
+/* supports DAX */
+#define BLK_FEAT_DAX ((__force blk_features_t)(1u << 8))
+
+/* supports I/O polling */
+#define BLK_FEAT_POLL ((__force blk_features_t)(1u << 9))
+
+/* is a zoned device */
+#define BLK_FEAT_ZONED ((__force blk_features_t)(1u << 10))
+
+/* supports PCI(e) p2p requests */
+#define BLK_FEAT_PCI_P2PDMA ((__force blk_features_t)(1u << 12))
+
+/* skip this queue in blk_mq_(un)quiesce_tagset */
+#define BLK_FEAT_SKIP_TAGSET_QUIESCE ((__force blk_features_t)(1u << 13))
+
+/* undocumented magic for bcache */
+#define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
+ ((__force blk_features_t)(1u << 15))
+
+/* atomic writes enabled */
+#define BLK_FEAT_ATOMIC_WRITES \
+ ((__force blk_features_t)(1u << 16))
+
+/*
+ * Flags automatically inherited when stacking limits.
+ */
+#define BLK_FEAT_INHERIT_MASK \
+ (BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
+ BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | \
+ BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
+
+/* internal flags in queue_limits.flags */
+typedef unsigned int __bitwise blk_flags_t;
+
+/* do not send FLUSH/FUA commands despite advertising a write cache */
+#define BLK_FLAG_WRITE_CACHE_DISABLED ((__force blk_flags_t)(1u << 0))
+
+/* I/O topology is misaligned */
+#define BLK_FLAG_MISALIGNED ((__force blk_flags_t)(1u << 1))
+
+/* passthrough command IO accounting */
+#define BLK_FLAG_IOSTATS_PASSTHROUGH ((__force blk_flags_t)(1u << 2))
struct queue_limits {
- unsigned long bounce_pfn;
+ blk_features_t features;
+ blk_flags_t flags;
unsigned long seg_boundary_mask;
+ unsigned long virt_boundary_mask;
unsigned int max_hw_sectors;
+ unsigned int max_dev_sectors;
+ unsigned int chunk_sectors;
unsigned int max_sectors;
+ unsigned int max_user_sectors;
unsigned int max_segment_size;
+ unsigned int max_fast_segment_size;
unsigned int physical_block_size;
+ unsigned int logical_block_size;
unsigned int alignment_offset;
unsigned int io_min;
unsigned int io_opt;
unsigned int max_discard_sectors;
- unsigned int max_write_same_sectors;
+ unsigned int max_hw_discard_sectors;
+ unsigned int max_user_discard_sectors;
+ unsigned int max_secure_erase_sectors;
+ unsigned int max_write_zeroes_sectors;
+ unsigned int max_wzeroes_unmap_sectors;
+ unsigned int max_hw_wzeroes_unmap_sectors;
+ unsigned int max_user_wzeroes_unmap_sectors;
+ unsigned int max_hw_zone_append_sectors;
+ unsigned int max_zone_append_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
+ unsigned int zone_write_granularity;
+
+ /* atomic write limits */
+ unsigned int atomic_write_hw_max;
+ unsigned int atomic_write_max_sectors;
+ unsigned int atomic_write_hw_boundary;
+ unsigned int atomic_write_boundary_sectors;
+ unsigned int atomic_write_hw_unit_min;
+ unsigned int atomic_write_unit_min;
+ unsigned int atomic_write_hw_unit_max;
+ unsigned int atomic_write_unit_max;
- unsigned short logical_block_size;
unsigned short max_segments;
unsigned short max_integrity_segments;
+ unsigned short max_discard_segments;
- unsigned char misaligned;
- unsigned char discard_misaligned;
- unsigned char cluster;
- unsigned char discard_zeroes_data;
-};
+ unsigned short max_write_streams;
+ unsigned int write_stream_granularity;
-struct request_queue {
- /*
- * Together with queue_head for cacheline sharing
- */
- struct list_head queue_head;
- struct request *last_merge;
- struct elevator_queue *elevator;
- int nr_rqs[2]; /* # allocated [a]sync rqs */
- int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
+ unsigned int max_open_zones;
+ unsigned int max_active_zones;
/*
- * If blkcg is not used, @q->root_rl serves all requests. If blkcg
- * is used, root blkg allocates from @q->root_rl and all other
- * blkgs from their own blkg->rl. Which one to use should be
- * determined using bio_request_list().
+ * Drivers that set dma_alignment to less than 511 must be prepared to
+ * handle individual bvecs that are not a multiple of SECTOR_SIZE
+ * due to possible offsets.
*/
- struct request_list root_rl;
-
- request_fn_proc *request_fn;
- make_request_fn *make_request_fn;
- prep_rq_fn *prep_rq_fn;
- unprep_rq_fn *unprep_rq_fn;
- merge_bvec_fn *merge_bvec_fn;
- softirq_done_fn *softirq_done_fn;
- rq_timed_out_fn *rq_timed_out_fn;
- dma_drain_needed_fn *dma_drain_needed;
- lld_busy_fn *lld_busy_fn;
+ unsigned int dma_alignment;
+ unsigned int dma_pad_mask;
- /*
- * Dispatch queue sorting
- */
- sector_t end_sector;
- struct request *boundary_rq;
+ struct blk_integrity integrity;
+};
- /*
- * Delayed queue handling
- */
- struct delayed_work delay_work;
+typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
+ void *data);
+
+int disk_report_zone(struct gendisk *disk, struct blk_zone *zone,
+ unsigned int idx, struct blk_report_zones_args *args);
+
+int blkdev_get_zone_info(struct block_device *bdev, sector_t sector,
+ struct blk_zone *zone);
+
+#define BLK_ALL_ZONES ((unsigned int)-1)
+int blkdev_report_zones(struct block_device *bdev, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+int blkdev_report_zones_cached(struct block_device *bdev, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
+ sector_t sectors, sector_t nr_sectors);
+int blk_revalidate_disk_zones(struct gendisk *disk);
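/*
 * Example (not part of this header): a sketch of a report_zones_cb used with
 * blkdev_report_zones() to count zones that are not empty. Purely
 * illustrative; blkdev_report_zones() returns the number of zones reported
 * or a negative errno, which is simply passed back here.
 */
static int example_count_used_zone(struct blk_zone *zone, unsigned int idx,
				   void *data)
{
	unsigned int *used = data;

	if (zone->cond != BLK_ZONE_COND_EMPTY)
		(*used)++;
	return 0;	/* continue reporting */
}

static int example_count_used_zones(struct block_device *bdev,
				    unsigned int *used)
{
	*used = 0;
	return blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				   example_count_used_zone, used);
}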
+
+/*
+ * Independent access ranges: struct blk_independent_access_range describes
+ * a range of contiguous sectors that can be accessed using device command
+ * execution resources that are independent from the resources used for
+ * other access ranges. This is typically found with single-LUN multi-actuator
+ * HDDs where each access range is served by a different set of heads.
+ * The set of independent ranges supported by the device is defined using
+ * struct blk_independent_access_ranges. The independent ranges must not overlap
+ * and must include all sectors within the disk capacity (no sector holes
+ * allowed).
+ * For a device with multiple ranges, requests targeting sectors in different
+ * ranges can be executed in parallel. A request can straddle an access range
+ * boundary.
+ */
+struct blk_independent_access_range {
+ struct kobject kobj;
+ sector_t sector;
+ sector_t nr_sectors;
+};
- struct backing_dev_info backing_dev_info;
+struct blk_independent_access_ranges {
+ struct kobject kobj;
+ bool sysfs_registered;
+ unsigned int nr_ia_ranges;
+ struct blk_independent_access_range ia_range[];
+};
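/*
 * Example (not part of this header): given the description above, a sketch of
 * mapping a sector to the independent access range that serves it. A NULL
 * disk->ia_ranges means the whole device acts as a single range.
 */
static int example_sector_to_ia_range(struct gendisk *disk, sector_t sector)
{
	struct blk_independent_access_ranges *iars = disk->ia_ranges;
	unsigned int i;

	if (!iars)
		return 0;

	for (i = 0; i < iars->nr_ia_ranges; i++) {
		struct blk_independent_access_range *r = &iars->ia_range[i];

		if (sector >= r->sector && sector < r->sector + r->nr_sectors)
			return i;
	}
	return -ERANGE;	/* should not happen: ranges cover the whole capacity */
}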
+struct request_queue {
/*
* The queue owner gets to use this for whatever they like.
* ll_rw_blk doesn't touch it.
*/
void *queuedata;
+ struct elevator_queue *elevator;
+
+ const struct blk_mq_ops *mq_ops;
+
+ /* sw queues */
+ struct blk_mq_ctx __percpu *queue_ctx;
+
/*
* various queue flags, see QUEUE_* below
*/
unsigned long queue_flags;
- /*
- * ida allocated id for this queue. Used to index queues from
- * ioctx.
- */
- int id;
+ unsigned int __data_racy rq_timeout;
- /*
- * queue needs bounce pages for pages above this limit
- */
- gfp_t bounce_gfp;
+ unsigned int queue_depth;
- /*
- * protects queue structures from reentrancy. ->__queue_lock should
- * _never_ be used directly, it is queue private. always use
- * ->queue_lock.
- */
- spinlock_t __queue_lock;
- spinlock_t *queue_lock;
+ refcount_t refs;
+
+ /* hw dispatch queues */
+ unsigned int nr_hw_queues;
+ struct blk_mq_hw_ctx * __rcu *queue_hw_ctx;
+
+ struct percpu_ref q_usage_counter;
+ struct lock_class_key io_lock_cls_key;
+ struct lockdep_map io_lockdep_map;
+
+ struct lock_class_key q_lock_cls_key;
+ struct lockdep_map q_lockdep_map;
+
+ struct request *last_merge;
+
+ spinlock_t queue_lock;
+
+ int quiesce_depth;
+
+ struct gendisk *disk;
/*
- * queue kobject
+ * mq queue kobject
*/
- struct kobject kobj;
+ struct kobject *mq_kobj;
+
+ struct queue_limits limits;
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
struct device *dev;
- int rpm_status;
- unsigned int nr_pending;
+ enum rpm_status rpm_status;
#endif
/*
- * queue settings
+ * Number of contexts that have called blk_set_pm_only(). If this
+ * counter is above zero then only RQF_PM requests are processed.
*/
- unsigned long nr_requests; /* Max # of requests */
- unsigned int nr_congestion_on;
- unsigned int nr_congestion_off;
- unsigned int nr_batching;
+ atomic_t pm_only;
- unsigned int dma_drain_size;
- void *dma_drain_buffer;
- unsigned int dma_pad_mask;
- unsigned int dma_alignment;
+ struct blk_queue_stats *stats;
+ struct rq_qos *rq_qos;
+ struct mutex rq_qos_mutex;
- struct blk_queue_tag *queue_tags;
- struct list_head tag_busy_list;
+ /*
+ * ida allocated id for this queue. Used to index queues from
+ * ioctx.
+ */
+ int id;
- unsigned int nr_sorted;
- unsigned int in_flight[2];
/*
- * Number of active block driver functions for which blk_drain_queue()
- * must wait. Must be incremented around functions that unlock the
- * queue_lock internally, e.g. scsi_request_fn().
+ * queue settings
*/
- unsigned int request_fn_active;
+ unsigned long nr_requests; /* Max # of requests */
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+ struct blk_crypto_profile *crypto_profile;
+ struct kobject *crypto_kobject;
+#endif
- unsigned int rq_timeout;
struct timer_list timeout;
- struct list_head timeout_list;
+ struct work_struct timeout_work;
+
+ atomic_t nr_active_requests_shared_tags;
+
+ struct blk_mq_tags *sched_shared_tags;
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
+ struct mutex blkcg_mutex;
#endif
- struct queue_limits limits;
-
- /*
- * sg stuff
- */
- unsigned int sg_timeout;
- unsigned int sg_reserved_size;
int node;
+
+ spinlock_t requeue_lock;
+ struct list_head requeue_list;
+ struct delayed_work requeue_work;
+
#ifdef CONFIG_BLK_DEV_IO_TRACE
- struct blk_trace *blk_trace;
+ struct blk_trace __rcu *blk_trace;
#endif
/*
* for flush operations
*/
- unsigned int flush_flags;
- unsigned int flush_not_queueable:1;
- unsigned int flush_queue_delayed:1;
- unsigned int flush_pending_idx:1;
- unsigned int flush_running_idx:1;
- unsigned long flush_pending_since;
- struct list_head flush_queue[2];
- struct list_head flush_data_in_flight;
- struct request flush_rq;
+ struct blk_flush_queue *fq;
+ struct list_head flush_list;
+
+ /*
+ * Protects against I/O scheduler switching, particularly when updating
+ * q->elevator. Since the elevator update code path may also modify q->
+ * nr_requests and wbt latency, this lock also protects the sysfs attrs
+ * nr_requests and wbt_lat_usec. Additionally the nr_hw_queues update
+ * may modify hctx tags, reserved-tags and cpumask, so this lock also
+ * helps protect the hctx sysfs/debugfs attrs. To ensure proper locking
+ * order during an elevator or nr_hw_queue update, first freeze the
+ * queue, then acquire ->elevator_lock.
+ */
+ struct mutex elevator_lock;
struct mutex sysfs_lock;
+ /*
+ * Protects queue limits and also sysfs attribute read_ahead_kb.
+ */
+ struct mutex limits_lock;
- int bypass_depth;
+ /*
+ * for reusing dead hctx instance in case of updating
+ * nr_hw_queues
+ */
+ struct list_head unused_hctx_list;
+ spinlock_t unused_hctx_lock;
-#if defined(CONFIG_BLK_DEV_BSG)
- bsg_job_fn *bsg_job_fn;
- int bsg_job_size;
- struct bsg_class_device bsg_dev;
-#endif
+ int mq_freeze_depth;
-#ifdef CONFIG_BLK_CGROUP
- struct list_head all_q_node;
-#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
/* Throttle data */
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
+#ifdef CONFIG_LOCKDEP
+ struct task_struct *mq_freeze_owner;
+ int mq_freeze_owner_depth;
+ /*
+ * Records disk & queue state in current context, used in unfreeze
+ * queue
+ */
+ bool mq_freeze_disk_dead;
+ bool mq_freeze_queue_dying;
+#endif
+ wait_queue_head_t mq_freeze_wq;
+ /*
+ * Protect concurrent access to q_usage_counter by
+ * percpu_ref_kill() and percpu_ref_reinit().
+ */
+ struct mutex mq_freeze_lock;
+
+ struct blk_mq_tag_set *tag_set;
+ struct list_head tag_set_list;
+
+ struct dentry *debugfs_dir;
+ struct dentry *sched_debugfs_dir;
+ struct dentry *rqos_debugfs_dir;
+ /*
+ * Serializes all debugfs metadata operations using the above dentries.
+ */
+ struct mutex debugfs_mutex;
};
-#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
-#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
-#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
-#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
-#define QUEUE_FLAG_DYING 5 /* queue being torn down */
-#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
-#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP 9 /* complete on same CPU-group */
-#define QUEUE_FLAG_FAIL_IO 10 /* fake timeout */
-#define QUEUE_FLAG_STACKABLE 11 /* supports request stacking */
-#define QUEUE_FLAG_NONROT 12 /* non-rotational device (SSD) */
-#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 13 /* do IO stats */
-#define QUEUE_FLAG_DISCARD 14 /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
-#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
-#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
+/* Keep blk_queue_flag_name[] in sync with the definitions below */
+enum {
+ QUEUE_FLAG_DYING, /* queue being torn down */
+ QUEUE_FLAG_NOMERGES, /* disable merge attempts */
+ QUEUE_FLAG_SAME_COMP, /* complete on same CPU-group */
+ QUEUE_FLAG_FAIL_IO, /* fake timeout */
+ QUEUE_FLAG_NOXMERGES, /* No extended merges */
+ QUEUE_FLAG_SAME_FORCE, /* force complete on same CPU */
+ QUEUE_FLAG_INIT_DONE, /* queue is initialized */
+ QUEUE_FLAG_STATS, /* track IO start and completion times */
+ QUEUE_FLAG_REGISTERED, /* queue has been registered to a disk */
+ QUEUE_FLAG_QUIESCED, /* queue has been quiesced */
+ QUEUE_FLAG_RQ_ALLOC_TIME, /* record rq->alloc_time_ns */
+ QUEUE_FLAG_HCTX_ACTIVE, /* at least one blk-mq hctx is active */
+ QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */
+ QUEUE_FLAG_DISABLE_WBT_DEF, /* for sched to disable/enable wbt */
+ QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */
+ QUEUE_FLAG_QOS_ENABLED, /* qos is enabled */
+ QUEUE_FLAG_BIO_ISSUE_TIME, /* record bio->issue_time_ns */
+ QUEUE_FLAG_MAX
+};
+
+#define QUEUE_FLAG_MQ_DEFAULT (1UL << QUEUE_FLAG_SAME_COMP)
+
+void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
+void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
+
+#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
+#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
+#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+#define blk_queue_noxmerges(q) \
+ test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
+#define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL))
+#define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT)
+#define blk_queue_passthrough_stat(q) \
+ ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH)
+#define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX)
+#define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA)
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+#define blk_queue_rq_alloc_time(q) \
+ test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
+#else
+#define blk_queue_rq_alloc_time(q) false
+#endif
+
+#define blk_noretry_request(rq) \
+ ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
+ REQ_FAILFAST_DRIVER))
+#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
+#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
+#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
+#define blk_queue_sq_sched(q) test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
+#define blk_queue_skip_tagset_quiesce(q) \
+ ((q)->limits.features & BLK_FEAT_SKIP_TAGSET_QUIESCE)
+#define blk_queue_disable_wbt(q) \
+ test_bit(QUEUE_FLAG_DISABLE_WBT_DEF, &(q)->queue_flags)
+#define blk_queue_no_elv_switch(q) \
+ test_bit(QUEUE_FLAG_NO_ELV_SWITCH, &(q)->queue_flags)
+
+extern void blk_set_pm_only(struct request_queue *q);
+extern void blk_clear_pm_only(struct request_queue *q);
+
+#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
-#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_STACKABLE) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_ADD_RANDOM))
+#define dma_map_bvec(dev, bv, dir, attrs) \
+ dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
+ (dir), (attrs))
-static inline void queue_lockdep_assert_held(struct request_queue *q)
+static inline bool queue_is_mq(struct request_queue *q)
{
- if (q->queue_lock)
- lockdep_assert_held(q->queue_lock);
+ return q->mq_ops;
}
-static inline void queue_flag_set_unlocked(unsigned int flag,
- struct request_queue *q)
+#ifdef CONFIG_PM
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
- __set_bit(flag, &q->queue_flags);
+ return q->rpm_status;
}
-
-static inline int queue_flag_test_and_clear(unsigned int flag,
- struct request_queue *q)
+#else
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
- queue_lockdep_assert_held(q);
-
- if (test_bit(flag, &q->queue_flags)) {
- __clear_bit(flag, &q->queue_flags);
- return 1;
- }
-
- return 0;
+ return RPM_ACTIVE;
}
+#endif
-static inline int queue_flag_test_and_set(unsigned int flag,
- struct request_queue *q)
+static inline bool blk_queue_is_zoned(struct request_queue *q)
{
- queue_lockdep_assert_held(q);
+ return IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
+ (q->limits.features & BLK_FEAT_ZONED);
+}
- if (!test_bit(flag, &q->queue_flags)) {
- __set_bit(flag, &q->queue_flags);
+static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
+{
+ if (!blk_queue_is_zoned(disk->queue))
return 0;
- }
+ return sector >> ilog2(disk->queue->limits.chunk_sectors);
+}
- return 1;
+static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
+{
+ return bdev->bd_disk->queue->limits.max_open_zones;
}
-static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
+static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
- queue_lockdep_assert_held(q);
- __set_bit(flag, &q->queue_flags);
+ return bdev->bd_disk->queue->limits.max_active_zones;
}
-static inline void queue_flag_clear_unlocked(unsigned int flag,
- struct request_queue *q)
+static inline unsigned int blk_queue_depth(struct request_queue *q)
{
- __clear_bit(flag, &q->queue_flags);
+ if (q->queue_depth)
+ return q->queue_depth;
+
+ return q->nr_requests;
}
-static inline int queue_in_flight(struct request_queue *q)
+/*
+ * default timeout for SG_IO if none specified
+ */
+#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
+#define BLK_MIN_SG_TIMEOUT (7 * HZ)
+
+/* This should not be used directly - use rq_for_each_segment */
+#define for_each_bio(_bio) \
+ for (; _bio; _bio = _bio->bi_next)
+
+int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
+ struct fwnode_handle *fwnode);
+int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups);
+static inline int __must_check add_disk(struct gendisk *disk)
{
- return q->in_flight[0] + q->in_flight[1];
+ return device_add_disk(NULL, disk, NULL);
}
+void del_gendisk(struct gendisk *gp);
+void invalidate_disk(struct gendisk *disk);
+void set_disk_ro(struct gendisk *disk, bool read_only);
+void disk_uevent(struct gendisk *disk, enum kobject_action action);
-static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+static inline u8 bdev_partno(const struct block_device *bdev)
{
- queue_lockdep_assert_held(q);
- __clear_bit(flag, &q->queue_flags);
+ return atomic_read(&bdev->__bd_flags) & BD_PARTNO;
}
-#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
-#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
-#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
-#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
-#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
-#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
-#define blk_queue_noxmerges(q) \
- test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
-#define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
-#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
-#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_stackable(q) \
- test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
-#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
-#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
- test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag)
+{
+ return atomic_read(&bdev->__bd_flags) & flag;
+}
-#define blk_noretry_request(rq) \
- ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
- REQ_FAILFAST_DRIVER))
+static inline void bdev_set_flag(struct block_device *bdev, unsigned flag)
+{
+ atomic_or(flag, &bdev->__bd_flags);
+}
-#define blk_account_rq(rq) \
- (((rq)->cmd_flags & REQ_STARTED) && \
- ((rq)->cmd_type == REQ_TYPE_FS))
+static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag)
+{
+ atomic_andnot(flag, &bdev->__bd_flags);
+}
-#define blk_pm_request(rq) \
- ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
- (rq)->cmd_type == REQ_TYPE_PM_RESUME)
+static inline bool get_disk_ro(struct gendisk *disk)
+{
+ return bdev_test_flag(disk->part0, BD_READ_ONLY) ||
+ test_bit(GD_READ_ONLY, &disk->state);
+}
-#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
-#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
-/* rq->queuelist of dequeued request must be list_empty() */
-#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
+static inline bool bdev_read_only(struct block_device *bdev)
+{
+ return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk);
+}
-#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
+bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
+void disk_force_media_change(struct gendisk *disk);
+void bdev_mark_dead(struct block_device *bdev, bool surprise);
-#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
+void add_disk_randomness(struct gendisk *disk) __latent_entropy;
+void rand_initialize_disk(struct gendisk *disk);
-static inline unsigned int blk_queue_cluster(struct request_queue *q)
+static inline sector_t get_start_sect(struct block_device *bdev)
{
- return q->limits.cluster;
+ return bdev->bd_start_sect;
}
-/*
- * We regard a request as sync, if either a read or a sync write
- */
-static inline bool rw_is_sync(unsigned int rw_flags)
+static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
- return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
+ return bdev->bd_nr_sectors;
}
-static inline bool rq_is_sync(struct request *rq)
+static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
- return rw_is_sync(rq->cmd_flags);
+ return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}
-static inline bool blk_rl_full(struct request_list *rl, bool sync)
+static inline sector_t get_capacity(struct gendisk *disk)
{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- return rl->flags & flag;
+ return bdev_nr_sectors(disk->part0);
}
-static inline void blk_set_rl_full(struct request_list *rl, bool sync)
+static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- rl->flags |= flag;
+ return bdev_nr_sectors(sb->s_bdev) >>
+ (sb->s_blocksize_bits - SECTOR_SHIFT);
}
-static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
+#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- rl->flags &= ~flag;
+ return disk->nr_zones;
}
-static inline bool rq_mergeable(struct request *rq)
+/**
+ * bio_needs_zone_write_plugging - Check if a BIO needs to be handled with zone
+ * write plugging
+ * @bio: The BIO being submitted
+ *
+ * Return true whenever @bio execution needs to be handled through zone
+ * write plugging (using blk_zone_plug_bio()). Return false otherwise.
+ */
+static inline bool bio_needs_zone_write_plugging(struct bio *bio)
{
- if (rq->cmd_type != REQ_TYPE_FS)
- return false;
+ enum req_op op = bio_op(bio);
- if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+ /*
+ * Only zoned block devices have a zone write plug hash table. But not
+ * all of them have one (e.g. DM devices may not need one).
+ */
+ if (!bio->bi_bdev->bd_disk->zone_wplugs_hash)
return false;
- return true;
-}
-
-static inline bool blk_check_merge_flags(unsigned int flags1,
- unsigned int flags2)
-{
- if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
+ /* Only write operations need zone write plugging. */
+ if (!op_is_write(op))
return false;
- if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
+ /* Ignore empty flush */
+ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return false;
- if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+ /* Ignore BIOs that already have been handled by zone write plugging. */
+ if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING))
return false;
- return true;
-}
-
-static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
-{
- if (bio_data(a) == bio_data(b))
+ /*
+ * All zone write operations must be handled through zone write plugging
+ * using blk_zone_plug_bio().
+ */
+ switch (op) {
+ case REQ_OP_ZONE_APPEND:
+ case REQ_OP_WRITE:
+ case REQ_OP_WRITE_ZEROES:
+ case REQ_OP_ZONE_FINISH:
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_RESET_ALL:
return true;
-
- return false;
+ default:
+ return false;
+ }
}
-/*
- * q->prep_rq_fn return values
- */
-#define BLKPREP_OK 0 /* serve it */
-#define BLKPREP_KILL 1 /* fatal error, kill */
-#define BLKPREP_DEFER 2 /* leave on queue */
+bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs);
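/*
 * Example (not part of this header): a sketch of the expected call pattern in
 * a BIO-based submission path. example_handle_bio() is a hypothetical
 * driver-specific helper, and a %true return from blk_zone_plug_bio() is
 * assumed to mean the BIO was taken over by the zone write plug and must not
 * be processed further now.
 */
static void example_handle_bio(struct bio *bio);	/* hypothetical */

static void example_submit_bio(struct bio *bio)
{
	/* Let zone write plugging take ownership of zoned writes if needed. */
	if (bio_needs_zone_write_plugging(bio) && blk_zone_plug_bio(bio, 0))
		return;

	example_handle_bio(bio);
}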
-extern unsigned long blk_max_low_pfn, blk_max_pfn;
-
-/*
- * standard bounce addresses:
+/**
+ * disk_zone_capacity - returns the zone capacity of the zone containing @sector
+ * @disk: disk to work with
+ * @sector: sector number within the zone being queried
*
- * BLK_BOUNCE_HIGH : bounce all highmem pages
- * BLK_BOUNCE_ANY : don't bounce anything
- * BLK_BOUNCE_ISA : bounce pages above ISA DMA boundary
+ * Returns the zone capacity of a zone containing @sector. @sector can be any
+ * sector in the zone.
*/
+static inline unsigned int disk_zone_capacity(struct gendisk *disk,
+ sector_t sector)
+{
+ sector_t zone_sectors = disk->queue->limits.chunk_sectors;
-#if BITS_PER_LONG == 32
-#define BLK_BOUNCE_HIGH ((u64)blk_max_low_pfn << PAGE_SHIFT)
-#else
-#define BLK_BOUNCE_HIGH -1ULL
-#endif
-#define BLK_BOUNCE_ANY (-1ULL)
-#define BLK_BOUNCE_ISA (DMA_BIT_MASK(24))
+ if (sector + zone_sectors >= get_capacity(disk))
+ return disk->last_zone_capacity;
+ return disk->zone_capacity;
+}
+static inline unsigned int bdev_zone_capacity(struct block_device *bdev,
+ sector_t pos)
+{
+ return disk_zone_capacity(bdev->bd_disk, pos);
+}
-/*
- * default timeout for SG_IO if none specified
- */
-#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
-#define BLK_MIN_SG_TIMEOUT (7 * HZ)
+bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector);
-#ifdef CONFIG_BOUNCE
-extern int init_emergency_isa_pool(void);
-extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
-#else
-static inline int init_emergency_isa_pool(void)
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
return 0;
}
-static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
+
+static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector)
{
+ return false;
}
-#endif /* CONFIG_MMU */
-struct rq_map_data {
- struct page **pages;
- int page_order;
- int nr_entries;
- unsigned long offset;
- int null_mapped;
- int from_user;
-};
+static inline bool bio_needs_zone_write_plugging(struct bio *bio)
+{
+ return false;
+}
-struct req_iterator {
- int i;
- struct bio *bio;
-};
+static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs)
+{
+ return false;
+}
+#endif /* CONFIG_BLK_DEV_ZONED */
-/* This should not be used directly - use rq_for_each_segment */
-#define for_each_bio(_bio) \
- for (; _bio; _bio = _bio->bi_next)
-#define __rq_for_each_bio(_bio, rq) \
- if ((rq->bio)) \
- for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
+static inline unsigned int bdev_nr_zones(struct block_device *bdev)
+{
+ return disk_nr_zones(bdev->bd_disk);
+}
-#define rq_for_each_segment(bvl, _rq, _iter) \
- __rq_for_each_bio(_iter.bio, _rq) \
- bio_for_each_segment(bvl, _iter.bio, _iter.i)
+int bdev_disk_changed(struct gendisk *disk, bool invalidate);
-#define rq_iter_last(rq, _iter) \
- (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
+void put_disk(struct gendisk *disk);
+struct gendisk *__blk_alloc_disk(struct queue_limits *lim, int node,
+ struct lock_class_key *lkclass);
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-extern void rq_flush_dcache_pages(struct request *rq);
+/**
+ * blk_alloc_disk - allocate a gendisk structure
+ * @lim: queue limits to be used for this disk.
+ * @node_id: numa node to allocate on
+ *
+ * Allocate and pre-initialize a gendisk structure for use with BIO based
+ * drivers.
+ *
+ * Returns an ERR_PTR on error, else the allocated disk.
+ *
+ * Context: can sleep
+ */
+#define blk_alloc_disk(lim, node_id) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ __blk_alloc_disk(lim, node_id, &__key); \
+})
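/*
 * Example (not part of this header): allocating and registering a gendisk for
 * a simple BIO-based driver. example_fops and the 1 GiB capacity are
 * hypothetical placeholders.
 */
static int example_create_disk(void)
{
	struct queue_limits lim = {
		.logical_block_size	= 4096,
		.features		= BLK_FEAT_WRITE_CACHE,
	};
	struct gendisk *disk;
	int err;

	disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(disk))
		return PTR_ERR(disk);

	snprintf(disk->disk_name, DISK_NAME_LEN, "example0");
	disk->fops = &example_fops;	/* hypothetical block_device_operations */
	set_capacity(disk, SZ_1G >> SECTOR_SHIFT);

	err = add_disk(disk);
	if (err)
		put_disk(disk);
	return err;
}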
+
+int __register_blkdev(unsigned int major, const char *name,
+ void (*probe)(dev_t devt));
+#define register_blkdev(major, name) \
+ __register_blkdev(major, name, NULL)
+void unregister_blkdev(unsigned int major, const char *name);
+
+bool disk_check_media_change(struct gendisk *disk);
+void set_capacity(struct gendisk *disk, sector_t size);
+
+#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
+int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
+void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
-static inline void rq_flush_dcache_pages(struct request *rq)
+static inline int bd_link_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
{
+ return 0;
}
-#endif
+static inline void bd_unlink_disk_holder(struct block_device *bdev,
+ struct gendisk *disk)
+{
+}
+#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */
+
+dev_t part_devt(struct gendisk *disk, u8 partno);
+void inc_diskseq(struct gendisk *disk);
+void blk_request_module(dev_t devt);
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
-extern void generic_make_request(struct bio *bio);
-extern void blk_rq_init(struct request_queue *q, struct request *rq);
-extern void blk_put_request(struct request *);
-extern void __blk_put_request(struct request_queue *, struct request *);
-extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
-extern struct request *blk_make_request(struct request_queue *, struct bio *,
- gfp_t);
-extern void blk_requeue_request(struct request_queue *, struct request *);
-extern void blk_add_request_payload(struct request *rq, struct page *page,
- unsigned int len);
-extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
+void submit_bio_noacct(struct bio *bio);
+struct bio *bio_split_to_limits(struct bio *bio);
+struct bio *bio_submit_split_bioset(struct bio *bio, unsigned int split_sectors,
+ struct bio_set *bs);
+
extern int blk_lld_busy(struct request_queue *q);
-extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
- struct bio_set *bs, gfp_t gfp_mask,
- int (*bio_ctr)(struct bio *, struct bio *, void *),
- void *data);
-extern void blk_rq_unprep_clone(struct request *rq);
-extern int blk_insert_cloned_request(struct request_queue *q,
- struct request *rq);
-extern void blk_delay_queue(struct request_queue *, unsigned long);
-extern void blk_recount_segments(struct request_queue *, struct bio *);
-extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
-extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
- unsigned int, void __user *);
-extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- unsigned int, void __user *);
-extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
- struct scsi_ioctl_command __user *);
-
-extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
+extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
+extern void blk_queue_exit(struct request_queue *q);
+extern void blk_sync_queue(struct request_queue *q);
-/*
- * A queue has just exitted congestion. Note this in the global counter of
- * congested queues, and wake up anyone who was waiting for requests to be
- * put back.
- */
-static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
-{
- clear_bdi_congested(&q->backing_dev_info, sync);
-}
+/* Helper to convert REQ_OP_XXX to its string format XXX */
+extern const char *blk_op_str(enum req_op op);
-/*
- * A queue has just entered congestion. Flag that in the queue's VM-visible
- * state flags and increment the global gounter of congested queues.
- */
-static inline void blk_set_queue_congested(struct request_queue *q, int sync)
-{
- set_bdi_congested(&q->backing_dev_info, sync);
-}
+int blk_status_to_errno(blk_status_t status);
+blk_status_t errno_to_blk_status(int errno);
+const char *blk_status_to_str(blk_status_t status);
-extern void blk_start_queue(struct request_queue *q);
-extern void blk_stop_queue(struct request_queue *q);
-extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q);
-extern void blk_run_queue(struct request_queue *);
-extern void blk_run_queue_async(struct request_queue *q);
-extern int blk_rq_map_user(struct request_queue *, struct request *,
- struct rq_map_data *, void __user *, unsigned long,
- gfp_t);
-extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
- struct rq_map_data *, struct sg_iovec *, int,
- unsigned int, gfp_t);
-extern int blk_execute_rq(struct request_queue *, struct gendisk *,
- struct request *, int);
-extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
- struct request *, int, rq_end_io_fn *);
+/* only poll the hardware once, don't continue until a completion was found */
+#define BLK_POLL_ONESHOT (1 << 0)
+int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
+int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
+ unsigned int flags);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
- return bdev->bd_disk->queue;
+ return bdev->bd_queue; /* this is never NULL */
}
-/*
- * blk_rq_pos() : the current sector
- * blk_rq_bytes() : bytes left in the entire request
- * blk_rq_cur_bytes() : bytes left in the current segment
- * blk_rq_err_bytes() : bytes left till the next error boundary
- * blk_rq_sectors() : sectors left in the entire request
- * blk_rq_cur_sectors() : sectors left in the current segment
- */
-static inline sector_t blk_rq_pos(const struct request *rq)
+/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
+const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
+
+static inline unsigned int bio_zone_no(struct bio *bio)
{
- return rq->__sector;
+ return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}
-static inline unsigned int blk_rq_bytes(const struct request *rq)
+static inline bool bio_straddles_zones(struct bio *bio)
{
- return rq->__data_len;
+ return bio_sectors(bio) &&
+ bio_zone_no(bio) !=
+ disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1);
}
-static inline int blk_rq_cur_bytes(const struct request *rq)
+/*
+ * Return how much within the boundary is left to be used for I/O at a given
+ * offset.
+ */
+static inline unsigned int blk_boundary_sectors_left(sector_t offset,
+ unsigned int boundary_sectors)
{
- return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+ if (unlikely(!is_power_of_2(boundary_sectors)))
+ return boundary_sectors - sector_div(offset, boundary_sectors);
+ return boundary_sectors - (offset & (boundary_sectors - 1));
}
-extern unsigned int blk_rq_err_bytes(const struct request *rq);
-
-static inline unsigned int blk_rq_sectors(const struct request *rq)
+/**
+ * queue_limits_start_update - start an atomic update of queue limits
+ * @q: queue to update
+ *
+ * This function starts an atomic update of the queue limits. It takes a lock
+ * to prevent other updates and returns a snapshot of the current limits that
+ * the caller can modify. The caller must call queue_limits_commit_update()
+ * to finish the update.
+ *
+ * Context: process context.
+ */
+static inline struct queue_limits
+queue_limits_start_update(struct request_queue *q)
+{
+ mutex_lock(&q->limits_lock);
+ return q->limits;
+}
+int queue_limits_commit_update_frozen(struct request_queue *q,
+ struct queue_limits *lim);
+int queue_limits_commit_update(struct request_queue *q,
+ struct queue_limits *lim);
+int queue_limits_set(struct request_queue *q, struct queue_limits *lim);
+int blk_validate_limits(struct queue_limits *lim);
+
+/**
+ * queue_limits_cancel_update - cancel an atomic update of queue limits
+ * @q: queue to update
+ *
+ * This function cancels an atomic update of the queue limits started by
+ * queue_limits_start_update() and should be used when an error occurs after
+ * starting the update.
+ */
+static inline void queue_limits_cancel_update(struct request_queue *q)
{
- return blk_rq_bytes(rq) >> 9;
+ mutex_unlock(&q->limits_lock);
}
-static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+/*
+ * These helpers are for drivers that have sloppy feature negotiation and might
+ * have to disable DISCARD, WRITE_ZEROES or SECURE_DISCARD from the I/O
+ * completion handler when the device returned an indicator that the respective
+ * feature is not actually supported. They are racy and the driver needs to
+ * cope with that. Try to avoid this scheme if you can.
+ */
+static inline void blk_queue_disable_discard(struct request_queue *q)
{
- return blk_rq_cur_bytes(rq) >> 9;
+ q->limits.max_discard_sectors = 0;
}
-static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
- unsigned int cmd_flags)
+static inline void blk_queue_disable_secure_erase(struct request_queue *q)
{
- if (unlikely(cmd_flags & REQ_DISCARD))
- return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
-
- if (unlikely(cmd_flags & REQ_WRITE_SAME))
- return q->limits.max_write_same_sectors;
-
- return q->limits.max_sectors;
+ q->limits.max_secure_erase_sectors = 0;
}
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+static inline void blk_queue_disable_write_zeroes(struct request_queue *q)
{
- struct request_queue *q = rq->q;
-
- if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
- return q->limits.max_hw_sectors;
-
- return blk_queue_get_max_sectors(q, rq->cmd_flags);
+ q->limits.max_write_zeroes_sectors = 0;
+ q->limits.max_wzeroes_unmap_sectors = 0;
}
/*
- * Request issue related functions.
- */
-extern struct request *blk_peek_request(struct request_queue *q);
-extern void blk_start_request(struct request *rq);
-extern struct request *blk_fetch_request(struct request_queue *q);
-
-/*
- * Request completion related functions.
- *
- * blk_update_request() completes given number of bytes and updates
- * the request without completing it.
- *
- * blk_end_request() and friends. __blk_end_request() must be called
- * with the request queue spinlock acquired.
- *
- * Several drivers define their own end_request and call
- * blk_end_request() for parts of the original function.
- * This prevents code duplication in drivers.
- */
-extern bool blk_update_request(struct request *rq, int error,
- unsigned int nr_bytes);
-extern bool blk_end_request(struct request *rq, int error,
- unsigned int nr_bytes);
-extern void blk_end_request_all(struct request *rq, int error);
-extern bool blk_end_request_cur(struct request *rq, int error);
-extern bool blk_end_request_err(struct request *rq, int error);
-extern bool __blk_end_request(struct request *rq, int error,
- unsigned int nr_bytes);
-extern void __blk_end_request_all(struct request *rq, int error);
-extern bool __blk_end_request_cur(struct request *rq, int error);
-extern bool __blk_end_request_err(struct request *rq, int error);
-
-extern void blk_complete_request(struct request *);
-extern void __blk_complete_request(struct request *);
-extern void blk_abort_request(struct request *);
-extern void blk_unprep_request(struct request *);
-
-/*
* Access functions for manipulating queue properties
*/
-extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
- spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
- request_fn_proc *, spinlock_t *);
-extern void blk_cleanup_queue(struct request_queue *);
-extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
-extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
-extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_max_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_max_discard_sectors(struct request_queue *q,
- unsigned int max_discard_sectors);
-extern void blk_queue_max_write_same_sectors(struct request_queue *q,
- unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
-extern void blk_queue_alignment_offset(struct request_queue *q,
- unsigned int alignment);
-extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
-extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
-extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
-extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
-extern void blk_set_default_limits(struct queue_limits *lim);
+extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
-extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
- sector_t offset);
-extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
- sector_t offset);
-extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
-extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
-extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
-extern int blk_queue_dma_drain(struct request_queue *q,
- dma_drain_needed_fn *dma_drain_needed,
- void *buf, unsigned int size);
-extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
-extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
-extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
-extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
-extern void blk_queue_dma_alignment(struct request_queue *, int);
-extern void blk_queue_update_dma_alignment(struct request_queue *, int);
-extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
-extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
+void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
+ sector_t offset, const char *pfx);
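/*
 * Illustrative sketch, not part of this header: a stacking driver (md/dm
 * style) building its queue limits from its member devices with the
 * helpers declared above.  The member array and count are hypothetical.
 */
static void example_stack_member_limits(struct queue_limits *lim,
					struct block_device **members,
					int nr_members, const char *name)
{
	int i;

	blk_set_stacking_limits(lim);
	for (i = 0; i < nr_members; i++)
		queue_limits_stack_bdev(lim, members[i], 0, name);
}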
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
-extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
-extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
-extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
-extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
- struct scatterlist *sglist);
-extern void blk_dump_rq_flags(struct request *, char *);
-extern long nr_blockdev_pages(void);
+struct blk_independent_access_ranges *
+disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
+void disk_set_independent_access_ranges(struct gendisk *disk,
+ struct blk_independent_access_ranges *iars);
bool __must_check blk_get_queue(struct request_queue *);
-struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
-/*
- * block layer runtime pm functions
- */
-#ifdef CONFIG_PM_RUNTIME
-extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
-extern int blk_pre_runtime_suspend(struct request_queue *q);
-extern void blk_post_runtime_suspend(struct request_queue *q, int err);
-extern void blk_pre_runtime_resume(struct request_queue *q);
-extern void blk_post_runtime_resume(struct request_queue *q, int err);
-#else
-static inline void blk_pm_runtime_init(struct request_queue *q,
- struct device *dev) {}
-static inline int blk_pre_runtime_suspend(struct request_queue *q)
-{
- return -ENOSYS;
-}
-static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
-static inline void blk_pre_runtime_resume(struct request_queue *q) {}
-static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
-#endif
+void blk_mark_disk_dead(struct gendisk *disk);
+struct rq_list {
+ struct request *head;
+ struct request *tail;
+};
+
+#ifdef CONFIG_BLOCK
/*
* blk_plug permits building a queue of related requests by holding the I/O
* fragments for a short period. This allows merging of sequential requests
@@ -995,16 +1166,24 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
* as the lock contention for request_queue lock is reduced.
*
* It is ok not to disable preemption when adding the request to the plug list
- * or when attempting a merge, because blk_schedule_flush_list() will only flush
- * the plug list when the task sleeps by itself. For details, please see
- * schedule() where blk_schedule_flush_plug() is called.
+ * or when attempting a merge. For details, please see schedule() where
+ * blk_flush_plug() is called.
*/
struct blk_plug {
- unsigned long magic; /* detect uninitialized use-cases */
- struct list_head list; /* requests */
+ struct rq_list mq_list; /* blk-mq requests */
+
+ /* if ios_left is > 1, we can batch tag/rq allocations */
+ struct rq_list cached_rqs;
+ u64 cur_ktime;
+ unsigned short nr_ios;
+
+ unsigned short rq_count;
+
+ bool multiple_queues;
+ bool has_elevator;
+
struct list_head cb_list; /* md requires an unplug callback */
};
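/*
 * Illustrative sketch, not part of this header: the plugging pattern the
 * comment above describes.  A submitter batches related bios under one
 * plug so they can be merged and dispatched together; the bio list is
 * hypothetical caller-side state.
 */
static void example_submit_batch(struct bio_list *bios)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(bios)))
		submit_bio(bio);
	blk_finish_plug(&plug);	/* flushes the plugged requests */
}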
-#define BLK_MAX_REQUEST_COUNT 16
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
@@ -1016,137 +1195,202 @@ struct blk_plug_cb {
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
void *data, int size);
extern void blk_start_plug(struct blk_plug *);
+extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);
-extern void blk_flush_plug_list(struct blk_plug *, bool);
-static inline void blk_flush_plug(struct task_struct *tsk)
+void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
- struct blk_plug *plug = tsk->plug;
-
if (plug)
- blk_flush_plug_list(plug, false);
+ __blk_flush_plug(plug, async);
}
-static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+/*
+ * tsk == current here
+ */
+static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
{
struct blk_plug *plug = tsk->plug;
if (plug)
- blk_flush_plug_list(plug, true);
+ plug->cur_ktime = 0;
+ current->flags &= ~PF_BLOCK_TS;
}
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+int blkdev_issue_flush(struct block_device *bdev);
+long nr_blockdev_pages(void);
+#else /* CONFIG_BLOCK */
+struct blk_plug {
+};
+
+static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
+ unsigned short nr_ios)
{
- struct blk_plug *plug = tsk->plug;
+}
- return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
+static inline void blk_start_plug(struct blk_plug *plug)
+{
}
-/*
- * tag stuff
- */
-#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
-extern int blk_queue_start_tag(struct request_queue *, struct request *);
-extern struct request *blk_queue_find_tag(struct request_queue *, int);
-extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
-extern void blk_queue_free_tags(struct request_queue *);
-extern int blk_queue_resize_tags(struct request_queue *, int);
-extern void blk_queue_invalidate_tags(struct request_queue *);
-extern struct blk_queue_tag *blk_init_tags(int);
-extern void blk_free_tags(struct blk_queue_tag *);
-
-static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
- int tag)
-{
- if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
- return NULL;
- return bqt->tag_index[tag];
-}
-
-#define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */
-
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
-extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, struct page *page);
+static inline void blk_finish_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_flush_plug(struct blk_plug *plug, bool async)
+{
+}
+
+static inline void blk_plug_invalidate_ts(struct task_struct *tsk)
+{
+}
+
+static inline int blkdev_issue_flush(struct block_device *bdev)
+{
+ return 0;
+}
+
+static inline long nr_blockdev_pages(void)
+{
+ return 0;
+}
+#endif /* CONFIG_BLOCK */
+
+extern void blk_io_schedule(void);
+
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
+int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp);
+
+#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
+#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
+#define BLKDEV_ZERO_KILLABLE (1 << 2) /* interruptible by fatal signals */
+
+extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
+ unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask);
+ sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
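/*
 * Illustrative sketch, not part of this header: zeroing a range while
 * keeping the blocks allocated, using the BLKDEV_ZERO_NOUNMAP flag defined
 * above.  The device and range are hypothetical caller-side values.
 */
static int example_zero_allocated_range(struct block_device *bdev,
					sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}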
+
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
- return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
- nr_blocks << (sb->s_blocksize_bits - 9),
- gfp_mask, flags);
+ return blkdev_issue_discard(sb->s_bdev,
+ block << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ nr_blocks << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask)
{
return blkdev_issue_zeroout(sb->s_bdev,
- block << (sb->s_blocksize_bits - 9),
- nr_blocks << (sb->s_blocksize_bits - 9),
- gfp_mask);
+ block << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ nr_blocks << (sb->s_blocksize_bits -
+ SECTOR_SHIFT),
+ gfp_mask, 0);
}
-extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
+static inline bool bdev_is_partition(struct block_device *bdev)
+{
+ return bdev_partno(bdev) != 0;
+}
enum blk_default_limits {
BLK_MAX_SEGMENTS = 128,
BLK_SAFE_MAX_SECTORS = 255,
- BLK_DEF_MAX_SECTORS = 1024,
BLK_MAX_SEGMENT_SIZE = 65536,
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
-#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-
-static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+static inline struct queue_limits *bdev_limits(struct block_device *bdev)
{
- return q->limits.bounce_pfn;
+ return &bdev_get_queue(bdev)->limits;
}
-static inline unsigned long queue_segment_boundary(struct request_queue *q)
+static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
return q->limits.seg_boundary_mask;
}
-static inline unsigned int queue_max_sectors(struct request_queue *q)
+static inline unsigned long queue_virt_boundary(const struct request_queue *q)
+{
+ return q->limits.virt_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
return q->limits.max_sectors;
}
-static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+static inline unsigned int queue_max_bytes(struct request_queue *q)
+{
+ return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
+}
+
+static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
return q->limits.max_hw_sectors;
}
-static inline unsigned short queue_max_segments(struct request_queue *q)
+static inline unsigned short queue_max_segments(const struct request_queue *q)
{
return q->limits.max_segments;
}
-static inline unsigned int queue_max_segment_size(struct request_queue *q)
+static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
+{
+ return q->limits.max_discard_segments;
+}
+
+static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
return q->limits.max_segment_size;
}
-static inline unsigned short queue_logical_block_size(struct request_queue *q)
+static inline bool queue_emulates_zone_append(struct request_queue *q)
+{
+ return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors;
+}
+
+static inline bool bdev_emulates_zone_append(struct block_device *bdev)
{
- int retval = 512;
+ return queue_emulates_zone_append(bdev_get_queue(bdev));
+}
+
+static inline unsigned int
+bdev_max_zone_append_sectors(struct block_device *bdev)
+{
+ return bdev_limits(bdev)->max_zone_append_sectors;
+}
- if (q && q->limits.logical_block_size)
- retval = q->limits.logical_block_size;
+static inline unsigned int bdev_max_segments(struct block_device *bdev)
+{
+ return queue_max_segments(bdev_get_queue(bdev));
+}
+
+static inline unsigned short bdev_max_write_streams(struct block_device *bdev)
+{
+ if (bdev_is_partition(bdev))
+ return 0;
+ return bdev_limits(bdev)->max_write_streams;
+}
- return retval;
+static inline unsigned queue_logical_block_size(const struct request_queue *q)
+{
+ return q->limits.logical_block_size;
}
-static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
return queue_logical_block_size(bdev_get_queue(bdev));
}
-static inline unsigned int queue_physical_block_size(struct request_queue *q)
+static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
return q->limits.physical_block_size;
}
@@ -1156,416 +1400,477 @@ static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
return queue_physical_block_size(bdev_get_queue(bdev));
}
-static inline unsigned int queue_io_min(struct request_queue *q)
+static inline unsigned int queue_io_min(const struct request_queue *q)
{
return q->limits.io_min;
}
-static inline int bdev_io_min(struct block_device *bdev)
+static inline unsigned int bdev_io_min(struct block_device *bdev)
{
return queue_io_min(bdev_get_queue(bdev));
}
-static inline unsigned int queue_io_opt(struct request_queue *q)
+static inline unsigned int queue_io_opt(const struct request_queue *q)
{
return q->limits.io_opt;
}
-static inline int bdev_io_opt(struct block_device *bdev)
+static inline unsigned int bdev_io_opt(struct block_device *bdev)
{
return queue_io_opt(bdev_get_queue(bdev));
}
-static inline int queue_alignment_offset(struct request_queue *q)
+static inline unsigned int
+queue_zone_write_granularity(const struct request_queue *q)
{
- if (q->limits.misaligned)
- return -1;
-
- return q->limits.alignment_offset;
+ return q->limits.zone_write_granularity;
}
-static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+static inline unsigned int
+bdev_zone_write_granularity(struct block_device *bdev)
{
- unsigned int granularity = max(lim->physical_block_size, lim->io_min);
- unsigned int alignment = (sector << 9) & (granularity - 1);
-
- return (granularity + lim->alignment_offset - alignment)
- & (granularity - 1);
+ return queue_zone_write_granularity(bdev_get_queue(bdev));
}
-static inline int bdev_alignment_offset(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q->limits.misaligned)
- return -1;
-
- if (bdev != bdev->bd_contains)
- return bdev->bd_part->alignment_offset;
+int bdev_alignment_offset(struct block_device *bdev);
+unsigned int bdev_discard_alignment(struct block_device *bdev);
- return q->limits.alignment_offset;
+static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
+{
+ return bdev_limits(bdev)->max_discard_sectors;
}
-static inline int queue_discard_alignment(struct request_queue *q)
+static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
- if (q->limits.discard_misaligned)
- return -1;
+ return bdev_limits(bdev)->discard_granularity;
+}
- return q->limits.discard_alignment;
+static inline unsigned int
+bdev_max_secure_erase_sectors(struct block_device *bdev)
+{
+ return bdev_limits(bdev)->max_secure_erase_sectors;
}
-static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
+static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
- unsigned int alignment, granularity, offset;
+ return bdev_limits(bdev)->max_write_zeroes_sectors;
+}
- if (!lim->max_discard_sectors)
- return 0;
+static inline unsigned int
+bdev_write_zeroes_unmap_sectors(struct block_device *bdev)
+{
+ return bdev_limits(bdev)->max_wzeroes_unmap_sectors;
+}
- /* Why are these in bytes, not sectors? */
- alignment = lim->discard_alignment >> 9;
- granularity = lim->discard_granularity >> 9;
- if (!granularity)
- return 0;
+static inline bool bdev_nonrot(struct block_device *bdev)
+{
+ return blk_queue_nonrot(bdev_get_queue(bdev));
+}
- /* Offset of the partition start in 'granularity' sectors */
- offset = sector_div(sector, granularity);
+static inline bool bdev_synchronous(struct block_device *bdev)
+{
+ return bdev->bd_disk->queue->limits.features & BLK_FEAT_SYNCHRONOUS;
+}
- /* And why do we do this modulus *again* in blkdev_issue_discard()? */
- offset = (granularity + alignment - offset) % granularity;
+static inline bool bdev_stable_writes(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
- /* Turn it back into bytes, gaah */
- return offset << 9;
+ if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
+ q->limits.integrity.csum_type != BLK_INTEGRITY_CSUM_NONE)
+ return true;
+ return q->limits.features & BLK_FEAT_STABLE_WRITES;
}
-static inline int bdev_discard_alignment(struct block_device *bdev)
+static inline bool blk_queue_write_cache(struct request_queue *q)
{
- struct request_queue *q = bdev_get_queue(bdev);
+ return (q->limits.features & BLK_FEAT_WRITE_CACHE) &&
+ !(q->limits.flags & BLK_FLAG_WRITE_CACHE_DISABLED);
+}
- if (bdev != bdev->bd_contains)
- return bdev->bd_part->discard_alignment;
+static inline bool bdev_write_cache(struct block_device *bdev)
+{
+ return blk_queue_write_cache(bdev_get_queue(bdev));
+}
- return q->limits.discard_alignment;
+static inline bool bdev_fua(struct block_device *bdev)
+{
+ return bdev_limits(bdev)->features & BLK_FEAT_FUA;
}
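/*
 * Illustrative sketch, not part of this header: deciding on a durability
 * strategy from the cache configuration reported by the helpers above.
 * Treating FUA as a substitute for a post-write flush is an assumption
 * made for this example.
 */
static bool example_needs_explicit_flush(struct block_device *bdev)
{
	/*
	 * With no volatile write cache nothing needs flushing; with FUA the
	 * write itself can be made durable, otherwise issue a flush after
	 * the write completes (e.g. via blkdev_issue_flush()).
	 */
	return bdev_write_cache(bdev) && !bdev_fua(bdev);
}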
-static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
+static inline bool bdev_nowait(struct block_device *bdev)
{
- if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
- return 1;
+ return bdev->bd_disk->queue->limits.features & BLK_FEAT_NOWAIT;
+}
- return 0;
+static inline bool bdev_is_zoned(struct block_device *bdev)
+{
+ return blk_queue_is_zoned(bdev_get_queue(bdev));
}
-static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
+static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
- return queue_discard_zeroes_data(bdev_get_queue(bdev));
+ return disk_zone_no(bdev->bd_disk, sec);
}
-static inline unsigned int bdev_write_same(struct block_device *bdev)
+static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
- if (q)
- return q->limits.max_write_same_sectors;
-
- return 0;
+ if (!blk_queue_is_zoned(q))
+ return 0;
+ return q->limits.chunk_sectors;
}
-static inline int queue_dma_alignment(struct request_queue *q)
+static inline sector_t bdev_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- return q ? q->dma_alignment : 511;
+ return sector & ~(bdev_zone_sectors(bdev) - 1);
}
-static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
- unsigned int len)
+static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
- return !(addr & alignment) && !(len & alignment);
+ return sector & (bdev_zone_sectors(bdev) - 1);
}
-/* assumes size > 256 */
-static inline unsigned int blksize_bits(unsigned int size)
+static inline sector_t bio_offset_from_zone_start(struct bio *bio)
{
- unsigned int bits = 8;
- do {
- bits++;
- size >>= 1;
- } while (size > 256);
- return bits;
+ return bdev_offset_from_zone_start(bio->bi_bdev,
+ bio->bi_iter.bi_sector);
}
-static inline unsigned int block_size(struct block_device *bdev)
+static inline bool bdev_is_zone_start(struct block_device *bdev,
+ sector_t sector)
{
- return bdev->bd_block_size;
+ return bdev_offset_from_zone_start(bdev, sector) == 0;
}
-static inline bool queue_flush_queueable(struct request_queue *q)
+/* Check whether @sector is a multiple of the zone size. */
+static inline bool bdev_is_zone_aligned(struct block_device *bdev,
+ sector_t sector)
{
- return !q->flush_not_queueable;
+ return bdev_is_zone_start(bdev, sector);
}
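/*
 * Illustrative sketch, not part of this header: zone arithmetic for a bio
 * on a zoned device using the helpers above.  The debug print is purely
 * for illustration.
 */
static void example_print_zone_info(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	sector_t sector = bio->bi_iter.bi_sector;

	if (!bdev_is_zoned(bdev))
		return;

	pr_debug("zone %u, offset %llu sectors, %s zone start\n",
		 bdev_zone_no(bdev, sector),
		 (unsigned long long)bdev_offset_from_zone_start(bdev, sector),
		 bdev_is_zone_start(bdev, sector) ? "at" : "not at");
}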
-typedef struct {struct page *v;} Sector;
+int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
-unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
+static inline unsigned int queue_dma_alignment(const struct request_queue *q)
+{
+ return q->limits.dma_alignment;
+}
-static inline void put_dev_sector(Sector p)
+static inline unsigned int
+queue_atomic_write_unit_max_bytes(const struct request_queue *q)
{
- page_cache_release(p.v);
+ return q->limits.atomic_write_unit_max;
}
-struct work_struct;
-int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+static inline unsigned int
+queue_atomic_write_unit_min_bytes(const struct request_queue *q)
+{
+ return q->limits.atomic_write_unit_min;
+}
-#ifdef CONFIG_BLK_CGROUP
-/*
- * This should not be using sched_clock(). A real patch is in progress
- * to fix this up, until that is in place we need to disable preemption
- * around sched_clock() in this function and set_io_start_time_ns().
- */
-static inline void set_start_time_ns(struct request *req)
+static inline unsigned int
+queue_atomic_write_boundary_bytes(const struct request_queue *q)
{
- preempt_disable();
- req->start_time_ns = sched_clock();
- preempt_enable();
+ return q->limits.atomic_write_boundary_sectors << SECTOR_SHIFT;
}
-static inline void set_io_start_time_ns(struct request *req)
+static inline unsigned int
+queue_atomic_write_max_bytes(const struct request_queue *q)
{
- preempt_disable();
- req->io_start_time_ns = sched_clock();
- preempt_enable();
+ return q->limits.atomic_write_max_sectors << SECTOR_SHIFT;
}
-static inline uint64_t rq_start_time_ns(struct request *req)
+static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
- return req->start_time_ns;
+ return queue_dma_alignment(bdev_get_queue(bdev));
}
-static inline uint64_t rq_io_start_time_ns(struct request *req)
+static inline unsigned int
+blk_lim_dma_alignment_and_pad(struct queue_limits *lim)
{
- return req->io_start_time_ns;
+ return lim->dma_alignment | lim->dma_pad_mask;
}
-#else
-static inline void set_start_time_ns(struct request *req) {}
-static inline void set_io_start_time_ns(struct request *req) {}
-static inline uint64_t rq_start_time_ns(struct request *req)
+
+static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr,
+ unsigned int len)
{
- return 0;
+ unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits);
+
+ return !(addr & alignment) && !(len & alignment);
}
-static inline uint64_t rq_io_start_time_ns(struct request *req)
+
+/* assumes size > 256 */
+static inline unsigned int blksize_bits(unsigned int size)
{
- return 0;
+ return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}
-#endif
+
+int kblockd_schedule_work(struct work_struct *work);
+int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
MODULE_ALIAS("block-major-" __stringify(major) "-*")
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-#define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */
-#define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */
+bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q);
-struct blk_integrity_exchg {
- void *prot_buf;
- void *data_buf;
- sector_t sector;
- unsigned int data_size;
- unsigned short sector_size;
- const char *disk_name;
-};
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
-typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
-typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
-typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
-typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
+static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
+ struct request_queue *q)
+{
+ return true;
+}
-struct blk_integrity {
- integrity_gen_fn *generate_fn;
- integrity_vrfy_fn *verify_fn;
- integrity_set_tag_fn *set_tag_fn;
- integrity_get_tag_fn *get_tag_fn;
+#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
- unsigned short flags;
- unsigned short tuple_size;
- unsigned short sector_size;
- unsigned short tag_size;
+enum blk_unique_id {
+ /* these match the Designator Types specified in SPC */
+ BLK_UID_T10 = 1,
+ BLK_UID_EUI64 = 2,
+ BLK_UID_NAA = 3,
+};
- const char *name;
+struct block_device_operations {
+ void (*submit_bio)(struct bio *bio);
+ int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
+ unsigned int flags);
+ int (*open)(struct gendisk *disk, blk_mode_t mode);
+ void (*release)(struct gendisk *disk);
+ int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
+ unsigned cmd, unsigned long arg);
+ int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
+ unsigned cmd, unsigned long arg);
+ unsigned int (*check_events) (struct gendisk *disk,
+ unsigned int clearing);
+ void (*unlock_native_capacity) (struct gendisk *);
+ int (*getgeo)(struct gendisk *, struct hd_geometry *);
+ int (*set_read_only)(struct block_device *bdev, bool ro);
+ void (*free_disk)(struct gendisk *disk);
+ /* this callback is called with swap_lock and sometimes page table lock held */
+ void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+ int (*report_zones)(struct gendisk *, sector_t sector,
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args);
+ char *(*devnode)(struct gendisk *disk, umode_t *mode);
+ /* returns the length of the identifier or a negative errno: */
+ int (*get_unique_id)(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id id_type);
+ struct module *owner;
+ const struct pr_ops *pr_ops;
- struct kobject kobj;
+ /*
+ * Special callback for probing GPT entry at a given sector.
+ * Needed by Android devices, used by GPT scanner and MMC blk
+ * driver.
+ */
+ int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};
-extern bool blk_integrity_is_initialized(struct gendisk *);
-extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
-extern void blk_integrity_unregister(struct gendisk *);
-extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
- struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
- struct request *);
-extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
- struct bio *);
+#ifdef CONFIG_COMPAT
+extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
+ unsigned int, unsigned long);
+#else
+#define blkdev_compat_ptr_ioctl NULL
+#endif
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline void blk_wake_io_task(struct task_struct *waiter)
{
- return bdev->bd_disk->integrity;
+ /*
+ * If we're polling, the task itself is doing the completions. For
+ * that case, we don't need to signal a wakeup, it's enough to just
+ * mark us as RUNNING.
+ */
+ if (waiter == current)
+ __set_current_state(TASK_RUNNING);
+ else
+ wake_up_process(waiter);
}
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
+ unsigned long start_time);
+void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
+ unsigned int sectors, unsigned long start_time);
+
+unsigned long bio_start_io_acct(struct bio *bio);
+void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
+ struct block_device *orig_bdev);
+
+/**
+ * bio_end_io_acct - end I/O accounting for bio based drivers
+ * @bio: bio to end account for
+ * @start_time: start time returned by bio_start_io_acct()
+ */
+static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
- return disk->integrity;
+ return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
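/*
 * Illustrative sketch, not part of this header: a bio based driver using
 * the accounting helpers above from submission to completion.  The per-I/O
 * structure and its fields are hypothetical driver-side state.
 */
struct example_io {
	struct bio	*orig_bio;
	unsigned long	start_time;
};

static void example_start_io(struct example_io *io, struct bio *bio)
{
	io->orig_bio = bio;
	io->start_time = bio_start_io_acct(bio);
	/* ... remap and submit the bio to the backing device ... */
}

static void example_end_io(struct example_io *io)
{
	bio_end_io_acct(io->orig_bio, io->start_time);
	bio_endio(io->orig_bio);
}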
-static inline int blk_integrity_rq(struct request *rq)
-{
- if (rq->bio == NULL)
- return 0;
+int bdev_validate_blocksize(struct block_device *bdev, int block_size);
+int set_blocksize(struct file *file, int size);
- return bio_integrity(rq->bio);
-}
+int lookup_bdev(const char *pathname, dev_t *dev);
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
-{
- q->limits.max_integrity_segments = segs;
-}
+void blkdev_show(struct seq_file *seqf, off_t offset);
-static inline unsigned short
-queue_max_integrity_segments(struct request_queue *q)
-{
- return q->limits.max_integrity_segments;
-}
+#define BDEVNAME_SIZE 32 /* Largest string for a blockdev identifier */
+#define BDEVT_SIZE 10 /* Largest string for MAJ:MIN for blkdev */
+#ifdef CONFIG_BLOCK
+#define BLKDEV_MAJOR_MAX 512
+#else
+#define BLKDEV_MAJOR_MAX 0
+#endif
-#else /* CONFIG_BLK_DEV_INTEGRITY */
+struct blk_holder_ops {
+ void (*mark_dead)(struct block_device *bdev, bool surprise);
-struct bio;
-struct block_device;
-struct gendisk;
-struct blk_integrity;
+ /*
+ * Sync the file system mounted on the block device.
+ */
+ void (*sync)(struct block_device *bdev);
-static inline int blk_integrity_rq(struct request *rq)
-{
- return 0;
-}
-static inline int blk_rq_count_integrity_sg(struct request_queue *q,
- struct bio *b)
-{
- return 0;
-}
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
- struct bio *b,
- struct scatterlist *s)
-{
- return 0;
-}
-static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
-{
- return 0;
-}
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+ /*
+ * Freeze the file system mounted on the block device.
+ */
+ int (*freeze)(struct block_device *bdev);
+
+ /*
+ * Thaw the file system mounted on the block device.
+ */
+ int (*thaw)(struct block_device *bdev);
+};
+
+/*
+ * For filesystems using @fs_holder_ops, the @holder argument passed to
+ * helpers used to open and claim block devices via
+ * bd_prepare_to_claim() must point to a superblock.
+ */
+extern const struct blk_holder_ops fs_holder_ops;
+
+/*
+ * Return the correct open flags for blkdev_get_by_* for super block flags
+ * as stored in sb->s_flags.
+ */
+#define sb_open_mode(flags) \
+ (BLK_OPEN_READ | BLK_OPEN_RESTRICT_WRITES | \
+ (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
+
+struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ const struct blk_holder_ops *hops);
+struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hops);
+int bd_prepare_to_claim(struct block_device *bdev, void *holder,
+ const struct blk_holder_ops *hops);
+void bd_abort_claiming(struct block_device *bdev, void *holder);
+
+struct block_device *I_BDEV(struct inode *inode);
+struct block_device *file_bdev(struct file *bdev_file);
+bool disk_live(struct gendisk *disk);
+unsigned int block_size(struct block_device *bdev);
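/*
 * Illustrative sketch, not part of this header: a filesystem opening its
 * backing device at mount time with the superblock as holder, following
 * the fs_holder_ops and sb_open_mode() conventions documented above.  The
 * error handling is abbreviated.
 */
static int example_open_backing_bdev(struct super_block *sb, const char *path)
{
	struct file *bdev_file;

	bdev_file = bdev_file_open_by_path(path, sb_open_mode(sb->s_flags),
					   sb, &fs_holder_ops);
	if (IS_ERR(bdev_file))
		return PTR_ERR(bdev_file);

	sb->s_bdev_file = bdev_file;
	sb->s_bdev = file_bdev(bdev_file);
	return 0;
}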
+
+#ifdef CONFIG_BLOCK
+void invalidate_bdev(struct block_device *bdev);
+int sync_blockdev(struct block_device *bdev);
+int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
+int sync_blockdev_nowait(struct block_device *bdev);
+void sync_bdevs(bool wait);
+void bdev_statx(const struct path *path, struct kstat *stat, u32 request_mask);
+void printk_all_partitions(void);
+int __init early_lookup_bdev(const char *pathname, dev_t *dev);
+#else
+static inline void invalidate_bdev(struct block_device *bdev)
{
- return NULL;
}
-static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+static inline int sync_blockdev(struct block_device *bdev)
{
return 0;
}
-static inline int blk_integrity_register(struct gendisk *d,
- struct blk_integrity *b)
+static inline int sync_blockdev_nowait(struct block_device *bdev)
{
return 0;
}
-static inline void blk_integrity_unregister(struct gendisk *d)
+static inline void sync_bdevs(bool wait)
{
}
-static inline void blk_queue_max_integrity_segments(struct request_queue *q,
- unsigned int segs)
+static inline void bdev_statx(const struct path *path, struct kstat *stat,
+ u32 request_mask)
{
}
-static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
+static inline void printk_all_partitions(void)
{
- return 0;
}
-static inline int blk_integrity_merge_rq(struct request_queue *rq,
- struct request *r1,
- struct request *r2)
+static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
- return 0;
-}
-static inline int blk_integrity_merge_bio(struct request_queue *rq,
- struct request *r,
- struct bio *b)
-{
- return 0;
-}
-static inline bool blk_integrity_is_initialized(struct gendisk *g)
-{
- return 0;
+ return -EINVAL;
}
+#endif /* CONFIG_BLOCK */
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
+int bdev_freeze(struct block_device *bdev);
+int bdev_thaw(struct block_device *bdev);
+void bdev_fput(struct file *bdev_file);
-struct block_device_operations {
- int (*open) (struct block_device *, fmode_t);
- void (*release) (struct gendisk *, fmode_t);
- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- int (*direct_access) (struct block_device *, sector_t,
- void **, unsigned long *);
- unsigned int (*check_events) (struct gendisk *disk,
- unsigned int clearing);
- /* ->media_changed() is DEPRECATED, use ->check_events() instead */
- int (*media_changed) (struct gendisk *);
- void (*unlock_native_capacity) (struct gendisk *);
- int (*revalidate_disk) (struct gendisk *);
- int (*getgeo)(struct block_device *, struct hd_geometry *);
- /* this callback is with swap_lock and sometimes page table lock held */
- void (*swap_slot_free_notify) (struct block_device *, unsigned long);
- struct module *owner;
+struct io_comp_batch {
+ struct rq_list req_list;
+ bool need_ts;
+ void (*complete)(struct io_comp_batch *);
};
-extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
- unsigned long);
-#else /* CONFIG_BLOCK */
-/*
- * stubs for when the block layer is configured out
- */
-#define buffer_heads_over_limit 0
-
-static inline long nr_blockdev_pages(void)
+static inline bool blk_atomic_write_start_sect_aligned(sector_t sector,
+ struct queue_limits *limits)
{
- return 0;
-}
+ unsigned int alignment = max(limits->atomic_write_hw_unit_min,
+ limits->atomic_write_hw_boundary);
-struct blk_plug {
-};
-
-static inline void blk_start_plug(struct blk_plug *plug)
-{
+ return IS_ALIGNED(sector, alignment >> SECTOR_SHIFT);
}
-static inline void blk_finish_plug(struct blk_plug *plug)
+static inline bool bdev_can_atomic_write(struct block_device *bdev)
{
+ struct request_queue *bd_queue = bdev->bd_queue;
+ struct queue_limits *limits = &bd_queue->limits;
+
+ if (!limits->atomic_write_unit_min)
+ return false;
+
+ if (bdev_is_partition(bdev))
+ return blk_atomic_write_start_sect_aligned(bdev->bd_start_sect,
+ limits);
+
+ return true;
}
-static inline void blk_flush_plug(struct task_struct *task)
+static inline unsigned int
+bdev_atomic_write_unit_min_bytes(struct block_device *bdev)
{
+ if (!bdev_can_atomic_write(bdev))
+ return 0;
+ return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev));
}
-static inline void blk_schedule_flush_plug(struct task_struct *task)
+static inline unsigned int
+bdev_atomic_write_unit_max_bytes(struct block_device *bdev)
{
+ if (!bdev_can_atomic_write(bdev))
+ return 0;
+ return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev));
}
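/*
 * Illustrative sketch, not part of this header: validating a candidate
 * untorn write against the atomic write limits exposed above before
 * tagging it with REQ_ATOMIC.  Requiring @len to be a power of two and
 * @pos to be naturally aligned mirrors the usual validation, but is an
 * assumption for this example.
 */
static bool example_atomic_write_ok(struct block_device *bdev, loff_t pos,
				    unsigned int len)
{
	if (!bdev_can_atomic_write(bdev))
		return false;
	if (!is_power_of_2(len))
		return false;
	if (len < bdev_atomic_write_unit_min_bytes(bdev) ||
	    len > bdev_atomic_write_unit_max_bytes(bdev))
		return false;
	/* naturally aligned writes cannot straddle an atomic boundary */
	return IS_ALIGNED(pos, len);
}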
-
-static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+static inline int bio_split_rw_at(struct bio *bio,
+ const struct queue_limits *lim,
+ unsigned *segs, unsigned max_bytes)
{
- return false;
+ return bio_split_io_at(bio, lim, segs, max_bytes, lim->dma_alignment);
}
-#endif /* CONFIG_BLOCK */
+#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
-#endif
+#endif /* _LINUX_BLKDEV_H */