author:    Arnaldo Carvalho de Melo <acme@redhat.com>  2021-03-08 10:11:33 -0300
committer: Arnaldo Carvalho de Melo <acme@redhat.com>  2021-03-08 10:11:33 -0300
commit:    009ef05f98129aa91c62c3baab859ba593a15bb2
tree:      f3414f08d636a597545b1e4f443b373b9d6d8f4b /kernel
parent:    2777b81b379df772defd654bc4d3fa82dca17a4b
parent:    144c79ef33536b4ecb4951e07dbc1f2b7fa99d32
Merge remote-tracking branch 'torvalds/master' into perf/core
To pick up the fixes sent for v5.12 and continue development based on
v5.12-rc2, i.e. without the swap-on-file bug.

This also picks up a slightly newer and better version of the
tools/perf/arch/arm/util/cs-etm.c patch, using the BIT() macro, which had
already been slated for v5.13 but ended up landing in v5.12-rc1 as an
older version.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'kernel')
41 files changed, 640 insertions, 467 deletions
diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c index 5b3f01da172b..60739d5e3373 100644 --- a/kernel/audit_fsnotify.c +++ b/kernel/audit_fsnotify.c @@ -84,7 +84,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa dentry = kern_path_locked(pathname, &path); if (IS_ERR(dentry)) - return (void *)dentry; /* returning an error */ + return ERR_CAST(dentry); /* returning an error */ inode = path.dentry->d_inode; inode_unlock(inode); diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 434337ab6b2b..47fb48f42c93 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1930,7 +1930,7 @@ static inline int audit_copy_fcaps(struct audit_names *name, if (!dentry) return 0; - rc = get_vfs_caps_from_disk(dentry, &caps); + rc = get_vfs_caps_from_disk(&init_user_ns, dentry, &caps); if (rc) return rc; @@ -2481,7 +2481,8 @@ int __audit_log_bprm_fcaps(struct linux_binprm *bprm, ax->d.next = context->aux; context->aux = (void *)ax; - get_vfs_caps_from_disk(bprm->file->f_path.dentry, &vcaps); + get_vfs_caps_from_disk(&init_user_ns, + bprm->file->f_path.dentry, &vcaps); ax->fcap.permitted = vcaps.permitted; ax->fcap.inheritable = vcaps.inheritable; diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index dd4b7fd60ee7..1576ff331ee4 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -122,7 +122,7 @@ static struct inode *bpf_get_inode(struct super_block *sb, inode->i_mtime = inode->i_atime; inode->i_ctime = inode->i_atime; - inode_init_owner(inode, dir, mode); + inode_init_owner(&init_user_ns, inode, dir, mode); return inode; } @@ -152,7 +152,8 @@ static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode, dir->i_ctime = dir->i_mtime; } -static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) +static int bpf_mkdir(struct user_namespace *mnt_userns, struct inode *dir, + struct dentry *dentry, umode_t mode) { struct inode *inode; @@ -381,8 +382,8 @@ bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags) return simple_lookup(dir, dentry, flags); } -static int bpf_symlink(struct inode *dir, struct dentry *dentry, - const char *target) +static int bpf_symlink(struct user_namespace *mnt_userns, struct inode *dir, + struct dentry *dentry, const char *target) { char *link = kstrdup(target, GFP_USER | __GFP_NOWARN); struct inode *inode; @@ -507,7 +508,7 @@ static void *bpf_obj_do_get(const char __user *pathname, return ERR_PTR(ret); inode = d_backing_inode(path.dentry); - ret = inode_permission(inode, ACC_MODE(flags)); + ret = path_permission(&path, ACC_MODE(flags)); if (ret) goto out; @@ -558,7 +559,7 @@ int bpf_obj_get_user(const char __user *pathname, int flags) static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type) { struct bpf_prog *prog; - int ret = inode_permission(inode, MAY_READ); + int ret = inode_permission(&init_user_ns, inode, MAY_READ); if (ret) return ERR_PTR(ret); diff --git a/kernel/capability.c b/kernel/capability.c index de7eac903a2a..46a361dde042 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -484,10 +484,12 @@ EXPORT_SYMBOL(file_ns_capable); * * Return true if the inode uid and gid are within the namespace. 
*/ -bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode) +bool privileged_wrt_inode_uidgid(struct user_namespace *ns, + struct user_namespace *mnt_userns, + const struct inode *inode) { - return kuid_has_mapping(ns, inode->i_uid) && - kgid_has_mapping(ns, inode->i_gid); + return kuid_has_mapping(ns, i_uid_into_mnt(mnt_userns, inode)) && + kgid_has_mapping(ns, i_gid_into_mnt(mnt_userns, inode)); } /** @@ -499,11 +501,13 @@ bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode * * its own user namespace and that the given inode's uid and gid are * mapped into the current user namespace. */ -bool capable_wrt_inode_uidgid(const struct inode *inode, int cap) +bool capable_wrt_inode_uidgid(struct user_namespace *mnt_userns, + const struct inode *inode, int cap) { struct user_namespace *ns = current_user_ns(); - return ns_capable(ns, cap) && privileged_wrt_inode_uidgid(ns, inode); + return ns_capable(ns, cap) && + privileged_wrt_inode_uidgid(ns, mnt_userns, inode); } EXPORT_SYMBOL(capable_wrt_inode_uidgid); diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index c80fe99f85ae..9153b20e5cc6 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -4672,7 +4672,7 @@ static int cgroup_may_write(const struct cgroup *cgrp, struct super_block *sb) if (!inode) return -ENOMEM; - ret = inode_permission(inode, MAY_WRITE); + ret = inode_permission(&init_user_ns, inode, MAY_WRITE); iput(inode); return ret; } diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index b636d517c02c..4708aec492df 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -455,6 +455,17 @@ setundefined: return 0; } +void kgdb_free_init_mem(void) +{ + int i; + + /* Clear init memory breakpoints. 
*/ + for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { + if (init_section_contains((void *)kgdb_break[i].bpt_addr, 0)) + kgdb_break[i].state = BP_UNDEFINED; + } +} + #ifdef CONFIG_KGDB_KDB void kdb_dump_stack_on_cpu(int cpu) { diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c index da95df381483..e0e64f8b0739 100644 --- a/kernel/dma/map_benchmark.c +++ b/kernel/dma/map_benchmark.c @@ -21,6 +21,7 @@ #define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark) #define DMA_MAP_MAX_THREADS 1024 #define DMA_MAP_MAX_SECONDS 300 +#define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC) #define DMA_MAP_BIDIRECTIONAL 0 #define DMA_MAP_TO_DEVICE 1 @@ -36,7 +37,8 @@ struct map_benchmark { __s32 node; /* which numa node this benchmark will run on */ __u32 dma_bits; /* DMA addressing capability */ __u32 dma_dir; /* DMA data direction */ - __u8 expansion[84]; /* For future use */ + __u32 dma_trans_ns; /* time for DMA transmission in ns */ + __u8 expansion[80]; /* For future use */ }; struct map_benchmark_data { @@ -87,6 +89,9 @@ static int map_benchmark_thread(void *data) map_etime = ktime_get(); map_delta = ktime_sub(map_etime, map_stime); + /* Pretend DMA is transmitting */ + ndelay(map->bparam.dma_trans_ns); + unmap_stime = ktime_get(); dma_unmap_single(map->dev, dma_addr, PAGE_SIZE, map->dir); unmap_etime = ktime_get(); @@ -218,6 +223,11 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd, return -EINVAL; } + if (map->bparam.dma_trans_ns > DMA_MAP_MAX_TRANS_DELAY) { + pr_err("invalid transmission delay\n"); + return -EINVAL; + } + if (map->bparam.node != NUMA_NO_NODE && !node_possible(map->bparam.node)) { pr_err("invalid numa node\n"); diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 84de6b1c5fab..b6a633679933 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -517,46 +517,6 @@ void dma_free_pages(struct device *dev, size_t size, struct page *page, } EXPORT_SYMBOL_GPL(dma_free_pages); -void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - void *vaddr; - - if (!ops || !ops->alloc_noncoherent) { - struct page *page; - - page = dma_alloc_pages(dev, size, dma_handle, dir, gfp); - if (!page) - return NULL; - return page_address(page); - } - - size = PAGE_ALIGN(size); - vaddr = ops->alloc_noncoherent(dev, size, dma_handle, dir, gfp); - if (vaddr) - debug_dma_map_page(dev, virt_to_page(vaddr), 0, size, dir, - *dma_handle); - return vaddr; -} -EXPORT_SYMBOL_GPL(dma_alloc_noncoherent); - -void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - - if (!ops || !ops->free_noncoherent) { - dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir); - return; - } - - size = PAGE_ALIGN(size); - debug_dma_unmap_page(dev, dma_handle, size, dir); - ops->free_noncoherent(dev, size, vaddr, dma_handle, dir); -} -EXPORT_SYMBOL_GPL(dma_free_noncoherent); - int dma_supported(struct device *dev, u64 mask) { const struct dma_map_ops *ops = get_dma_ops(dev); diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index 7c42df6e6100..c10e855a03bc 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -50,9 +50,6 @@ #define CREATE_TRACE_POINTS #include <trace/events/swiotlb.h> -#define OFFSET(val,align) ((unsigned long) \ - ( (val) & ( (align) - 1))) - #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - 
IO_TLB_SHIFT)) /* @@ -103,6 +100,11 @@ static unsigned int max_segment; static phys_addr_t *io_tlb_orig_addr; /* + * The mapped buffer's size should be validated during a sync operation. + */ +static size_t *io_tlb_orig_size; + +/* * Protect the above data structures in the map and unmap calls */ static DEFINE_SPINLOCK(io_tlb_lock); @@ -171,7 +173,7 @@ void __init swiotlb_adjust_size(unsigned long new_size) * adjust/expand SWIOTLB size for their use. */ if (!io_tlb_nslabs) { - size = ALIGN(new_size, 1 << IO_TLB_SHIFT); + size = ALIGN(new_size, IO_TLB_SIZE); io_tlb_nslabs = size >> IO_TLB_SHIFT; io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); @@ -192,6 +194,16 @@ void swiotlb_print_info(void) bytes >> 20); } +static inline unsigned long io_tlb_offset(unsigned long val) +{ + return val & (IO_TLB_SEGSIZE - 1); +} + +static inline unsigned long nr_slots(u64 val) +{ + return DIV_ROUND_UP(val, IO_TLB_SIZE); +} + /* * Early SWIOTLB allocation may be too early to allow an architecture to * perform the desired operations. This function allows the architecture to @@ -240,9 +252,16 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) panic("%s: Failed to allocate %zu bytes align=0x%lx\n", __func__, alloc_size, PAGE_SIZE); + alloc_size = PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t)); + io_tlb_orig_size = memblock_alloc(alloc_size, PAGE_SIZE); + if (!io_tlb_orig_size) + panic("%s: Failed to allocate %zu bytes align=0x%lx\n", + __func__, alloc_size, PAGE_SIZE); + for (i = 0; i < io_tlb_nslabs; i++) { - io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); + io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i); io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; + io_tlb_orig_size[i] = 0; } io_tlb_index = 0; no_iotlb_memory = false; @@ -363,7 +382,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) * between io_tlb_start and io_tlb_end. 
*/ io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL, - get_order(io_tlb_nslabs * sizeof(int))); + get_order(io_tlb_nslabs * sizeof(int))); if (!io_tlb_list) goto cleanup3; @@ -374,9 +393,18 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) if (!io_tlb_orig_addr) goto cleanup4; + io_tlb_orig_size = (size_t *) + __get_free_pages(GFP_KERNEL, + get_order(io_tlb_nslabs * + sizeof(size_t))); + if (!io_tlb_orig_size) + goto cleanup5; + + for (i = 0; i < io_tlb_nslabs; i++) { - io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); + io_tlb_list[i] = IO_TLB_SEGSIZE - io_tlb_offset(i); io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; + io_tlb_orig_size[i] = 0; } io_tlb_index = 0; no_iotlb_memory = false; @@ -389,6 +417,10 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs) return 0; +cleanup5: + free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * + sizeof(phys_addr_t))); + cleanup4: free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * sizeof(int))); @@ -404,6 +436,8 @@ void __init swiotlb_exit(void) return; if (late_alloc) { + free_pages((unsigned long)io_tlb_orig_size, + get_order(io_tlb_nslabs * sizeof(size_t))); free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * sizeof(phys_addr_t))); free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * @@ -413,6 +447,8 @@ void __init swiotlb_exit(void) } else { memblock_free_late(__pa(io_tlb_orig_addr), PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t))); + memblock_free_late(__pa(io_tlb_orig_size), + PAGE_ALIGN(io_tlb_nslabs * sizeof(size_t))); memblock_free_late(__pa(io_tlb_list), PAGE_ALIGN(io_tlb_nslabs * sizeof(int))); memblock_free_late(io_tlb_start, @@ -461,79 +497,71 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr, } } -phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr, - size_t mapping_size, size_t alloc_size, - enum dma_data_direction dir, unsigned long attrs) -{ - dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(hwdev, io_tlb_start); - unsigned long flags; - phys_addr_t tlb_addr; - unsigned int nslots, stride, index, wrap; - int i; - unsigned long mask; - unsigned long offset_slots; - unsigned long max_slots; - unsigned long tmp_io_tlb_used; - - if (no_iotlb_memory) - panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); - - if (mem_encrypt_active()) - pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n"); +#define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT)) - if (mapping_size > alloc_size) { - dev_warn_once(hwdev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)", - mapping_size, alloc_size); - return (phys_addr_t)DMA_MAPPING_ERROR; - } +/* + * Return the offset into a iotlb slot required to keep the device happy. + */ +static unsigned int swiotlb_align_offset(struct device *dev, u64 addr) +{ + return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1); +} - mask = dma_get_seg_boundary(hwdev); +/* + * Carefully handle integer overflow which can occur when boundary_mask == ~0UL. 
+ */ +static inline unsigned long get_max_slots(unsigned long boundary_mask) +{ + if (boundary_mask == ~0UL) + return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); + return nr_slots(boundary_mask + 1); +} - tbl_dma_addr &= mask; +static unsigned int wrap_index(unsigned int index) +{ + if (index >= io_tlb_nslabs) + return 0; + return index; +} - offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; +/* + * Find a suitable number of IO TLB entries size that will fit this request and + * allocate a buffer from that IO TLB pool. + */ +static int find_slots(struct device *dev, phys_addr_t orig_addr, + size_t alloc_size) +{ + unsigned long boundary_mask = dma_get_seg_boundary(dev); + dma_addr_t tbl_dma_addr = + phys_to_dma_unencrypted(dev, io_tlb_start) & boundary_mask; + unsigned long max_slots = get_max_slots(boundary_mask); + unsigned int iotlb_align_mask = + dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1); + unsigned int nslots = nr_slots(alloc_size), stride; + unsigned int index, wrap, count = 0, i; + unsigned long flags; - /* - * Carefully handle integer overflow which can occur when mask == ~0UL. - */ - max_slots = mask + 1 - ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT - : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT); + BUG_ON(!nslots); /* - * For mappings greater than or equal to a page, we limit the stride - * (and hence alignment) to a page size. + * For mappings with an alignment requirement don't bother looping to + * unaligned slots once we found an aligned one. For allocations of + * PAGE_SIZE or larger only look for page aligned allocations. */ - nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; + stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1; if (alloc_size >= PAGE_SIZE) - stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT)); - else - stride = 1; - - BUG_ON(!nslots); + stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT)); - /* - * Find suitable number of IO TLB entries size that will fit this - * request and allocate a buffer from that IO TLB pool. - */ spin_lock_irqsave(&io_tlb_lock, flags); - if (unlikely(nslots > io_tlb_nslabs - io_tlb_used)) goto not_found; - index = ALIGN(io_tlb_index, stride); - if (index >= io_tlb_nslabs) - index = 0; - wrap = index; - + index = wrap = wrap_index(ALIGN(io_tlb_index, stride)); do { - while (iommu_is_span_boundary(index, nslots, offset_slots, - max_slots)) { - index += stride; - if (index >= io_tlb_nslabs) - index = 0; - if (index == wrap) - goto not_found; + if ((slot_addr(tbl_dma_addr, index) & iotlb_align_mask) != + (orig_addr & iotlb_align_mask)) { + index = wrap_index(index + 1); + continue; } /* @@ -541,55 +569,96 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr, * contiguous buffers, we allocate the buffers from that slot * and mark the entries as '0' indicating unavailable. */ - if (io_tlb_list[index] >= nslots) { - int count = 0; - - for (i = index; i < (int) (index + nslots); i++) - io_tlb_list[i] = 0; - for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) - io_tlb_list[i] = ++count; - tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT); - - /* - * Update the indices to avoid searching in the next - * round. - */ - io_tlb_index = ((index + nslots) < io_tlb_nslabs - ? 
(index + nslots) : 0); - - goto found; + if (!iommu_is_span_boundary(index, nslots, + nr_slots(tbl_dma_addr), + max_slots)) { + if (io_tlb_list[index] >= nslots) + goto found; } - index += stride; - if (index >= io_tlb_nslabs) - index = 0; + index = wrap_index(index + stride); } while (index != wrap); not_found: - tmp_io_tlb_used = io_tlb_used; - spin_unlock_irqrestore(&io_tlb_lock, flags); - if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) - dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n", - alloc_size, io_tlb_nslabs, tmp_io_tlb_used); - return (phys_addr_t)DMA_MAPPING_ERROR; + return -1; + found: + for (i = index; i < index + nslots; i++) + io_tlb_list[i] = 0; + for (i = index - 1; + io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && + io_tlb_list[i]; i--) + io_tlb_list[i] = ++count; + + /* + * Update the indices to avoid searching in the next round. + */ + if (index + nslots < io_tlb_nslabs) + io_tlb_index = index + nslots; + else + io_tlb_index = 0; io_tlb_used += nslots; + spin_unlock_irqrestore(&io_tlb_lock, flags); + return index; +} + +phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, + size_t mapping_size, size_t alloc_size, + enum dma_data_direction dir, unsigned long attrs) +{ + unsigned int offset = swiotlb_align_offset(dev, orig_addr); + unsigned int index, i; + phys_addr_t tlb_addr; + + if (no_iotlb_memory) + panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); + + if (mem_encrypt_active()) + pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n"); + + if (mapping_size > alloc_size) { + dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)", + mapping_size, alloc_size); + return (phys_addr_t)DMA_MAPPING_ERROR; + } + + index = find_slots(dev, orig_addr, alloc_size + offset); + if (index == -1) { + if (!(attrs & DMA_ATTR_NO_WARN)) + dev_warn_ratelimited(dev, + "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n", + alloc_size, io_tlb_nslabs, io_tlb_used); + return (phys_addr_t)DMA_MAPPING_ERROR; + } /* * Save away the mapping from the original address to the DMA address. * This is needed when we sync the memory. Then we sync the buffer if * needed. */ - for (i = 0; i < nslots; i++) - io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT); + for (i = 0; i < nr_slots(alloc_size + offset); i++) { + io_tlb_orig_addr[index + i] = slot_addr(orig_addr, i); + io_tlb_orig_size[index+i] = alloc_size - (i << IO_TLB_SHIFT); + } + tlb_addr = slot_addr(io_tlb_start, index) + offset; if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE); - return tlb_addr; } +static void validate_sync_size_and_truncate(struct device *hwdev, size_t orig_size, size_t *size) +{ + if (*size > orig_size) { + /* Warn and truncate mapping_size */ + dev_WARN_ONCE(hwdev, 1, + "Attempt for buffer overflow. Original size: %zu. Mapping size: %zu.\n", + orig_size, *size); + *size = orig_size; + } +} + /* * tlb_addr is the physical address of the bounce buffer to unmap. 
*/ @@ -598,10 +667,13 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, enum dma_data_direction dir, unsigned long attrs) { unsigned long flags; - int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; - int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; + unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr); + int i, count, nslots = nr_slots(alloc_size + offset); + int index = (tlb_addr - offset - io_tlb_start) >> IO_TLB_SHIFT; phys_addr_t orig_addr = io_tlb_orig_addr[index]; + validate_sync_size_and_truncate(hwdev, io_tlb_orig_size[index], &mapping_size); + /* * First, sync the memory before unmapping the entry */ @@ -617,26 +689,30 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr, * with slots below and above the pool being returned. */ spin_lock_irqsave(&io_tlb_lock, flags); - { - count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ? - io_tlb_list[index + nslots] : 0); - /* - * Step 1: return the slots to the free list, merging the - * slots with superceeding slots - */ - for (i = index + nslots - 1; i >= index; i--) { - io_tlb_list[i] = ++count; - io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; - } - /* - * Step 2: merge the returned slots with the preceding slots, - * if available (non zero) - */ - for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--) - io_tlb_list[i] = ++count; + if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE)) + count = io_tlb_list[index + nslots]; + else + count = 0; - io_tlb_used -= nslots; + /* + * Step 1: return the slots to the free list, merging the slots with + * superceeding slots + */ + for (i = index + nslots - 1; i >= index; i--) { + io_tlb_list[i] = ++count; + io_tlb_orig_addr[i] = INVALID_PHYS_ADDR; + io_tlb_orig_size[i] = 0; } + + /* + * Step 2: merge the returned slots with the preceding slots, if + * available (non zero) + */ + for (i = index - 1; + io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && io_tlb_list[i]; + i--) + io_tlb_list[i] = ++count; + io_tlb_used -= nslots; spin_unlock_irqrestore(&io_tlb_lock, flags); } @@ -645,11 +721,13 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, enum dma_sync_target target) { int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; + size_t orig_size = io_tlb_orig_size[index]; phys_addr_t orig_addr = io_tlb_orig_addr[index]; if (orig_addr == INVALID_PHYS_ADDR) return; - orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1); + + validate_sync_size_and_truncate(hwdev, orig_size, &size); switch (target) { case SYNC_FOR_CPU: @@ -707,7 +785,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size, size_t swiotlb_max_mapping_size(struct device *dev) { - return ((size_t)1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE; + return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE; } bool is_swiotlb_active(void) diff --git a/kernel/events/core.c b/kernel/events/core.c index 129dee540a8b..0aeca5f3c0ac 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -269,7 +269,7 @@ static void event_function_call(struct perf_event *event, event_f func, void *da if (!event->parent) { /* * If this is a !child event, we must hold ctx::mutex to - * stabilize the the event->ctx relation. See + * stabilize the event->ctx relation. See * perf_event_ctx_lock(). */ lockdep_assert_held(&ctx->mutex); @@ -1303,7 +1303,7 @@ static void put_ctx(struct perf_event_context *ctx) * life-time rules separate them. 
That is an exiting task cannot fork, and a * spawning task cannot (yet) exit. * - * But remember that that these are parent<->child context relations, and + * But remember that these are parent<->child context relations, and * migration does not affect children, therefore these two orderings should not * interact. * @@ -1442,7 +1442,7 @@ static u64 primary_event_id(struct perf_event *event) /* * Get the perf_event_context for a task and lock it. * - * This has to cope with with the fact that until it is locked, + * This has to cope with the fact that until it is locked, * the context could get moved to another task. */ static struct perf_event_context * @@ -2486,7 +2486,7 @@ static void perf_set_shadow_time(struct perf_event *event, * But this is a bit hairy. * * So instead, we have an explicit cgroup call to remain - * within the time time source all along. We believe it + * within the time source all along. We believe it * is cleaner and simpler to understand. */ if (is_cgroup_event(event)) diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 3ea7f8f92f1d..6addc9780319 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1733,7 +1733,7 @@ void uprobe_free_utask(struct task_struct *t) } /* - * Allocate a uprobe_task object for the task if if necessary. + * Allocate a uprobe_task object for the task if necessary. * Called when the thread hits a breakpoint. * * Returns: diff --git a/kernel/fork.c b/kernel/fork.c index d66cd1014211..d3171e8e88e5 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1940,6 +1940,8 @@ static __latent_entropy struct task_struct *copy_process( p = dup_task_struct(current, node); if (!p) goto fork_out; + if (args->io_thread) + p->flags |= PF_IO_WORKER; /* * This _must_ happen before we call free_task(), i.e. before we jump @@ -2411,6 +2413,34 @@ struct mm_struct *copy_init_mm(void) } /* + * This is like kernel_clone(), but shaved down and tailored to just + * creating io_uring workers. It returns a created task, or an error pointer. + * The returned task is inactive, and the caller must fire it up through + * wake_up_new_task(p). All signals are blocked in the created task. + */ +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) +{ + unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| + CLONE_IO; + struct kernel_clone_args args = { + .flags = ((lower_32_bits(flags) | CLONE_VM | + CLONE_UNTRACED) & ~CSIGNAL), + .exit_signal = (lower_32_bits(flags) & CSIGNAL), + .stack = (unsigned long)fn, + .stack_size = (unsigned long)arg, + .io_thread = 1, + }; + struct task_struct *tsk; + + tsk = copy_process(NULL, 0, node, &args); + if (!IS_ERR(tsk)) { + sigfillset(&tsk->blocked); + sigdelsetmask(&tsk->blocked, sigmask(SIGKILL)); + } + return tsk; +} + +/* * Ok, this is the main fork-routine. 
* * It copies the process, and if successful kick-starts diff --git a/kernel/groups.c b/kernel/groups.c index fe7e6385530e..787b381c7c00 100644 --- a/kernel/groups.c +++ b/kernel/groups.c @@ -15,12 +15,7 @@ struct group_info *groups_alloc(int gidsetsize) { struct group_info *gi; - unsigned int len; - - len = sizeof(struct group_info) + sizeof(kgid_t) * gidsetsize; - gi = kmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_NOWARN|__GFP_NORETRY); - if (!gi) - gi = __vmalloc(len, GFP_KERNEL_ACCOUNT); + gi = kvmalloc(struct_size(gi, gid, gidsetsize), GFP_KERNEL_ACCOUNT); if (!gi) return NULL; diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 6aacd342cd14..288151393a06 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -205,6 +205,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size, } fwnode_handle_get(fwnode); + fwnode_dev_initialized(fwnode, true); /* Fill structure */ INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); @@ -253,6 +254,7 @@ void irq_domain_remove(struct irq_domain *domain) pr_debug("Removed domain %s\n", domain->name); + fwnode_dev_initialized(domain->fwnode, false); fwnode_handle_put(domain->fwnode); if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED) kfree(domain->name); diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index fe9de067771c..8043a90aa50e 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c @@ -177,6 +177,11 @@ unsigned long kallsyms_lookup_name(const char *name) return module_kallsyms_lookup_name(name); } +#ifdef CONFIG_LIVEPATCH +/* + * Iterate over all symbols in vmlinux. For symbols from modules use + * module_kallsyms_on_each_symbol instead. + */ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, unsigned long), void *data) @@ -192,8 +197,9 @@ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, if (ret != 0) return ret; } - return module_kallsyms_on_each_symbol(fn, data); + return 0; } +#endif /* CONFIG_LIVEPATCH */ static unsigned long get_symbol_pos(unsigned long addr, unsigned long *symbolsize, diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h index 39d30ccf8d87..48aaf2ac0d0d 100644 --- a/kernel/kexec_internal.h +++ b/kernel/kexec_internal.h @@ -13,8 +13,6 @@ void kimage_terminate(struct kimage *image); int kimage_is_destination_range(struct kimage *image, unsigned long start, unsigned long end); -int machine_kexec_post_load(struct kimage *image); - extern struct mutex kexec_mutex; #ifdef CONFIG_KEXEC_FILE diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index f76fdb925532..335d988bd811 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -19,6 +19,7 @@ #include <linux/moduleloader.h> #include <linux/completion.h> #include <linux/memory.h> +#include <linux/rcupdate.h> #include <asm/cacheflush.h> #include "core.h" #include "patch.h" @@ -57,7 +58,7 @@ static void klp_find_object_module(struct klp_object *obj) if (!klp_is_module(obj)) return; - mutex_lock(&module_mutex); + rcu_read_lock_sched(); /* * We do not want to block removal of patched modules and therefore * we do not take a reference here. 
The patches are removed by @@ -74,7 +75,7 @@ static void klp_find_object_module(struct klp_object *obj) if (mod && mod->klp_alive) obj->mod = mod; - mutex_unlock(&module_mutex); + rcu_read_unlock_sched(); } static bool klp_initialized(void) @@ -163,12 +164,10 @@ static int klp_find_object_symbol(const char *objname, const char *name, .pos = sympos, }; - mutex_lock(&module_mutex); if (objname) module_kallsyms_on_each_symbol(klp_find_callback, &args); else kallsyms_on_each_symbol(klp_find_callback, &args); - mutex_unlock(&module_mutex); /* * Ensure an address was found. If sympos is 0, ensure symbol is unique; diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 03b21135313c..48fff6437901 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1420,7 +1420,7 @@ rt_mutex_fasttrylock(struct rt_mutex *lock, } /* - * Performs the wakeup of the the top-waiter and re-enables preemption. + * Performs the wakeup of the top-waiter and re-enables preemption. */ void rt_mutex_postunlock(struct wake_q_head *wake_q) { @@ -1819,7 +1819,7 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) * been started. * @waiter: the pre-initialized rt_mutex_waiter * - * Wait for the the lock acquisition started on our behalf by + * Wait for the lock acquisition started on our behalf by * rt_mutex_start_proxy_lock(). Upon failure, the caller must call * rt_mutex_cleanup_proxy_lock(). * diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index ba67600c7b2c..abba5df50006 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -1048,7 +1048,7 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state) /* * If there were already threads queued before us and: - * 1) there are no no active locks, wake the front + * 1) there are no active locks, wake the front * queued process(es) as the handoff bit might be set. * 2) there are no active writers and some readers, the lock * must be read owned; so we try to wake any read lock diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c index d9dd94defc0a..9aa855a96c4a 100644 --- a/kernel/locking/semaphore.c +++ b/kernel/locking/semaphore.c @@ -119,7 +119,7 @@ EXPORT_SYMBOL(down_killable); * @sem: the semaphore to be acquired * * Try to acquire the semaphore atomically. Returns 0 if the semaphore has - * been acquired successfully or 1 if it it cannot be acquired. + * been acquired successfully or 1 if it cannot be acquired. * * NOTE: This return value is inverted from both spin_trylock and * mutex_trylock! Be careful about this when converting code. diff --git a/kernel/module.c b/kernel/module.c index 4bf30e4b3eaa..30479355ab85 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -87,8 +87,7 @@ * 3) module_addr_min/module_addr_max. * (delete and add uses RCU list operations). 
*/ -DEFINE_MUTEX(module_mutex); -EXPORT_SYMBOL_GPL(module_mutex); +static DEFINE_MUTEX(module_mutex); static LIST_HEAD(modules); /* Work queue for freeing init sections in success case */ @@ -256,11 +255,6 @@ static void mod_update_bounds(struct module *mod) struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */ #endif /* CONFIG_KGDB_KDB */ -static void module_assert_mutex(void) -{ - lockdep_assert_held(&module_mutex); -} - static void module_assert_mutex_or_preempt(void) { #ifdef CONFIG_LOCKDEP @@ -414,19 +408,8 @@ extern const struct kernel_symbol __start___ksymtab[]; extern const struct kernel_symbol __stop___ksymtab[]; extern const struct kernel_symbol __start___ksymtab_gpl[]; extern const struct kernel_symbol __stop___ksymtab_gpl[]; -extern const struct kernel_symbol __start___ksymtab_gpl_future[]; -extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; extern const s32 __start___kcrctab[]; extern const s32 __start___kcrctab_gpl[]; -extern const s32 __start___kcrctab_gpl_future[]; -#ifdef CONFIG_UNUSED_SYMBOLS -extern const struct kernel_symbol __start___ksymtab_unused[]; -extern const struct kernel_symbol __stop___ksymtab_unused[]; -extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; -extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; -extern const s32 __start___kcrctab_unused[]; -extern const s32 __start___kcrctab_unused_gpl[]; -#endif #ifndef CONFIG_MODVERSIONS #define symversion(base, idx) NULL @@ -434,87 +417,14 @@ extern const s32 __start___kcrctab_unused_gpl[]; #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) #endif -static bool each_symbol_in_section(const struct symsearch *arr, - unsigned int arrsize, - struct module *owner, - bool (*fn)(const struct symsearch *syms, - struct module *owner, - void *data), - void *data) -{ - unsigned int j; - - for (j = 0; j < arrsize; j++) { - if (fn(&arr[j], owner, data)) - return true; - } - - return false; -} - -/* Returns true as soon as fn returns true, otherwise false. 
*/ -static bool each_symbol_section(bool (*fn)(const struct symsearch *arr, - struct module *owner, - void *data), - void *data) -{ - struct module *mod; - static const struct symsearch arr[] = { - { __start___ksymtab, __stop___ksymtab, __start___kcrctab, - NOT_GPL_ONLY, false }, - { __start___ksymtab_gpl, __stop___ksymtab_gpl, - __start___kcrctab_gpl, - GPL_ONLY, false }, - { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future, - __start___kcrctab_gpl_future, - WILL_BE_GPL_ONLY, false }, -#ifdef CONFIG_UNUSED_SYMBOLS - { __start___ksymtab_unused, __stop___ksymtab_unused, - __start___kcrctab_unused, - NOT_GPL_ONLY, true }, - { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl, - __start___kcrctab_unused_gpl, - GPL_ONLY, true }, -#endif - }; - - module_assert_mutex_or_preempt(); - - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data)) - return true; - - list_for_each_entry_rcu(mod, &modules, list, - lockdep_is_held(&module_mutex)) { - struct symsearch arr[] = { - { mod->syms, mod->syms + mod->num_syms, mod->crcs, - NOT_GPL_ONLY, false }, - { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, - mod->gpl_crcs, - GPL_ONLY, false }, - { mod->gpl_future_syms, - mod->gpl_future_syms + mod->num_gpl_future_syms, - mod->gpl_future_crcs, - WILL_BE_GPL_ONLY, false }, -#ifdef CONFIG_UNUSED_SYMBOLS - { mod->unused_syms, - mod->unused_syms + mod->num_unused_syms, - mod->unused_crcs, - NOT_GPL_ONLY, true }, - { mod->unused_gpl_syms, - mod->unused_gpl_syms + mod->num_unused_gpl_syms, - mod->unused_gpl_crcs, - GPL_ONLY, true }, -#endif - }; - - if (mod->state == MODULE_STATE_UNFORMED) - continue; - - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) - return true; - } - return false; -} +struct symsearch { + const struct kernel_symbol *start, *stop; + const s32 *crcs; + enum mod_license { + NOT_GPL_ONLY, + GPL_ONLY, + } license; +}; struct find_symbol_arg { /* Input */ @@ -535,28 +445,8 @@ static bool check_exported_symbol(const struct symsearch *syms, { struct find_symbol_arg *fsa = data; - if (!fsa->gplok) { - if (syms->license == GPL_ONLY) - return false; - if (syms->license == WILL_BE_GPL_ONLY && fsa->warn) { - pr_warn("Symbol %s is being used by a non-GPL module, " - "which will not be allowed in the future\n", - fsa->name); - } - } - -#ifdef CONFIG_UNUSED_SYMBOLS - if (syms->unused && fsa->warn) { - pr_warn("Symbol %s is marked as UNUSED, however this module is " - "using it.\n", fsa->name); - pr_warn("This symbol will go away in the future.\n"); - pr_warn("Please evaluate if this is the right api to use and " - "if it really is, submit a report to the linux kernel " - "mailing list together with submitting your code for " - "inclusion.\n"); - } -#endif - + if (!fsa->gplok && syms->license == GPL_ONLY) + return false; fsa->owner = owner; fsa->crc = symversion(syms->crcs, symnum); fsa->sym = &syms->start[symnum]; @@ -619,31 +509,44 @@ static bool find_exported_symbol_in_section(const struct symsearch *syms, * Find an exported symbol and return it, along with, (optional) crc and * (optional) module which owns it. Needs preempt disabled or module_mutex. 
*/ -static const struct kernel_symbol *find_symbol(const char *name, - struct module **owner, - const s32 **crc, - enum mod_license *license, - bool gplok, - bool warn) -{ - struct find_symbol_arg fsa; - - fsa.name = name; - fsa.gplok = gplok; - fsa.warn = warn; - - if (each_symbol_section(find_exported_symbol_in_section, &fsa)) { - if (owner) - *owner = fsa.owner; - if (crc) - *crc = fsa.crc; - if (license) - *license = fsa.license; - return fsa.sym; +static bool find_symbol(struct find_symbol_arg *fsa) +{ + static const struct symsearch arr[] = { + { __start___ksymtab, __stop___ksymtab, __start___kcrctab, + NOT_GPL_ONLY }, + { __start___ksymtab_gpl, __stop___ksymtab_gpl, + __start___kcrctab_gpl, + GPL_ONLY }, + }; + struct module *mod; + unsigned int i; + + module_assert_mutex_or_preempt(); + + for (i = 0; i < ARRAY_SIZE(arr); i++) + if (find_exported_symbol_in_section(&arr[i], NULL, fsa)) + return true; + + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { + struct symsearch arr[] = { + { mod->syms, mod->syms + mod->num_syms, mod->crcs, + NOT_GPL_ONLY }, + { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, + mod->gpl_crcs, + GPL_ONLY }, + }; + + if (mod->state == MODULE_STATE_UNFORMED) + continue; + + for (i = 0; i < ARRAY_SIZE(arr); i++) + if (find_exported_symbol_in_section(&arr[i], mod, fsa)) + return true; } - pr_debug("Failed to find symbol %s\n", name); - return NULL; + pr_debug("Failed to find symbol %s\n", fsa->name); + return false; } /* @@ -669,10 +572,8 @@ static struct module *find_module_all(const char *name, size_t len, struct module *find_module(const char *name) { - module_assert_mutex(); return find_module_all(name, strlen(name), false); } -EXPORT_SYMBOL_GPL(find_module); #ifdef CONFIG_SMP @@ -1107,12 +1008,15 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod) void __symbol_put(const char *symbol) { - struct module *owner; + struct find_symbol_arg fsa = { + .name = symbol, + .gplok = true, + }; preempt_disable(); - if (!find_symbol(symbol, &owner, NULL, NULL, true, false)) + if (!find_symbol(&fsa)) BUG(); - module_put(owner); + module_put(fsa.owner); preempt_enable(); } EXPORT_SYMBOL(__symbol_put); @@ -1381,19 +1285,22 @@ bad_version: static inline int check_modstruct_version(const struct load_info *info, struct module *mod) { - const s32 *crc; + struct find_symbol_arg fsa = { + .name = "module_layout", + .gplok = true, + }; /* * Since this should be found in kernel (which can't be removed), no * locking is necessary -- use preempt_disable() to placate lockdep. */ preempt_disable(); - if (!find_symbol("module_layout", NULL, &crc, NULL, true, false)) { + if (!find_symbol(&fsa)) { preempt_enable(); BUG(); } preempt_enable(); - return check_version(info, "module_layout", mod, crc); + return check_version(info, "module_layout", mod, fsa.crc); } /* First part is kernel version, which we ignore if module has crcs. 
*/ @@ -1487,10 +1394,11 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, const char *name, char ownername[]) { - struct module *owner; - const struct kernel_symbol *sym; - const s32 *crc; - enum mod_license license; + struct find_symbol_arg fsa = { + .name = name, + .gplok = !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), + .warn = true, + }; int err; /* @@ -1500,42 +1408,40 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod, */ sched_annotate_sleep(); mutex_lock(&module_mutex); - sym = find_symbol(name, &owner, &crc, &license, - !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true); - if (!sym) + if (!find_symbol(&fsa)) goto unlock; - if (license == GPL_ONLY) + if (fsa.license == GPL_ONLY) mod->using_gplonly_symbols = true; - if (!inherit_taint(mod, owner)) { - sym = NULL; + if (!inherit_taint(mod, fsa.owner)) { + fsa.sym = NULL; goto getname; } - if (!check_version(info, name, mod, crc)) { - sym = ERR_PTR(-EINVAL); + if (!check_version(info, name, mod, fsa.crc)) { + fsa.sym = ERR_PTR(-EINVAL); goto getname; } - err = verify_namespace_is_imported(info, sym, mod); + err = verify_namespace_is_imported(info, fsa.sym, mod); if (err) { - sym = ERR_PTR(err); + fsa.sym = ERR_PTR(err); goto getname; } - err = ref_module(mod, owner); + err = ref_module(mod, fsa.owner); if (err) { - sym = ERR_PTR(err); + fsa.sym = ERR_PTR(err); goto getname; } getname: /* We must make copy under the lock if we failed to get ref. */ - strncpy(ownername, module_name(owner), MODULE_NAME_LEN); + strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN); unlock: mutex_unlock(&module_mutex); - return sym; + return fsa.sym; } static const struct kernel_symbol * @@ -2296,16 +2202,19 @@ static void free_module(struct module *mod) void *__symbol_get(const char *symbol) { - struct module *owner; - const struct kernel_symbol *sym; + struct find_symbol_arg fsa = { + .name = symbol, + .gplok = true, + .warn = true, + }; preempt_disable(); - sym = find_symbol(symbol, &owner, NULL, NULL, true, true); - if (sym && strong_try_module_get(owner)) - sym = NULL; + if (!find_symbol(&fsa) || strong_try_module_get(fsa.owner)) { + preempt_enable(); + return NULL; + } preempt_enable(); - - return sym ? 
(void *)kernel_symbol_value(sym) : NULL; + return (void *)kernel_symbol_value(fsa.sym); } EXPORT_SYMBOL_GPL(__symbol_get); @@ -2318,7 +2227,6 @@ EXPORT_SYMBOL_GPL(__symbol_get); static int verify_exported_symbols(struct module *mod) { unsigned int i; - struct module *owner; const struct kernel_symbol *s; struct { const struct kernel_symbol *sym; @@ -2326,21 +2234,19 @@ static int verify_exported_symbols(struct module *mod) } arr[] = { { mod->syms, mod->num_syms }, { mod->gpl_syms, mod->num_gpl_syms }, - { mod->gpl_future_syms, mod->num_gpl_future_syms }, -#ifdef CONFIG_UNUSED_SYMBOLS - { mod->unused_syms, mod->num_unused_syms }, - { mod->unused_gpl_syms, mod->num_unused_gpl_syms }, -#endif }; for (i = 0; i < ARRAY_SIZE(arr); i++) { for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { - if (find_symbol(kernel_symbol_name(s), &owner, NULL, - NULL, true, false)) { + struct find_symbol_arg fsa = { + .name = kernel_symbol_name(s), + .gplok = true, + }; + if (find_symbol(&fsa)) { pr_err("%s: exports duplicate symbol %s" " (owned by %s)\n", mod->name, kernel_symbol_name(s), - module_name(owner)); + module_name(fsa.owner)); return -ENOEXEC; } } @@ -2348,6 +2254,21 @@ static int verify_exported_symbols(struct module *mod) return 0; } +static bool ignore_undef_symbol(Elf_Half emachine, const char *name) +{ + /* + * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as + * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64. + * i386 has a similar problem but may not deserve a fix. + * + * If we ever have to ignore many symbols, consider refactoring the code to + * only warn if referenced by a relocation. + */ + if (emachine == EM_386 || emachine == EM_X86_64) + return !strcmp(name, "_GLOBAL_OFFSET_TABLE_"); + return false; +} + /* Change all symbols so that st_value encodes the pointer directly. */ static int simplify_symbols(struct module *mod, const struct load_info *info) { @@ -2395,8 +2316,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info) break; } - /* Ok if weak. */ - if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK) + /* Ok if weak or ignored. */ + if (!ksym && + (ELF_ST_BIND(sym[i].st_info) == STB_WEAK || + ignore_undef_symbol(info->hdr->e_machine, name))) break; ret = PTR_ERR(ksym) ?: -ENOENT; @@ -2964,7 +2887,7 @@ static int module_sig_check(struct load_info *info, int flags) } if (is_module_sig_enforced()) { - pr_notice("%s: loading of %s is rejected\n", info->name, reason); + pr_notice("Loading of %s is rejected\n", reason); return -EKEYREJECTED; } @@ -2977,9 +2900,33 @@ static int module_sig_check(struct load_info *info, int flags) } #endif /* !CONFIG_MODULE_SIG */ -/* Sanity checks against invalid binaries, wrong arch, weird elf version. */ -static int elf_header_check(struct load_info *info) +static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) +{ + unsigned long secend; + + /* + * Check for both overflow and offset/size being + * too large. + */ + secend = shdr->sh_offset + shdr->sh_size; + if (secend < shdr->sh_offset || secend > info->len) + return -ENOEXEC; + + return 0; +} + +/* + * Sanity checks against invalid binaries, wrong arch, weird elf version. + * + * Also do basic validity checks against section offsets and sizes, the + * section name string table, and the indices used for it (sh_name). 
+ */ +static int elf_validity_check(struct load_info *info) { + unsigned int i; + Elf_Shdr *shdr, *strhdr; + int err; + if (info->len < sizeof(*(info->hdr))) return -ENOEXEC; @@ -2989,11 +2936,78 @@ static int elf_header_check(struct load_info *info) || info->hdr->e_shentsize != sizeof(Elf_Shdr)) return -ENOEXEC; + /* + * e_shnum is 16 bits, and sizeof(Elf_Shdr) is + * known and small. So e_shnum * sizeof(Elf_Shdr) + * will not overflow unsigned long on any platform. + */ if (info->hdr->e_shoff >= info->len || (info->hdr->e_shnum * sizeof(Elf_Shdr) > info->len - info->hdr->e_shoff)) return -ENOEXEC; + info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; + + /* + * Verify if the section name table index is valid. + */ + if (info->hdr->e_shstrndx == SHN_UNDEF + || info->hdr->e_shstrndx >= info->hdr->e_shnum) + return -ENOEXEC; + + strhdr = &info->sechdrs[info->hdr->e_shstrndx]; + err = validate_section_offset(info, strhdr); + if (err < 0) + return err; + + /* + * The section name table must be NUL-terminated, as required + * by the spec. This makes strcmp and pr_* calls that access + * strings in the section safe. + */ + info->secstrings = (void *)info->hdr + strhdr->sh_offset; + if (info->secstrings[strhdr->sh_size - 1] != '\0') + return -ENOEXEC; + + /* + * The code assumes that section 0 has a length of zero and + * an addr of zero, so check for it. + */ + if (info->sechdrs[0].sh_type != SHT_NULL + || info->sechdrs[0].sh_size != 0 + || info->sechdrs[0].sh_addr != 0) + return -ENOEXEC; + + for (i = 1; i < info->hdr->e_shnum; i++) { + shdr = &info->sechdrs[i]; + switch (shdr->sh_type) { + case SHT_NULL: + case SHT_NOBITS: + continue; + case SHT_SYMTAB: + if (shdr->sh_link == SHN_UNDEF + || shdr->sh_link >= info->hdr->e_shnum) + return -ENOEXEC; + fallthrough; + default: + err = validate_section_offset(info, shdr); + if (err < 0) { + pr_err("Invalid ELF section in module (section %u type %u)\n", + i, shdr->sh_type); + return err; + } + + if (shdr->sh_flags & SHF_ALLOC) { + if (shdr->sh_name >= strhdr->sh_size) { + pr_err("Invalid ELF section name in module (section %u type %u)\n", + i, shdr->sh_type); + return -ENOEXEC; + } + } + break; + } + } + return 0; } @@ -3095,11 +3109,6 @@ static int rewrite_section_headers(struct load_info *info, int flags) for (i = 1; i < info->hdr->e_shnum; i++) { Elf_Shdr *shdr = &info->sechdrs[i]; - if (shdr->sh_type != SHT_NOBITS - && info->len < shdr->sh_offset + shdr->sh_size) { - pr_err("Module len %lu truncated\n", info->len); - return -ENOEXEC; - } /* * Mark all sections sh_addr with their address in the @@ -3133,11 +3142,6 @@ static int setup_load_info(struct load_info *info, int flags) { unsigned int i; - /* Set up the convenience variables */ - info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; - info->secstrings = (void *)info->hdr - + info->sechdrs[info->hdr->e_shstrndx].sh_offset; - /* Try to find a name early so we can log errors with a module name */ info->index.info = find_sec(info, ".modinfo"); if (info->index.info) @@ -3241,22 +3245,7 @@ static int find_module_sections(struct module *mod, struct load_info *info) sizeof(*mod->gpl_syms), &mod->num_gpl_syms); mod->gpl_crcs = section_addr(info, "__kcrctab_gpl"); - mod->gpl_future_syms = section_objs(info, - "__ksymtab_gpl_future", - sizeof(*mod->gpl_future_syms), - &mod->num_gpl_future_syms); - mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future"); - -#ifdef CONFIG_UNUSED_SYMBOLS - mod->unused_syms = section_objs(info, "__ksymtab_unused", - sizeof(*mod->unused_syms), - 
&mod->num_unused_syms); - mod->unused_crcs = section_addr(info, "__kcrctab_unused"); - mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl", - sizeof(*mod->unused_gpl_syms), - &mod->num_unused_gpl_syms); - mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl"); -#endif + #ifdef CONFIG_CONSTRUCTORS mod->ctors = section_objs(info, ".ctors", sizeof(*mod->ctors), &mod->num_ctors); @@ -3437,14 +3426,8 @@ static int check_module_license_and_versions(struct module *mod) pr_warn("%s: module license taints kernel.\n", mod->name); #ifdef CONFIG_MODVERSIONS - if ((mod->num_syms && !mod->crcs) - || (mod->num_gpl_syms && !mod->gpl_crcs) - || (mod->num_gpl_future_syms && !mod->gpl_future_crcs) -#ifdef CONFIG_UNUSED_SYMBOLS - || (mod->num_unused_syms && !mod->unused_crcs) - || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs) -#endif - ) { + if ((mod->num_syms && !mod->crcs) || + (mod->num_gpl_syms && !mod->gpl_crcs)) { return try_to_force_load(mod, "no versions for exported symbols"); } @@ -3894,26 +3877,50 @@ static int load_module(struct load_info *info, const char __user *uargs, long err = 0; char *after_dashes; - err = elf_header_check(info); + /* + * Do the signature check (if any) first. All that + * the signature check needs is info->len, it does + * not need any of the section info. That can be + * set up later. This will minimize the chances + * of a corrupt module causing problems before + * we even get to the signature check. + * + * The check will also adjust info->len by stripping + * off the sig length at the end of the module, making + * checks against info->len more correct. + */ + err = module_sig_check(info, flags); + if (err) + goto free_copy; + + /* + * Do basic sanity checks against the ELF header and + * sections. + */ + err = elf_validity_check(info); if (err) { - pr_err("Module has invalid ELF header\n"); + pr_err("Module has invalid ELF structures\n"); goto free_copy; } + /* + * Everything checks out, so set up the section info + * in the info structure. + */ err = setup_load_info(info, flags); if (err) goto free_copy; + /* + * Now that we know we have the correct module name, check + * if it's blacklisted. 
+ */ if (blacklisted(info->name)) { err = -EPERM; pr_err("Module %s is blacklisted\n", info->name); goto free_copy; } - err = module_sig_check(info, flags); - if (err) - goto free_copy; - err = rewrite_section_headers(info, flags); if (err) goto free_copy; @@ -4374,16 +4381,16 @@ unsigned long module_kallsyms_lookup_name(const char *name) return ret; } +#ifdef CONFIG_LIVEPATCH int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, unsigned long), void *data) { struct module *mod; unsigned int i; - int ret; - - module_assert_mutex(); + int ret = 0; + mutex_lock(&module_mutex); list_for_each_entry(mod, &modules, list) { /* We hold module_mutex: no need for rcu_dereference_sched */ struct mod_kallsyms *kallsyms = mod->kallsyms; @@ -4399,11 +4406,13 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, ret = fn(data, kallsyms_symbol_name(kallsyms, i), mod, kallsyms_symbol_value(sym)); if (ret != 0) - return ret; + break; } } - return 0; + mutex_unlock(&module_mutex); + return ret; } +#endif /* CONFIG_LIVEPATCH */ #endif /* CONFIG_KALLSYMS */ /* Maximum number of characters written by module_flags() */ diff --git a/kernel/module_signature.c b/kernel/module_signature.c index 4224a1086b7d..00132d12487c 100644 --- a/kernel/module_signature.c +++ b/kernel/module_signature.c @@ -25,7 +25,7 @@ int mod_check_sig(const struct module_signature *ms, size_t file_len, return -EBADMSG; if (ms->id_type != PKEY_ID_PKCS7) { - pr_err("%s: Module is not signed with expected PKCS#7 message\n", + pr_err("%s: not signed with expected PKCS#7 message\n", name); return -ENOPKG; } diff --git a/kernel/module_signing.c b/kernel/module_signing.c index 9d9fc678c91d..8723ae70ea1f 100644 --- a/kernel/module_signing.c +++ b/kernel/module_signing.c @@ -30,7 +30,7 @@ int mod_verify_sig(const void *mod, struct load_info *info) memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms)); - ret = mod_check_sig(&ms, modlen, info->name); + ret = mod_check_sig(&ms, modlen, "module"); if (ret) return ret; diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 61db50f7ca86..821cf1723814 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -375,7 +375,7 @@ static int ptrace_attach(struct task_struct *task, long request, audit_ptrace(task); retval = -EPERM; - if (unlikely(task->flags & PF_KTHREAD)) + if (unlikely(task->flags & (PF_KTHREAD | PF_IO_WORKER))) goto out; if (same_thread_group(task, current)) goto out; diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 41e498b0008a..50cbad89f7fa 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -26,7 +26,7 @@ struct sugov_policy { struct sugov_tunables *tunables; struct list_head tunables_hook; - raw_spinlock_t update_lock; /* For shared policies */ + raw_spinlock_t update_lock; u64 last_freq_update_time; s64 freq_update_delay_ns; unsigned int next_freq; @@ -320,23 +320,21 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } * Make sugov_should_update_freq() ignore the rate limit when DL * has increased the utilization. 
*/ -static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy) +static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu) { if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) - sg_policy->limits_changed = true; + sg_cpu->sg_policy->limits_changed = true; } static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu, u64 time, unsigned int flags) { - struct sugov_policy *sg_policy = sg_cpu->sg_policy; - sugov_iowait_boost(sg_cpu, time, flags); sg_cpu->last_update = time; - ignore_dl_rate_limit(sg_cpu, sg_policy); + ignore_dl_rate_limit(sg_cpu); - if (!sugov_should_update_freq(sg_policy, time)) + if (!sugov_should_update_freq(sg_cpu->sg_policy, time)) return false; sugov_get_util(sg_cpu); @@ -451,7 +449,7 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags) sugov_iowait_boost(sg_cpu, time, flags); sg_cpu->last_update = time; - ignore_dl_rate_limit(sg_cpu, sg_policy); + ignore_dl_rate_limit(sg_cpu); if (sugov_should_update_freq(sg_policy, time)) { next_f = sugov_next_freq_shared(sg_cpu, time); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8a8bd7b13634..794c2cb945f8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5126,7 +5126,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) /* * When a group wakes up we want to make sure that its quota is not already * expired/exceeded, otherwise it may be allowed to steal additional ticks of - * runtime as update_curr() throttling can not not trigger until it's on-rq. + * runtime as update_curr() throttling can not trigger until it's on-rq. */ static void check_enqueue_throttle(struct cfs_rq *cfs_rq) { diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c index 08ae45ad9261..acdae625c636 100644 --- a/kernel/sched/membarrier.c +++ b/kernel/sched/membarrier.c @@ -454,7 +454,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm) /* * For each cpu runqueue, if the task's mm match @mm, ensure that all - * @mm's membarrier state set bits are also set in in the runqueue's + * @mm's membarrier state set bits are also set in the runqueue's * membarrier state. This ensures that a runqueue scheduling * between threads which are users of @mm has its membarrier state * updated. diff --git a/kernel/signal.c b/kernel/signal.c index 5ad8566534e7..ba4d1ef39a9e 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -91,7 +91,7 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force) return true; /* Only allow kernel generated signals to this kthread */ - if (unlikely((t->flags & PF_KTHREAD) && + if (unlikely((t->flags & (PF_KTHREAD | PF_IO_WORKER)) && (handler == SIG_KTHREAD_KERNEL) && !force)) return true; @@ -1096,7 +1096,7 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc /* * Skip useless siginfo allocation for SIGKILL and kernel threads. 
 	 */
-	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
+	if ((sig == SIGKILL) || (t->flags & (PF_KTHREAD | PF_IO_WORKER)))
 		goto out_set;
 
 	/*
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 9d71046ea247..9908ec4a9bfe 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -26,6 +26,8 @@
 #include <linux/tick.h>
 #include <linux/irq.h>
 
+#include <asm/softirq_stack.h>
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/irq.h>
diff --git a/kernel/sys.c b/kernel/sys.c
index 6928d23c46ea..b09fe21e88ff 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1242,7 +1242,7 @@ static int override_release(char __user *release, size_t len)
 			break;
 		rest++;
 	}
-	v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
+	v = LINUX_VERSION_PATCHLEVEL + 60;
 	copy = clamp_t(size_t, len, 1, sizeof(buf));
 	copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
 	ret = copy_to_user(release, buf, copy + 1);
@@ -1847,7 +1847,7 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
 	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
 		goto exit;
 
-	err = inode_permission(inode, MAY_EXEC);
+	err = file_permission(exe.file, MAY_EXEC);
 	if (err)
 		goto exit;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c9fbdd848138..62fbd09b5dc1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2962,7 +2962,7 @@ static struct ctl_table vm_table[] = {
 		.data		= &block_dump,
 		.maxlen		= sizeof(block_dump),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= SYSCTL_ZERO,
 	},
 	{
@@ -2970,7 +2970,7 @@ static struct ctl_table vm_table[] = {
 		.data		= &sysctl_vfs_cache_pressure,
 		.maxlen		= sizeof(sysctl_vfs_cache_pressure),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= SYSCTL_ZERO,
 	},
 #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \
@@ -2980,7 +2980,7 @@ static struct ctl_table vm_table[] = {
 		.data		= &sysctl_legacy_va_layout,
 		.maxlen		= sizeof(sysctl_legacy_va_layout),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= SYSCTL_ZERO,
 	},
 #endif
@@ -2990,7 +2990,7 @@ static struct ctl_table vm_table[] = {
 		.data		= &node_reclaim_mode,
 		.maxlen		= sizeof(node_reclaim_mode),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= SYSCTL_ZERO,
 	},
 	{
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 799dbcfe65ad..7fa82778c3e6 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -60,6 +60,11 @@ config HAVE_NOP_MCOUNT
 	help
 	  Arch supports the gcc options -pg with -mrecord-mcount and -nop-mcount
 
+config HAVE_OBJTOOL_MCOUNT
+	bool
+	help
+	  Arch supports objtool --mcount
+
 config HAVE_C_RECORDMCOUNT
 	bool
 	help
@@ -602,6 +607,30 @@ config FTRACE_MCOUNT_RECORD
 	depends on DYNAMIC_FTRACE
 	depends on HAVE_FTRACE_MCOUNT_RECORD
 
+config FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
+	bool
+	depends on FTRACE_MCOUNT_RECORD
+
+config FTRACE_MCOUNT_USE_CC
+	def_bool y
+	depends on $(cc-option,-mrecord-mcount)
+	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
+	depends on FTRACE_MCOUNT_RECORD
+
+config FTRACE_MCOUNT_USE_OBJTOOL
+	def_bool y
+	depends on HAVE_OBJTOOL_MCOUNT
+	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
+	depends on !FTRACE_MCOUNT_USE_CC
+	depends on FTRACE_MCOUNT_RECORD
+
+config FTRACE_MCOUNT_USE_RECORDMCOUNT
+	def_bool y
+	depends on !FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY
+	depends on !FTRACE_MCOUNT_USE_CC
+	depends on !FTRACE_MCOUNT_USE_OBJTOOL
+	depends on FTRACE_MCOUNT_RECORD
+
 config TRACING_MAP
 	bool
 	depends on ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -665,7 +694,7 @@ config TRACEPOINT_BENCHMARK
 	help
 	 This option creates the tracepoint "benchmark:benchmark_event".
 	 When the tracepoint is enabled, it kicks off a kernel thread that
-	 goes into an infinite loop (calling cond_sched() to let other tasks
+	 goes into an infinite loop (calling cond_resched() to let other tasks
 	 run), and calls the tracepoint. Each iteration will record the time
 	 it took to write to the tracepoint and the next iteration that data
 	 will be passed to the tracepoint itself. That is, the tracepoint
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 7e44cea89fdc..b28d3e5013cd 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -81,6 +81,7 @@ obj-$(CONFIG_SYNTH_EVENTS) += trace_events_synth.o
 obj-$(CONFIG_HIST_TRIGGERS) += trace_events_hist.o
 obj-$(CONFIG_BPF_EVENTS) += bpf_trace.o
 obj-$(CONFIG_KPROBE_EVENTS) += trace_kprobe.o
+obj-$(CONFIG_TRACEPOINTS) += error_report-traces.o
 obj-$(CONFIG_TRACEPOINTS) += power-traces.o
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index c286c13bd31a..c221e4c3f625 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -312,8 +312,6 @@ record_it:
 
 static void blk_trace_free(struct blk_trace *bt)
 {
-	debugfs_remove(bt->msg_file);
-	debugfs_remove(bt->dropped_file);
 	relay_close(bt->rchan);
 	debugfs_remove(bt->dir);
 	free_percpu(bt->sequence);
@@ -545,10 +543,8 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	INIT_LIST_HEAD(&bt->running_list);
 
 	ret = -EIO;
-	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
-					       &blk_dropped_fops);
-
-	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
+	debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
+	debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
 
 	bt->rchan = relay_open("trace", dir, buts->buf_size,
 				buts->buf_nr, &blk_relay_callbacks, bt);
@@ -1868,7 +1864,17 @@ void blk_trace_remove_sysfs(struct device *dev)
 
 #ifdef CONFIG_EVENT_TRACING
 
-void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
+/**
+ * blk_fill_rwbs - Fill the buffer rwbs by mapping op to character string.
+ * @rwbs:	buffer to be filled
+ * @op:		REQ_OP_XXX for the tracepoint
+ *
+ * Description:
+ *     Maps the REQ_OP_XXX to character and fills the buffer provided by the
+ *     caller with resulting string.
+ *
+ **/
+void blk_fill_rwbs(char *rwbs, unsigned int op)
 {
 	int i = 0;
diff --git a/kernel/trace/error_report-traces.c b/kernel/trace/error_report-traces.c
new file mode 100644
index 000000000000..f89792c25b11
--- /dev/null
+++ b/kernel/trace/error_report-traces.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Error reporting trace points.
+ *
+ * Copyright (C) 2021, Google LLC.
+ */
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/error_report.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(error_report_end);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b9dad3500041..68744c51517e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2815,6 +2815,17 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 		return 0;
 
 	/*
+	 * It's possible that the event time delta is zero
+	 * (has the same time stamp as the previous event)
+	 * in which case write_stamp and before_stamp could
+	 * be the same. In such a case, force before_stamp
+	 * to be different than write_stamp. It doesn't
+	 * matter what it is, as long as its different.
+	 */
+	if (!delta)
+		rb_time_set(&cpu_buffer->before_stamp, 0);
+
+	/*
 	 * If an event were to come in now, it would see that the
 	 * write_stamp and the before_stamp are different, and assume
 	 * that this event just added itself before updating
@@ -3307,9 +3318,13 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
 			goto out;
 	}
 	atomic_inc(&cpu_buffer->record_disabled);
-	pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld after:%lld\n",
-		cpu_buffer->cpu,
-		ts + info->delta, info->ts, info->delta, info->after);
+	/* There's some cases in boot up that this can happen */
+	WARN_ON_ONCE(system_state != SYSTEM_BOOTING);
+	pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n",
+		cpu_buffer->cpu,
+		ts + info->delta, info->ts, info->delta,
+		info->before, info->after,
+		full ? " (full)" : "");
 	dump_buffer_page(bpage, info, tail);
 	atomic_dec(&ts_dump);
 	/* Do not re-enable checking */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e295c413580e..eccb4e1187cc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1929,6 +1929,12 @@ static int run_tracer_selftest(struct tracer *type)
 	if (!selftests_can_run)
 		return save_selftest(type);
 
+	if (!tracing_is_on()) {
+		pr_warn("Selftest for tracer %s skipped due to tracing disabled\n",
+			type->name);
+		return 0;
+	}
+
 	/*
 	 * Run a selftest on this tracer.
 	 * Here we reset the trace buffer, and set the current
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index dec13ff66077..a6446c03cfbc 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -605,7 +605,6 @@ void trace_graph_function(struct trace_array *tr,
 void trace_latency_header(struct seq_file *m);
 void trace_default_header(struct seq_file *m);
 void print_trace_header(struct seq_file *m, struct trace_iterator *iter);
-int trace_empty(struct trace_iterator *iter);
 
 void trace_graph_return(struct ftrace_graph_ret *trace);
 int trace_graph_entry(struct ftrace_graph_ent *trace);
diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c
index 2979a96595b4..8d71e6c83f10 100644
--- a/kernel/trace/trace_events_synth.c
+++ b/kernel/trace/trace_events_synth.c
@@ -1225,8 +1225,10 @@ static int __create_synth_event(const char *name, const char *raw_fields)
 			goto err;
 		}
 
-		if (!argc)
+		if (!argc) {
+			argv_free(argv);
 			continue;
+		}
 
 		n_fields_this_loop = 0;
 		consumed = 0;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8a1cb0878cbc..6fe770d86dc3 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -124,9 +124,9 @@ static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
 	if (!p)
 		return true;
 	*p = '\0';
-	mutex_lock(&module_mutex);
+	rcu_read_lock_sched();
 	ret = !!find_module(tk->symbol);
-	mutex_unlock(&module_mutex);
+	rcu_read_unlock_sched();
 	*p = ':';
 
 	return ret;
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c
index 0ef8f65bd2d7..9c9eb20dd2c5 100644
--- a/kernel/watch_queue.c
+++ b/kernel/watch_queue.c
@@ -413,7 +413,7 @@ static void put_watch(struct watch *watch)
 }
 
 /**
- * init_watch_queue - Initialise a watch
+ * init_watch - Initialise a watch
 * @watch: The watch to initialise.
 * @wqueue: The queue to assign.
 *