31 files changed, 185 insertions, 154 deletions
@@ -691,6 +691,8 @@ Sachin Mokashi <sachin.mokashi@intel.com> <sachinx.mokashi@intel.com>
 Sachin P Sant <ssant@in.ibm.com>
 Sai Prakash Ranjan <quic_saipraka@quicinc.com> <saiprakash.ranjan@codeaurora.org>
 Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
+Sam Protsenko <semen.protsenko@linaro.org>
+Sam Protsenko <semen.protsenko@linaro.org> <semen.protsenko@globallogic.com>
 Sam Ravnborg <sam@mars.ravnborg.org>
 Sankeerth Billakanti <quic_sbillaka@quicinc.com> <sbillaka@codeaurora.org>
 Santosh Shilimkar <santosh.shilimkar@oracle.org>
diff --git a/MAINTAINERS b/MAINTAINERS
index 6df89b14b521..047d242faf68 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13799,6 +13799,7 @@ F:	Documentation/admin-guide/mm/kho.rst
 F:	Documentation/core-api/kho/*
 F:	include/linux/kexec_handover.h
 F:	kernel/kexec_handover.c
+F:	lib/test_kho.c
 F:	tools/testing/selftests/kho/
 
 KEYS-ENCRYPTED
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index c022c1acb8c7..f1cb2447afc9 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -351,16 +351,6 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 			 * as long as we take care not to create a writable
 			 * mapping for executable code.
 			 */
-			fallthrough;
-
-		case EFI_ACPI_MEMORY_NVS:
-			/*
-			 * ACPI NVS marks an area reserved for use by the
-			 * firmware, even after exiting the boot service.
-			 * This may be used by the firmware for sharing dynamic
-			 * tables/data (e.g., ACPI CCEL) with the OS. Map it
-			 * as read-only.
-			 */
 			prot = PAGE_KERNEL_RO;
 			break;
 
diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
index c7d70d04c164..80a580e019c5 100644
--- a/arch/arm64/kernel/proton-pack.c
+++ b/arch/arm64/kernel/proton-pack.c
@@ -1032,6 +1032,8 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
 
 	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
 		/* No point mitigating Spectre-BHB alone. */
+	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
+		/* Do nothing */
 	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
 		state = SPECTRE_MITIGATED;
 		set_bit(BHB_HW, &system_bhb_mitigations);
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index fd995a1d3d24..8cc8af8fd408 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -430,10 +430,10 @@ static int __init gtdt_platform_timer_init(void)
 			continue;
 
 		pdev = platform_device_register_data(NULL, "gtdt-arm-mmio-timer",
-						     gwdt_count, &atm,
+						     mmio_timer_count, &atm,
 						     sizeof(atm));
 		if (IS_ERR(pdev)) {
-			pr_err("Can't register timer %d\n", gwdt_count);
+			pr_err("Can't register timer %d\n", mmio_timer_count);
 			continue;
 		}
 
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 5d824435b26b..7644de24d2fa 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -166,7 +166,8 @@ static int __acpi_processor_start(struct acpi_device *device)
 	if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
 		dev_dbg(&device->dev, "CPPC data invalid or not present\n");
 
-	acpi_processor_power_init(pr);
+	if (cpuidle_get_driver() == &acpi_idle_driver)
+		acpi_processor_power_init(pr);
 
 	acpi_pss_perf_init(pr);
 
@@ -262,8 +263,6 @@ static int __init acpi_processor_driver_init(void)
 	if (result < 0)
 		return result;
 
-	acpi_processor_register_idle_driver();
-
 	result = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 				   "acpi/cpu-drv:online",
 				   acpi_soft_cpu_online, NULL);
@@ -302,7 +301,6 @@ static void __exit acpi_processor_driver_exit(void)
 	cpuhp_remove_state_nocalls(hp_online);
 	cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
 
-	acpi_processor_unregister_idle_driver();
 	driver_unregister(&acpi_processor_driver);
 }
 
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 22b051b94a86..4166090db642 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -51,7 +51,7 @@ module_param(latency_factor, uint, 0644);
 
 static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
 
-static struct cpuidle_driver acpi_idle_driver = {
+struct cpuidle_driver acpi_idle_driver = {
 	.name = "acpi_idle",
 	.owner = THIS_MODULE,
 };
@@ -1357,102 +1357,79 @@ int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
 	return 0;
 }
 
-void acpi_processor_register_idle_driver(void)
-{
-	struct acpi_processor *pr;
-	int ret = -ENODEV;
-	int cpu;
-
-	/*
-	 * Acpi idle driver is used by all possible CPUs.
-	 * Install the idle handler by the processor power info of one in them.
-	 * Note that we use previously set idle handler will be used on
-	 * platforms that only support C1.
-	 */
-	for_each_cpu(cpu, (struct cpumask *)cpu_possible_mask) {
-		pr = per_cpu(processors, cpu);
-		if (!pr)
-			continue;
-
-		ret = acpi_processor_get_power_info(pr);
-		if (!ret) {
-			pr->flags.power_setup_done = 1;
-			acpi_processor_setup_cpuidle_states(pr);
-			break;
-		}
-	}
-
-	if (ret) {
-		pr_debug("No ACPI power information from any CPUs.\n");
-		return;
-	}
+static int acpi_processor_registered;
 
-	ret = cpuidle_register_driver(&acpi_idle_driver);
-	if (ret) {
-		pr_debug("register %s failed.\n", acpi_idle_driver.name);
-		return;
-	}
-	pr_debug("%s registered with cpuidle.\n", acpi_idle_driver.name);
-}
-
-void acpi_processor_unregister_idle_driver(void)
-{
-	cpuidle_unregister_driver(&acpi_idle_driver);
-}
-
-void acpi_processor_power_init(struct acpi_processor *pr)
+int acpi_processor_power_init(struct acpi_processor *pr)
 {
+	int retval;
 	struct cpuidle_device *dev;
 
-	/*
-	 * The code below only works if the current cpuidle driver is the ACPI
-	 * idle driver.
-	 */
-	if (cpuidle_get_driver() != &acpi_idle_driver)
-		return;
-
 	if (disabled_by_idle_boot_param())
-		return;
+		return 0;
 
 	acpi_processor_cstate_first_run_checks();
 
 	if (!acpi_processor_get_power_info(pr))
 		pr->flags.power_setup_done = 1;
 
-	if (!pr->flags.power)
-		return;
-
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (!dev)
-		return;
+	/*
+	 * Install the idle handler if processor power management is supported.
+	 * Note that we use previously set idle handler will be used on
+	 * platforms that only support C1.
+	 */
+	if (pr->flags.power) {
+		/* Register acpi_idle_driver if not already registered */
+		if (!acpi_processor_registered) {
+			acpi_processor_setup_cpuidle_states(pr);
+			retval = cpuidle_register_driver(&acpi_idle_driver);
+			if (retval)
+				return retval;
+			pr_debug("%s registered with cpuidle\n",
+				 acpi_idle_driver.name);
+		}
 
-	per_cpu(acpi_cpuidle_device, pr->id) = dev;
+		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+		if (!dev)
+			return -ENOMEM;
+		per_cpu(acpi_cpuidle_device, pr->id) = dev;
 
-	acpi_processor_setup_cpuidle_dev(pr, dev);
+		acpi_processor_setup_cpuidle_dev(pr, dev);
 
-	/*
-	 * Register a cpuidle device for this CPU. The cpuidle driver using
-	 * this device is expected to be registered.
-	 */
-	if (cpuidle_register_device(dev)) {
-		per_cpu(acpi_cpuidle_device, pr->id) = NULL;
-		kfree(dev);
+		/* Register per-cpu cpuidle_device. Cpuidle driver
+		 * must already be registered before registering device
+		 */
+		retval = cpuidle_register_device(dev);
+		if (retval) {
+			if (acpi_processor_registered == 0)
+				cpuidle_unregister_driver(&acpi_idle_driver);
+
+			per_cpu(acpi_cpuidle_device, pr->id) = NULL;
+			kfree(dev);
+			return retval;
+		}
+		acpi_processor_registered++;
 	}
+	return 0;
 }
 
-void acpi_processor_power_exit(struct acpi_processor *pr)
+int acpi_processor_power_exit(struct acpi_processor *pr)
 {
 	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
 
 	if (disabled_by_idle_boot_param())
-		return;
+		return 0;
 
 	if (pr->flags.power) {
 		cpuidle_unregister_device(dev);
+		acpi_processor_registered--;
+		if (acpi_processor_registered == 0)
+			cpuidle_unregister_driver(&acpi_idle_driver);
+
 		kfree(dev);
 	}
 
 	pr->flags.power_setup_done = 0;
+	return 0;
 }
 
 MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");
diff --git a/drivers/iommu/iommufd/driver.c b/drivers/iommu/iommufd/driver.c
index 6f1010da221c..21d4a35538f6 100644
--- a/drivers/iommu/iommufd/driver.c
+++ b/drivers/iommu/iommufd/driver.c
@@ -161,8 +161,8 @@ int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
 		vevent = &veventq->lost_events_header;
 		goto out_set_header;
 	}
-	memcpy(vevent->event_data, event_data, data_len);
 	vevent->data_len = data_len;
+	memcpy(vevent->event_data, event_data, data_len);
 	veventq->num_events++;
 
 out_set_header:
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 627f9b78483a..85d0843ed07b 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -614,7 +614,6 @@ struct iommufd_veventq {
 	struct iommufd_eventq common;
 	struct iommufd_viommu *viommu;
 	struct list_head node; /* for iommufd_viommu::veventqs */
-	struct iommufd_vevent lost_events_header;
 
 	enum iommu_veventq_type type;
 	unsigned int depth;
@@ -622,6 +621,9 @@
 	/* Use common.lock for protection */
 	u32 num_events;
 	u32 sequence;
+
+	/* Must be last as it ends in a flexible-array member. */
+	struct iommufd_vevent lost_events_header;
 };
 
 static inline struct iommufd_veventq *
diff --git a/drivers/md/dm-pcache/Makefile b/drivers/md/dm-pcache/Makefile
index 86776e4acad2..cedfd38854f6 100644
--- a/drivers/md/dm-pcache/Makefile
+++ b/drivers/md/dm-pcache/Makefile
@@ -1,3 +1,3 @@
 dm-pcache-y := dm_pcache.o cache_dev.o segment.o backing_dev.o cache.o cache_gc.o cache_writeback.o cache_segment.o cache_key.o cache_req.o
 
-obj-m += dm-pcache.o
+obj-$(CONFIG_DM_PCACHE) += dm-pcache.o
diff --git a/drivers/md/dm-pcache/cache.c b/drivers/md/dm-pcache/cache.c
index d8e92367d947..698697a7a73c 100644
--- a/drivers/md/dm-pcache/cache.c
+++ b/drivers/md/dm-pcache/cache.c
@@ -181,7 +181,7 @@ static void cache_info_init_default(struct pcache_cache *cache)
 {
 	struct pcache_cache_info *cache_info = &cache->cache_info;
 
-	cache_info->header.seq = 0;
+	memset(cache_info, 0, sizeof(*cache_info));
 	cache_info->n_segs = cache->cache_dev->seg_num;
 	cache_info_set_gc_percent(cache_info, PCACHE_CACHE_GC_PERCENT_DEFAULT);
 }
@@ -411,7 +411,7 @@ void pcache_cache_stop(struct dm_pcache *pcache)
 {
 	struct pcache_cache *cache = &pcache->cache;
 
-	cache_flush(cache);
+	pcache_cache_flush(cache);
 
 	cancel_delayed_work_sync(&cache->gc_work);
 	flush_work(&cache->clean_work);
diff --git a/drivers/md/dm-pcache/cache.h b/drivers/md/dm-pcache/cache.h
index 1136d86958c8..27613b56be54 100644
--- a/drivers/md/dm-pcache/cache.h
+++ b/drivers/md/dm-pcache/cache.h
@@ -339,7 +339,7 @@ void cache_seg_put(struct pcache_cache_segment *cache_seg);
 void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id);
 
 /* cache request*/
-int cache_flush(struct pcache_cache *cache);
+int pcache_cache_flush(struct pcache_cache *cache);
 void miss_read_end_work_fn(struct work_struct *work);
 int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *pcache_req);
diff --git a/drivers/md/dm-pcache/cache_req.c b/drivers/md/dm-pcache/cache_req.c
index 27f94c1fa968..7854a30e07b7 100644
--- a/drivers/md/dm-pcache/cache_req.c
+++ b/drivers/md/dm-pcache/cache_req.c
@@ -790,7 +790,7 @@ err:
 }
 
 /**
- * cache_flush - Flush all ksets to persist any pending cache data
+ * pcache_cache_flush - Flush all ksets to persist any pending cache data
 * @cache: Pointer to the cache structure
 *
 * This function iterates through all ksets associated with the provided `cache`
@@ -802,7 +802,7 @@ err:
 * the respective error code, preventing the flush operation from proceeding to
 * subsequent ksets.
 */
-int cache_flush(struct pcache_cache *cache)
+int pcache_cache_flush(struct pcache_cache *cache)
 {
 	struct pcache_cache_kset *kset;
 	int ret;
@@ -827,7 +827,7 @@ int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *p
 	struct bio *bio = pcache_req->bio;
 
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
-		return cache_flush(cache);
+		return pcache_cache_flush(cache);
 
 	if (bio_data_dir(bio) == READ)
 		return cache_read(cache, pcache_req);
diff --git a/drivers/md/dm-pcache/pcache_internal.h b/drivers/md/dm-pcache/pcache_internal.h
index d427e534727c..b7a3319d2bd3 100644
--- a/drivers/md/dm-pcache/pcache_internal.h
+++ b/drivers/md/dm-pcache/pcache_internal.h
@@ -99,7 +99,7 @@ static inline void __must_check *pcache_meta_find_latest(struct pcache_meta_head
 		/* Update latest if a more recent sequence is found */
 		if (!latest || pcache_meta_seq_after(meta->seq, seq_latest)) {
 			seq_latest = meta->seq;
-			latest = (void *)header + (i * meta_max_size);
+			latest = meta_addr;
 		}
 	}
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index d382a390d39a..72047b47a7a0 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -320,11 +320,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
 		if (fio->bufs[n])
 			continue;
 
-		fio->bufs[n] = mempool_alloc(&v->fec->prealloc_pool, GFP_NOWAIT);
-		if (unlikely(!fio->bufs[n])) {
-			DMERR("failed to allocate FEC buffer");
-			return -ENOMEM;
-		}
+		fio->bufs[n] = mempool_alloc(&v->fec->prealloc_pool, GFP_NOIO);
 	}
 
 	/* try to allocate the maximum number of buffers */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f5e5e59b232b..6c83ab940af7 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2005,7 +2005,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
 		 * linear target or multiple linear targets pointing to the same
 		 * device), we can send the flush with data directly to it.
 		 */
-		if (map->flush_bypasses_map) {
+		if (bio->bi_iter.bi_size && map->flush_bypasses_map) {
 			struct list_head *devices = dm_table_get_devices(map);
 			if (devices->next == devices->prev)
 				goto send_preflush_with_data;
diff --git a/drivers/power/supply/intel_dc_ti_battery.c b/drivers/power/supply/intel_dc_ti_battery.c
index 56b0c92e9d28..67a75281b0ac 100644
--- a/drivers/power/supply/intel_dc_ti_battery.c
+++ b/drivers/power/supply/intel_dc_ti_battery.c
@@ -127,7 +127,8 @@ struct dc_ti_battery_chip {
 static int dc_ti_battery_get_voltage_and_current_now(struct power_supply *psy, int *volt, int *curr)
 {
 	struct dc_ti_battery_chip *chip = power_supply_get_drvdata(psy);
-	s64 cnt_start_usec, now_usec, sleep_usec;
+	ktime_t ktime;
+	s64 sleep_usec;
 	unsigned int reg_val;
 	s32 acc, smpl_ctr;
 	int ret;
@@ -141,16 +142,17 @@ static int dc_ti_battery_get_voltage_and_current_now(struct power_supply *psy, i
 	if (ret)
 		goto out_err;
 
-	cnt_start_usec = ktime_get_ns() / NSEC_PER_USEC;
+	ktime = ktime_get();
 
 	/* Read Vbat, convert IIO mV to power-supply ųV */
 	ret = iio_read_channel_processed_scale(chip->vbat_channel, volt, 1000);
 	if (ret < 0)
 		goto out_err;
 
+	ktime = ktime_sub(ktime_get(), ktime);
+
 	/* Sleep at least 3 sample-times + slack to get 3+ CC samples */
-	now_usec = ktime_get_ns() / NSEC_PER_USEC;
-	sleep_usec = 3 * SMPL_INTVL_US + SLEEP_SLACK_US - (now_usec - cnt_start_usec);
+	sleep_usec = 3 * SMPL_INTVL_US + SLEEP_SLACK_US - ktime_to_us(ktime);
 	if (sleep_usec > 0 && sleep_usec < 1000000)
 		usleep_range(sleep_usec, sleep_usec + SLEEP_SLACK_US);
 
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 7146a8e9e9c2..d0eccbd920e5 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -417,15 +417,32 @@ static inline void acpi_processor_throttling_init(void) {}
 #endif	/* CONFIG_ACPI_CPU_FREQ_PSS */
 
 /* in processor_idle.c */
+extern struct cpuidle_driver acpi_idle_driver;
 #ifdef CONFIG_ACPI_PROCESSOR_IDLE
-void acpi_processor_power_init(struct acpi_processor *pr);
-void acpi_processor_power_exit(struct acpi_processor *pr);
+int acpi_processor_power_init(struct acpi_processor *pr);
+int acpi_processor_power_exit(struct acpi_processor *pr);
 int acpi_processor_power_state_has_changed(struct acpi_processor *pr);
 int acpi_processor_hotplug(struct acpi_processor *pr);
-void acpi_processor_register_idle_driver(void);
-void acpi_processor_unregister_idle_driver(void);
-int acpi_processor_ffh_lpi_probe(unsigned int cpu);
-int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
+#else
+static inline int acpi_processor_power_init(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
+
+static inline int acpi_processor_power_exit(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
+
+static inline int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
+
+static inline int acpi_processor_hotplug(struct acpi_processor *pr)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_ACPI_PROCESSOR_IDLE */
 
 /* in processor_thermal.c */
@@ -448,6 +465,11 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
 }
 #endif	/* CONFIG_CPU_FREQ */
 
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
+extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
+extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
+#endif
+
 void acpi_processor_init_invariance_cppc(void);
 
 #endif
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d1e527cf2aae..304e93597126 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -8781,8 +8781,18 @@ static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
 	put_snapshot_map(iter->tr);
 }
 
+static int tracing_buffers_may_split(struct vm_area_struct *vma, unsigned long addr)
+{
+	/*
+	 * Trace buffer mappings require the complete buffer including
+	 * the meta page. Partial mappings are not supported.
+	 */
+	return -EINVAL;
+}
+
 static const struct vm_operations_struct tracing_buffers_vmops = {
 	.close		= tracing_buffers_mmap_close,
+	.may_split	= tracing_buffers_may_split,
 };
 
 static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3034e294d50d..713cc94caa02 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -445,8 +445,7 @@ config FRAME_WARN
 	default 2048 if GCC_PLUGIN_LATENT_ENTROPY
 	default 2048 if PARISC
 	default 1536 if (!64BIT && XTENSA)
-	default 1280 if KASAN && !64BIT
-	default 1024 if !64BIT
+	default 1280 if !64BIT
 	default 2048 if 64BIT
 	help
 	  Tell the compiler to warn at build time for stack frames larger than this.
diff --git a/mm/filemap.c b/mm/filemap.c
index 2f1e7e283a51..024b71da5224 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3682,8 +3682,9 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			struct folio *folio, unsigned long start,
 			unsigned long addr, unsigned int nr_pages,
 			unsigned long *rss, unsigned short *mmap_miss,
-			bool can_map_large)
+			pgoff_t file_end)
 {
+	struct address_space *mapping = folio->mapping;
 	unsigned int ref_from_caller = 1;
 	vm_fault_t ret = 0;
 	struct page *page = folio_page(folio, start);
@@ -3692,12 +3693,16 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	unsigned long addr0;
 
 	/*
-	 * Map the large folio fully where possible.
+	 * Map the large folio fully where possible:
 	 *
-	 * The folio must not cross VMA or page table boundary.
+	 * - The folio is fully within size of the file or belong
+	 *   to shmem/tmpfs;
+	 * - The folio doesn't cross VMA boundary;
+	 * - The folio doesn't cross page table boundary;
 	 */
 	addr0 = addr - start * PAGE_SIZE;
-	if (can_map_large && folio_within_vma(folio, vmf->vma) &&
+	if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+	    folio_within_vma(folio, vmf->vma) &&
 	    (addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
 		vmf->pte -= start;
 		page -= start;
@@ -3812,7 +3817,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	unsigned long rss = 0;
 	unsigned int nr_pages = 0, folio_type;
 	unsigned short mmap_miss = 0, mmap_miss_saved;
-	bool can_map_large;
 
 	rcu_read_lock();
 	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3823,16 +3827,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	end_pgoff = min(end_pgoff, file_end);
 
 	/*
-	 * Do not allow to map with PTEs beyond i_size and with PMD
-	 * across i_size to preserve SIGBUS semantics.
+	 * Do not allow to map with PMD across i_size to preserve
+	 * SIGBUS semantics.
 	 *
 	 * Make an exception for shmem/tmpfs that for long time
 	 * intentionally mapped with PMDs across i_size.
 	 */
-	can_map_large = shmem_mapping(mapping) ||
-		file_end >= folio_next_index(folio);
-
-	if (can_map_large && filemap_map_pmd(vmf, folio, start_pgoff)) {
+	if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+	    filemap_map_pmd(vmf, folio, start_pgoff)) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}
@@ -3861,8 +3863,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		else
 			ret |= filemap_map_folio_range(vmf, folio,
 					xas.xa_index - folio->index, addr,
-					nr_pages, &rss, &mmap_miss,
-					can_map_large);
+					nr_pages, &rss, &mmap_miss, file_end);
 
 		folio_unlock(folio);
 	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2f2a521e5d68..6cba1cb14b23 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3619,6 +3619,16 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 	if (folio != page_folio(split_at) || folio != page_folio(lock_at))
 		return -EINVAL;
 
+	/*
+	 * Folios that just got truncated cannot get split. Signal to the
+	 * caller that there was a race.
+	 *
+	 * TODO: this will also currently refuse shmem folios that are in the
+	 * swapcache.
+	 */
+	if (!is_anon && !folio->mapping)
+		return -EBUSY;
+
 	if (new_order >= folio_order(folio))
 		return -EINVAL;
 
@@ -3659,18 +3669,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
 		gfp_t gfp;
 
 		mapping = folio->mapping;
-
-		/* Truncated ? */
-		/*
-		 * TODO: add support for large shmem folio in swap cache.
-		 * When shmem is in swap cache, mapping is NULL and
-		 * folio_test_swapcache() is true.
-		 */
-		if (!mapping) {
-			ret = -EBUSY;
-			goto out;
-		}
-
 		min_order = mapping_min_folio_order(folio->mapping);
 		if (new_order < min_order) {
 			ret = -EINVAL;
diff --git a/mm/memfd.c b/mm/memfd.c
index 1d109c1acf21..a405eaa451ee 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -96,9 +96,36 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
 						    NULL,
 						    gfp_mask);
 		if (folio) {
+			u32 hash;
+
+			/*
+			 * Zero the folio to prevent information leaks to userspace.
+			 * Use folio_zero_user() which is optimized for huge/gigantic
+			 * pages. Pass 0 as addr_hint since this is not a faulting path
+			 * and we don't have a user virtual address yet.
+			 */
+			folio_zero_user(folio, 0);
+
+			/*
+			 * Mark the folio uptodate before adding to page cache,
+			 * as required by filemap.c and other hugetlb paths.
+			 */
+			__folio_mark_uptodate(folio);
+
+			/*
+			 * Serialize hugepage allocation and instantiation to prevent
+			 * races with concurrent allocations, as required by all other
+			 * callers of hugetlb_add_to_page_cache().
+			 */
+			hash = hugetlb_fault_mutex_hash(memfd->f_mapping, idx);
+			mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
 			err = hugetlb_add_to_page_cache(folio,
 							memfd->f_mapping,
 							idx);
+
+			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+
 			if (err) {
 				folio_put(folio);
 				goto err_unresv;
diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index 0a0db5849b8e..42e3dde73e74 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -241,6 +241,7 @@ retry:
 		if (PTR_ERR(vma) == -EAGAIN) {
 			count_vm_vma_lock_event(VMA_LOCK_MISS);
 			/* The area was replaced with another one */
+			mas_set(&mas, address);
 			goto retry;
 		}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 10760240a3a2..a1b4b9d80e3b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2005,10 +2005,8 @@ swp_entry_t get_swap_page_of_type(int type)
 		local_lock(&percpu_swap_cluster.lock);
 		offset = cluster_alloc_swap_entry(si, 0, 1);
 		local_unlock(&percpu_swap_cluster.lock);
-		if (offset) {
+		if (offset)
 			entry = swp_entry(si->type, offset);
-			atomic_long_dec(&nr_swap_pages);
-		}
 	}
 	put_swap_device(si);
 }
diff --git a/sound/hda/codecs/cirrus/cs420x.c b/sound/hda/codecs/cirrus/cs420x.c
index 823220d5cada..13f5f1711fa4 100644
--- a/sound/hda/codecs/cirrus/cs420x.c
+++ b/sound/hda/codecs/cirrus/cs420x.c
@@ -585,6 +585,7 @@ static const struct hda_quirk cs4208_mac_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
 	SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
 	SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
+	SND_PCI_QUIRK(0x106b, 0x7800, "MacPro 6,1", CS4208_MACMINI),
 	SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
 	{} /* terminator */
 };
diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c
index 269b6c1e3b6d..b45fcc9a3785 100644
--- a/sound/hda/codecs/realtek/alc269.c
+++ b/sound/hda/codecs/realtek/alc269.c
@@ -6525,6 +6525,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x8a4f, "HP Victus 15-fa0xxx (MB 8A4F)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
 	SND_PCI_QUIRK(0x103c, 0x8a6e, "HP EDNA 360", ALC287_FIXUP_CS35L41_I2C_4),
 	SND_PCI_QUIRK(0x103c, 0x8a74, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+	SND_PCI_QUIRK(0x103c, 0x8a75, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
 	SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
 	SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
@@ -6572,6 +6573,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x8bc8, "HP Victus 15-fa1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
 	SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT),
 	SND_PCI_QUIRK(0x103c, 0x8bd4, "HP Victus 16-s0xxx (MB 8BD4)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+	SND_PCI_QUIRK(0x103c, 0x8bd6, "HP Pavilion Aero Laptop 13z-be200", ALC287_FIXUP_HP_GPIO_LED),
 	SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
 	SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
 	SND_PCI_QUIRK(0x103c, 0x8bdf, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),
diff --git a/sound/pci/au88x0/au88x0.c b/sound/pci/au88x0/au88x0.c
index de56e83d8e10..bb02945793f0 100644
--- a/sound/pci/au88x0/au88x0.c
+++ b/sound/pci/au88x0/au88x0.c
@@ -280,11 +280,11 @@ __snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 
 	// (5)
 	err = pci_read_config_word(pci, PCI_DEVICE_ID, &chip->device);
-	if (err < 0)
-		return err;
+	if (err)
+		return pcibios_err_to_errno(err);
 	err = pci_read_config_word(pci, PCI_VENDOR_ID, &chip->vendor);
-	if (err < 0)
-		return err;
+	if (err)
+		return pcibios_err_to_errno(err);
 	chip->rev = pci->revision;
 #ifdef CHIP_AU8830
 	if ((chip->rev) != 0xfe && (chip->rev) != 0xfa) {
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 72b900505d2c..3af71d42b9b9 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -921,7 +921,7 @@ static int parse_term_uac2_clock_source(struct mixer_build *state,
 {
 	struct uac_clock_source_descriptor *d = p1;
 
-	term->type = UAC3_CLOCK_SOURCE << 16; /* virtual type */
+	term->type = UAC2_CLOCK_SOURCE << 16; /* virtual type */
 	term->id = id;
 	term->name = d->iClockSource;
 	return 0;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 5e30bff69f82..61bd61ffb1b2 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -2030,6 +2030,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
 	case USB_ID(0x249c, 0x9326): /* M2Tech Young MkIII */
 	case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
 	case USB_ID(0x2622, 0x0041): /* Audiolab M-DAC+ */
+	case USB_ID(0x2622, 0x0061): /* LEAK Stereo 230 */
 	case USB_ID(0x278b, 0x5100): /* Rotel RC-1590 */
 	case USB_ID(0x27f7, 0x3002): /* W4S DAC-2v2SE */
 	case USB_ID(0x29a2, 0x0086): /* Mutec MC3+ USB */
@@ -2428,6 +2429,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
 		   QUIRK_FLAG_DSD_RAW),
 	VENDOR_FLG(0x25ce, /* Mytek devices */
 		   QUIRK_FLAG_DSD_RAW),
+	VENDOR_FLG(0x2622, /* IAG Limited devices */
+		   QUIRK_FLAG_DSD_RAW),
 	VENDOR_FLG(0x278b, /* Rotel? */
 		   QUIRK_FLAG_DSD_RAW),
 	VENDOR_FLG(0x292b, /* Gustard/Ess based devices */
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index 9e3be2ee7f1b..f917b4c4c943 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -1758,10 +1758,15 @@ int main(int argc, char *argv[])
 			uffd_test_ops = mem_type->mem_ops;
 			uffd_test_case_ops = test->test_case_ops;
 
-			if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB))
+			if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB)) {
 				gopts.page_size = default_huge_page_size();
-			else
+				if (gopts.page_size == 0) {
+					uffd_test_skip("huge page size is 0, feature missing?");
+					continue;
+				}
+			} else {
 				gopts.page_size = psize();
+			}
 
 			/* Ensure we have at least 2 pages */
 			gopts.nr_pages = MAX(UFFD_TEST_MEM_SIZE, gopts.page_size * 2)
@@ -1776,12 +1781,6 @@ int main(int argc, char *argv[])
 				continue;
 
 			uffd_test_start("%s on %s", test->name, mem_type->name);
-			if ((mem_type->mem_flag == MEM_HUGETLB ||
-			     mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
-			    (default_huge_page_size() == 0)) {
-				uffd_test_skip("huge page size is 0, feature missing?");
-				continue;
-			}
 			if (!uffd_feature_supported(test)) {
 				uffd_test_skip("feature missing");
 				continue;
