-rw-r--r-- Documentation/dev-tools/checkpatch.rst | 23
-rw-r--r-- Documentation/translations/zh_CN/core-api/irq/irq-domain.rst | 4
-rw-r--r-- Makefile | 4
-rw-r--r-- arch/x86/events/amd/uncore.c | 5
-rw-r--r-- arch/x86/events/intel/core.c | 3
-rw-r--r-- drivers/block/Kconfig | 3
-rw-r--r-- drivers/irqchip/irq-mchp-eic.c | 2
-rw-r--r-- drivers/md/dm-mpath.c | 13
-rw-r--r-- drivers/misc/Kconfig | 1
-rw-r--r-- drivers/scsi/imm.c | 1
-rw-r--r-- drivers/scsi/ipr.c | 28
-rw-r--r-- drivers/scsi/libsas/sas_init.c | 1
-rw-r--r-- drivers/scsi/libsas/sas_internal.h | 15
-rw-r--r-- drivers/scsi/libsas/sas_phy.c | 33
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr.h | 4
-rw-r--r-- drivers/scsi/mpi3mr/mpi3mr_os.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 30
-rw-r--r-- drivers/scsi/qla4xxx/ql4_nx.c | 2
-rw-r--r-- drivers/scsi/scsi_dh.c | 8
-rw-r--r-- drivers/scsi/scsi_lib.c | 2
-rw-r--r-- drivers/target/sbp/sbp_target.c | 3
-rw-r--r-- drivers/target/target_core_transport.c | 1
-rw-r--r-- drivers/ufs/Kconfig | 1
-rw-r--r-- drivers/ufs/core/ufshcd.c | 68
-rw-r--r-- drivers/ufs/host/ufs-qcom.c | 6
-rw-r--r-- fs/btrfs/file.c | 3
-rw-r--r-- fs/btrfs/inode.c | 1
-rw-r--r-- fs/btrfs/qgroup.c | 27
-rw-r--r-- fs/btrfs/tests/qgroup-tests.c | 1
-rw-r--r-- fs/btrfs/tree-log.c | 46
-rw-r--r-- fs/btrfs/volumes.c | 1
-rw-r--r-- fs/ceph/Kconfig | 3
-rw-r--r-- fs/ceph/caps.c | 4
-rw-r--r-- fs/ceph/mds_client.c | 20
-rw-r--r-- fs/ceph/snap.c | 2
-rw-r--r-- fs/ceph/super.c | 3
-rw-r--r-- fs/fat/cache.c | 7
-rw-r--r-- fs/libfs.c | 50
-rw-r--r-- fs/ocfs2/alloc.c | 12
-rw-r--r-- fs/ocfs2/buffer_head_io.c | 2
-rw-r--r-- fs/ocfs2/cluster/nodemanager.c | 3
-rw-r--r-- fs/ocfs2/dir.c | 10
-rw-r--r-- fs/ocfs2/file.c | 14
-rw-r--r-- fs/ocfs2/inode.c | 11
-rw-r--r-- fs/ocfs2/move_extents.c | 2
-rw-r--r-- fs/ocfs2/namei.c | 3
-rw-r--r-- fs/ocfs2/ocfs2.h | 18
-rw-r--r-- fs/ocfs2/resize.c | 4
-rw-r--r-- fs/ocfs2/stackglue.c | 3
-rw-r--r-- fs/ocfs2/suballoc.c | 13
-rw-r--r-- fs/ocfs2/super.c | 2
-rw-r--r-- fs/ocfs2/xattr.c | 38
-rw-r--r-- fs/smb/common/smbdirect/smbdirect_socket.h | 12
-rw-r--r-- fs/smb/server/mgmt/user_session.c | 4
-rw-r--r-- fs/smb/server/smb2pdu.c | 4
-rw-r--r-- fs/smb/server/smbacl.c | 3
-rw-r--r-- fs/smb/server/transport_rdma.c | 175
-rw-r--r-- fs/smb/server/vfs.c | 2
-rw-r--r-- include/linux/args.h | 4
-rw-r--r-- include/linux/fs.h | 2
-rw-r--r-- include/linux/irqdomain.h | 16
-rw-r--r-- include/linux/rseq_entry.h | 2
-rw-r--r-- include/trace/events/ceph.h | 234
-rw-r--r-- kernel/cgroup/rstat.c | 13
-rw-r--r-- kernel/cpu.c | 25
-rw-r--r-- kernel/events/core.c | 22
-rw-r--r-- kernel/events/uprobes.c | 8
-rw-r--r-- kernel/irq/manage.c | 3
-rw-r--r-- kernel/liveupdate/Kconfig | 1
-rw-r--r-- kernel/liveupdate/luo_core.c | 4
-rw-r--r-- kernel/liveupdate/luo_file.c | 7
-rw-r--r-- kernel/sched/ext.c | 72
-rw-r--r-- lib/bug.c | 6
-rw-r--r-- mm/shmem.c | 43
-rw-r--r-- net/ceph/osd_client.c | 6
-rw-r--r-- net/ceph/osdmap.c | 120
-rwxr-xr-x scripts/checkpatch.pl | 6
-rw-r--r-- security/tomoyo/domain.c | 9
-rw-r--r-- tools/testing/selftests/sched_ext/runner.c | 8
79 files changed, 995 insertions, 384 deletions
diff --git a/Documentation/dev-tools/checkpatch.rst b/Documentation/dev-tools/checkpatch.rst
index fa2988dd4657..deb3f67a633c 100644
--- a/Documentation/dev-tools/checkpatch.rst
+++ b/Documentation/dev-tools/checkpatch.rst
@@ -1002,6 +1002,29 @@ Functions and Variables
return bar;
+ **UNINITIALIZED_PTR_WITH_FREE**
+ Pointers with the __free attribute should be declared at the point of
+ use and initialized (see include/linux/cleanup.h). In this case the
+ rule about declarations at the top of the function can be relaxed.
+ Not doing so may lead to undefined behavior, as the memory the pointer
+ holds (garbage, if never initialized) is freed automatically when the
+ pointer goes out of scope.
+
+ Also see: https://lore.kernel.org/lkml/58fd478f408a34b578ee8d949c5c4b4da4d4f41d.camel@HansenPartnership.com/
+
+ Example::
+
+ type var __free(free_func);
+ ... // var not used, but in the future someone might add a return here
+ var = malloc(var_size);
+ ...
+
+ should be initialized as::
+
+ ...
+ type var __free(free_func) = malloc(var_size);
+ ...
+
Permissions
-----------
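As a self-contained sketch of the pattern the new UNINITIALIZED_PTR_WITH_FREE
check enforces (the function and buffer here are hypothetical; __free() and the
kfree cleanup class are real and live in include/linux/cleanup.h):

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static int demo_read(size_t len)
	{
		/* Declared at the point of use and initialized in one
		 * statement, so no early return can auto-free a pointer
		 * that still holds garbage. */
		u8 *buf __free(kfree) = kmalloc(len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		/* ... use buf; it is kfree()d on every return path ... */
		return 0;
	}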
diff --git a/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst b/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst
index 4a2d3b27aa4d..aaefeda0e164 100644
--- a/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst
+++ b/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst
@@ -109,10 +109,6 @@ irq_domain maintains a radix tree mapping from hwirq numbers to Linux IRQs. When a hw
If hwirq numbers can be very large, the tree map is a good choice, since it does not
require allocating a table as large as the largest hwirq number. The downside is that
hwirq-to-IRQ lookups depend on how many entries are in the table.
-irq_domain_add_tree() and irq_domain_create_tree() are functionally equivalent, except
-for the first parameter: the former takes an Open Firmware specific 'struct device_node',
-while the latter takes the more general abstraction 'struct fwnode_handle'.
-
Very few drivers should need this mapping.
No map
diff --git a/Makefile b/Makefile
index 2f545ec1690f..e404e4767944 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
-PATCHLEVEL = 18
+PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index e8b6af199c73..9293ce50574d 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -656,14 +656,11 @@ static int amd_uncore_df_event_init(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int ret = amd_uncore_event_init(event);
- if (ret || pmu_version < 2)
- return ret;
-
hwc->config = event->attr.config &
(pmu_version >= 2 ? AMD64_PERFMON_V2_RAW_EVENT_MASK_NB :
AMD64_RAW_EVENT_MASK_NB);
- return 0;
+ return ret;
}
static int amd_uncore_df_add(struct perf_event *event, int flags)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 853fe073bab3..bdf3f0d0fe21 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3378,6 +3378,9 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
if (!test_bit(bit, cpuc->active_mask))
continue;
+ /* Event may have already been cleared: */
+ if (!event)
+ continue;
/*
* There may be unprocessed PEBS records in the PEBS buffer,
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 77d694448990..858320b6ebb7 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -316,9 +316,6 @@ config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
select CEPH_LIB
- select CRC32
- select CRYPTO_AES
- select CRYPTO
help
Say Y here if you want to include the Rados block device, which stripes
a block device over objects stored in the Ceph distributed object
diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c
index 2474fa467a05..31093a8ab67c 100644
--- a/drivers/irqchip/irq-mchp-eic.c
+++ b/drivers/irqchip/irq-mchp-eic.c
@@ -170,7 +170,7 @@ static int mchp_eic_domain_alloc(struct irq_domain *domain, unsigned int virq,
ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
if (ret || hwirq >= MCHP_EIC_NIRQ)
- return ret;
+ return ret ?: -EINVAL;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
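The "?:" above is the GNU C conditional with the middle operand omitted:
"x ?: y" evaluates to x when x is non-zero (evaluating x only once) and to y
otherwise. Inside the "if (ret || hwirq >= MCHP_EIC_NIRQ)" branch it unfolds to:

	if (ret)
		return ret;	/* translation failed: propagate its error */
	return -EINVAL;		/* translated fine, but hwirq is out of range */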
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index c18358271618..d5d6ef7ba838 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -950,6 +950,19 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
q = bdev_get_queue(p->path.dev->bdev);
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
+ if (IS_ERR(attached_handler_name)) {
+ if (PTR_ERR(attached_handler_name) == -ENODEV) {
+ if (m->hw_handler_name) {
+ DMERR("hardware handlers are only allowed for SCSI devices");
+ kfree(m->hw_handler_name);
+ m->hw_handler_name = NULL;
+ }
+ attached_handler_name = NULL;
+ } else {
+ r = PTR_ERR(attached_handler_name);
+ goto bad;
+ }
+ }
if (attached_handler_name || m->hw_handler_name) {
INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 9d1de68dee27..d7d41b054b98 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -106,7 +106,6 @@ config PHANTOM
config RPMB
tristate "RPMB partition interface"
- depends on MMC || SCSI_UFSHCD
help
Unified RPMB unit interface for RPMB capable devices such as eMMC and
UFS. Provides interface for in-kernel security controllers to access
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 5c602c057798..45b0e33293a5 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1260,6 +1260,7 @@ static void imm_detach(struct parport *pb)
imm_struct *dev;
list_for_each_entry(dev, &imm_hosts, list) {
if (dev->dev->port == pb) {
+ disable_delayed_work_sync(&dev->imm_tq);
list_del_init(&dev->list);
scsi_remove_host(dev->host);
scsi_host_put(dev->host);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 95123689e9d1..dbd58a7e7bc1 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -61,8 +61,8 @@
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
+#include <linux/irq.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -7844,6 +7844,30 @@ static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_set_affinity_nobalance
+ * @ioa_cfg: ipr_ioa_cfg struct for an ipr device
+ * @flag: bool
+ * true: enable "IRQ_NO_BALANCING" bit for msix interrupt
+ * false: disable "IRQ_NO_BALANCING" bit for msix interrupt
+ * Description: This function will be called to disable/enable
+ * "IRQ_NO_BALANCING" to avoid irqbalance daemon
+ * kicking in during adapter reset.
+ **/
+static void ipr_set_affinity_nobalance(struct ipr_ioa_cfg *ioa_cfg, bool flag)
+{
+ int irq, i;
+
+ for (i = 0; i < ioa_cfg->nvectors; i++) {
+ irq = pci_irq_vector(ioa_cfg->pdev, i);
+
+ if (flag)
+ irq_set_status_flags(irq, IRQ_NO_BALANCING);
+ else
+ irq_clear_status_flags(irq, IRQ_NO_BALANCING);
+ }
+}
+
+/**
* ipr_reset_restore_cfg_space - Restore PCI config space.
* @ipr_cmd: ipr command struct
*
@@ -7866,6 +7890,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
return IPR_RC_JOB_CONTINUE;
}
+ ipr_set_affinity_nobalance(ioa_cfg, false);
ipr_fail_all_ops(ioa_cfg);
if (ioa_cfg->sis64) {
@@ -7945,6 +7970,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
if (rc == PCIBIOS_SUCCESSFUL) {
+ ipr_set_affinity_nobalance(ioa_cfg, true);
ipr_cmd->job_step = ipr_reset_bist_done;
ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
rc = IPR_RC_JOB_RETURN;
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 8566bb1208a0..6b15ad1bcada 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -141,6 +141,7 @@ Undo_event_q:
Undo_ports:
sas_unregister_ports(sas_ha);
Undo_phys:
+ sas_unregister_phys(sas_ha);
return error;
}
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 6706f2be8d27..d104c87f04f5 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -54,6 +54,7 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev);
void sas_scsi_recover_host(struct Scsi_Host *shost);
int sas_register_phys(struct sas_ha_struct *sas_ha);
+void sas_unregister_phys(struct sas_ha_struct *sas_ha);
struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags);
void sas_free_event(struct asd_sas_event *event);
@@ -145,20 +146,6 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i
func, dev->parent ? "exp-attached" :
"direct-attached",
SAS_ADDR(dev->sas_addr), err);
-
- /*
- * If the device probe failed, the expander phy attached address
- * needs to be reset so that the phy will not be treated as flutter
- * in the next revalidation
- */
- if (dev->parent && !dev_is_expander(dev->dev_type)) {
- struct sas_phy *phy = dev->phy;
- struct domain_device *parent = dev->parent;
- struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number];
-
- memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- }
-
sas_unregister_dev(dev->port, dev);
}
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 635835c28ecd..58f08dc2c187 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -116,6 +116,7 @@ static void sas_phye_shutdown(struct work_struct *work)
int sas_register_phys(struct sas_ha_struct *sas_ha)
{
int i;
+ int err;
/* Now register the phys. */
for (i = 0; i < sas_ha->num_phys; i++) {
@@ -132,8 +133,10 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
phy->frame_rcvd_size = 0;
phy->phy = sas_phy_alloc(&sas_ha->shost->shost_gendev, i);
- if (!phy->phy)
- return -ENOMEM;
+ if (!phy->phy) {
+ err = -ENOMEM;
+ goto rollback;
+ }
phy->phy->identify.initiator_port_protocols =
phy->iproto;
@@ -146,10 +149,34 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
- sas_phy_add(phy->phy);
+ err = sas_phy_add(phy->phy);
+ if (err) {
+ sas_phy_free(phy->phy);
+ goto rollback;
+ }
}
return 0;
+rollback:
+ for (i-- ; i >= 0 ; i--) {
+ struct asd_sas_phy *phy = sas_ha->sas_phy[i];
+
+ sas_phy_delete(phy->phy);
+ sas_phy_free(phy->phy);
+ }
+ return err;
+}
+
+void sas_unregister_phys(struct sas_ha_struct *sas_ha)
+{
+ int i;
+
+ for (i = 0 ; i < sas_ha->num_phys ; i++) {
+ struct asd_sas_phy *phy = sas_ha->sas_phy[i];
+
+ sas_phy_delete(phy->phy);
+ sas_phy_free(phy->phy);
+ }
}
const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 6742684e2990..31d68c151b20 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -56,8 +56,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.15.0.5.50"
-#define MPI3MR_DRIVER_RELDATE "12-August-2025"
+#define MPI3MR_DRIVER_VERSION "8.15.0.5.51"
+#define MPI3MR_DRIVER_RELDATE "18-November-2025"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index b88633e1efe2..d4ca878d0886 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -1184,6 +1184,8 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
if (is_added == true)
tgtdev->io_throttle_enabled =
(flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
+ if (!mrioc->sas_transport_enabled)
+ tgtdev->non_stl = 1;
switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
@@ -4844,7 +4846,7 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
if (starget->channel == mrioc->scsi_device_channel) {
tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
- if (tgt_dev && !tgt_dev->is_hidden) {
+ if (tgt_dev && !tgt_dev->is_hidden && tgt_dev->non_stl) {
scsi_tgt_priv_data->starget = starget;
scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3a57f07d73f5..16a44c0917e1 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -17,6 +17,7 @@
#include <linux/crash_dump.h>
#include <linux/trace_events.h>
#include <linux/trace.h>
+#include <linux/irq.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -7776,6 +7777,31 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
}
+/**
+ * qla2xxx_set_affinity_nobalance
+ * @pdev: pci_dev struct for a qla2xxx device
+ * @flag: bool
+ * true: enable "IRQ_NO_BALANCING" bit for msix interrupt
+ * false: disable "IRQ_NO_BALANCING" bit for msix interrupt
+ * Description: This function will be called to disable/enable
+ * "IRQ_NO_BALANCING" to avoid irqbalance daemon
+ * kicking in during adapter reset.
+ **/
+
+static void qla2xxx_set_affinity_nobalance(struct pci_dev *pdev, bool flag)
+{
+ int irq, i;
+
+ for (i = 0; i < QLA_BASE_VECTORS; i++) {
+ irq = pci_irq_vector(pdev, i);
+
+ if (flag)
+ irq_set_status_flags(irq, IRQ_NO_BALANCING);
+ else
+ irq_clear_status_flags(irq, IRQ_NO_BALANCING);
+ }
+}
+
static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
@@ -7794,6 +7820,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
goto out;
}
+ qla2xxx_set_affinity_nobalance(pdev, false);
+
switch (state) {
case pci_channel_io_normal:
qla_pci_set_eeh_busy(vha);
@@ -7930,6 +7958,8 @@ exit_slot_reset:
ql_dbg(ql_dbg_aer, base_vha, 0x900e,
"Slot Reset returning %x.\n", ret);
+ qla2xxx_set_affinity_nobalance(pdev, true);
+
return ret;
}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index da2fc66ffedd..b0a62aaa1cca 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1552,7 +1552,7 @@ static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
(val == PHAN_INITIALIZE_ACK))
return 0;
set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(500);
+ schedule_timeout(msecs_to_jiffies(500));
} while (--retries);
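schedule_timeout() takes its argument in jiffies, so the bare 500 slept for an
HZ-dependent interval (0.5s at HZ=1000 but 5s at HZ=100). The corrected loop
body, in sketch form:

	/* Sleep ~500ms per retry regardless of CONFIG_HZ. */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(500));

schedule_timeout_uninterruptible(msecs_to_jiffies(500)) would fold the two
calls into one.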
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 7b56e00c7df6..b9d805317814 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -353,7 +353,8 @@ EXPORT_SYMBOL_GPL(scsi_dh_attach);
* that may have a device handler attached
* @gfp - the GFP mask used in the kmalloc() call when allocating memory
*
- * Returns name of attached handler, NULL if no handler is attached.
+ * Returns name of attached handler, NULL if no handler is attached, or
+ * an error pointer if an error occurred.
* Caller must take care to free the returned string.
*/
const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
@@ -363,10 +364,11 @@ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
sdev = scsi_device_from_queue(q);
if (!sdev)
- return NULL;
+ return ERR_PTR(-ENODEV);
if (sdev->handler)
- handler_name = kstrdup(sdev->handler->name, gfp);
+ handler_name = kstrdup(sdev->handler->name, gfp) ? :
+ ERR_PTR(-ENOMEM);
put_device(&sdev->sdev_gendev);
return handler_name;
}
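With the tri-state return (name, NULL, or ERR_PTR), callers now have to unpack
all three cases. A sketch of the consuming pattern, mirroring the dm-mpath hunk
earlier in this diff:

	const char *name = scsi_dh_attached_handler_name(q, GFP_KERNEL);

	if (IS_ERR(name)) {
		if (PTR_ERR(name) == -ENODEV)
			name = NULL;	/* not a SCSI queue: treat as "no handler" */
		else
			return PTR_ERR(name);	/* e.g. -ENOMEM from kstrdup() */
	}
	/* name is now NULL or a kmalloc()ed string the caller must kfree() */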
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 51ad2ad07e43..93031326ac3e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2801,7 +2801,7 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
*
* Must be called with user context, may sleep.
*
- * Returns zero if unsuccessful or an error if not.
+ * Returns zero if successful or an error if not.
*/
int
scsi_device_quiesce(struct scsi_device *sdev)
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index b8457477cee9..9f167ff8da7b 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -5,8 +5,7 @@
* Copyright (C) 2011 Chris Boot <bootc@bootc.net>
*/
-#define KMSG_COMPONENT "sbp_target"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "sbp_target: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index e8b7955d40f2..50d21888a0c9 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1524,6 +1524,7 @@ target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
if (!cmd->t_task_cdb) {
+ cmd->t_task_cdb = &cmd->__t_task_cdb[0];
pr_err("Unable to allocate cmd->t_task_cdb"
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig
index 90226f72c158..f662e7ce71f1 100644
--- a/drivers/ufs/Kconfig
+++ b/drivers/ufs/Kconfig
@@ -6,6 +6,7 @@
menuconfig SCSI_UFSHCD
tristate "Universal Flash Storage Controller"
depends on SCSI && SCSI_DMA
+ depends on RPMB || !RPMB
select PM_DEVFREQ
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select NLS
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 040a0ceb170a..80c0b49f30b0 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1455,15 +1455,14 @@ out:
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
{
up_write(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->wb_mutex);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ mutex_unlock(&hba->host->scan_mutex);
/* Enable Write Booster if current gear requires it else disable it */
if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);
- mutex_unlock(&hba->wb_mutex);
-
- blk_mq_unquiesce_tagset(&hba->host->tag_set);
- mutex_unlock(&hba->host->scan_mutex);
ufshcd_release(hba);
}
@@ -6504,6 +6503,11 @@ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
+ /*
+ * A WLUN resume failure could potentially lead to the HBA being
+ * runtime suspended, so take an extra reference on hba->dev.
+ */
+ pm_runtime_get_sync(hba->dev);
ufshcd_rpm_get_sync(hba);
if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
hba->is_sys_suspended) {
@@ -6543,6 +6547,7 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
ufshcd_rpm_put(hba);
+ pm_runtime_put(hba->dev);
}
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
@@ -6557,28 +6562,42 @@ static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
#ifdef CONFIG_PM
static void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
+ struct scsi_target *starget = hba->ufs_device_wlun->sdev_target;
struct Scsi_Host *shost = hba->host;
struct scsi_device *sdev;
struct request_queue *q;
- int ret;
+ bool resume_sdev_queues = false;
hba->is_sys_suspended = false;
+
/*
- * Set RPM status of wlun device to RPM_ACTIVE,
- * this also clears its runtime error.
+ * Ensure the parent's error status is cleared before proceeding
+ * to the child, as the parent must be active to activate the child.
*/
- ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
+ if (hba->dev->power.runtime_error) {
+ /* hba->dev has no functional parent, thus simply set RPM_ACTIVE */
+ pm_runtime_set_active(hba->dev);
+ resume_sdev_queues = true;
+ }
+
+ if (hba->ufs_device_wlun->sdev_gendev.power.runtime_error) {
+ /*
+ * starget, parent of wlun, might be suspended if wlun resume failed.
+ * Make sure the parent is resumed before setting the child (wlun) active.
+ */
+ pm_runtime_get_sync(&starget->dev);
+ pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
+ pm_runtime_put_sync(&starget->dev);
+ resume_sdev_queues = true;
+ }
- /* hba device might have a runtime error otherwise */
- if (ret)
- ret = pm_runtime_set_active(hba->dev);
/*
* If wlun device had runtime error, we also need to resume those
* consumer scsi devices in case any of them has failed to be
* resumed due to supplier runtime resume failure. This is to unblock
* blk_queue_enter in case there are bios waiting inside it.
*/
- if (!ret) {
+ if (resume_sdev_queues) {
shost_for_each_device(sdev, shost) {
q = sdev->request_queue;
if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
@@ -6679,19 +6698,22 @@ static void ufshcd_err_handler(struct work_struct *work)
hba->saved_uic_err, hba->force_reset,
ufshcd_is_link_broken(hba) ? "; link is broken" : "");
- /*
- * Use ufshcd_rpm_get_noresume() here to safely perform link recovery
- * even if an error occurs during runtime suspend or runtime resume.
- * This avoids potential deadlocks that could happen if we tried to
- * resume the device while a PM operation is already in progress.
- */
- ufshcd_rpm_get_noresume(hba);
- if (hba->pm_op_in_progress) {
- ufshcd_link_recovery(hba);
+ if (hba->ufs_device_wlun) {
+ /*
+ * Use ufshcd_rpm_get_noresume() here to safely perform link
+ * recovery even if an error occurs during runtime suspend or
+ * runtime resume. This avoids potential deadlocks that could
+ * happen if we tried to resume the device while a PM operation
+ * is already in progress.
+ */
+ ufshcd_rpm_get_noresume(hba);
+ if (hba->pm_op_in_progress) {
+ ufshcd_link_recovery(hba);
+ ufshcd_rpm_put(hba);
+ return;
+ }
ufshcd_rpm_put(hba);
- return;
}
- ufshcd_rpm_put(hba);
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 8d119b3223cb..8ebee0cc5313 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1769,10 +1769,9 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int i, j, nminor = 0, testbus_len = 0;
- u32 *testbus __free(kfree) = NULL;
char *prefix;
- testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ u32 *testbus __free(kfree) = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
if (!testbus)
return;
@@ -1794,13 +1793,12 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
const char *prefix, void __iomem *base)
{
- u32 *regs __free(kfree) = NULL;
size_t pos;
if (offset % 4 != 0 || len % 4 != 0)
return -EINVAL;
- regs = kzalloc(len, GFP_ATOMIC);
+ u32 *regs __free(kfree) = kzalloc(len, GFP_ATOMIC);
if (!regs)
return -ENOMEM;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 7a501e73d880..1abc7ed2990e 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2019,13 +2019,14 @@ out:
else
btrfs_delalloc_release_space(inode, data_reserved, page_start,
reserved_space, true);
- extent_changeset_free(data_reserved);
out_noreserve:
if (only_release_metadata)
btrfs_check_nocow_unlock(inode);
sb_end_pagefault(inode->vfs_inode.i_sb);
+ extent_changeset_free(data_reserved);
+
if (ret < 0)
return vmf_error(ret);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c4bee47829ed..317db7d10a21 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -256,6 +256,7 @@ static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off
if (ret < 0) {
btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
logical, ret);
+ btrfs_release_path(&path);
return;
}
eb = path.nodes[0];
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 9e2b53e90dcb..d9d8d9968a58 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1243,14 +1243,7 @@ out:
btrfs_end_transaction(trans);
else if (trans)
ret = btrfs_end_transaction(trans);
-
- /*
- * At this point we either failed at allocating prealloc, or we
- * succeeded and passed the ownership to it to add_qgroup_rb(). In any
- * case, this needs to be NULL or there is something wrong.
- */
- ASSERT(prealloc == NULL);
-
+ kfree(prealloc);
return ret;
}
@@ -1682,12 +1675,7 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
- /*
- * At this point we either failed at allocating prealloc, or we
- * succeeded and passed the ownership to it to add_qgroup_rb(). In any
- * case, this needs to be NULL or there is something wrong.
- */
- ASSERT(prealloc == NULL);
+ kfree(prealloc);
return ret;
}
@@ -3279,7 +3267,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
struct btrfs_root *quota_root;
struct btrfs_qgroup *srcgroup;
struct btrfs_qgroup *dstgroup;
- struct btrfs_qgroup *prealloc = NULL;
+ struct btrfs_qgroup *prealloc;
struct btrfs_qgroup_list **qlist_prealloc = NULL;
bool free_inherit = false;
bool need_rescan = false;
@@ -3520,14 +3508,7 @@ out:
}
if (free_inherit)
kfree(inherit);
-
- /*
- * At this point we either failed at allocating prealloc, or we
- * succeeded and passed the ownership to it to add_qgroup_rb(). In any
- * case, this needs to be NULL or there is something wrong.
- */
- ASSERT(prealloc == NULL);
-
+ kfree(prealloc);
return ret;
}
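The swap from ASSERT(prealloc == NULL) to kfree(prealloc) relies on kfree(NULL)
being a no-op: the paths that hand prealloc to add_qgroup_rb() clear the local
pointer, so the unconditional kfree() only frees an unconsumed allocation. In
sketch form (call shape abbreviated from the qgroup code):

	struct btrfs_qgroup *prealloc = kzalloc(sizeof(*prealloc), GFP_NOFS);
	...
	/* add_qgroup_rb() takes ownership of prealloc */
	qgroup = add_qgroup_rb(fs_info, prealloc, qgroupid);
	prealloc = NULL;
	...
out:
	kfree(prealloc);	/* no-op when ownership was passed on */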
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 05cfda8af422..e9124605974b 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -187,7 +187,6 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
ret = btrfs_search_slot(&trans, root, &key, path, -1, 1);
if (ret) {
test_err("couldn't find backref %d", ret);
- btrfs_free_path(path);
return ret;
}
btrfs_del_item(&trans, root, path);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index fff37c8d96a4..31edc93a383e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -5865,14 +5865,6 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
struct btrfs_inode *curr_inode = start_inode;
int ret = 0;
- /*
- * If we are logging a new name, as part of a link or rename operation,
- * don't bother logging new dentries, as we just want to log the names
- * of an inode and that any new parents exist.
- */
- if (ctx->logging_new_name)
- return 0;
-
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -6051,6 +6043,33 @@ static int conflicting_inode_is_dir(struct btrfs_root *root, u64 ino,
return ret;
}
+static bool can_log_conflicting_inode(const struct btrfs_trans_handle *trans,
+ const struct btrfs_inode *inode)
+{
+ if (!S_ISDIR(inode->vfs_inode.i_mode))
+ return true;
+
+ if (inode->last_unlink_trans < trans->transid)
+ return true;
+
+ /*
+ * If this is a directory and its last_unlink_trans is not from a past
+ * transaction, then we must fall back to a transaction commit in order
+ * to avoid getting a directory with 2 hard links after log replay.
+ *
+ * This happens if a directory A is renamed, moved from one parent
+ * directory to another one, a new file is created in the old parent
+ * directory with the old name of our directory A, the new file is
+ * fsynced, then the new file is moved to some other parent directory
+ * and fsynced again. This results in a log tree where we
+ * logged that directory A existed, with the INODE_REF item for the
+ * new location but without having logged its old parent inode, so
+ * that on log replay we add a new link for the new location but the
+ * old link remains, resulting in a link count of 2.
+ */
+ return false;
+}
+
static int add_conflicting_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
@@ -6154,6 +6173,11 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
return 0;
}
+ if (!can_log_conflicting_inode(trans, inode)) {
+ btrfs_add_delayed_iput(inode);
+ return BTRFS_LOG_FORCE_COMMIT;
+ }
+
btrfs_add_delayed_iput(inode);
ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
@@ -6218,6 +6242,12 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
break;
}
+ if (!can_log_conflicting_inode(trans, inode)) {
+ btrfs_add_delayed_iput(inode);
+ ret = BTRFS_LOG_FORCE_COMMIT;
+ break;
+ }
+
/*
* Always log the directory, we cannot make this
* conditional on need_log_inode() because the directory
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index ae1742a35e76..13c514684cfb 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -7128,6 +7128,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
fs_devices->seeding = true;
fs_devices->opened = 1;
+ list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
return fs_devices;
}
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 3e7def3d31c1..3d64a316ca31 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -3,9 +3,6 @@ config CEPH_FS
tristate "Ceph distributed file system"
depends on INET
select CEPH_LIB
- select CRC32
- select CRYPTO_AES
- select CRYPTO
select NETFS_SUPPORT
select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
default n
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index b1a8ff612c41..2f663972da99 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -18,6 +18,7 @@
#include "crypto.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>
+#include <trace/events/ceph.h>
/*
* Capability management
@@ -4452,6 +4453,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
session->s_mds, ceph_cap_op_name(op), vino.ino, vino.snap, inode,
seq, issue_seq, mseq);
+ trace_ceph_handle_caps(mdsc, session, op, &vino, ceph_inode(inode),
+ seq, issue_seq, mseq);
+
mutex_lock(&session->s_mutex);
if (!inode) {
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 1740047aef0f..7e4eab824dae 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -24,6 +24,7 @@
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
+#include <trace/events/ceph.h>
#define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
@@ -3288,6 +3289,8 @@ static void complete_request(struct ceph_mds_client *mdsc,
{
req->r_end_latency = ktime_get();
+ trace_ceph_mdsc_complete_request(mdsc, req);
+
if (req->r_callback)
req->r_callback(mdsc, req);
complete_all(&req->r_completion);
@@ -3419,6 +3422,8 @@ static int __send_request(struct ceph_mds_session *session,
{
int err;
+ trace_ceph_mdsc_send_request(session, req);
+
err = __prepare_send_request(session, req, drop_cap_releases);
if (!err) {
ceph_msg_get(req->r_request);
@@ -3470,6 +3475,8 @@ static void __do_request(struct ceph_mds_client *mdsc,
}
if (mdsc->mdsmap->m_epoch == 0) {
doutc(cl, "no mdsmap, waiting for map\n");
+ trace_ceph_mdsc_suspend_request(mdsc, session, req,
+ ceph_mdsc_suspend_reason_no_mdsmap);
list_add(&req->r_wait, &mdsc->waiting_for_map);
return;
}
@@ -3491,6 +3498,8 @@ static void __do_request(struct ceph_mds_client *mdsc,
goto finish;
}
doutc(cl, "no mds or not active, waiting for map\n");
+ trace_ceph_mdsc_suspend_request(mdsc, session, req,
+ ceph_mdsc_suspend_reason_no_active_mds);
list_add(&req->r_wait, &mdsc->waiting_for_map);
return;
}
@@ -3536,9 +3545,11 @@ static void __do_request(struct ceph_mds_client *mdsc,
* it to the mdsc queue.
*/
if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
- if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER))
+ if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER)) {
+ trace_ceph_mdsc_suspend_request(mdsc, session, req,
+ ceph_mdsc_suspend_reason_rejected);
list_add(&req->r_wait, &mdsc->waiting_for_map);
- else
+ } else
err = -EACCES;
goto out_session;
}
@@ -3552,6 +3563,8 @@ static void __do_request(struct ceph_mds_client *mdsc,
if (random)
req->r_resend_mds = mds;
}
+ trace_ceph_mdsc_suspend_request(mdsc, session, req,
+ ceph_mdsc_suspend_reason_session);
list_add(&req->r_wait, &session->s_waiting);
goto out_session;
}
@@ -3652,6 +3665,7 @@ static void __wake_requests(struct ceph_mds_client *mdsc,
list_del_init(&req->r_wait);
doutc(cl, " wake request %p tid %llu\n", req,
req->r_tid);
+ trace_ceph_mdsc_resume_request(mdsc, req);
__do_request(mdsc, req);
}
}
@@ -3678,6 +3692,7 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
req->r_session->s_mds == mds) {
doutc(cl, " kicking tid %llu\n", req->r_tid);
list_del_init(&req->r_wait);
+ trace_ceph_mdsc_resume_request(mdsc, req);
__do_request(mdsc, req);
}
}
@@ -3724,6 +3739,7 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
doutc(cl, "submit_request on %p for inode %p\n", req, dir);
mutex_lock(&mdsc->mutex);
__register_request(mdsc, req, dir);
+ trace_ceph_mdsc_submit_request(mdsc, req);
__do_request(mdsc, req);
err = req->r_err;
mutex_unlock(&mdsc->mutex);
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index c65f2b202b2b..521507ea8260 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -374,7 +374,7 @@ static int build_snap_context(struct ceph_mds_client *mdsc,
/* alloc new snap context */
err = -ENOMEM;
- if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
+ if ((size_t)num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
goto fail;
snapc = ceph_create_snap_context(num, GFP_NOFS);
if (!snapc)
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index f6bf24b5c683..7c1c1dac320d 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -30,6 +30,9 @@
#include <uapi/linux/magic.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/ceph.h>
+
static DEFINE_SPINLOCK(ceph_fsc_lock);
static LIST_HEAD(ceph_fsc_list);
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 2af424e200b3..630f3056658e 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -29,11 +29,6 @@ struct fat_cache_id {
int dcluster;
};
-static inline int fat_max_cache(struct inode *inode)
-{
- return FAT_MAX_CACHE;
-}
-
static struct kmem_cache *fat_cache_cachep;
static void init_once(void *foo)
@@ -145,7 +140,7 @@ static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
cache = fat_cache_merge(inode, new);
if (cache == NULL) {
- if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
+ if (MSDOS_I(inode)->nr_caches < FAT_MAX_CACHE) {
MSDOS_I(inode)->nr_caches++;
spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
diff --git a/fs/libfs.c b/fs/libfs.c
index 9264523be85c..591eb649ebba 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -346,22 +346,22 @@ void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry)
* User space expects the directory offset value of the replaced
* (new) directory entry to be unchanged after a rename.
*
- * Returns zero on success, a negative errno value on failure.
+ * Caller must have grabbed a slot for new_dentry in the maple_tree
+ * associated with new_dir, even if dentry is negative.
*/
-int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
+void simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
{
struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir);
struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir);
long new_offset = dentry2offset(new_dentry);
- simple_offset_remove(old_ctx, old_dentry);
+ if (WARN_ON(!new_offset))
+ return;
- if (new_offset) {
- offset_set(new_dentry, 0);
- return simple_offset_replace(new_ctx, old_dentry, new_offset);
- }
- return simple_offset_add(new_ctx, old_dentry);
+ simple_offset_remove(old_ctx, old_dentry);
+ offset_set(new_dentry, 0);
+ WARN_ON(simple_offset_replace(new_ctx, old_dentry, new_offset));
}
/**
@@ -388,31 +388,23 @@ int simple_offset_rename_exchange(struct inode *old_dir,
long new_index = dentry2offset(new_dentry);
int ret;
- simple_offset_remove(old_ctx, old_dentry);
- simple_offset_remove(new_ctx, new_dentry);
+ if (WARN_ON(!old_index || !new_index))
+ return -EINVAL;
- ret = simple_offset_replace(new_ctx, old_dentry, new_index);
- if (ret)
- goto out_restore;
+ ret = mtree_store(&new_ctx->mt, new_index, old_dentry, GFP_KERNEL);
+ if (WARN_ON(ret))
+ return ret;
- ret = simple_offset_replace(old_ctx, new_dentry, old_index);
- if (ret) {
- simple_offset_remove(new_ctx, old_dentry);
- goto out_restore;
+ ret = mtree_store(&old_ctx->mt, old_index, new_dentry, GFP_KERNEL);
+ if (WARN_ON(ret)) {
+ mtree_store(&new_ctx->mt, new_index, new_dentry, GFP_KERNEL);
+ return ret;
}
- ret = simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
- if (ret) {
- simple_offset_remove(new_ctx, old_dentry);
- simple_offset_remove(old_ctx, new_dentry);
- goto out_restore;
- }
+ offset_set(old_dentry, new_index);
+ offset_set(new_dentry, old_index);
+ simple_rename_exchange(old_dir, old_dentry, new_dir, new_dentry);
return 0;
-
-out_restore:
- (void)simple_offset_replace(old_ctx, old_dentry, old_index);
- (void)simple_offset_replace(new_ctx, new_dentry, new_index);
- return ret;
}
/**
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index b267ec580da9..58bf58b68955 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/quotaops.h>
@@ -1037,7 +1038,7 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
memset(bhs[i]->b_data, 0, osb->sb->s_blocksize);
eb = (struct ocfs2_extent_block *) bhs[i]->b_data;
/* Ok, setup the minimal stuff here. */
- strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
+ strscpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
eb->h_blkno = cpu_to_le64(first_blkno);
eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
eb->h_suballoc_slot =
@@ -3654,7 +3655,6 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
* So we use the new rightmost path.
*/
ocfs2_mv_path(right_path, left_path);
- left_path = NULL;
} else
ocfs2_complete_edge_insert(handle, left_path,
right_path, subtree_index);
@@ -6164,7 +6164,7 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
struct buffer_head *bh = NULL;
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
- unsigned int tl_count;
+ unsigned int tl_count, tl_used;
inode = ocfs2_get_system_file_inode(osb,
TRUNCATE_LOG_SYSTEM_INODE,
@@ -6185,8 +6185,10 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
di = (struct ocfs2_dinode *)bh->b_data;
tl = &di->id2.i_dealloc;
tl_count = le16_to_cpu(tl->tl_count);
+ tl_used = le16_to_cpu(tl->tl_used);
if (unlikely(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||
- tl_count == 0)) {
+ tl_count == 0 ||
+ tl_used > tl_count)) {
status = -EFSCORRUPTED;
iput(inode);
brelse(bh);
@@ -6744,7 +6746,7 @@ static int ocfs2_reuse_blk_from_dealloc(handle_t *handle,
/* We can't guarantee that buffer head is still cached, so
* populate the extent block again.
*/
- strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
+ strscpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
eb->h_blkno = cpu_to_le64(bf->free_blk);
eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
eb->h_suballoc_slot = cpu_to_le16(real_slot);
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 8f714406528d..701d27d908d4 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -434,7 +434,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
BUG_ON(buffer_jbd(bh));
ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
+ if (unlikely(ocfs2_emergency_state(osb))) {
ret = -EROFS;
mlog_errno(ret);
goto out;
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 2f61d39e4e50..6bc4e064ace4 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -4,6 +4,7 @@
*/
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/configfs.h>
@@ -590,7 +591,7 @@ static struct config_item *o2nm_node_group_make_item(struct config_group *group,
if (node == NULL)
return ERR_PTR(-ENOMEM);
- strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
+ strscpy(node->nd_name, name); /* use item.ci_namebuf instead? */
config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
spin_lock_init(&node->nd_lock);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 2785ff245e79..782afd9fa934 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -136,7 +136,7 @@ static void ocfs2_init_dir_trailer(struct inode *inode,
struct ocfs2_dir_block_trailer *trailer;
trailer = ocfs2_trailer_from_bh(bh, inode->i_sb);
- strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE);
+ strscpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE);
trailer->db_compat_rec_len =
cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer));
trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
@@ -2213,14 +2213,14 @@ static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode,
de->name_len = 1;
de->rec_len =
cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
- strcpy(de->name, ".");
+ strscpy(de->name, ".");
ocfs2_set_de_type(de, S_IFDIR);
de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1));
de->name_len = 2;
- strcpy(de->name, "..");
+ strscpy(de->name, "..");
ocfs2_set_de_type(de, S_IFDIR);
return de;
@@ -2378,7 +2378,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
memset(dx_root, 0, osb->sb->s_blocksize);
- strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
+ strscpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc);
dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
@@ -2454,7 +2454,7 @@ static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data;
memset(dx_leaf, 0, osb->sb->s_blocksize);
- strcpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE);
+ strscpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE);
dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation);
dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr);
dx_leaf->dl_list.de_count =
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 21d797ccccd0..732c61599159 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -179,7 +179,7 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
file->f_path.dentry->d_name.name,
(unsigned long long)datasync);
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return -EROFS;
err = file_write_and_wait_range(file, start, end);
@@ -209,7 +209,7 @@ int ocfs2_should_update_atime(struct inode *inode,
struct timespec64 now;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return 0;
if ((inode->i_flags & S_NOATIME) ||
@@ -1136,6 +1136,12 @@ int ocfs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
attr->ia_valid & ATTR_GID ?
from_kgid(&init_user_ns, attr->ia_gid) : 0);
+ status = ocfs2_emergency_state(osb);
+ if (unlikely(status)) {
+ mlog_errno(status);
+ goto bail;
+ }
+
/* ensuring we don't even attempt to truncate a symlink */
if (S_ISLNK(inode->i_mode))
attr->ia_valid &= ~ATTR_SIZE;
@@ -1943,7 +1949,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
handle_t *handle;
unsigned long long max_off = inode->i_sb->s_maxbytes;
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return -EROFS;
inode_lock(inode);
@@ -2707,7 +2713,7 @@ static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
return -EINVAL;
if (!ocfs2_refcount_tree(osb))
return -EOPNOTSUPP;
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return -EROFS;
/* Lock both files against IO */
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 8340525e5589..b5fcc2725a29 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -1442,6 +1442,14 @@ int ocfs2_validate_inode_block(struct super_block *sb,
goto bail;
}
+ if ((!di->i_links_count && !di->i_links_count_hi) || !di->i_mode) {
+ mlog(ML_ERROR, "Invalid dinode #%llu: "
+ "Corrupt state (nlink = %u or mode = %u) detected!\n",
+ (unsigned long long)bh->b_blocknr,
+ ocfs2_read_links_count(di), le16_to_cpu(di->i_mode));
+ rc = -EFSCORRUPTED;
+ goto bail;
+ }
/*
* Errors after here are fatal.
*/
@@ -1604,8 +1612,7 @@ static int ocfs2_filecheck_repair_inode_block(struct super_block *sb,
trace_ocfs2_filecheck_repair_inode_block(
(unsigned long long)bh->b_blocknr);
- if (ocfs2_is_hard_readonly(OCFS2_SB(sb)) ||
- ocfs2_is_soft_readonly(OCFS2_SB(sb))) {
+ if (unlikely(ocfs2_emergency_state(OCFS2_SB(sb)))) {
mlog(ML_ERROR,
"Filecheck: cannot repair dinode #%llu "
"on readonly filesystem\n",
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index ce978a2497d9..99637e34d9da 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -909,7 +909,7 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
struct buffer_head *di_bh = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return -EROFS;
inode_lock(inode);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index c90b254da75e..4ec6dbed65a8 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -23,6 +23,7 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/highmem.h>
#include <linux/quotaops.h>
#include <linux/iversion.h>
@@ -568,7 +569,7 @@ static int __ocfs2_mknod_locked(struct inode *dir,
ocfs2_set_links_count(fe, inode->i_nlink);
fe->i_last_eb_blk = 0;
- strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
+ strscpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
fe->i_flags |= cpu_to_le32(OCFS2_VALID_FL);
ktime_get_coarse_real_ts64(&ts);
fe->i_atime = fe->i_ctime = fe->i_mtime =
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 6aaa94c554c1..7b50e03dfa66 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -680,6 +680,24 @@ static inline int ocfs2_is_soft_readonly(struct ocfs2_super *osb)
return ret;
}
+static inline int ocfs2_is_readonly(struct ocfs2_super *osb)
+{
+ int ret;
+ spin_lock(&osb->osb_lock);
+ ret = osb->osb_flags & (OCFS2_OSB_SOFT_RO | OCFS2_OSB_HARD_RO);
+ spin_unlock(&osb->osb_lock);
+
+ return ret;
+}
+
+static inline int ocfs2_emergency_state(struct ocfs2_super *osb)
+{
+ if (ocfs2_is_readonly(osb))
+ return -EROFS;
+
+ return 0;
+}
+
static inline int ocfs2_clusterinfo_valid(struct ocfs2_super *osb)
{
return (osb->s_feature_incompat &
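The new helpers fold the previous pair of locked flag reads into a single one,
so the call sites converted elsewhere in this diff shrink from:

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

to:

	status = ocfs2_emergency_state(osb);
	if (unlikely(status))
		return status;	/* -EROFS, or 0 when writable */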
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index b0733c08ed13..ac3ec2c21119 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -276,7 +276,7 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters)
u32 first_new_cluster;
u64 lgd_blkno;
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return -EROFS;
if (new_clusters < 0)
@@ -466,7 +466,7 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
u16 cl_bpc;
u64 bg_ptr;
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return -EROFS;
main_bm_inode = ocfs2_get_system_file_inode(osb,
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index a28c127b9934..fca2fd07c881 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kmod.h>
@@ -670,7 +671,7 @@ static int __init ocfs2_stack_glue_init(void)
{
int ret;
- strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);
+ strscpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);
ocfs2_table_header = register_sysctl("fs/ocfs2/nm", ocfs2_nm_table);
if (!ocfs2_table_header) {
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 6ac4dcd54588..8e6e5235b30c 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/highmem.h>
#include <cluster/masklog.h>
@@ -372,7 +373,7 @@ static int ocfs2_block_group_fill(handle_t *handle,
}
memset(bg, 0, sb->s_blocksize);
- strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
+ strscpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
bg->bg_generation = cpu_to_le32(osb->fs_generation);
bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb, 1,
osb->s_feature_incompat));
@@ -1992,6 +1993,16 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
}
cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
+ if (!le16_to_cpu(cl->cl_next_free_rec) ||
+ le16_to_cpu(cl->cl_next_free_rec) > le16_to_cpu(cl->cl_count)) {
+ status = ocfs2_error(ac->ac_inode->i_sb,
+ "Chain allocator dinode %llu has invalid next "
+ "free chain record %u, but only %u total\n",
+ (unsigned long long)le64_to_cpu(fe->i_blkno),
+ le16_to_cpu(cl->cl_next_free_rec),
+ le16_to_cpu(cl->cl_count));
+ goto bail;
+ }
victim = ocfs2_find_victim_chain(cl);
ac->ac_chain = victim;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 2c7ba1480f7a..3cbafac50cd1 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -2487,7 +2487,7 @@ static int ocfs2_handle_error(struct super_block *sb)
rv = -EIO;
} else { /* default option */
rv = -EROFS;
- if (sb_rdonly(sb) && (ocfs2_is_soft_readonly(osb) || ocfs2_is_hard_readonly(osb)))
+ if (sb_rdonly(sb) && ocfs2_emergency_state(osb))
return rv;
pr_crit("OCFS2: File system is now read-only.\n");
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index dc1761e84814..1b21fbc16d73 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -49,9 +49,13 @@
#include "ocfs2_trace.h"
struct ocfs2_xattr_def_value_root {
- struct ocfs2_xattr_value_root xv;
- struct ocfs2_extent_rec er;
+ /* Must be last as it ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct ocfs2_xattr_value_root, xv, xr_list.l_recs,
+ struct ocfs2_extent_rec er;
+ );
};
+static_assert(offsetof(struct ocfs2_xattr_def_value_root, xv.xr_list.l_recs) ==
+ offsetof(struct ocfs2_xattr_def_value_root, er));
struct ocfs2_xattr_bucket {
/* The inode these xattrs are associated with */
@@ -971,13 +975,39 @@ static int ocfs2_xattr_ibody_list(struct inode *inode,
struct ocfs2_xattr_header *header = NULL;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
int ret = 0;
+ u16 xattr_count;
+ size_t max_entries;
+ u16 inline_size;
if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
return ret;
+ inline_size = le16_to_cpu(di->i_xattr_inline_size);
+
+ /* Validate inline size is reasonable */
+ if (inline_size > inode->i_sb->s_blocksize ||
+ inline_size < sizeof(struct ocfs2_xattr_header)) {
+ ocfs2_error(inode->i_sb,
+ "Invalid xattr inline size %u in inode %llu\n",
+ inline_size,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ return -EFSCORRUPTED;
+ }
+
header = (struct ocfs2_xattr_header *)
- ((void *)di + inode->i_sb->s_blocksize -
- le16_to_cpu(di->i_xattr_inline_size));
+ ((void *)di + inode->i_sb->s_blocksize - inline_size);
+
+ xattr_count = le16_to_cpu(header->xh_count);
+ max_entries = (inline_size - sizeof(struct ocfs2_xattr_header)) /
+ sizeof(struct ocfs2_xattr_entry);
+
+ if (xattr_count > max_entries) {
+ ocfs2_error(inode->i_sb,
+ "xattr entry count %u exceeds maximum %zu in inode %llu\n",
+ xattr_count, max_entries,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ return -EFSCORRUPTED;
+ }
ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size);
diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h
index 384b19177e1c..ee4c2726771a 100644
--- a/fs/smb/common/smbdirect/smbdirect_socket.h
+++ b/fs/smb/common/smbdirect/smbdirect_socket.h
@@ -133,6 +133,14 @@ struct smbdirect_socket {
struct smbdirect_socket_parameters parameters;
/*
+ * The state for connect/negotiation
+ */
+ struct {
+ spinlock_t lock;
+ struct work_struct work;
+ } connect;
+
+ /*
* The state for keepalive and timeout handling
*/
struct {
@@ -353,6 +361,10 @@ static __always_inline void smbdirect_socket_init(struct smbdirect_socket *sc)
INIT_WORK(&sc->disconnect_work, __smbdirect_socket_disabled_work);
disable_work_sync(&sc->disconnect_work);
+ spin_lock_init(&sc->connect.lock);
+ INIT_WORK(&sc->connect.work, __smbdirect_socket_disabled_work);
+ disable_work_sync(&sc->connect.work);
+
INIT_WORK(&sc->idle.immediate_work, __smbdirect_socket_disabled_work);
disable_work_sync(&sc->idle.immediate_work);
INIT_DELAYED_WORK(&sc->idle.timer_work, __smbdirect_socket_disabled_work);
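The INIT_WORK-to-a-stub plus disable_work_sync() pattern above leaves each work
item inert (queueing it is a no-op) until its owner installs the real handler
and re-enables it. A hedged sketch of that arming step; the handler and
workqueue names are illustrative, not from this patch:

	/* Arm the connect work that smbdirect_socket_init() left disabled. */
	INIT_WORK(&sc->connect.work, real_connect_handler);	/* hypothetical */
	enable_work(&sc->connect.work);		/* undo the init-time disable */
	queue_work(wq, &sc->connect.work);	/* wq: the owner's workqueue */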
diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
index 1c181ef99929..7d880ff34402 100644
--- a/fs/smb/server/mgmt/user_session.c
+++ b/fs/smb/server/mgmt/user_session.c
@@ -325,8 +325,10 @@ struct ksmbd_session *ksmbd_session_lookup_all(struct ksmbd_conn *conn,
sess = ksmbd_session_lookup(conn, id);
if (!sess && conn->binding)
sess = ksmbd_session_lookup_slowpath(id);
- if (sess && sess->state != SMB2_SESSION_VALID)
+ if (sess && sess->state != SMB2_SESSION_VALID) {
+ ksmbd_user_session_put(sess);
sess = NULL;
+ }
return sess;
}
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 27f87a13f20a..8aa483800014 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -2363,7 +2363,7 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
int rc = 0;
unsigned int next = 0;
- if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
+ if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + 1 +
le16_to_cpu(eabuf->EaValueLength))
return -EINVAL;
@@ -2440,7 +2440,7 @@ next:
break;
}
- if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
+ if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + 1 +
le16_to_cpu(eabuf->EaValueLength)) {
rc = -EINVAL;
break;
diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
index 5aa7a66334d9..05598d994a68 100644
--- a/fs/smb/server/smbacl.c
+++ b/fs/smb/server/smbacl.c
@@ -1307,9 +1307,6 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
granted |= le32_to_cpu(ace->access_req);
ace = (struct smb_ace *)((char *)ace + le16_to_cpu(ace->size));
}
-
- if (!pdacl->num_aces)
- granted = GENERIC_ALL_FLAGS;
}
if (!uid)
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index 4e7ab8d9314f..f585359684d4 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -242,6 +242,7 @@ static void smb_direct_disconnect_rdma_work(struct work_struct *work)
* disable[_delayed]_work_sync()
*/
disable_work(&sc->disconnect_work);
+ disable_work(&sc->connect.work);
disable_work(&sc->recv_io.posted.refill_work);
disable_delayed_work(&sc->idle.timer_work);
disable_work(&sc->idle.immediate_work);
@@ -297,6 +298,7 @@ smb_direct_disconnect_rdma_connection(struct smbdirect_socket *sc)
* not queued again but here we don't block and avoid
* disable[_delayed]_work_sync()
*/
+ disable_work(&sc->connect.work);
disable_work(&sc->recv_io.posted.refill_work);
disable_work(&sc->idle.immediate_work);
disable_delayed_work(&sc->idle.timer_work);
@@ -467,6 +469,7 @@ static void free_transport(struct smb_direct_transport *t)
*/
smb_direct_disconnect_wake_up_all(sc);
+ disable_work_sync(&sc->connect.work);
disable_work_sync(&sc->recv_io.posted.refill_work);
disable_delayed_work_sync(&sc->idle.timer_work);
disable_work_sync(&sc->idle.immediate_work);
@@ -635,28 +638,8 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
switch (sc->recv_io.expected) {
case SMBDIRECT_EXPECT_NEGOTIATE_REQ:
- if (wc->byte_len < sizeof(struct smbdirect_negotiate_req)) {
- put_recvmsg(sc, recvmsg);
- smb_direct_disconnect_rdma_connection(sc);
- return;
- }
- sc->recv_io.reassembly.full_packet_received = true;
- /*
- * Some drivers (at least mlx5_ib) might post a
- * recv completion before RDMA_CM_EVENT_ESTABLISHED,
- * we need to adjust our expectation in that case.
- */
- if (!sc->first_error && sc->status == SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING)
- sc->status = SMBDIRECT_SOCKET_NEGOTIATE_NEEDED;
- if (SMBDIRECT_CHECK_STATUS_WARN(sc, SMBDIRECT_SOCKET_NEGOTIATE_NEEDED)) {
- put_recvmsg(sc, recvmsg);
- smb_direct_disconnect_rdma_connection(sc);
- return;
- }
- sc->status = SMBDIRECT_SOCKET_NEGOTIATE_RUNNING;
- enqueue_reassembly(sc, recvmsg, 0);
- wake_up(&sc->status_wait);
- return;
+ /* see smb_direct_negotiate_recv_done */
+ break;
case SMBDIRECT_EXPECT_DATA_TRANSFER: {
struct smbdirect_data_transfer *data_transfer =
(struct smbdirect_data_transfer *)recvmsg->packet;
@@ -742,6 +725,126 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
smb_direct_disconnect_rdma_connection(sc);
}
+static void smb_direct_negotiate_recv_work(struct work_struct *work);
+
+static void smb_direct_negotiate_recv_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct smbdirect_recv_io *recv_io =
+ container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe);
+ struct smbdirect_socket *sc = recv_io->socket;
+ unsigned long flags;
+
+ /*
+ * Reset the common recv_done handler for later reuse.
+ */
+ recv_io->cqe.done = recv_done;
+
+ if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
+ put_recvmsg(sc, recv_io);
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ pr_err("Negotiate Recv error. status='%s (%d)' opcode=%d\n",
+ ib_wc_status_msg(wc->status), wc->status,
+ wc->opcode);
+ smb_direct_disconnect_rdma_connection(sc);
+ }
+ return;
+ }
+
+ ksmbd_debug(RDMA, "Negotiate Recv completed. status='%s (%d)', opcode=%d\n",
+ ib_wc_status_msg(wc->status), wc->status,
+ wc->opcode);
+
+ ib_dma_sync_single_for_cpu(sc->ib.dev,
+ recv_io->sge.addr,
+ recv_io->sge.length,
+ DMA_FROM_DEVICE);
+
+ /*
+ * This is an internal error!
+ */
+ if (WARN_ON_ONCE(sc->recv_io.expected != SMBDIRECT_EXPECT_NEGOTIATE_REQ)) {
+ put_recvmsg(sc, recv_io);
+ smb_direct_disconnect_rdma_connection(sc);
+ return;
+ }
+
+ /*
+ * Don't reset the timer to the keepalive interval here;
+ * this will be done in smb_direct_negotiate_recv_work().
+ */
+
+ /*
+ * Only remember the recv_io if it has enough bytes;
+ * this gives smb_direct_negotiate_recv_work() enough
+ * information to disconnect if the request was not
+ * valid.
+ */
+ sc->recv_io.reassembly.full_packet_received = true;
+ if (wc->byte_len >= sizeof(struct smbdirect_negotiate_req))
+ enqueue_reassembly(sc, recv_io, 0);
+ else
+ put_recvmsg(sc, recv_io);
+
+ /*
+ * Some drivers (at least mlx5_ib and irdma in RoCE mode)
+ * might post a recv completion before RDMA_CM_EVENT_ESTABLISHED,
+ * so we need to adjust our expectation in that case.
+ *
+ * So we defer further processing of the negotiation
+ * to smb_direct_negotiate_recv_work().
+ *
+ * If we are already in SMBDIRECT_SOCKET_NEGOTIATE_NEEDED
+ * we queue the work directly; otherwise
+ * smb_direct_cm_handler() will do it when
+ * RDMA_CM_EVENT_ESTABLISHED arrives.
+ */
+ spin_lock_irqsave(&sc->connect.lock, flags);
+ if (!sc->first_error) {
+ INIT_WORK(&sc->connect.work, smb_direct_negotiate_recv_work);
+ if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_NEEDED)
+ queue_work(sc->workqueue, &sc->connect.work);
+ }
+ spin_unlock_irqrestore(&sc->connect.lock, flags);
+}
+
+static void smb_direct_negotiate_recv_work(struct work_struct *work)
+{
+ struct smbdirect_socket *sc =
+ container_of(work, struct smbdirect_socket, connect.work);
+ const struct smbdirect_socket_parameters *sp = &sc->parameters;
+ struct smbdirect_recv_io *recv_io;
+
+ if (sc->first_error)
+ return;
+
+ ksmbd_debug(RDMA, "Negotiate Recv Work running\n");
+
+ /*
+ * Reset timer to the keepalive interval in
+ * order to trigger our next keepalive message.
+ */
+ sc->idle.keepalive = SMBDIRECT_KEEPALIVE_NONE;
+ mod_delayed_work(sc->workqueue, &sc->idle.timer_work,
+ msecs_to_jiffies(sp->keepalive_interval_msec));
+
+ /*
+ * If smb_direct_negotiate_recv_done() detected an
+ * invalid request, we want to disconnect.
+ */
+ recv_io = get_first_reassembly(sc);
+ if (!recv_io) {
+ smb_direct_disconnect_rdma_connection(sc);
+ return;
+ }
+
+ if (SMBDIRECT_CHECK_STATUS_WARN(sc, SMBDIRECT_SOCKET_NEGOTIATE_NEEDED)) {
+ smb_direct_disconnect_rdma_connection(sc);
+ return;
+ }
+ sc->status = SMBDIRECT_SOCKET_NEGOTIATE_RUNNING;
+ wake_up(&sc->status_wait);
+}
+
static int smb_direct_post_recv(struct smbdirect_socket *sc,
struct smbdirect_recv_io *recvmsg)
{
@@ -758,7 +861,6 @@ static int smb_direct_post_recv(struct smbdirect_socket *sc,
return ret;
recvmsg->sge.length = sp->max_recv_size;
recvmsg->sge.lkey = sc->ib.pd->local_dma_lkey;
- recvmsg->cqe.done = recv_done;
wr.wr_cqe = &recvmsg->cqe;
wr.next = NULL;
@@ -1732,6 +1834,7 @@ static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
struct smbdirect_socket *sc = cm_id->context;
+ unsigned long flags;
ksmbd_debug(RDMA, "RDMA CM event. cm_id=%p event=%s (%d)\n",
cm_id, rdma_event_msg(event->event), event->event);
@@ -1739,18 +1842,27 @@ static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
switch (event->event) {
case RDMA_CM_EVENT_ESTABLISHED: {
/*
- * Some drivers (at least mlx5_ib) might post a
- * recv completion before RDMA_CM_EVENT_ESTABLISHED,
+ * Some drivers (at least mlx5_ib and irdma in RoCE mode)
+ * might post a recv completion before RDMA_CM_EVENT_ESTABLISHED, so
* we need to adjust our expectation in that case.
*
- * As we already started the negotiation, we just
- * ignore RDMA_CM_EVENT_ESTABLISHED here.
+ * If smb_direct_negotiate_recv_done() was called first,
+ * it initialized sc->connect.work and left it for us to
+ * queue, so that we move into
+ * SMBDIRECT_SOCKET_NEGOTIATE_NEEDED before
+ * smb_direct_negotiate_recv_work() runs.
+ *
+ * If smb_direct_negotiate_recv_done() didn't happen
+ * yet, sc->connect.work is still disabled and
+ * queue_work() is a no-op.
*/
- if (!sc->first_error && sc->status > SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING)
- break;
if (SMBDIRECT_CHECK_STATUS_DISCONNECT(sc, SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING))
break;
sc->status = SMBDIRECT_SOCKET_NEGOTIATE_NEEDED;
+ spin_lock_irqsave(&sc->connect.lock, flags);
+ if (!sc->first_error)
+ queue_work(sc->workqueue, &sc->connect.work);
+ spin_unlock_irqrestore(&sc->connect.lock, flags);
wake_up(&sc->status_wait);
break;
}
@@ -1921,6 +2033,7 @@ static int smb_direct_prepare_negotiation(struct smbdirect_socket *sc)
recvmsg = get_free_recvmsg(sc);
if (!recvmsg)
return -ENOMEM;
+ recvmsg->cqe.done = smb_direct_negotiate_recv_done;
ret = smb_direct_post_recv(sc, recvmsg);
if (ret) {
@@ -2339,6 +2452,7 @@ respond:
static int smb_direct_connect(struct smbdirect_socket *sc)
{
+ struct smbdirect_recv_io *recv_io;
int ret;
ret = smb_direct_init_params(sc);
@@ -2353,6 +2467,9 @@ static int smb_direct_connect(struct smbdirect_socket *sc)
return ret;
}
+ list_for_each_entry(recv_io, &sc->recv_io.free.list, list)
+ recv_io->cqe.done = recv_done;
+
ret = smb_direct_create_qpair(sc);
if (ret) {
pr_err("Can't accept RDMA client: %d\n", ret);
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 98b0eb966d91..f891344bd76b 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -702,7 +702,7 @@ retry:
rd.old_parent = NULL;
rd.new_parent = new_path.dentry;
rd.flags = flags;
- rd.delegated_inode = NULL,
+ rd.delegated_inode = NULL;
err = start_renaming_dentry(&rd, lookup_flags, old_child, &new_last);
if (err)
goto out_drop_write;
diff --git a/include/linux/args.h b/include/linux/args.h
index 2e8e65d975c7..0562dc51435e 100644
--- a/include/linux/args.h
+++ b/include/linux/args.h
@@ -6,9 +6,9 @@
/*
* How do these macros work?
*
- * In __COUNT_ARGS() _0 to _12 are just placeholders from the start
+ * In __COUNT_ARGS() _0 to _15 are just placeholders from the start
* in order to make sure _n is positioned over the correct number
- * from 12 to 0 (depending on X, which is a variadic argument list).
+ * from 15 to 0 (depending on X, which is a variadic argument list).
* They serve no purpose other than occupying a position. Since each
* macro parameter must have a distinct identifier, those identifiers
* are as good as any.
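
A reduced, runnable sketch of the same counting trick (placeholders for up to 4 arguments instead of 15; `, ##X` is the GNU extension the kernel relies on to make the zero-argument case work):

#include <stdio.h>

#define __COUNT_ARGS(_0, _1, _2, _3, _4, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 4, 3, 2, 1, 0)

int main(void)
{
	/* each argument shifts the number list right; _n lands on the count */
	printf("%d %d %d\n", COUNT_ARGS(), COUNT_ARGS(a), COUNT_ARGS(a, b, c));
	/* prints: 0 1 3 */
	return 0;
}
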
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 04ceeca12a0d..f5c9cf28c4dc 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3247,7 +3247,7 @@ struct offset_ctx {
void simple_offset_init(struct offset_ctx *octx);
int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry);
void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry);
-int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
+void simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry);
int simple_offset_rename_exchange(struct inode *old_dir,
struct dentry *old_dentry,
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 952d3c8dd6b7..62f81bbeb490 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -730,22 +730,6 @@ static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsig
}
#endif
-static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- struct irq_domain_info info = {
- .fwnode = of_fwnode_handle(of_node),
- .hwirq_max = ~0U,
- .ops = ops,
- .host_data = host_data,
- };
- struct irq_domain *d;
-
- d = irq_domain_instantiate(&info);
- return IS_ERR(d) ? NULL : d;
-}
-
static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
unsigned int size,
const struct irq_domain_ops *ops,
diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h
index c92167ff8a7f..a36b472627de 100644
--- a/include/linux/rseq_entry.h
+++ b/include/linux/rseq_entry.h
@@ -596,7 +596,7 @@ static __always_inline void rseq_exit_to_user_mode_legacy(void)
void __rseq_debug_syscall_return(struct pt_regs *regs);
-static inline void rseq_debug_syscall_return(struct pt_regs *regs)
+static __always_inline void rseq_debug_syscall_return(struct pt_regs *regs)
{
if (static_branch_unlikely(&rseq_debug_enabled))
__rseq_debug_syscall_return(regs);
diff --git a/include/trace/events/ceph.h b/include/trace/events/ceph.h
new file mode 100644
index 000000000000..08cb0659fbfc
--- /dev/null
+++ b/include/trace/events/ceph.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Ceph filesystem support module tracepoints
+ *
+ * Copyright (C) 2025 IONOS SE. All Rights Reserved.
+ * Written by Max Kellermann (max.kellermann@ionos.com)
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ceph
+
+#if !defined(_TRACE_CEPH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CEPH_H
+
+#include <linux/tracepoint.h>
+
+#define ceph_mdsc_suspend_reasons \
+ EM(ceph_mdsc_suspend_reason_no_mdsmap, "no-mdsmap") \
+ EM(ceph_mdsc_suspend_reason_no_active_mds, "no-active-mds") \
+ EM(ceph_mdsc_suspend_reason_rejected, "rejected") \
+ E_(ceph_mdsc_suspend_reason_session, "session")
+
+#ifndef __CEPH_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __CEPH_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum ceph_mdsc_suspend_reason { ceph_mdsc_suspend_reasons } __mode(byte);
+
+#endif
+
+/*
+ * Export enum symbols to userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+ceph_mdsc_suspend_reasons;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
+
+TRACE_EVENT(ceph_mdsc_submit_request,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req),
+
+ TP_ARGS(mdsc, req),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ __field(u64, ino)
+ __field(u64, snap)
+ ),
+
+ TP_fast_assign(
+ struct inode *inode;
+
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+
+ inode = req->r_inode;
+ if (inode == NULL && req->r_dentry)
+ inode = d_inode(req->r_dentry);
+
+ if (inode) {
+ __entry->ino = ceph_ino(inode);
+ __entry->snap = ceph_snap(inode);
+ } else {
+ __entry->ino = __entry->snap = 0;
+ }
+ ),
+
+ TP_printk("R=%llu op=%s ino=%llx,%llx",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op),
+ __entry->ino, __entry->snap)
+);
+
+TRACE_EVENT(ceph_mdsc_suspend_request,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ struct ceph_mds_request *req,
+ enum ceph_mdsc_suspend_reason reason),
+
+ TP_ARGS(mdsc, session, req, reason),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ __field(int, mds)
+ __field(enum ceph_mdsc_suspend_reason, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+ __entry->mds = session ? session->s_mds : -1;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("R=%llu op=%s reason=%s",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op),
+ __print_symbolic(__entry->reason, ceph_mdsc_suspend_reasons))
+);
+
+TRACE_EVENT(ceph_mdsc_resume_request,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req),
+
+ TP_ARGS(mdsc, req),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ ),
+
+ TP_fast_assign(
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+ ),
+
+ TP_printk("R=%llu op=%s",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op))
+);
+
+TRACE_EVENT(ceph_mdsc_send_request,
+ TP_PROTO(struct ceph_mds_session *session,
+ struct ceph_mds_request *req),
+
+ TP_ARGS(session, req),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ __field(int, mds)
+ ),
+
+ TP_fast_assign(
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+ __entry->mds = session->s_mds;
+ ),
+
+ TP_printk("R=%llu op=%s mds=%d",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op),
+ __entry->mds)
+);
+
+TRACE_EVENT(ceph_mdsc_complete_request,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req),
+
+ TP_ARGS(mdsc, req),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ __field(int, err)
+ __field(unsigned long, latency_ns)
+ ),
+
+ TP_fast_assign(
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+ __entry->err = req->r_err;
+ __entry->latency_ns = req->r_end_latency - req->r_start_latency;
+ ),
+
+ TP_printk("R=%llu op=%s err=%d latency_ns=%lu",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op),
+ __entry->err,
+ __entry->latency_ns)
+);
+
+TRACE_EVENT(ceph_handle_caps,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ int op,
+ const struct ceph_vino *vino,
+ struct ceph_inode_info *inode,
+ u32 seq, u32 mseq, u32 issue_seq),
+
+ TP_ARGS(mdsc, session, op, vino, inode, seq, mseq, issue_seq),
+
+ TP_STRUCT__entry(
+ __field(int, mds)
+ __field(int, op)
+ __field(u64, ino)
+ __field(u64, snap)
+ __field(u32, seq)
+ __field(u32, mseq)
+ __field(u32, issue_seq)
+ ),
+
+ TP_fast_assign(
+ __entry->mds = session->s_mds;
+ __entry->op = op;
+ __entry->ino = vino->ino;
+ __entry->snap = vino->snap;
+ __entry->seq = seq;
+ __entry->mseq = mseq;
+ __entry->issue_seq = issue_seq;
+ ),
+
+ TP_printk("mds=%d op=%s vino=%llx.%llx seq=%u iseq=%u mseq=%u",
+ __entry->mds,
+ ceph_cap_op_name(__entry->op),
+ __entry->ino,
+ __entry->snap,
+ __entry->seq,
+ __entry->issue_seq,
+ __entry->mseq)
+);
+
+#undef EM
+#undef E_
+#endif /* _TRACE_CEPH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
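
The EM()/E_() dance in this header is the standard X-macro pattern for tracepoint enums: one list is expanded several times, first into enum constants, then into TRACE_DEFINE_ENUM() exports, then into the symbolic-name table consumed by __print_symbolic(). A standalone sketch of the same trick outside the tracing machinery:

#include <stdio.h>

#define SUSPEND_REASONS \
	EM(reason_no_mdsmap,     "no-mdsmap") \
	EM(reason_no_active_mds, "no-active-mds") \
	E_(reason_session,       "session")

/* first expansion: enum values */
#define EM(a, b) a,
#define E_(a, b) a
enum suspend_reason { SUSPEND_REASONS };
#undef EM
#undef E_

/* second expansion: value-to-string table */
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
static const struct {
	enum suspend_reason reason;
	const char *name;
} reason_names[] = { SUSPEND_REASONS };
#undef EM
#undef E_

int main(void)
{
	for (unsigned int i = 0;
	     i < sizeof(reason_names) / sizeof(reason_names[0]); i++)
		printf("%d=%s\n", reason_names[i].reason, reason_names[i].name);
	return 0;
}
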
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index a198e40c799b..150e5871e66f 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -71,7 +71,6 @@ __bpf_kfunc void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
{
struct llist_head *lhead;
struct css_rstat_cpu *rstatc;
- struct css_rstat_cpu __percpu *rstatc_pcpu;
struct llist_node *self;
/*
@@ -104,18 +103,22 @@ __bpf_kfunc void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
/*
* This function can be reentered by irqs and nmis for the same cgroup
* and may try to insert the same per-cpu lnode into the llist. Note
- * that llist_add() does not protect against such scenarios.
+ * that llist_add() does not protect against such scenarios. In addition
+ * this same per-cpu lnode can be modified through init_llist_node()
+ * from css_rstat_flush() running on a different CPU.
*
* To protect against such stacked contexts of irqs/nmis, we use the
* fact that lnode points to itself when not on a list and then use
- * this_cpu_cmpxchg() to atomically set to NULL to select the winner
+ * try_cmpxchg() to atomically set to NULL to select the winner
* which will call llist_add(). The losers can assume the insertion is
* successful and the winner will eventually add the per-cpu lnode to
* the llist.
+ *
+ * Please note that we cannot use this_cpu_cmpxchg() here as on some
+ * archs it is not safe against modifications from multiple CPUs.
*/
self = &rstatc->lnode;
- rstatc_pcpu = css->rstat_cpu;
- if (this_cpu_cmpxchg(rstatc_pcpu->lnode.next, self, NULL) != self)
+ if (!try_cmpxchg(&rstatc->lnode.next, &self, NULL))
return;
lhead = ss_lhead_cpu(css->ss, cpu);
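
The switch to try_cmpxchg() matters because the lnode can now be written from another CPU (init_llist_node() in the flush path), and per-cpu cmpxchg is not guaranteed atomic against remote writers on all architectures. A userspace sketch of the underlying claim protocol, where an off-list node points to itself and only the compare-exchange winner goes on to do the list insertion:

#include <stdatomic.h>
#include <stdio.h>

struct lnode { _Atomic(struct lnode *) next; };

static int try_claim(struct lnode *n)
{
	struct lnode *self = n;

	/* off-list nodes satisfy n->next == n; the winner sets it to NULL */
	return atomic_compare_exchange_strong(&n->next, &self, NULL);
}

int main(void)
{
	struct lnode n;

	atomic_store(&n.next, &n);	/* init_llist_node() analogue */
	printf("first claim: %d\n", try_claim(&n));	/* 1: winner inserts */
	printf("second claim: %d\n", try_claim(&n));	/* 0: loser backs off */
	return 0;
}
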
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b674fdf96208..8df2d773fe3b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -249,6 +249,14 @@ err:
return ret;
}
+/*
+ * The former STARTING/DYING states ran with IRQs disabled and must not fail.
+ */
+static bool cpuhp_is_atomic_state(enum cpuhp_state state)
+{
+ return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
+}
+
#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
@@ -271,14 +279,6 @@ static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
complete(done);
}
-/*
- * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
- */
-static bool cpuhp_is_atomic_state(enum cpuhp_state state)
-{
- return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
-}
-
/* Synchronization state management */
enum cpuhp_sync_state {
SYNC_STATE_DEAD,
@@ -2364,7 +2364,14 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
else
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
- ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+ if (cpuhp_is_atomic_state(state)) {
+ guard(irqsave)();
+ ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+ /* STARTING/DYING must not fail! */
+ WARN_ON_ONCE(ret);
+ } else {
+ ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+ }
#endif
BUG_ON(ret && !bringup);
return ret;
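
With this change the !CONFIG_SMP build gains the guarantee the SMP path already had: callbacks in the atomic (STARTING/DYING) range run with interrupts disabled, and a failure is a bug. The predicate itself is just a half-open range test; a sketch with stand-in state numbers (the real enum lives in include/linux/cpuhotplug.h):

#include <stdbool.h>
#include <stdio.h>

enum state { STATE_OFFLINE, AP_IDLE_DEAD, AP_DYING, AP_ONLINE, STATE_ONLINE };

static bool is_atomic_state(enum state s)
{
	/* [AP_IDLE_DEAD, AP_ONLINE): runs with IRQs off, must not fail */
	return AP_IDLE_DEAD <= s && s < AP_ONLINE;
}

int main(void)
{
	printf("%d %d %d\n", is_atomic_state(STATE_OFFLINE),
	       is_atomic_state(AP_DYING), is_atomic_state(AP_ONLINE));
	/* prints: 0 1 0 */
	return 0;
}
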
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ece716879cbc..dad0d3d2e85f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2317,8 +2317,6 @@ out:
perf_event__header_size(leader);
}
-static void sync_child_event(struct perf_event *child_event);
-
static void perf_child_detach(struct perf_event *event)
{
struct perf_event *parent_event = event->parent;
@@ -2337,7 +2335,6 @@ static void perf_child_detach(struct perf_event *event)
lockdep_assert_held(&parent_event->child_mutex);
*/
- sync_child_event(event);
list_del_init(&event->child_list);
}
@@ -4588,6 +4585,7 @@ out:
static void perf_remove_from_owner(struct perf_event *event);
static void perf_event_exit_event(struct perf_event *event,
struct perf_event_context *ctx,
+ struct task_struct *task,
bool revoke);
/*
@@ -4615,7 +4613,7 @@ static void perf_event_remove_on_exec(struct perf_event_context *ctx)
modified = true;
- perf_event_exit_event(event, ctx, false);
+ perf_event_exit_event(event, ctx, ctx->task, false);
}
raw_spin_lock_irqsave(&ctx->lock, flags);
@@ -12518,7 +12516,7 @@ static void __pmu_detach_event(struct pmu *pmu, struct perf_event *event,
/*
* De-schedule the event and mark it REVOKED.
*/
- perf_event_exit_event(event, ctx, true);
+ perf_event_exit_event(event, ctx, ctx->task, true);
/*
* All _free_event() bits that rely on event->pmu:
@@ -14075,14 +14073,13 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
-static void sync_child_event(struct perf_event *child_event)
+static void sync_child_event(struct perf_event *child_event,
+ struct task_struct *task)
{
struct perf_event *parent_event = child_event->parent;
u64 child_val;
if (child_event->attr.inherit_stat) {
- struct task_struct *task = child_event->ctx->task;
-
if (task && task != TASK_TOMBSTONE)
perf_event_read_event(child_event, task);
}
@@ -14101,7 +14098,9 @@ static void sync_child_event(struct perf_event *child_event)
static void
perf_event_exit_event(struct perf_event *event,
- struct perf_event_context *ctx, bool revoke)
+ struct perf_event_context *ctx,
+ struct task_struct *task,
+ bool revoke)
{
struct perf_event *parent_event = event->parent;
unsigned long detach_flags = DETACH_EXIT;
@@ -14124,6 +14123,9 @@ perf_event_exit_event(struct perf_event *event,
mutex_lock(&parent_event->child_mutex);
/* PERF_ATTACH_ITRACE might be set concurrently */
attach_state = READ_ONCE(event->attach_state);
+
+ if (attach_state & PERF_ATTACH_CHILD)
+ sync_child_event(event, task);
}
if (revoke)
@@ -14215,7 +14217,7 @@ static void perf_event_exit_task_context(struct task_struct *task, bool exit)
perf_event_task(task, ctx, 0);
list_for_each_entry_safe(child_event, next, &ctx->event_list, event_entry)
- perf_event_exit_event(child_event, ctx, false);
+ perf_event_exit_event(child_event, ctx, exit ? task : NULL, false);
mutex_unlock(&ctx->mutex);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f11ceb8be8c4..d546d32390a8 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -79,7 +79,7 @@ struct uprobe {
* The generic code assumes that it has two members of unknown type
* owned by the arch-specific code:
*
- * insn - copy_insn() saves the original instruction here for
+ * insn - copy_insn() saves the original instruction here for
* arch_uprobe_analyze_insn().
*
* ixol - potentially modified instruction to execute out of
@@ -107,8 +107,8 @@ static LIST_HEAD(delayed_uprobe_list);
* allocated.
*/
struct xol_area {
- wait_queue_head_t wq; /* if all slots are busy */
- unsigned long *bitmap; /* 0 = free slot */
+ wait_queue_head_t wq; /* if all slots are busy */
+ unsigned long *bitmap; /* 0 = free slot */
struct page *page;
/*
@@ -116,7 +116,7 @@ struct xol_area {
* itself. The probed process or a naughty kernel module could make
* the vma go away, and we must handle that reasonably gracefully.
*/
- unsigned long vaddr; /* Page(s) of instruction slots */
+ unsigned long vaddr; /* Page(s) of instruction slots */
};
static void uprobe_warn(struct task_struct *t, const char *msg)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0bb29316b436..8b1b4c8a4f54 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -2470,6 +2470,9 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
if (retval < 0)
return retval;
+ if (!act->affinity)
+ act->affinity = cpu_online_mask;
+
retval = __setup_irq(irq, desc, act);
if (retval)
diff --git a/kernel/liveupdate/Kconfig b/kernel/liveupdate/Kconfig
index 9b2515f31afb..d2aeaf13c3ac 100644
--- a/kernel/liveupdate/Kconfig
+++ b/kernel/liveupdate/Kconfig
@@ -54,6 +54,7 @@ config KEXEC_HANDOVER_ENABLE_DEFAULT
config LIVEUPDATE
bool "Live Update Orchestrator"
depends on KEXEC_HANDOVER
+ depends on SHMEM
help
Enable the Live Update Orchestrator. Live Update is a mechanism,
typically based on kexec, that allows the kernel to be updated
diff --git a/kernel/liveupdate/luo_core.c b/kernel/liveupdate/luo_core.c
index f7ecaf7740d1..944663d99dd9 100644
--- a/kernel/liveupdate/luo_core.c
+++ b/kernel/liveupdate/luo_core.c
@@ -399,10 +399,8 @@ static long luo_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
int err;
nr = _IOC_NR(cmd);
- if (nr < LIVEUPDATE_CMD_BASE ||
- (nr - LIVEUPDATE_CMD_BASE) >= ARRAY_SIZE(luo_ioctl_ops)) {
+ if (nr - LIVEUPDATE_CMD_BASE >= ARRAY_SIZE(luo_ioctl_ops))
return -EINVAL;
- }
ucmd.ubuffer = (void __user *)arg;
err = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer);
diff --git a/kernel/liveupdate/luo_file.c b/kernel/liveupdate/luo_file.c
index ddff87917b21..a32a777f6df8 100644
--- a/kernel/liveupdate/luo_file.c
+++ b/kernel/liveupdate/luo_file.c
@@ -554,17 +554,20 @@ int luo_retrieve_file(struct luo_file_set *file_set, u64 token,
{
struct liveupdate_file_op_args args = {0};
struct luo_file *luo_file;
+ bool found = false;
int err;
if (list_empty(&file_set->files_list))
return -ENOENT;
list_for_each_entry(luo_file, &file_set->files_list, list) {
- if (luo_file->token == token)
+ if (luo_file->token == token) {
+ found = true;
break;
+ }
}
- if (luo_file->token != token)
+ if (!found)
return -ENOENT;
guard(mutex)(&luo_file->mutex);
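
The found flag fixes a classic list_for_each_entry() pitfall: after a full traversal the iterator is not NULL, it is a bogus container computed from the list head, so the old re-test of luo_file->token dereferenced garbage on a miss. A standalone sketch of the safe shape with a tiny circular list:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; };
struct file_entry { int token; struct node link; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct file_entry *find_file(struct node *head, int token)
{
	struct file_entry *f = NULL;
	struct node *pos;
	bool found = false;

	for (pos = head->next; pos != head; pos = pos->next) {
		f = container_of(pos, struct file_entry, link);
		if (f->token == token) {
			found = true;
			break;
		}
	}
	/* without the flag, re-testing f->token here would read the
	 * bogus container_of(head) whenever nothing matched */
	return found ? f : NULL;
}

int main(void)
{
	struct node head = { &head };		/* empty circular list */
	struct file_entry a = { .token = 42 };

	a.link.next = &head;			/* single-entry list */
	head.next = &a.link;

	printf("%d %d\n", find_file(&head, 42) != NULL,
	       find_file(&head, 7) != NULL);	/* prints: 1 0 */
	return 0;
}
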
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 05f5a49e9649..94164f2dec6d 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -41,6 +41,13 @@ static bool scx_init_task_enabled;
static bool scx_switching_all;
DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
+/*
+ * Tracks whether scx_enable() called scx_bypass(true). Used to balance bypass
+ * depth on enable failure. Will be removed when bypass depth is moved into the
+ * sched instance.
+ */
+static bool scx_bypassed_for_enable;
+
static atomic_long_t scx_nr_rejected = ATOMIC_LONG_INIT(0);
static atomic_long_t scx_hotplug_seq = ATOMIC_LONG_INIT(0);
@@ -975,6 +982,30 @@ static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
__scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
}
+static void local_dsq_post_enq(struct scx_dispatch_q *dsq, struct task_struct *p,
+ u64 enq_flags)
+{
+ struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
+ bool preempt = false;
+
+ /*
+ * If @rq is in balance, the CPU is already vacant and looking for the
+ * next task to run. No need to preempt or trigger resched after moving
+ * @p into its local DSQ.
+ */
+ if (rq->scx.flags & SCX_RQ_IN_BALANCE)
+ return;
+
+ if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
+ rq->curr->sched_class == &ext_sched_class) {
+ rq->curr->scx.slice = 0;
+ preempt = true;
+ }
+
+ if (preempt || sched_class_above(&ext_sched_class, rq->curr->sched_class))
+ resched_curr(rq);
+}
+
static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
struct task_struct *p, u64 enq_flags)
{
@@ -1086,22 +1117,10 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
if (enq_flags & SCX_ENQ_CLEAR_OPSS)
atomic_long_set_release(&p->scx.ops_state, SCX_OPSS_NONE);
- if (is_local) {
- struct rq *rq = container_of(dsq, struct rq, scx.local_dsq);
- bool preempt = false;
-
- if ((enq_flags & SCX_ENQ_PREEMPT) && p != rq->curr &&
- rq->curr->sched_class == &ext_sched_class) {
- rq->curr->scx.slice = 0;
- preempt = true;
- }
-
- if (preempt || sched_class_above(&ext_sched_class,
- rq->curr->sched_class))
- resched_curr(rq);
- } else {
+ if (is_local)
+ local_dsq_post_enq(dsq, p, enq_flags);
+ else
raw_spin_unlock(&dsq->lock);
- }
}
static void task_unlink_from_dsq(struct task_struct *p,
@@ -1625,6 +1644,8 @@ static void move_local_task_to_local_dsq(struct task_struct *p, u64 enq_flags,
dsq_mod_nr(dst_dsq, 1);
p->scx.dsq = dst_dsq;
+
+ local_dsq_post_enq(dst_dsq, p, enq_flags);
}
/**
@@ -2402,7 +2423,7 @@ static void put_prev_task_scx(struct rq *rq, struct task_struct *p,
* ops.enqueue() that @p is the only one available for this cpu,
* which should trigger an explicit follow-up scheduling event.
*/
- if (sched_class_above(&ext_sched_class, next->sched_class)) {
+ if (next && sched_class_above(&ext_sched_class, next->sched_class)) {
WARN_ON_ONCE(!(sch->ops.flags & SCX_OPS_ENQ_LAST));
do_enqueue_task(rq, p, SCX_ENQ_LAST, -1);
} else {
@@ -2425,7 +2446,7 @@ static struct task_struct *
do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
{
struct task_struct *prev = rq->curr;
- bool keep_prev, kick_idle = false;
+ bool keep_prev;
struct task_struct *p;
/* see kick_cpus_irq_workfn() */
@@ -2467,12 +2488,8 @@ do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
refill_task_slice_dfl(rcu_dereference_sched(scx_root), p);
} else {
p = first_local_task(rq);
- if (!p) {
- if (kick_idle)
- scx_kick_cpu(rcu_dereference_sched(scx_root),
- cpu_of(rq), SCX_KICK_IDLE);
+ if (!p)
return NULL;
- }
if (unlikely(!p->scx.slice)) {
struct scx_sched *sch = rcu_dereference_sched(scx_root);
@@ -3575,7 +3592,7 @@ static void scx_sched_free_rcu_work(struct work_struct *work)
int node;
irq_work_sync(&sch->error_irq_work);
- kthread_stop(sch->helper->task);
+ kthread_destroy_worker(sch->helper);
free_percpu(sch->pcpu);
@@ -4318,6 +4335,11 @@ static void scx_disable_workfn(struct kthread_work *work)
scx_dsp_max_batch = 0;
free_kick_syncs();
+ if (scx_bypassed_for_enable) {
+ scx_bypassed_for_enable = false;
+ scx_bypass(false);
+ }
+
mutex_unlock(&scx_enable_mutex);
WARN_ON_ONCE(scx_set_enable_state(SCX_DISABLED) != SCX_DISABLING);
@@ -4786,7 +4808,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
return sch;
err_stop_helper:
- kthread_stop(sch->helper->task);
+ kthread_destroy_worker(sch->helper);
err_free_pcpu:
free_percpu(sch->pcpu);
err_free_gdsqs:
@@ -4970,6 +4992,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
* Init in bypass mode to guarantee forward progress.
*/
scx_bypass(true);
+ scx_bypassed_for_enable = true;
for (i = SCX_OPI_NORMAL_BEGIN; i < SCX_OPI_NORMAL_END; i++)
if (((void (**)(void))ops)[i])
@@ -5067,6 +5090,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
scx_task_iter_stop(&sti);
percpu_up_write(&scx_fork_rwsem);
+ scx_bypassed_for_enable = false;
scx_bypass(false);
if (!scx_tryset_enable_state(SCX_ENABLED, SCX_ENABLING)) {
diff --git a/lib/bug.c b/lib/bug.c
index edd9041f89f3..623c467a8b76 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -173,6 +173,9 @@ struct bug_entry *find_bug(unsigned long bugaddr)
return module_find_bug(bugaddr);
}
+__diag_push();
+__diag_ignore(GCC, all, "-Wsuggest-attribute=format",
+ "Not a valid __printf() conversion candidate.");
static void __warn_printf(const char *fmt, struct pt_regs *regs)
{
if (!fmt)
@@ -192,6 +195,7 @@ static void __warn_printf(const char *fmt, struct pt_regs *regs)
printk("%s", fmt);
}
+__diag_pop();
static enum bug_trap_type __report_bug(struct bug_entry *bug, unsigned long bugaddr, struct pt_regs *regs)
{
@@ -262,7 +266,7 @@ enum bug_trap_type report_bug_entry(struct bug_entry *bug, struct pt_regs *regs)
bool rcu = false;
rcu = warn_rcu_enter();
- ret = __report_bug(bug, 0, regs);
+ ret = __report_bug(bug, bug_addr(bug), regs);
warn_rcu_exit(rcu);
return ret;
diff --git a/mm/shmem.c b/mm/shmem.c
index 3f194c9842a8..ec6c01378e9d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4019,22 +4019,10 @@ static int shmem_whiteout(struct mnt_idmap *idmap,
whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
if (!whiteout)
return -ENOMEM;
-
error = shmem_mknod(idmap, old_dir, whiteout,
S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
dput(whiteout);
- if (error)
- return error;
-
- /*
- * Cheat and hash the whiteout while the old dentry is still in
- * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
- *
- * d_lookup() will consistently find one of them at this point,
- * not sure which one, but that isn't even important.
- */
- d_rehash(whiteout);
- return 0;
+ return error;
}
/*
@@ -4050,6 +4038,7 @@ static int shmem_rename2(struct mnt_idmap *idmap,
{
struct inode *inode = d_inode(old_dentry);
int they_are_dirs = S_ISDIR(inode->i_mode);
+ bool had_offset = false;
int error;
if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
@@ -4062,16 +4051,23 @@ static int shmem_rename2(struct mnt_idmap *idmap,
if (!simple_empty(new_dentry))
return -ENOTEMPTY;
+ error = simple_offset_add(shmem_get_offset_ctx(new_dir), new_dentry);
+ if (error == -EBUSY)
+ had_offset = true;
+ else if (unlikely(error))
+ return error;
+
if (flags & RENAME_WHITEOUT) {
error = shmem_whiteout(idmap, old_dir, old_dentry);
- if (error)
+ if (error) {
+ if (!had_offset)
+ simple_offset_remove(shmem_get_offset_ctx(new_dir),
+ new_dentry);
return error;
+ }
}
- error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
- if (error)
- return error;
-
+ simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
if (d_really_is_positive(new_dentry)) {
(void) shmem_unlink(new_dir, new_dentry);
if (they_are_dirs) {
@@ -5794,8 +5790,15 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
#define shmem_vm_ops generic_file_vm_ops
#define shmem_anon_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
-#define shmem_acct_size(flags, size) 0
-#define shmem_unacct_size(flags, size) do {} while (0)
+
+static inline int shmem_acct_size(unsigned long flags, loff_t size)
+{
+ return 0;
+}
+
+static inline void shmem_unacct_size(unsigned long flags, loff_t size)
+{
+}
static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
struct super_block *sb, struct inode *dir,
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6664ea73ccf8..3667319b949d 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1280,8 +1280,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
if (refcount_inc_not_zero(&osd->o_ref)) {
- dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
- refcount_read(&osd->o_ref));
+ dout("get_osd %p -> %d\n", osd, refcount_read(&osd->o_ref));
return osd;
} else {
dout("get_osd %p FAIL\n", osd);
@@ -1291,8 +1290,7 @@ static struct ceph_osd *get_osd(struct ceph_osd *osd)
static void put_osd(struct ceph_osd *osd)
{
- dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
- refcount_read(&osd->o_ref) - 1);
+ dout("put_osd %p -> %d\n", osd, refcount_read(&osd->o_ref) - 1);
if (refcount_dec_and_test(&osd->o_ref)) {
osd_cleanup(osd);
kfree(osd);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d245fa508e1c..34b3ab59602f 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -806,51 +806,49 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
ceph_decode_need(p, end, len, bad);
pool_end = *p + len;
+ ceph_decode_need(p, end, 4 + 4 + 4, bad);
pi->type = ceph_decode_8(p);
pi->size = ceph_decode_8(p);
pi->crush_ruleset = ceph_decode_8(p);
pi->object_hash = ceph_decode_8(p);
-
pi->pg_num = ceph_decode_32(p);
pi->pgp_num = ceph_decode_32(p);
- *p += 4 + 4; /* skip lpg* */
- *p += 4; /* skip last_change */
- *p += 8 + 4; /* skip snap_seq, snap_epoch */
+ /* lpg*, last_change, snap_seq, snap_epoch */
+ ceph_decode_skip_n(p, end, 8 + 4 + 8 + 4, bad);
/* skip snaps */
- num = ceph_decode_32(p);
+ ceph_decode_32_safe(p, end, num, bad);
while (num--) {
- *p += 8; /* snapid key */
- *p += 1 + 1; /* versions */
- len = ceph_decode_32(p);
- *p += len;
+ /* snapid key, pool snap (with versions) */
+ ceph_decode_skip_n(p, end, 8 + 2, bad);
+ ceph_decode_skip_string(p, end, bad);
}
- /* skip removed_snaps */
- num = ceph_decode_32(p);
- *p += num * (8 + 8);
+ /* removed_snaps */
+ ceph_decode_skip_map(p, end, 64, 64, bad);
+ ceph_decode_need(p, end, 8 + 8 + 4, bad);
*p += 8; /* skip auid */
pi->flags = ceph_decode_64(p);
*p += 4; /* skip crash_replay_interval */
if (ev >= 7)
- pi->min_size = ceph_decode_8(p);
+ ceph_decode_8_safe(p, end, pi->min_size, bad);
else
pi->min_size = pi->size - pi->size / 2;
if (ev >= 8)
- *p += 8 + 8; /* skip quota_max_* */
+ /* quota_max_* */
+ ceph_decode_skip_n(p, end, 8 + 8, bad);
if (ev >= 9) {
- /* skip tiers */
- num = ceph_decode_32(p);
- *p += num * 8;
+ /* tiers */
+ ceph_decode_skip_set(p, end, 64, bad);
+ ceph_decode_need(p, end, 8 + 1 + 8 + 8, bad);
*p += 8; /* skip tier_of */
*p += 1; /* skip cache_mode */
-
pi->read_tier = ceph_decode_64(p);
pi->write_tier = ceph_decode_64(p);
} else {
@@ -858,86 +856,76 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
pi->write_tier = -1;
}
- if (ev >= 10) {
- /* skip properties */
- num = ceph_decode_32(p);
- while (num--) {
- len = ceph_decode_32(p);
- *p += len; /* key */
- len = ceph_decode_32(p);
- *p += len; /* val */
- }
- }
+ if (ev >= 10)
+ /* properties */
+ ceph_decode_skip_map(p, end, string, string, bad);
if (ev >= 11) {
- /* skip hit_set_params */
- *p += 1 + 1; /* versions */
- len = ceph_decode_32(p);
- *p += len;
+ /* hit_set_params (with versions) */
+ ceph_decode_skip_n(p, end, 2, bad);
+ ceph_decode_skip_string(p, end, bad);
- *p += 4; /* skip hit_set_period */
- *p += 4; /* skip hit_set_count */
+ /* hit_set_period, hit_set_count */
+ ceph_decode_skip_n(p, end, 4 + 4, bad);
}
if (ev >= 12)
- *p += 4; /* skip stripe_width */
+ /* stripe_width */
+ ceph_decode_skip_32(p, end, bad);
- if (ev >= 13) {
- *p += 8; /* skip target_max_bytes */
- *p += 8; /* skip target_max_objects */
- *p += 4; /* skip cache_target_dirty_ratio_micro */
- *p += 4; /* skip cache_target_full_ratio_micro */
- *p += 4; /* skip cache_min_flush_age */
- *p += 4; /* skip cache_min_evict_age */
- }
+ if (ev >= 13)
+ /* target_max_*, cache_target_*, cache_min_* */
+ ceph_decode_skip_n(p, end, 16 + 8 + 8, bad);
- if (ev >= 14) {
- /* skip erasure_code_profile */
- len = ceph_decode_32(p);
- *p += len;
- }
+ if (ev >= 14)
+ /* erasure_code_profile */
+ ceph_decode_skip_string(p, end, bad);
/*
* last_force_op_resend_preluminous, will be overridden if the
* map was encoded with RESEND_ON_SPLIT
*/
if (ev >= 15)
- pi->last_force_request_resend = ceph_decode_32(p);
+ ceph_decode_32_safe(p, end, pi->last_force_request_resend, bad);
else
pi->last_force_request_resend = 0;
if (ev >= 16)
- *p += 4; /* skip min_read_recency_for_promote */
+ /* min_read_recency_for_promote */
+ ceph_decode_skip_32(p, end, bad);
if (ev >= 17)
- *p += 8; /* skip expected_num_objects */
+ /* expected_num_objects */
+ ceph_decode_skip_64(p, end, bad);
if (ev >= 19)
- *p += 4; /* skip cache_target_dirty_high_ratio_micro */
+ /* cache_target_dirty_high_ratio_micro */
+ ceph_decode_skip_32(p, end, bad);
if (ev >= 20)
- *p += 4; /* skip min_write_recency_for_promote */
+ /* min_write_recency_for_promote */
+ ceph_decode_skip_32(p, end, bad);
if (ev >= 21)
- *p += 1; /* skip use_gmt_hitset */
+ /* use_gmt_hitset */
+ ceph_decode_skip_8(p, end, bad);
if (ev >= 22)
- *p += 1; /* skip fast_read */
+ /* fast_read */
+ ceph_decode_skip_8(p, end, bad);
- if (ev >= 23) {
- *p += 4; /* skip hit_set_grade_decay_rate */
- *p += 4; /* skip hit_set_search_last_n */
- }
+ if (ev >= 23)
+ /* hit_set_grade_decay_rate, hit_set_search_last_n */
+ ceph_decode_skip_n(p, end, 4 + 4, bad);
if (ev >= 24) {
- /* skip opts */
- *p += 1 + 1; /* versions */
- len = ceph_decode_32(p);
- *p += len;
+ /* opts (with versions) */
+ ceph_decode_skip_n(p, end, 2, bad);
+ ceph_decode_skip_string(p, end, bad);
}
if (ev >= 25)
- pi->last_force_request_resend = ceph_decode_32(p);
+ ceph_decode_32_safe(p, end, pi->last_force_request_resend, bad);
/* ignore the rest */
@@ -1438,7 +1426,7 @@ static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
ceph_decode_32_safe(p, end, len, e_inval);
if (len == 0 && incremental)
return NULL; /* new_pg_temp: [] to remove */
- if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
+ if ((size_t)len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
return ERR_PTR(-EINVAL);
ceph_decode_need(p, end, len * sizeof(u32), e_inval);
@@ -1619,7 +1607,7 @@ static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
u32 len, i;
ceph_decode_32_safe(p, end, len, e_inval);
- if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
+ if ((size_t)len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
return ERR_PTR(-EINVAL);
ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
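
The decode_pool() rewrite replaces raw `*p += len` cursor arithmetic with ceph_decode_skip_*() helpers that verify the remaining buffer before advancing, so a truncated or malicious pool encoding jumps to the `bad` label instead of walking past `end`. A standalone sketch of that bounded-cursor style (helper names are stand-ins, and the length prefix is assumed little-endian like the Ceph wire format):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int skip_n(const uint8_t **p, const uint8_t *end, size_t n)
{
	if ((size_t)(end - *p) < n)
		return -1;	/* truncated input: fail, don't overrun */
	*p += n;
	return 0;
}

static int get_le32(const uint8_t **p, const uint8_t *end, uint32_t *v)
{
	if ((size_t)(end - *p) < 4)
		return -1;
	memcpy(v, *p, 4);	/* assumes a little-endian host for brevity */
	*p += 4;
	return 0;
}

/* length-prefixed blob: validate the prefix, then the payload */
static int skip_string(const uint8_t **p, const uint8_t *end)
{
	uint32_t len;

	if (get_le32(p, end, &len))
		return -1;
	return skip_n(p, end, len);
}

int main(void)
{
	const uint8_t buf[] = { 3, 0, 0, 0, 'a', 'b', 'c', 9, 9 };
	const uint8_t *p = buf, *end = buf + sizeof(buf);

	printf("%d %zu\n", skip_string(&p, end), (size_t)(end - p));
	/* prints: 0 2 */
	return 0;
}
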
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index d58ca9655ab7..c0250244cf7a 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -7732,6 +7732,12 @@ sub process {
ERROR("MISSING_SENTINEL", "missing sentinel in ID array\n" . "$here\n$stat\n");
}
}
+
+# check for uninitialized pointers with __free attribute
+ while ($line =~ /\*\s*($Ident)\s+__free\s*\(\s*$Ident\s*\)\s*[,;]/g) {
+ ERROR("UNINITIALIZED_PTR_WITH_FREE",
+ "pointer '$1' with __free attribute should be initialized\n" . $herecurr);
+ }
}
# If we have no input at all, then there is nothing to report on
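
In kernel C the new rule flags declarations like `struct foo *p __free(kfree);` with no initializer: if any return path runs before p is assigned, the scope-exit cleanup frees stack garbage. A userspace analogue built on the same compiler feature (__attribute__((cleanup)), which include/linux/cleanup.h wraps) shows both shapes:

#include <stdlib.h>

static void freep(void *pp)
{
	free(*(void **)pp);
}

#define __auto_free __attribute__((cleanup(freep)))

static int good(void)
{
	/* declared at the point of use and initialized: always safe */
	char *p __auto_free = malloc(16);

	return p ? 0 : -1;
}

__attribute__((unused))
static int bad(void)
{
	char *p __auto_free;	/* the flagged shape: the cleanup would
				 * call free() on uninitialized garbage */
	return -1;
}

int main(void)
{
	return good();	/* bad() is never called; it only shows the bug */
}
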
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 5f9ccab26e9a..90cf0e2969df 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -934,17 +934,12 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
#endif
if (page != dump->page) {
const unsigned int offset = pos % PAGE_SIZE;
- /*
- * Maybe kmap()/kunmap() should be used here.
- * But remove_arg_zero() uses kmap_atomic()/kunmap_atomic().
- * So do I.
- */
- char *kaddr = kmap_atomic(page);
+ char *kaddr = kmap_local_page(page);
dump->page = page;
memcpy(dump->data + offset, kaddr + offset,
PAGE_SIZE - offset);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
}
/* Same with put_arg_page(page) in fs/exec.c */
#ifdef CONFIG_MMU
diff --git a/tools/testing/selftests/sched_ext/runner.c b/tools/testing/selftests/sched_ext/runner.c
index aa2d7d32dda9..5748d2c69903 100644
--- a/tools/testing/selftests/sched_ext/runner.c
+++ b/tools/testing/selftests/sched_ext/runner.c
@@ -46,6 +46,14 @@ static void print_test_preamble(const struct scx_test *test, bool quiet)
if (!quiet)
printf("DESCRIPTION: %s\n", test->description);
printf("OUTPUT:\n");
+
+ /*
+ * The tests may fork with the preamble buffered
+ * in the children's stdout. Flush before the test
+ * to avoid printing the message multiple times.
+ */
+ fflush(stdout);
+ fflush(stderr);
}
static const char *status_to_result(enum scx_test_status status)
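
A standalone reproduction of the stdio behavior the fix addresses: when stdout is fully buffered (e.g. piped), fork() duplicates the unflushed buffer into the child and both processes write it out at exit. Removing the marked fflush() and running the sketch as, say, `./demo | cat` prints PREAMBLE twice:

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	printf("PREAMBLE\n");	/* still buffered, not yet written, when piped */
	fflush(stdout);		/* remove this line to see PREAMBLE twice */

	if (fork() == 0) {
		printf("child output\n");
		return 0;
	}
	wait(NULL);
	return 0;
}
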