Diffstat (limited to 'tools/testing')
-rw-r--r--  tools/testing/cxl/Kbuild | 5
-rw-r--r--  tools/testing/cxl/test/cxl.c | 36
-rw-r--r--  tools/testing/cxl/test/mem.c | 367
-rw-r--r--  tools/testing/cxl/test/mock.c | 59
-rw-r--r--  tools/testing/cxl/test/mock.h | 3
-rw-r--r--  tools/testing/nvdimm/test/nfit.c | 6
-rw-r--r--  tools/testing/nvdimm/test/nfit_test.h | 29
-rw-r--r--  tools/testing/selftests/Makefile | 1
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc | 58
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc | 26
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe.tc | 27
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc | 111
-rw-r--r--  tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc | 82
-rw-r--r--  tools/testing/selftests/ftrace/test.d/ftrace/fgraph-retval.tc | 44
-rw-r--r--  tools/testing/selftests/ftrace/test.d/kprobe/kprobe_insn_boundary.tc | 19
-rw-r--r--  tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc | 16
-rw-r--r--  tools/testing/selftests/kvm/Makefile | 19
-rw-r--r--  tools/testing/selftests/kvm/demand_paging_test.c | 32
-rw-r--r--  tools/testing/selftests/kvm/dirty_log_perf_test.c | 96
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util_base.h | 1
-rw-r--r--  tools/testing/selftests/kvm/include/memstress.h | 8
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c | 17
-rw-r--r--  tools/testing/selftests/kvm/lib/memstress.c | 75
-rw-r--r--  tools/testing/selftests/kvm/lib/userfaultfd_util.c | 4
-rw-r--r--  tools/testing/selftests/kvm/s390x/cmma_test.c | 700
-rw-r--r--  tools/testing/selftests/kvm/x86_64/cpuid_test.c | 21
-rw-r--r--  tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c | 259
-rw-r--r--  tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c | 2
-rw-r--r--  tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c | 22
-rw-r--r--  tools/testing/selftests/net/config | 1
-rw-r--r--  tools/testing/selftests/net/mptcp/config | 1
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_connect.sh | 3
-rwxr-xr-x  tools/testing/selftests/net/mptcp/mptcp_sockopt.sh | 29
-rw-r--r--  tools/testing/selftests/net/mptcp/pm_nl_ctl.c | 10
-rwxr-xr-x  tools/testing/selftests/net/mptcp/userspace_pm.sh | 4
-rwxr-xr-x  tools/testing/selftests/rcutorture/bin/kvm.sh | 2
-rw-r--r--  tools/testing/selftests/tty/.gitignore | 2
-rw-r--r--  tools/testing/selftests/tty/Makefile | 5
-rw-r--r--  tools/testing/selftests/tty/tty_tstamp_update.c | 88
-rwxr-xr-x  tools/testing/selftests/wireguard/netns.sh | 30
40 files changed, 2042 insertions(+), 278 deletions(-)
diff --git a/tools/testing/cxl/Kbuild b/tools/testing/cxl/Kbuild
index 6f9347ade82c..90f3c9802ffb 100644
--- a/tools/testing/cxl/Kbuild
+++ b/tools/testing/cxl/Kbuild
@@ -6,13 +6,13 @@ ldflags-y += --wrap=acpi_pci_find_root
ldflags-y += --wrap=nvdimm_bus_register
ldflags-y += --wrap=devm_cxl_port_enumerate_dports
ldflags-y += --wrap=devm_cxl_setup_hdm
-ldflags-y += --wrap=devm_cxl_enable_hdm
ldflags-y += --wrap=devm_cxl_add_passthrough_decoder
ldflags-y += --wrap=devm_cxl_enumerate_decoders
ldflags-y += --wrap=cxl_await_media_ready
ldflags-y += --wrap=cxl_hdm_decode_init
ldflags-y += --wrap=cxl_dvsec_rr_decode
-ldflags-y += --wrap=cxl_rcrb_to_component
+ldflags-y += --wrap=devm_cxl_add_rch_dport
+ldflags-y += --wrap=cxl_rcd_component_reg_phys
DRIVERS := ../../../drivers
CXL_SRC := $(DRIVERS)/cxl
@@ -57,6 +57,7 @@ cxl_core-y += $(CXL_CORE_SRC)/memdev.o
cxl_core-y += $(CXL_CORE_SRC)/mbox.o
cxl_core-y += $(CXL_CORE_SRC)/pci.o
cxl_core-y += $(CXL_CORE_SRC)/hdm.o
+cxl_core-y += $(CXL_CORE_SRC)/pmu.o
cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
cxl_core-y += config_check.o
diff --git a/tools/testing/cxl/test/cxl.c b/tools/testing/cxl/test/cxl.c
index bf00dc52fe96..0e78d8e19895 100644
--- a/tools/testing/cxl/test/cxl.c
+++ b/tools/testing/cxl/test/cxl.c
@@ -713,7 +713,7 @@ static void default_mock_decoder(struct cxl_decoder *cxld)
cxld->interleave_ways = 1;
cxld->interleave_granularity = 256;
- cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->commit = mock_decoder_commit;
cxld->reset = mock_decoder_reset;
}
@@ -754,7 +754,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
/* check if the endpoint is attached to host-bridge0 */
port = cxled_to_port(cxled);
do {
- if (port->uport == &cxl_host_bridge[0]->dev) {
+ if (port->uport_dev == &cxl_host_bridge[0]->dev) {
hb0 = true;
break;
}
@@ -787,7 +787,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
cxld->interleave_ways = 2;
eig_to_granularity(window->granularity, &cxld->interleave_granularity);
- cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->flags = CXL_DECODER_F_ENABLE;
cxled->state = CXL_DECODER_STATE_AUTO;
port->commit_end = cxld->id;
@@ -820,7 +820,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
} else
cxlsd->target[0] = dport;
cxld = &cxlsd->cxld;
- cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->target_type = CXL_DECODER_HOSTONLYMEM;
cxld->flags = CXL_DECODER_F_ENABLE;
iter->commit_end = 0;
/*
@@ -889,7 +889,7 @@ static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
mock_init_hdm_decoder(cxld);
if (target_count) {
- rc = device_for_each_child(port->uport, &ctx,
+ rc = device_for_each_child(port->uport_dev, &ctx,
map_targets);
if (rc) {
put_device(&cxld->dev);
@@ -919,29 +919,29 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
int i, array_size;
if (port->depth == 1) {
- if (is_multi_bridge(port->uport)) {
+ if (is_multi_bridge(port->uport_dev)) {
array_size = ARRAY_SIZE(cxl_root_port);
array = cxl_root_port;
- } else if (is_single_bridge(port->uport)) {
+ } else if (is_single_bridge(port->uport_dev)) {
array_size = ARRAY_SIZE(cxl_root_single);
array = cxl_root_single;
} else {
dev_dbg(&port->dev, "%s: unknown bridge type\n",
- dev_name(port->uport));
+ dev_name(port->uport_dev));
return -ENXIO;
}
} else if (port->depth == 2) {
struct cxl_port *parent = to_cxl_port(port->dev.parent);
- if (is_multi_bridge(parent->uport)) {
+ if (is_multi_bridge(parent->uport_dev)) {
array_size = ARRAY_SIZE(cxl_switch_dport);
array = cxl_switch_dport;
- } else if (is_single_bridge(parent->uport)) {
+ } else if (is_single_bridge(parent->uport_dev)) {
array_size = ARRAY_SIZE(cxl_swd_single);
array = cxl_swd_single;
} else {
dev_dbg(&port->dev, "%s: unknown bridge type\n",
- dev_name(port->uport));
+ dev_name(port->uport_dev));
return -ENXIO;
}
} else {
@@ -954,9 +954,9 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
struct platform_device *pdev = array[i];
struct cxl_dport *dport;
- if (pdev->dev.parent != port->uport) {
+ if (pdev->dev.parent != port->uport_dev) {
dev_dbg(&port->dev, "%s: mismatch parent %s\n",
- dev_name(port->uport),
+ dev_name(port->uport_dev),
dev_name(pdev->dev.parent));
continue;
}
@@ -971,15 +971,6 @@ static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
return 0;
}
-resource_size_t mock_cxl_rcrb_to_component(struct device *dev,
- resource_size_t rcrb,
- enum cxl_rcrb which)
-{
- dev_dbg(dev, "rcrb: %pa which: %d\n", &rcrb, which);
-
- return (resource_size_t) which + 1;
-}
-
static struct cxl_mock_ops cxl_mock_ops = {
.is_mock_adev = is_mock_adev,
.is_mock_bridge = is_mock_bridge,
@@ -988,7 +979,6 @@ static struct cxl_mock_ops cxl_mock_ops = {
.is_mock_dev = is_mock_dev,
.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
.acpi_evaluate_integer = mock_acpi_evaluate_integer,
- .cxl_rcrb_to_component = mock_cxl_rcrb_to_component,
.acpi_pci_find_root = mock_acpi_pci_find_root,
.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 34b48027b3de..464fc39ed277 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -8,11 +8,14 @@
#include <linux/sizes.h>
#include <linux/bits.h>
#include <asm/unaligned.h>
+#include <crypto/sha2.h>
#include <cxlmem.h>
#include "trace.h"
#define LSA_SIZE SZ_128K
+#define FW_SIZE SZ_64M
+#define FW_SLOTS 3
#define DEV_SIZE SZ_2G
#define EFFECT(x) (1U << x)
@@ -21,42 +24,70 @@
static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
+enum cxl_command_effects {
+ CONF_CHANGE_COLD_RESET = 0,
+ CONF_CHANGE_IMMEDIATE,
+ DATA_CHANGE_IMMEDIATE,
+ POLICY_CHANGE_IMMEDIATE,
+ LOG_CHANGE_IMMEDIATE,
+ SECURITY_CHANGE_IMMEDIATE,
+ BACKGROUND_OP,
+ SECONDARY_MBOX_SUPPORTED,
+};
+
+#define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
+
static struct cxl_cel_entry mock_cel[] = {
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
- .effect = cpu_to_le16(0),
+ .effect = CXL_CMD_EFFECT_NONE,
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
- .effect = cpu_to_le16(0),
+ .effect = CXL_CMD_EFFECT_NONE,
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
- .effect = cpu_to_le16(0),
+ .effect = CXL_CMD_EFFECT_NONE,
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
- .effect = cpu_to_le16(0),
+ .effect = CXL_CMD_EFFECT_NONE,
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
- .effect = cpu_to_le16(EFFECT(1) | EFFECT(2)),
+ .effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
+ EFFECT(DATA_CHANGE_IMMEDIATE)),
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
- .effect = cpu_to_le16(0),
+ .effect = CXL_CMD_EFFECT_NONE,
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
- .effect = cpu_to_le16(0),
+ .effect = CXL_CMD_EFFECT_NONE,
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
- .effect = cpu_to_le16(0),
+ .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
},
{
.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
- .effect = cpu_to_le16(0),
+ .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
+ .effect = CXL_CMD_EFFECT_NONE,
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
+ .effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
+ EFFECT(BACKGROUND_OP)),
+ },
+ {
+ .opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
+ .effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
+ EFFECT(CONF_CHANGE_IMMEDIATE)),
},
};
@@ -102,13 +133,17 @@ struct mock_event_log {
};
struct mock_event_store {
- struct cxl_dev_state *cxlds;
+ struct cxl_memdev_state *mds;
struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
u32 ev_status;
};
struct cxl_mockmem_data {
void *lsa;
+ void *fw;
+ int fw_slot;
+ int fw_staged;
+ size_t fw_size;
u32 security_state;
u8 user_pass[NVDIMM_PASSPHRASE_LEN];
u8 master_pass[NVDIMM_PASSPHRASE_LEN];
@@ -180,8 +215,7 @@ static void mes_add_event(struct mock_event_store *mes,
log->nr_events++;
}
-static int mock_get_event(struct cxl_dev_state *cxlds,
- struct cxl_mbox_cmd *cmd)
+static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
{
struct cxl_get_event_payload *pl;
struct mock_event_log *log;
@@ -201,7 +235,7 @@ static int mock_get_event(struct cxl_dev_state *cxlds,
memset(cmd->payload_out, 0, cmd->size_out);
- log = event_find_log(cxlds->dev, log_type);
+ log = event_find_log(dev, log_type);
if (!log || event_log_empty(log))
return 0;
@@ -234,8 +268,7 @@ static int mock_get_event(struct cxl_dev_state *cxlds,
return 0;
}
-static int mock_clear_event(struct cxl_dev_state *cxlds,
- struct cxl_mbox_cmd *cmd)
+static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
struct mock_event_log *log;
@@ -246,7 +279,7 @@ static int mock_clear_event(struct cxl_dev_state *cxlds,
if (log_type >= CXL_EVENT_TYPE_MAX)
return -EINVAL;
- log = event_find_log(cxlds->dev, log_type);
+ log = event_find_log(dev, log_type);
if (!log)
return 0; /* No mock data in this log */
@@ -256,7 +289,7 @@ static int mock_clear_event(struct cxl_dev_state *cxlds,
* However, this is not good behavior for the host so test it.
*/
if (log->clear_idx + pl->nr_recs > log->cur_idx) {
- dev_err(cxlds->dev,
+ dev_err(dev,
"Attempting to clear more events than returned!\n");
return -EINVAL;
}
@@ -266,7 +299,7 @@ static int mock_clear_event(struct cxl_dev_state *cxlds,
nr < pl->nr_recs;
nr++, handle++) {
if (handle != le16_to_cpu(pl->handles[nr])) {
- dev_err(cxlds->dev, "Clearing events out of order\n");
+ dev_err(dev, "Clearing events out of order\n");
return -EINVAL;
}
}
@@ -293,7 +326,7 @@ static void cxl_mock_event_trigger(struct device *dev)
event_reset_log(log);
}
- cxl_mem_get_event_records(mes->cxlds, mes->ev_status);
+ cxl_mem_get_event_records(mes->mds, mes->ev_status);
}
struct cxl_event_record_raw maint_needed = {
@@ -453,7 +486,7 @@ static int mock_gsl(struct cxl_mbox_cmd *cmd)
return 0;
}
-static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_get_log *gl = cmd->payload_in;
u32 offset = le32_to_cpu(gl->offset);
@@ -463,7 +496,7 @@ static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
if (cmd->size_in < sizeof(*gl))
return -EINVAL;
- if (length > cxlds->payload_size)
+ if (length > mds->payload_size)
return -EINVAL;
if (offset + length > sizeof(mock_cel))
return -EINVAL;
@@ -477,7 +510,7 @@ static int mock_get_log(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
return 0;
}
-static int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_identify id = {
.fw_revision = { "mock fw v1 " },
@@ -495,7 +528,7 @@ static int mock_rcd_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
return 0;
}
-static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_id(struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_identify id = {
.fw_revision = { "mock fw v1 " },
@@ -517,8 +550,7 @@ static int mock_id(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
return 0;
}
-static int mock_partition_info(struct cxl_dev_state *cxlds,
- struct cxl_mbox_cmd *cmd)
+static int mock_partition_info(struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_get_partition_info pi = {
.active_volatile_cap =
@@ -535,11 +567,52 @@ static int mock_partition_info(struct cxl_dev_state *cxlds,
return 0;
}
-static int mock_get_security_state(struct cxl_dev_state *cxlds,
- struct cxl_mbox_cmd *cmd)
+static int mock_sanitize(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
+ if (cmd->size_in != 0)
+ return -EINVAL;
+
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ return 0; /* assume less than 2 secs, no bg */
+}
+
+static int mock_secure_erase(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ if (cmd->size_in != 0)
+ return -EINVAL;
+ if (cmd->size_out != 0)
+ return -EINVAL;
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
+ cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int mock_get_security_state(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
if (cmd->size_in)
return -EINVAL;
@@ -569,9 +642,9 @@ static void user_plimit_check(struct cxl_mockmem_data *mdata)
mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
}
-static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
struct cxl_set_pass *set_pass;
if (cmd->size_in != sizeof(*set_pass))
@@ -629,9 +702,9 @@ static int mock_set_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd
return -EINVAL;
}
-static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
struct cxl_disable_pass *dis_pass;
if (cmd->size_in != sizeof(*dis_pass))
@@ -700,10 +773,9 @@ static int mock_disable_passphrase(struct cxl_dev_state *cxlds, struct cxl_mbox_
return 0;
}
-static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_freeze_security(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
-
if (cmd->size_in != 0)
return -EINVAL;
@@ -717,10 +789,9 @@ static int mock_freeze_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd
return 0;
}
-static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_unlock_security(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
-
if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
return -EINVAL;
@@ -759,10 +830,9 @@ static int mock_unlock_security(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd
return 0;
}
-static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds,
+static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
struct cxl_mbox_cmd *cmd)
{
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
struct cxl_pass_erase *erase;
if (cmd->size_in != sizeof(*erase))
@@ -858,10 +928,10 @@ static int mock_passphrase_secure_erase(struct cxl_dev_state *cxlds,
return 0;
}
-static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_get_lsa(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
void *lsa = mdata->lsa;
u32 offset, length;
@@ -878,10 +948,10 @@ static int mock_get_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
return 0;
}
-static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_set_lsa(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
- struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
void *lsa = mdata->lsa;
u32 offset, length;
@@ -896,8 +966,7 @@ static int mock_set_lsa(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
return 0;
}
-static int mock_health_info(struct cxl_dev_state *cxlds,
- struct cxl_mbox_cmd *cmd)
+static int mock_health_info(struct cxl_mbox_cmd *cmd)
{
struct cxl_mbox_health_info health_info = {
/* set flags for maint needed, perf degraded, hw replacement */
@@ -1114,9 +1183,90 @@ static struct attribute *cxl_mock_mem_core_attrs[] = {
};
ATTRIBUTE_GROUPS(cxl_mock_mem_core);
-static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
+static int mock_fw_info(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_get_fw_info fw_info = {
+ .num_slots = FW_SLOTS,
+ .slot_info = (mdata->fw_slot & 0x7) |
+ ((mdata->fw_staged & 0x7) << 3),
+ .activation_cap = 0,
+ };
+
+ strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
+ strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
+ strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
+ strcpy(fw_info.slot_4_revision, "");
+
+ if (cmd->size_out < sizeof(fw_info))
+ return -EINVAL;
+
+ memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
+ return 0;
+}
+
+static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
{
+ struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
+ void *fw = mdata->fw;
+ size_t offset, length;
+
+ offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
+ length = cmd->size_in - sizeof(*transfer);
+ if (offset + length > FW_SIZE)
+ return -EINVAL;
+
+ switch (transfer->action) {
+ case CXL_FW_TRANSFER_ACTION_FULL:
+ if (offset != 0)
+ return -EINVAL;
+ fallthrough;
+ case CXL_FW_TRANSFER_ACTION_END:
+ if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
+ return -EINVAL;
+ mdata->fw_size = offset + length;
+ break;
+ case CXL_FW_TRANSFER_ACTION_INITIATE:
+ case CXL_FW_TRANSFER_ACTION_CONTINUE:
+ break;
+ case CXL_FW_TRANSFER_ACTION_ABORT:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(fw + offset, transfer->data, length);
+ return 0;
+}
+
+static int mock_activate_fw(struct cxl_mockmem_data *mdata,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_mbox_activate_fw *activate = cmd->payload_in;
+
+ if (activate->slot == 0 || activate->slot > FW_SLOTS)
+ return -EINVAL;
+
+ switch (activate->action) {
+ case CXL_FW_ACTIVATE_ONLINE:
+ mdata->fw_slot = activate->slot;
+ mdata->fw_staged = 0;
+ return 0;
+ case CXL_FW_ACTIVATE_OFFLINE:
+ mdata->fw_staged = activate->slot;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
+ struct cxl_mbox_cmd *cmd)
+{
+ struct cxl_dev_state *cxlds = &mds->cxlds;
struct device *dev = cxlds->dev;
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
int rc = -EIO;
switch (cmd->opcode) {
@@ -1127,49 +1277,55 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
rc = mock_gsl(cmd);
break;
case CXL_MBOX_OP_GET_LOG:
- rc = mock_get_log(cxlds, cmd);
+ rc = mock_get_log(mds, cmd);
break;
case CXL_MBOX_OP_IDENTIFY:
if (cxlds->rcd)
- rc = mock_rcd_id(cxlds, cmd);
+ rc = mock_rcd_id(cmd);
else
- rc = mock_id(cxlds, cmd);
+ rc = mock_id(cmd);
break;
case CXL_MBOX_OP_GET_LSA:
- rc = mock_get_lsa(cxlds, cmd);
+ rc = mock_get_lsa(mdata, cmd);
break;
case CXL_MBOX_OP_GET_PARTITION_INFO:
- rc = mock_partition_info(cxlds, cmd);
+ rc = mock_partition_info(cmd);
break;
case CXL_MBOX_OP_GET_EVENT_RECORD:
- rc = mock_get_event(cxlds, cmd);
+ rc = mock_get_event(dev, cmd);
break;
case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
- rc = mock_clear_event(cxlds, cmd);
+ rc = mock_clear_event(dev, cmd);
break;
case CXL_MBOX_OP_SET_LSA:
- rc = mock_set_lsa(cxlds, cmd);
+ rc = mock_set_lsa(mdata, cmd);
break;
case CXL_MBOX_OP_GET_HEALTH_INFO:
- rc = mock_health_info(cxlds, cmd);
+ rc = mock_health_info(cmd);
+ break;
+ case CXL_MBOX_OP_SANITIZE:
+ rc = mock_sanitize(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_SECURE_ERASE:
+ rc = mock_secure_erase(mdata, cmd);
break;
case CXL_MBOX_OP_GET_SECURITY_STATE:
- rc = mock_get_security_state(cxlds, cmd);
+ rc = mock_get_security_state(mdata, cmd);
break;
case CXL_MBOX_OP_SET_PASSPHRASE:
- rc = mock_set_passphrase(cxlds, cmd);
+ rc = mock_set_passphrase(mdata, cmd);
break;
case CXL_MBOX_OP_DISABLE_PASSPHRASE:
- rc = mock_disable_passphrase(cxlds, cmd);
+ rc = mock_disable_passphrase(mdata, cmd);
break;
case CXL_MBOX_OP_FREEZE_SECURITY:
- rc = mock_freeze_security(cxlds, cmd);
+ rc = mock_freeze_security(mdata, cmd);
break;
case CXL_MBOX_OP_UNLOCK:
- rc = mock_unlock_security(cxlds, cmd);
+ rc = mock_unlock_security(mdata, cmd);
break;
case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
- rc = mock_passphrase_secure_erase(cxlds, cmd);
+ rc = mock_passphrase_secure_erase(mdata, cmd);
break;
case CXL_MBOX_OP_GET_POISON:
rc = mock_get_poison(cxlds, cmd);
@@ -1180,6 +1336,15 @@ static int cxl_mock_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *
case CXL_MBOX_OP_CLEAR_POISON:
rc = mock_clear_poison(cxlds, cmd);
break;
+ case CXL_MBOX_OP_GET_FW_INFO:
+ rc = mock_fw_info(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_TRANSFER_FW:
+ rc = mock_transfer_fw(mdata, cmd);
+ break;
+ case CXL_MBOX_OP_ACTIVATE_FW:
+ rc = mock_activate_fw(mdata, cmd);
+ break;
default:
break;
}
@@ -1195,6 +1360,11 @@ static void label_area_release(void *lsa)
vfree(lsa);
}
+static void fw_buf_release(void *buf)
+{
+ vfree(buf);
+}
+
static bool is_rcd(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
@@ -1215,6 +1385,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cxl_memdev *cxlmd;
+ struct cxl_memdev_state *mds;
struct cxl_dev_state *cxlds;
struct cxl_mockmem_data *mdata;
int rc;
@@ -1227,52 +1398,67 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
mdata->lsa = vmalloc(LSA_SIZE);
if (!mdata->lsa)
return -ENOMEM;
+ mdata->fw = vmalloc(FW_SIZE);
+ if (!mdata->fw)
+ return -ENOMEM;
+ mdata->fw_slot = 2;
+
rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
if (rc)
return rc;
- cxlds = cxl_dev_state_create(dev);
- if (IS_ERR(cxlds))
- return PTR_ERR(cxlds);
+ rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
+ if (rc)
+ return rc;
+
+ mds = cxl_memdev_state_create(dev);
+ if (IS_ERR(mds))
+ return PTR_ERR(mds);
+ mds->mbox_send = cxl_mock_mbox_send;
+ mds->payload_size = SZ_4K;
+ mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
+
+ cxlds = &mds->cxlds;
cxlds->serial = pdev->id;
- cxlds->mbox_send = cxl_mock_mbox_send;
- cxlds->payload_size = SZ_4K;
- cxlds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
if (is_rcd(pdev)) {
cxlds->rcd = true;
cxlds->component_reg_phys = CXL_RESOURCE_NONE;
}
- rc = cxl_enumerate_cmds(cxlds);
+ rc = cxl_enumerate_cmds(mds);
if (rc)
return rc;
- rc = cxl_poison_state_init(cxlds);
+ rc = cxl_poison_state_init(mds);
if (rc)
return rc;
- rc = cxl_set_timestamp(cxlds);
+ rc = cxl_set_timestamp(mds);
if (rc)
return rc;
cxlds->media_ready = true;
- rc = cxl_dev_state_identify(cxlds);
+ rc = cxl_dev_state_identify(mds);
if (rc)
return rc;
- rc = cxl_mem_create_range_info(cxlds);
+ rc = cxl_mem_create_range_info(mds);
if (rc)
return rc;
- mdata->mes.cxlds = cxlds;
+ mdata->mes.mds = mds;
cxl_mock_add_event_logs(&mdata->mes);
cxlmd = devm_cxl_add_memdev(cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- cxl_mem_get_event_records(cxlds, CXLDEV_EVENT_STATUS_ALL);
+ rc = cxl_memdev_setup_fw_upload(mds);
+ if (rc)
+ return rc;
+
+ cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
return 0;
}
@@ -1310,9 +1496,40 @@ static ssize_t security_lock_store(struct device *dev, struct device_attribute *
static DEVICE_ATTR_RW(security_lock);
+static ssize_t fw_buf_checksum_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+ u8 hash[SHA256_DIGEST_SIZE];
+ unsigned char *hstr, *hptr;
+ struct sha256_state sctx;
+ ssize_t written = 0;
+ int i;
+
+ sha256_init(&sctx);
+ sha256_update(&sctx, mdata->fw, mdata->fw_size);
+ sha256_final(&sctx, hash);
+
+ hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
+ if (!hstr)
+ return -ENOMEM;
+
+ hptr = hstr;
+ for (i = 0; i < SHA256_DIGEST_SIZE; i++)
+ hptr += sprintf(hptr, "%02x", hash[i]);
+
+ written = sysfs_emit(buf, "%s\n", hstr);
+
+ kfree(hstr);
+ return written;
+}
+
+static DEVICE_ATTR_RO(fw_buf_checksum);
+
static struct attribute *cxl_mock_mem_attrs[] = {
&dev_attr_security_lock.attr,
&dev_attr_event_trigger.attr,
+ &dev_attr_fw_buf_checksum.attr,
NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);
diff --git a/tools/testing/cxl/test/mock.c b/tools/testing/cxl/test/mock.c
index 284416527644..1a61e68e3095 100644
--- a/tools/testing/cxl/test/mock.c
+++ b/tools/testing/cxl/test/mock.c
@@ -139,7 +139,7 @@ struct cxl_hdm *__wrap_devm_cxl_setup_hdm(struct cxl_port *port,
struct cxl_hdm *cxlhdm;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
- if (ops && ops->is_mock_port(port->uport))
+ if (ops && ops->is_mock_port(port->uport_dev))
cxlhdm = ops->devm_cxl_setup_hdm(port, info);
else
cxlhdm = devm_cxl_setup_hdm(port, info);
@@ -149,27 +149,12 @@ struct cxl_hdm *__wrap_devm_cxl_setup_hdm(struct cxl_port *port,
}
EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_setup_hdm, CXL);
-int __wrap_devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm)
-{
- int index, rc;
- struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
-
- if (ops && ops->is_mock_port(port->uport))
- rc = 0;
- else
- rc = devm_cxl_enable_hdm(port, cxlhdm);
- put_cxl_mock_ops(index);
-
- return rc;
-}
-EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_enable_hdm, CXL);
-
int __wrap_devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
int rc, index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
- if (ops && ops->is_mock_port(port->uport))
+ if (ops && ops->is_mock_port(port->uport_dev))
rc = ops->devm_cxl_add_passthrough_decoder(port);
else
rc = devm_cxl_add_passthrough_decoder(port);
@@ -186,7 +171,7 @@ int __wrap_devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
struct cxl_port *port = cxlhdm->port;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
- if (ops && ops->is_mock_port(port->uport))
+ if (ops && ops->is_mock_port(port->uport_dev))
rc = ops->devm_cxl_enumerate_decoders(cxlhdm, info);
else
rc = devm_cxl_enumerate_decoders(cxlhdm, info);
@@ -201,7 +186,7 @@ int __wrap_devm_cxl_port_enumerate_dports(struct cxl_port *port)
int rc, index;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
- if (ops && ops->is_mock_port(port->uport))
+ if (ops && ops->is_mock_port(port->uport_dev))
rc = ops->devm_cxl_port_enumerate_dports(port);
else
rc = devm_cxl_port_enumerate_dports(port);
@@ -259,24 +244,46 @@ int __wrap_cxl_dvsec_rr_decode(struct device *dev, int dvsec,
}
EXPORT_SYMBOL_NS_GPL(__wrap_cxl_dvsec_rr_decode, CXL);
-resource_size_t __wrap_cxl_rcrb_to_component(struct device *dev,
- resource_size_t rcrb,
- enum cxl_rcrb which)
+struct cxl_dport *__wrap_devm_cxl_add_rch_dport(struct cxl_port *port,
+ struct device *dport_dev,
+ int port_id,
+ resource_size_t rcrb)
+{
+ int index;
+ struct cxl_dport *dport;
+ struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+ if (ops && ops->is_mock_port(dport_dev)) {
+ dport = devm_cxl_add_dport(port, dport_dev, port_id,
+ CXL_RESOURCE_NONE);
+ if (!IS_ERR(dport)) {
+ dport->rcrb.base = rcrb;
+ dport->rch = true;
+ }
+ } else
+ dport = devm_cxl_add_rch_dport(port, dport_dev, port_id, rcrb);
+ put_cxl_mock_ops(index);
+
+ return dport;
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_add_rch_dport, CXL);
+
+resource_size_t __wrap_cxl_rcd_component_reg_phys(struct device *dev,
+ struct cxl_dport *dport)
{
int index;
resource_size_t component_reg_phys;
struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
if (ops && ops->is_mock_port(dev))
- component_reg_phys =
- ops->cxl_rcrb_to_component(dev, rcrb, which);
+ component_reg_phys = CXL_RESOURCE_NONE;
else
- component_reg_phys = cxl_rcrb_to_component(dev, rcrb, which);
+ component_reg_phys = cxl_rcd_component_reg_phys(dev, dport);
put_cxl_mock_ops(index);
return component_reg_phys;
}
-EXPORT_SYMBOL_NS_GPL(__wrap_cxl_rcrb_to_component, CXL);
+EXPORT_SYMBOL_NS_GPL(__wrap_cxl_rcd_component_reg_phys, CXL);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(ACPI);
diff --git a/tools/testing/cxl/test/mock.h b/tools/testing/cxl/test/mock.h
index bef8817b01f2..a94223750346 100644
--- a/tools/testing/cxl/test/mock.h
+++ b/tools/testing/cxl/test/mock.h
@@ -15,9 +15,6 @@ struct cxl_mock_ops {
acpi_string pathname,
struct acpi_object_list *arguments,
unsigned long long *data);
- resource_size_t (*cxl_rcrb_to_component)(struct device *dev,
- resource_size_t rcrb,
- enum cxl_rcrb which);
struct acpi_pci_root *(*acpi_pci_find_root)(acpi_handle handle);
bool (*is_mock_bus)(struct pci_bus *bus);
bool (*is_mock_port)(struct device *dev);
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index e4e2d1650dd5..005043bd9623 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -3240,11 +3240,6 @@ static int nfit_test_probe(struct platform_device *pdev)
return 0;
}
-static int nfit_test_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static void nfit_test_release(struct device *dev)
{
struct nfit_test *nfit_test = to_nfit_test(dev);
@@ -3259,7 +3254,6 @@ static const struct platform_device_id nfit_test_id[] = {
static struct platform_driver nfit_test_driver = {
.probe = nfit_test_probe,
- .remove = nfit_test_remove,
.driver = {
.name = KBUILD_MODNAME,
},
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index b5f7a996c4d0..b00583d1eace 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -207,7 +207,36 @@ typedef struct nfit_test_resource *(*nfit_test_lookup_fn)(resource_size_t);
typedef union acpi_object *(*nfit_test_evaluate_dsm_fn)(acpi_handle handle,
const guid_t *guid, u64 rev, u64 func,
union acpi_object *argv4);
+void __iomem *__wrap_devm_ioremap(struct device *dev,
+ resource_size_t offset, unsigned long size);
+void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
+ size_t size, unsigned long flags);
+void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
+pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
+void *__wrap_memremap(resource_size_t offset, size_t size,
+ unsigned long flags);
+void __wrap_devm_memunmap(struct device *dev, void *addr);
+void __iomem *__wrap_ioremap(resource_size_t offset, unsigned long size);
+void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size);
void __wrap_iounmap(volatile void __iomem *addr);
+void __wrap_memunmap(void *addr);
+struct resource *__wrap___request_region(struct resource *parent,
+ resource_size_t start, resource_size_t n, const char *name,
+ int flags);
+int __wrap_insert_resource(struct resource *parent, struct resource *res);
+int __wrap_remove_resource(struct resource *res);
+struct resource *__wrap___devm_request_region(struct device *dev,
+ struct resource *parent, resource_size_t start,
+ resource_size_t n, const char *name);
+void __wrap___release_region(struct resource *parent, resource_size_t start,
+ resource_size_t n);
+void __wrap___devm_release_region(struct device *dev, struct resource *parent,
+ resource_size_t start, resource_size_t n);
+acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
+ struct acpi_object_list *p, struct acpi_buffer *buf);
+union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
+ u64 rev, u64 func, union acpi_object *argv4);
+
void nfit_test_setup(nfit_test_lookup_fn lookup,
nfit_test_evaluate_dsm_fn evaluate);
void nfit_test_teardown(void);
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 6b456c5ecec1..666b56f22a41 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -87,6 +87,7 @@ TARGETS += timers
endif
TARGETS += tmpfs
TARGETS += tpm2
+TARGETS += tty
TARGETS += user
TARGETS += vDSO
TARGETS += mm
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
new file mode 100644
index 000000000000..b89de1771655
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
@@ -0,0 +1,58 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Generic dynamic event - add/remove probes with BTF arguments
+# requires: dynamic_events "<argname>":README
+
+KPROBES=
+FPROBES=
+
+if grep -qF "p[:[<group>/][<event>]] <place> [<args>]" README ; then
+ KPROBES=yes
+fi
+if grep -qF "f[:[<group>/][<event>]] <func-name>[%return] [<args>]" README ; then
+ FPROBES=yes
+fi
+
+if [ -z "$KPROBES" -a "$FPROBES" ] ; then
+ exit_unsupported
+fi
+
+echo 0 > events/enable
+echo > dynamic_events
+
+TP=kfree
+
+if [ "$FPROBES" ] ; then
+echo "f:fpevent $TP object" >> dynamic_events
+echo "t:tpevent $TP ptr" >> dynamic_events
+
+grep -q "fpevent.*object=object" dynamic_events
+grep -q "tpevent.*ptr=ptr" dynamic_events
+
+echo > dynamic_events
+
+echo "f:fpevent $TP "'$arg1' >> dynamic_events
+grep -q "fpevent.*object=object" dynamic_events
+echo > dynamic_events
+
+echo "f:fpevent $TP "'$arg*' >> dynamic_events
+echo "t:tpevent $TP "'$arg*' >> dynamic_events
+
+grep -q "fpevent.*object=object" dynamic_events
+grep -q "tpevent.*ptr=ptr" dynamic_events
+! grep -q "tpevent.*_data" dynamic_events
+fi
+
+echo > dynamic_events
+
+if [ "$KPROBES" ] ; then
+echo "p:kpevent $TP object" >> dynamic_events
+grep -q "kpevent.*object=object" dynamic_events
+
+echo > dynamic_events
+
+echo "p:kpevent $TP "'$arg*' >> dynamic_events
+grep -q "kpevent.*object=object" dynamic_events
+fi
+
+clear_trace
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
new file mode 100644
index 000000000000..dc25bcf4f9e2
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe.tc
@@ -0,0 +1,26 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Generic dynamic event - add/remove fprobe events
+# requires: dynamic_events "f[:[<group>/][<event>]] <func-name>[%return] [<args>]":README
+
+echo 0 > events/enable
+echo > dynamic_events
+
+PLACE=$FUNCTION_FORK
+
+echo "f:myevent1 $PLACE" >> dynamic_events
+echo "f:myevent2 $PLACE%return" >> dynamic_events
+
+grep -q myevent1 dynamic_events
+grep -q myevent2 dynamic_events
+test -d events/fprobes/myevent1
+test -d events/fprobes/myevent2
+
+echo "-:myevent2" >> dynamic_events
+
+grep -q myevent1 dynamic_events
+! grep -q myevent2 dynamic_events
+
+echo > dynamic_events
+
+clear_trace
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe.tc
new file mode 100644
index 000000000000..155792eaeee5
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_tprobe.tc
@@ -0,0 +1,27 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Generic dynamic event - add/remove tracepoint probe events
+# requires: dynamic_events "t[:[<group>/][<event>]] <tracepoint> [<args>]":README
+
+echo 0 > events/enable
+echo > dynamic_events
+
+TRACEPOINT1=kmem_cache_alloc
+TRACEPOINT2=kmem_cache_free
+
+echo "t:myevent1 $TRACEPOINT1" >> dynamic_events
+echo "t:myevent2 $TRACEPOINT2" >> dynamic_events
+
+grep -q myevent1 dynamic_events
+grep -q myevent2 dynamic_events
+test -d events/tracepoints/myevent1
+test -d events/tracepoints/myevent2
+
+echo "-:myevent2" >> dynamic_events
+
+grep -q myevent1 dynamic_events
+! grep -q myevent2 dynamic_events
+
+echo > dynamic_events
+
+clear_trace
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc
new file mode 100644
index 000000000000..812f5b3f6055
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc
@@ -0,0 +1,111 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Fprobe event parser error log check
+# requires: dynamic_events "f[:[<group>/][<event>]] <func-name>[%return] [<args>]":README
+
+check_error() { # command-with-error-pos-by-^
+ ftrace_errlog_check 'trace_fprobe' "$1" 'dynamic_events'
+}
+
+case `uname -m` in
+x86_64|i[3456]86)
+ REG=%ax ;;
+aarch64)
+ REG=%x0 ;;
+*)
+ REG=%r0 ;;
+esac
+
+check_error 'f^100 vfs_read' # MAXACT_NO_KPROBE
+check_error 'f^1a111 vfs_read' # BAD_MAXACT
+check_error 'f^100000 vfs_read' # MAXACT_TOO_BIG
+
+check_error 'f ^non_exist_func' # BAD_PROBE_ADDR (enoent)
+check_error 'f ^vfs_read+10' # BAD_PROBE_ADDR
+check_error 'f:^/bar vfs_read' # NO_GROUP_NAME
+check_error 'f:^12345678901234567890123456789012345678901234567890123456789012345/bar vfs_read' # GROUP_TOO_LONG
+
+check_error 'f:^foo.1/bar vfs_read' # BAD_GROUP_NAME
+check_error 'f:^ vfs_read' # NO_EVENT_NAME
+check_error 'f:foo/^12345678901234567890123456789012345678901234567890123456789012345 vfs_read' # EVENT_TOO_LONG
+check_error 'f:foo/^bar.1 vfs_read' # BAD_EVENT_NAME
+
+check_error 'f vfs_read ^$retval' # RETVAL_ON_PROBE
+check_error 'f vfs_read ^$stack10000' # BAD_STACK_NUM
+
+check_error 'f vfs_read ^$arg10000' # BAD_ARG_NUM
+
+check_error 'f vfs_read ^$none_var' # BAD_VAR
+check_error 'f vfs_read ^'$REG # BAD_VAR
+
+check_error 'f vfs_read ^@12345678abcde' # BAD_MEM_ADDR
+check_error 'f vfs_read ^@+10' # FILE_ON_KPROBE
+
+grep -q "imm-value" README && \
+check_error 'f vfs_read arg1=\^x' # BAD_IMM
+grep -q "imm-string" README && \
+check_error 'f vfs_read arg1=\"abcd^' # IMMSTR_NO_CLOSE
+
+check_error 'f vfs_read ^+0@0)' # DEREF_NEED_BRACE
+check_error 'f vfs_read ^+0ab1(@0)' # BAD_DEREF_OFFS
+check_error 'f vfs_read +0(+0(@0^)' # DEREF_OPEN_BRACE
+
+if grep -A1 "fetcharg:" README | grep -q '\$comm' ; then
+check_error 'f vfs_read +0(^$comm)' # COMM_CANT_DEREF
+fi
+
+check_error 'f vfs_read ^&1' # BAD_FETCH_ARG
+
+
+# We've introduced this limitation with array support
+if grep -q ' <type>\\\[<array-size>\\\]' README; then
+check_error 'f vfs_read +0(^+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(@0))))))))))))))' # TOO_MANY_OPS?
+check_error 'f vfs_read +0(@11):u8[10^' # ARRAY_NO_CLOSE
+check_error 'f vfs_read +0(@11):u8[10]^a' # BAD_ARRAY_SUFFIX
+check_error 'f vfs_read +0(@11):u8[^10a]' # BAD_ARRAY_NUM
+check_error 'f vfs_read +0(@11):u8[^256]' # ARRAY_TOO_BIG
+fi
+
+check_error 'f vfs_read @11:^unknown_type' # BAD_TYPE
+check_error 'f vfs_read $stack0:^string' # BAD_STRING
+check_error 'f vfs_read @11:^b10@a/16' # BAD_BITFIELD
+
+check_error 'f vfs_read ^arg123456789012345678901234567890=@11' # ARG_NAME_TOO_LOG
+check_error 'f vfs_read ^=@11' # NO_ARG_NAME
+check_error 'f vfs_read ^var.1=@11' # BAD_ARG_NAME
+check_error 'f vfs_read var1=@11 ^var1=@12' # USED_ARG_NAME
+check_error 'f vfs_read ^+1234567(+1234567(+1234567(+1234567(+1234567(+1234567(@1234))))))' # ARG_TOO_LONG
+check_error 'f vfs_read arg1=^' # NO_ARG_BODY
+
+
+# multiprobe errors
+if grep -q "Create/append/" README && grep -q "imm-value" README; then
+echo "f:fprobes/testevent $FUNCTION_FORK" > dynamic_events
+check_error '^f:fprobes/testevent do_exit%return' # DIFF_PROBE_TYPE
+
+# Explicitly use printf "%s" to not interpret \1
+printf "%s" "f:fprobes/testevent $FUNCTION_FORK abcd=\\1" > dynamic_events
+check_error "f:fprobes/testevent $FUNCTION_FORK ^bcd=\\1" # DIFF_ARG_TYPE
+check_error "f:fprobes/testevent $FUNCTION_FORK ^abcd=\\1:u8" # DIFF_ARG_TYPE
+check_error "f:fprobes/testevent $FUNCTION_FORK ^abcd=\\\"foo\"" # DIFF_ARG_TYPE
+check_error "^f:fprobes/testevent $FUNCTION_FORK abcd=\\1" # SAME_PROBE
+fi
+
+# %return suffix errors
+check_error 'f vfs_read^%hoge' # BAD_ADDR_SUFFIX
+
+# BTF arguments errors
+if grep -q "<argname>" README; then
+check_error 'f vfs_read args=^$arg*' # BAD_VAR_ARGS
+check_error 'f vfs_read +0(^$arg*)' # BAD_VAR_ARGS
+check_error 'f vfs_read $arg* ^$arg*' # DOUBLE_ARGS
+check_error 'f vfs_read%return ^$arg*' # NOFENTRY_ARGS
+check_error 'f vfs_read ^hoge' # NO_BTFARG
+check_error 'f kfree ^$arg10' # NO_BTFARG (exceed the number of parameters)
+check_error 'f kfree%return ^$retval' # NO_RETVAL
+else
+check_error 'f vfs_read ^$arg*' # NOSUP_BTFARG
+check_error 't kfree ^$arg*' # NOSUP_BTFARG
+fi
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc
new file mode 100644
index 000000000000..da117b8f1d12
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/tprobe_syntax_errors.tc
@@ -0,0 +1,82 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Tracepoint probe event parser error log check
+# requires: dynamic_events "t[:[<group>/][<event>]] <tracepoint> [<args>]":README
+
+check_error() { # command-with-error-pos-by-^
+ ftrace_errlog_check 'trace_fprobe' "$1" 'dynamic_events'
+}
+
+check_error 't^100 kfree' # BAD_MAXACT_TYPE
+
+check_error 't ^non_exist_tracepoint' # NO_TRACEPOINT
+check_error 't:^/bar kfree' # NO_GROUP_NAME
+check_error 't:^12345678901234567890123456789012345678901234567890123456789012345/bar kfree' # GROUP_TOO_LONG
+
+check_error 't:^foo.1/bar kfree' # BAD_GROUP_NAME
+check_error 't:^ kfree' # NO_EVENT_NAME
+check_error 't:foo/^12345678901234567890123456789012345678901234567890123456789012345 kfree' # EVENT_TOO_LONG
+check_error 't:foo/^bar.1 kfree' # BAD_EVENT_NAME
+
+check_error 't kfree ^$retval' # RETVAL_ON_PROBE
+check_error 't kfree ^$stack10000' # BAD_STACK_NUM
+
+check_error 't kfree ^$arg10000' # BAD_ARG_NUM
+
+check_error 't kfree ^$none_var' # BAD_VAR
+check_error 't kfree ^%rax' # BAD_VAR
+
+check_error 't kfree ^@12345678abcde' # BAD_MEM_ADDR
+check_error 't kfree ^@+10' # FILE_ON_KPROBE
+
+grep -q "imm-value" README && \
+check_error 't kfree arg1=\^x' # BAD_IMM
+grep -q "imm-string" README && \
+check_error 't kfree arg1=\"abcd^' # IMMSTR_NO_CLOSE
+
+check_error 't kfree ^+0@0)' # DEREF_NEED_BRACE
+check_error 't kfree ^+0ab1(@0)' # BAD_DEREF_OFFS
+check_error 't kfree +0(+0(@0^)' # DEREF_OPEN_BRACE
+
+if grep -A1 "fetcharg:" README | grep -q '\$comm' ; then
+check_error 't kfree +0(^$comm)' # COMM_CANT_DEREF
+fi
+
+check_error 't kfree ^&1' # BAD_FETCH_ARG
+
+
+# We've introduced this limitation with array support
+if grep -q ' <type>\\\[<array-size>\\\]' README; then
+check_error 't kfree +0(^+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(+0(@0))))))))))))))' # TOO_MANY_OPS?
+check_error 't kfree +0(@11):u8[10^' # ARRAY_NO_CLOSE
+check_error 't kfree +0(@11):u8[10]^a' # BAD_ARRAY_SUFFIX
+check_error 't kfree +0(@11):u8[^10a]' # BAD_ARRAY_NUM
+check_error 't kfree +0(@11):u8[^256]' # ARRAY_TOO_BIG
+fi
+
+check_error 't kfree @11:^unknown_type' # BAD_TYPE
+check_error 't kfree $stack0:^string' # BAD_STRING
+check_error 't kfree @11:^b10@a/16' # BAD_BITFIELD
+
+check_error 't kfree ^arg123456789012345678901234567890=@11' # ARG_NAME_TOO_LOG
+check_error 't kfree ^=@11' # NO_ARG_NAME
+check_error 't kfree ^var.1=@11' # BAD_ARG_NAME
+check_error 't kfree var1=@11 ^var1=@12' # USED_ARG_NAME
+check_error 't kfree ^+1234567(+1234567(+1234567(+1234567(+1234567(+1234567(@1234))))))' # ARG_TOO_LONG
+check_error 't kfree arg1=^' # NO_ARG_BODY
+
+
+# multiprobe errors
+if grep -q "Create/append/" README && grep -q "imm-value" README; then
+echo "t:tracepoint/testevent kfree" > dynamic_events
+check_error '^f:tracepoint/testevent kfree' # DIFF_PROBE_TYPE
+
+# Explicitly use printf "%s" to not interpret \1
+printf "%s" "t:tracepoints/testevent kfree abcd=\\1" > dynamic_events
+check_error "t:tracepoints/testevent kfree ^bcd=\\1" # DIFF_ARG_TYPE
+check_error "t:tracepoints/testevent kfree ^abcd=\\1:u8" # DIFF_ARG_TYPE
+check_error "t:tracepoints/testevent kfree ^abcd=\\\"foo\"" # DIFF_ARG_TYPE
+check_error "^t:tracepoints/testevent kfree abcd=\\1" # SAME_PROBE
+fi
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-retval.tc b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-retval.tc
new file mode 100644
index 000000000000..e34c0bdef3ed
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/fgraph-retval.tc
@@ -0,0 +1,44 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: ftrace - function graph print function return value
+# requires: options/funcgraph-retval options/funcgraph-retval-hex function_graph:tracer
+
+# Make sure that funcgraph-retval works
+
+fail() { # msg
+ echo $1
+ exit_fail
+}
+
+disable_tracing
+clear_trace
+
+# get self PID, can not use $$, because it is PPID
+read PID _ < /proc/self/stat
+
+[ -f set_ftrace_filter ] && echo proc_reg_write > set_ftrace_filter
+[ -f set_ftrace_pid ] && echo ${PID} > set_ftrace_pid
+echo function_graph > current_tracer
+echo 1 > options/funcgraph-retval
+
+set +e
+enable_tracing
+echo > /proc/interrupts
+disable_tracing
+set -e
+
+: "Test printing the error code in signed decimal format"
+echo 0 > options/funcgraph-retval-hex
+count=`cat trace | grep 'proc_reg_write' | grep '= -5' | wc -l`
+if [ $count -eq 0 ]; then
+ fail "Return value can not be printed in signed decimal format"
+fi
+
+: "Test printing the error code in hexadecimal format"
+echo 1 > options/funcgraph-retval-hex
+count=`cat trace | grep 'proc_reg_write' | grep 'fffffffb' | wc -l`
+if [ $count -eq 0 ]; then
+ fail "Return value can not be printed in hexadecimal format"
+fi
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_insn_boundary.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_insn_boundary.tc
new file mode 100644
index 000000000000..4f7cc318f331
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_insn_boundary.tc
@@ -0,0 +1,19 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2023 Akanksha J N, IBM corporation
+# description: Register multiple kprobe events in a function
+# requires: kprobe_events
+
+for i in `seq 0 255`; do
+ echo p $FUNCTION_FORK+${i} >> kprobe_events || continue
+done
+
+cat kprobe_events >> $testlog
+
+echo 1 > events/kprobes/enable
+( echo "forked" )
+echo 0 > events/kprobes/enable
+echo > kprobe_events
+echo "Waiting for unoptimizing & freeing"
+sleep 5
+echo "Done"
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
index 9e85d3019ff0..65fbb26fd58c 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
@@ -8,7 +8,7 @@ check_error() { # command-with-error-pos-by-^
}
if grep -q 'r\[maxactive\]' README; then
-check_error 'p^100 vfs_read' # MAXACT_NO_KPROBE
+check_error 'p^100 vfs_read' # BAD_MAXACT_TYPE
check_error 'r^1a111 vfs_read' # BAD_MAXACT
check_error 'r^100000 vfs_read' # MAXACT_TOO_BIG
fi
@@ -103,4 +103,18 @@ check_error 'p vfs_read^%hoge' # BAD_ADDR_SUFFIX
check_error 'p ^vfs_read+10%return' # BAD_RETPROBE
fi
+# BTF arguments errors
+if grep -q "<argname>" README; then
+check_error 'p vfs_read args=^$arg*' # BAD_VAR_ARGS
+check_error 'p vfs_read +0(^$arg*)' # BAD_VAR_ARGS
+check_error 'p vfs_read $arg* ^$arg*' # DOUBLE_ARGS
+check_error 'r vfs_read ^$arg*' # NOFENTRY_ARGS
+check_error 'p vfs_read+8 ^$arg*' # NOFENTRY_ARGS
+check_error 'p vfs_read ^hoge' # NO_BTFARG
+check_error 'p kfree ^$arg10' # NO_BTFARG (exceed the number of parameters)
+check_error 'r kfree ^$retval' # NO_RETVAL
+else
+check_error 'p vfs_read ^$arg*' # NOSUP_BTFARG
+fi
+
exit 0
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 4761b768b773..c692cc86e7da 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -61,6 +61,7 @@ TEST_PROGS_x86_64 += x86_64/nx_huge_pages_test.sh
# Compiled test targets
TEST_GEN_PROGS_x86_64 = x86_64/cpuid_test
TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
+TEST_GEN_PROGS_x86_64 += x86_64/dirty_log_page_splitting_test
TEST_GEN_PROGS_x86_64 += x86_64/get_msr_index_features
TEST_GEN_PROGS_x86_64 += x86_64/exit_on_emulation_failure_test
TEST_GEN_PROGS_x86_64 += x86_64/fix_hypercall_test
@@ -164,6 +165,7 @@ TEST_GEN_PROGS_s390x = s390x/memop
TEST_GEN_PROGS_s390x += s390x/resets
TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += s390x/tprot
+TEST_GEN_PROGS_s390x += s390x/cmma_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
@@ -184,6 +186,8 @@ TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH_DIR))
TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH_DIR))
LIBKVM += $(LIBKVM_$(ARCH_DIR))
+OVERRIDE_TARGETS = 1
+
# lib.mak defines $(OUTPUT), prepends $(OUTPUT)/ to $(TEST_GEN_PROGS), and most
# importantly defines, i.e. overwrites, $(CC) (unless `make -e` or `make CC=`,
# which causes the environment variable to override the makefile).
@@ -198,7 +202,7 @@ else
LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
- -Wno-gnu-variable-sized-type-not-at-end \
+ -Wno-gnu-variable-sized-type-not-at-end -MD\
-fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
@@ -225,7 +229,18 @@ LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
-EXTRA_CLEAN += $(LIBKVM_OBJS) cscope.*
+TEST_GEN_OBJ = $(patsubst %, %.o, $(TEST_GEN_PROGS))
+TEST_GEN_OBJ += $(patsubst %, %.o, $(TEST_GEN_PROGS_EXTENDED))
+TEST_DEP_FILES = $(patsubst %.o, %.d, $(TEST_GEN_OBJ))
+TEST_DEP_FILES += $(patsubst %.o, %.d, $(LIBKVM_OBJS))
+-include $(TEST_DEP_FILES)
+
+$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): %: %.o
+ $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $< $(LIBKVM_OBJS) $(LDLIBS) -o $@
+$(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c
+ $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
+
+EXTRA_CLEAN += $(LIBKVM_OBJS) $(TEST_DEP_FILES) $(TEST_GEN_OBJ) cscope.*
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
index 2439c4043fed..09c116a82a84 100644
--- a/tools/testing/selftests/kvm/demand_paging_test.c
+++ b/tools/testing/selftests/kvm/demand_paging_test.c
@@ -128,6 +128,7 @@ static void prefault_mem(void *alias, uint64_t len)
static void run_test(enum vm_guest_mode mode, void *arg)
{
+ struct memstress_vcpu_args *vcpu_args;
struct test_params *p = arg;
struct uffd_desc **uffd_descs = NULL;
struct timespec start;
@@ -145,24 +146,24 @@ static void run_test(enum vm_guest_mode mode, void *arg)
"Failed to allocate buffer for guest data pattern");
memset(guest_data_prototype, 0xAB, demand_paging_size);
+ if (p->uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
+ for (i = 0; i < nr_vcpus; i++) {
+ vcpu_args = &memstress_args.vcpu_args[i];
+ prefault_mem(addr_gpa2alias(vm, vcpu_args->gpa),
+ vcpu_args->pages * memstress_args.guest_page_size);
+ }
+ }
+
if (p->uffd_mode) {
uffd_descs = malloc(nr_vcpus * sizeof(struct uffd_desc *));
TEST_ASSERT(uffd_descs, "Memory allocation failed");
-
for (i = 0; i < nr_vcpus; i++) {
- struct memstress_vcpu_args *vcpu_args;
void *vcpu_hva;
- void *vcpu_alias;
vcpu_args = &memstress_args.vcpu_args[i];
/* Cache the host addresses of the region */
vcpu_hva = addr_gpa2hva(vm, vcpu_args->gpa);
- vcpu_alias = addr_gpa2alias(vm, vcpu_args->gpa);
-
- prefault_mem(vcpu_alias,
- vcpu_args->pages * memstress_args.guest_page_size);
-
/*
* Set up user fault fd to handle demand paging
* requests.
@@ -207,10 +208,11 @@ static void help(char *name)
{
puts("");
printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n"
- " [-b memory] [-s type] [-v vcpus] [-o]\n", name);
+ " [-b memory] [-s type] [-v vcpus] [-c cpu_list] [-o]\n", name);
guest_modes_help();
printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
" UFFD registration mode: 'MISSING' or 'MINOR'.\n");
+ kvm_print_vcpu_pinning_help();
printf(" -d: add a delay in usec to the User Fault\n"
" FD handler to simulate demand paging\n"
" overheads. Ignored without -u.\n");
@@ -228,6 +230,7 @@ static void help(char *name)
int main(int argc, char *argv[])
{
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+ const char *cpulist = NULL;
struct test_params p = {
.src_type = DEFAULT_VM_MEM_SRC,
.partition_vcpu_memory_access = true,
@@ -236,7 +239,7 @@ int main(int argc, char *argv[])
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:o")) != -1) {
+ while ((opt = getopt(argc, argv, "hm:u:d:b:s:v:c:o")) != -1) {
switch (opt) {
case 'm':
guest_modes_cmdline(optarg);
@@ -263,6 +266,9 @@ int main(int argc, char *argv[])
TEST_ASSERT(nr_vcpus <= max_vcpus,
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
break;
+ case 'c':
+ cpulist = optarg;
+ break;
case 'o':
p.partition_vcpu_memory_access = false;
break;
@@ -278,6 +284,12 @@ int main(int argc, char *argv[])
TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
}
+ if (cpulist) {
+ kvm_parse_vcpu_pinning(cpulist, memstress_args.vcpu_to_pcpu,
+ nr_vcpus);
+ memstress_args.pin_vcpus = true;
+ }
+
for_each_guest_mode(run_test, &p);
return 0;
diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index e9d6d1aecf89..d374dbcf9a53 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -136,77 +136,6 @@ struct test_params {
bool random_access;
};
-static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
-{
- int i;
-
- for (i = 0; i < slots; i++) {
- int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
- int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;
-
- vm_mem_region_set_flags(vm, slot, flags);
- }
-}
-
-static inline void enable_dirty_logging(struct kvm_vm *vm, int slots)
-{
- toggle_dirty_logging(vm, slots, true);
-}
-
-static inline void disable_dirty_logging(struct kvm_vm *vm, int slots)
-{
- toggle_dirty_logging(vm, slots, false);
-}
-
-static void get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
-{
- int i;
-
- for (i = 0; i < slots; i++) {
- int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
-
- kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
- }
-}
-
-static void clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
- int slots, uint64_t pages_per_slot)
-{
- int i;
-
- for (i = 0; i < slots; i++) {
- int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
-
- kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
- }
-}
-
-static unsigned long **alloc_bitmaps(int slots, uint64_t pages_per_slot)
-{
- unsigned long **bitmaps;
- int i;
-
- bitmaps = malloc(slots * sizeof(bitmaps[0]));
- TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");
-
- for (i = 0; i < slots; i++) {
- bitmaps[i] = bitmap_zalloc(pages_per_slot);
- TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
- }
-
- return bitmaps;
-}
-
-static void free_bitmaps(unsigned long *bitmaps[], int slots)
-{
- int i;
-
- for (i = 0; i < slots; i++)
- free(bitmaps[i]);
-
- free(bitmaps);
-}
-
static void run_test(enum vm_guest_mode mode, void *arg)
{
struct test_params *p = arg;
@@ -236,7 +165,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
host_num_pages = vm_num_host_pages(mode, guest_num_pages);
pages_per_slot = host_num_pages / p->slots;
- bitmaps = alloc_bitmaps(p->slots, pages_per_slot);
+ bitmaps = memstress_alloc_bitmaps(p->slots, pages_per_slot);
if (dirty_log_manual_caps)
vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
@@ -277,7 +206,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Enable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
- enable_dirty_logging(vm, p->slots);
+ memstress_enable_dirty_logging(vm, p->slots);
ts_diff = timespec_elapsed(start);
pr_info("Enabling dirty logging time: %ld.%.9lds\n\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
@@ -306,7 +235,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
iteration, ts_diff.tv_sec, ts_diff.tv_nsec);
clock_gettime(CLOCK_MONOTONIC, &start);
- get_dirty_log(vm, bitmaps, p->slots);
+ memstress_get_dirty_log(vm, bitmaps, p->slots);
ts_diff = timespec_elapsed(start);
get_dirty_log_total = timespec_add(get_dirty_log_total,
ts_diff);
@@ -315,7 +244,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
if (dirty_log_manual_caps) {
clock_gettime(CLOCK_MONOTONIC, &start);
- clear_dirty_log(vm, bitmaps, p->slots, pages_per_slot);
+ memstress_clear_dirty_log(vm, bitmaps, p->slots,
+ pages_per_slot);
ts_diff = timespec_elapsed(start);
clear_dirty_log_total = timespec_add(clear_dirty_log_total,
ts_diff);
@@ -334,7 +264,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Disable dirty logging */
clock_gettime(CLOCK_MONOTONIC, &start);
- disable_dirty_logging(vm, p->slots);
+ memstress_disable_dirty_logging(vm, p->slots);
ts_diff = timespec_elapsed(start);
pr_info("Disabling dirty logging time: %ld.%.9lds\n",
ts_diff.tv_sec, ts_diff.tv_nsec);
@@ -359,7 +289,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
clear_dirty_log_total.tv_nsec, avg.tv_sec, avg.tv_nsec);
}
- free_bitmaps(bitmaps, p->slots);
+ memstress_free_bitmaps(bitmaps, p->slots);
arch_cleanup_vm(vm);
memstress_destroy_vm(vm);
}
@@ -402,17 +332,7 @@ static void help(char *name)
" so -w X means each page has an X%% chance of writing\n"
" and a (100-X)%% chance of reading.\n"
" (default: 100 i.e. all pages are written to.)\n");
- printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
- " values (target pCPU), one for each vCPU, plus an optional\n"
- " entry for the main application task (specified via entry\n"
- " <nr_vcpus + 1>). If used, entries must be provided for all\n"
- " vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
- " E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
- " vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
- " ./dirty_log_perf_test -v 3 -c 22,23,24,50\n\n"
- " To leave the application task unpinned, drop the final entry:\n\n"
- " ./dirty_log_perf_test -v 3 -c 22,23,24\n\n"
- " (default: no pinning)\n");
+ kvm_print_vcpu_pinning_help();
puts("");
exit(0);
}
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index a089c356f354..07732a157ccd 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -733,6 +733,7 @@ static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
+void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
int nr_vcpus);
diff --git a/tools/testing/selftests/kvm/include/memstress.h b/tools/testing/selftests/kvm/include/memstress.h
index 72e3e358ef7b..ce4e603050ea 100644
--- a/tools/testing/selftests/kvm/include/memstress.h
+++ b/tools/testing/selftests/kvm/include/memstress.h
@@ -72,4 +72,12 @@ void memstress_guest_code(uint32_t vcpu_id);
uint64_t memstress_nested_pages(int nr_vcpus);
void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);
+void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots);
+void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots);
+void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots);
+void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
+ int slots, uint64_t pages_per_slot);
+unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot);
+void memstress_free_bitmaps(unsigned long *bitmaps[], int slots);
+
#endif /* SELFTEST_KVM_MEMSTRESS_H */
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 298c4372fb1a..9741a7ff6380 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -494,6 +494,23 @@ static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask)
return pcpu;
}
+void kvm_print_vcpu_pinning_help(void)
+{
+ const char *name = program_invocation_name;
+
+ printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n"
+ " values (target pCPU), one for each vCPU, plus an optional\n"
+ " entry for the main application task (specified via entry\n"
+ " <nr_vcpus + 1>). If used, entries must be provided for all\n"
+ " vCPUs, i.e. pinning vCPUs is all or nothing.\n\n"
+ " E.g. to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n"
+ " vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n"
+ " %s -v 3 -c 22,23,24,50\n\n"
+ " To leave the application task unpinned, drop the final entry:\n\n"
+ " %s -v 3 -c 22,23,24\n\n"
+ " (default: no pinning)\n", name, name);
+}
+
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
int nr_vcpus)
{
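As background for the -c option documented by kvm_print_vcpu_pinning_help() above: pinning a task to one physical CPU is a thin wrapper around sched_setaffinity(). The sketch below is illustrative only; pin_self_to_pcpu() is a hypothetical name, not the library's helper.

#define _GNU_SOURCE
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative sketch: pin the calling thread to a single physical CPU. */
static void pin_self_to_pcpu(uint32_t pcpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(pcpu, &mask);

	if (sched_setaffinity(0, sizeof(mask), &mask)) {
		perror("sched_setaffinity");
		exit(1);
	}
}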
diff --git a/tools/testing/selftests/kvm/lib/memstress.c b/tools/testing/selftests/kvm/lib/memstress.c
index 5f1d3173c238..df457452d146 100644
--- a/tools/testing/selftests/kvm/lib/memstress.c
+++ b/tools/testing/selftests/kvm/lib/memstress.c
@@ -5,6 +5,7 @@
#define _GNU_SOURCE
#include <inttypes.h>
+#include <linux/bitmap.h>
#include "kvm_util.h"
#include "memstress.h"
@@ -64,6 +65,9 @@ void memstress_guest_code(uint32_t vcpu_idx)
GUEST_ASSERT(vcpu_args->vcpu_idx == vcpu_idx);
while (true) {
+ for (i = 0; i < sizeof(memstress_args); i += args->guest_page_size)
+ (void) *((volatile char *)args + i);
+
for (i = 0; i < pages; i++) {
if (args->random_access)
page = guest_random_u32(&rand_state) % pages;
@@ -320,3 +324,74 @@ void memstress_join_vcpu_threads(int nr_vcpus)
for (i = 0; i < nr_vcpus; i++)
pthread_join(vcpu_threads[i].thread, NULL);
}
+
+static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable)
+{
+ int i;
+
+ for (i = 0; i < slots; i++) {
+ int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
+ int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0;
+
+ vm_mem_region_set_flags(vm, slot, flags);
+ }
+}
+
+void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots)
+{
+ toggle_dirty_logging(vm, slots, true);
+}
+
+void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots)
+{
+ toggle_dirty_logging(vm, slots, false);
+}
+
+void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots)
+{
+ int i;
+
+ for (i = 0; i < slots; i++) {
+ int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
+
+ kvm_vm_get_dirty_log(vm, slot, bitmaps[i]);
+ }
+}
+
+void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[],
+ int slots, uint64_t pages_per_slot)
+{
+ int i;
+
+ for (i = 0; i < slots; i++) {
+ int slot = MEMSTRESS_MEM_SLOT_INDEX + i;
+
+ kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot);
+ }
+}
+
+unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot)
+{
+ unsigned long **bitmaps;
+ int i;
+
+ bitmaps = malloc(slots * sizeof(bitmaps[0]));
+ TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array.");
+
+ for (i = 0; i < slots; i++) {
+ bitmaps[i] = bitmap_zalloc(pages_per_slot);
+ TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap.");
+ }
+
+ return bitmaps;
+}
+
+void memstress_free_bitmaps(unsigned long *bitmaps[], int slots)
+{
+ int i;
+
+ for (i = 0; i < slots; i++)
+ free(bitmaps[i]);
+
+ free(bitmaps);
+}
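The dirty logging helpers moved into memstress.c above are meant to be called in a fixed sequence. A minimal caller, assuming vm, slots and pages_per_slot are already set up as in the perf tests, would look like this (sketch only):

unsigned long **bitmaps = memstress_alloc_bitmaps(slots, pages_per_slot);

memstress_enable_dirty_logging(vm, slots);
/* ... vCPUs dirty guest memory here ... */
memstress_get_dirty_log(vm, bitmaps, slots);
/* Only needed when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is in use: */
memstress_clear_dirty_log(vm, bitmaps, slots, pages_per_slot);
memstress_disable_dirty_logging(vm, slots);
memstress_free_bitmaps(bitmaps, slots);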
diff --git a/tools/testing/selftests/kvm/lib/userfaultfd_util.c b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
index 92cef20902f1..271f63891581 100644
--- a/tools/testing/selftests/kvm/lib/userfaultfd_util.c
+++ b/tools/testing/selftests/kvm/lib/userfaultfd_util.c
@@ -70,7 +70,7 @@ static void *uffd_handler_thread_fn(void *arg)
r = read(pollfd[1].fd, &tmp_chr, 1);
TEST_ASSERT(r == 1,
"Error reading pipefd in UFFD thread\n");
- return NULL;
+ break;
}
if (!(pollfd[0].revents & POLLIN))
@@ -103,7 +103,7 @@ static void *uffd_handler_thread_fn(void *arg)
ts_diff = timespec_elapsed(start);
PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. (%f/sec)\n",
pages, ts_diff.tv_sec, ts_diff.tv_nsec,
- pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / 100000000.0));
+ pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / NSEC_PER_SEC));
return NULL;
}
diff --git a/tools/testing/selftests/kvm/s390x/cmma_test.c b/tools/testing/selftests/kvm/s390x/cmma_test.c
new file mode 100644
index 000000000000..1d73e78e8fa7
--- /dev/null
+++ b/tools/testing/selftests/kvm/s390x/cmma_test.c
@@ -0,0 +1,700 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test for s390x CMMA migration
+ *
+ * Copyright IBM Corp. 2023
+ *
+ * Authors:
+ * Nico Boehr <nrb@linux.ibm.com>
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "kselftest.h"
+
+#define MAIN_PAGE_COUNT 512
+
+#define TEST_DATA_PAGE_COUNT 512
+#define TEST_DATA_MEMSLOT 1
+#define TEST_DATA_START_GFN 4096
+
+#define TEST_DATA_TWO_PAGE_COUNT 256
+#define TEST_DATA_TWO_MEMSLOT 2
+#define TEST_DATA_TWO_START_GFN 8192
+
+static char cmma_value_buf[MAIN_PAGE_COUNT + TEST_DATA_PAGE_COUNT];
+
+/**
+ * Dirty CMMA attributes of exactly one page in the TEST_DATA memslot,
+ * so use_cmma goes on and the CMMA related ioctls do something.
+ */
+static void guest_do_one_essa(void)
+{
+ asm volatile(
+ /* load TEST_DATA_START_GFN into r1 */
+ " llilf 1,%[start_gfn]\n"
+ /* calculate the address from the gfn */
+ " sllg 1,1,12(0)\n"
+ /* set the first page in TEST_DATA memslot to STABLE */
+ " .insn rrf,0xb9ab0000,2,1,1,0\n"
+ /* hypercall */
+ " diag 0,0,0x501\n"
+ "0: j 0b"
+ :
+ : [start_gfn] "L"(TEST_DATA_START_GFN)
+ : "r1", "r2", "memory", "cc"
+ );
+}
+
+/**
+ * Touch CMMA attributes of all pages in TEST_DATA memslot. Set them to stable
+ * state.
+ */
+static void guest_dirty_test_data(void)
+{
+ asm volatile(
+ /* r1 = TEST_DATA_START_GFN */
+ " xgr 1,1\n"
+ " llilf 1,%[start_gfn]\n"
+ /* r5 = TEST_DATA_PAGE_COUNT */
+ " lghi 5,%[page_count]\n"
+ /* r5 += r1 */
+ "2: agfr 5,1\n"
+ /* r2 = r1 << 12 */
+ "1: sllg 2,1,12(0)\n"
+ /* essa(r4, r2, SET_STABLE) */
+ " .insn rrf,0xb9ab0000,4,2,1,0\n"
+ /* i++ */
+ " agfi 1,1\n"
+ /* if r1 < r5 goto 1 */
+ " cgrjl 1,5,1b\n"
+ /* hypercall */
+ " diag 0,0,0x501\n"
+ "0: j 0b"
+ :
+ : [start_gfn] "L"(TEST_DATA_START_GFN),
+ [page_count] "L"(TEST_DATA_PAGE_COUNT)
+ :
+ /* the counter in our loop over the pages */
+ "r1",
+ /* the calculated page physical address */
+ "r2",
+ /* ESSA output register */
+ "r4",
+ /* last page */
+ "r5",
+ "cc", "memory"
+ );
+}
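For readability, the loop in guest_dirty_test_data() above corresponds roughly to the following C. This is an illustrative rewrite only, not part of the patch; essa_set_stable() and hypercall() are hypothetical names, and the guest has to use the inline asm because ESSA and DIAG 0x501 have no C interface.

/* Illustrative C equivalent of the asm above. */
for (unsigned long gfn = TEST_DATA_START_GFN;
     gfn < TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT; gfn++)
	essa_set_stable(gfn << 12);	/* .insn rrf,0xb9ab0000,... (ESSA, SET_STABLE) */
hypercall();				/* diag 0,0,0x501 */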
+
+static struct kvm_vm *create_vm(void)
+{
+ return ____vm_create(VM_MODE_DEFAULT);
+}
+
+static void create_main_memslot(struct kvm_vm *vm)
+{
+ int i;
+
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, MAIN_PAGE_COUNT, 0);
+ /* set the array of memslots to zero like __vm_create does */
+ for (i = 0; i < NR_MEM_REGIONS; i++)
+ vm->memslots[i] = 0;
+}
+
+static void create_test_memslot(struct kvm_vm *vm)
+{
+ vm_userspace_mem_region_add(vm,
+ VM_MEM_SRC_ANONYMOUS,
+ TEST_DATA_START_GFN << vm->page_shift,
+ TEST_DATA_MEMSLOT,
+ TEST_DATA_PAGE_COUNT,
+ 0
+ );
+ vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
+}
+
+static void create_memslots(struct kvm_vm *vm)
+{
+ /*
+ * Our VM has the following memory layout:
+ * +------+---------------------------+
+ * | GFN | Memslot |
+ * +------+---------------------------+
+ * | 0 | |
+ * | ... | MAIN (Code, Stack, ...) |
+ * | 511 | |
+ * +------+---------------------------+
+ * | 4096 | |
+ * | ... | TEST_DATA |
+ * | 4607 | |
+ * +------+---------------------------+
+ */
+ create_main_memslot(vm);
+ create_test_memslot(vm);
+}
+
+static void finish_vm_setup(struct kvm_vm *vm)
+{
+ struct userspace_mem_region *slot0;
+
+ kvm_vm_elf_load(vm, program_invocation_name);
+
+ slot0 = memslot2region(vm, 0);
+ ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);
+
+ kvm_arch_vm_post_create(vm);
+}
+
+static struct kvm_vm *create_vm_two_memslots(void)
+{
+ struct kvm_vm *vm;
+
+ vm = create_vm();
+
+ create_memslots(vm);
+
+ finish_vm_setup(vm);
+
+ return vm;
+}
+
+static void enable_cmma(struct kvm_vm *vm)
+{
+ int r;
+
+ r = __kvm_device_attr_set(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA, NULL);
+ TEST_ASSERT(!r, "enabling cmma failed r=%d errno=%d", r, errno);
+}
+
+static void enable_dirty_tracking(struct kvm_vm *vm)
+{
+ vm_mem_region_set_flags(vm, 0, KVM_MEM_LOG_DIRTY_PAGES);
+ vm_mem_region_set_flags(vm, TEST_DATA_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
+}
+
+static int __enable_migration_mode(struct kvm_vm *vm)
+{
+ return __kvm_device_attr_set(vm->fd,
+ KVM_S390_VM_MIGRATION,
+ KVM_S390_VM_MIGRATION_START,
+ NULL
+ );
+}
+
+static void enable_migration_mode(struct kvm_vm *vm)
+{
+ int r = __enable_migration_mode(vm);
+
+ TEST_ASSERT(!r, "enabling migration mode failed r=%d errno=%d", r, errno);
+}
+
+static bool is_migration_mode_on(struct kvm_vm *vm)
+{
+ u64 out;
+ int r;
+
+ r = __kvm_device_attr_get(vm->fd,
+ KVM_S390_VM_MIGRATION,
+ KVM_S390_VM_MIGRATION_STATUS,
+ &out
+ );
+ TEST_ASSERT(!r, "getting migration mode status failed r=%d errno=%d", r, errno);
+ return out;
+}
+
+static int vm_get_cmma_bits(struct kvm_vm *vm, u64 flags, int *errno_out)
+{
+ struct kvm_s390_cmma_log args;
+ int rc;
+
+ errno = 0;
+
+ args = (struct kvm_s390_cmma_log){
+ .start_gfn = 0,
+ .count = sizeof(cmma_value_buf),
+ .flags = flags,
+ .values = (__u64)&cmma_value_buf[0]
+ };
+ rc = __vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
+
+ *errno_out = errno;
+ return rc;
+}
+
+static void test_get_cmma_basic(void)
+{
+ struct kvm_vm *vm = create_vm_two_memslots();
+ struct kvm_vcpu *vcpu;
+ int rc, errno_out;
+
+ /* GET_CMMA_BITS without CMMA enabled should fail */
+ rc = vm_get_cmma_bits(vm, 0, &errno_out);
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_out, ENXIO);
+
+ enable_cmma(vm);
+ vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
+
+ vcpu_run(vcpu);
+
+ /* GET_CMMA_BITS without migration mode and without peeking should fail */
+ rc = vm_get_cmma_bits(vm, 0, &errno_out);
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_out, EINVAL);
+
+ /* GET_CMMA_BITS without migration mode and with peeking should work */
+ rc = vm_get_cmma_bits(vm, KVM_S390_CMMA_PEEK, &errno_out);
+ ASSERT_EQ(rc, 0);
+ ASSERT_EQ(errno_out, 0);
+
+ enable_dirty_tracking(vm);
+ enable_migration_mode(vm);
+
+ /* GET_CMMA_BITS with invalid flags */
+ rc = vm_get_cmma_bits(vm, 0xfeedc0fe, &errno_out);
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno_out, EINVAL);
+
+ kvm_vm_free(vm);
+}
+
+static void assert_exit_was_hypercall(struct kvm_vcpu *vcpu)
+{
+ ASSERT_EQ(vcpu->run->exit_reason, 13);
+ ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4);
+ ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300);
+ ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000);
+}
+
+static void test_migration_mode(void)
+{
+ struct kvm_vm *vm = create_vm();
+ struct kvm_vcpu *vcpu;
+ u64 orig_psw;
+ int rc;
+
+ /* enabling migration mode on a VM without memory should fail */
+ rc = __enable_migration_mode(vm);
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno, EINVAL);
+ TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
+ errno = 0;
+
+ create_memslots(vm);
+ finish_vm_setup(vm);
+
+ enable_cmma(vm);
+ vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
+ orig_psw = vcpu->run->psw_addr;
+
+ /*
+ * Execute one essa instruction in the guest. Otherwise the guest will
+ * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
+ */
+ vcpu_run(vcpu);
+ assert_exit_was_hypercall(vcpu);
+
+ /* migration mode when memslots have dirty tracking off should fail */
+ rc = __enable_migration_mode(vm);
+ ASSERT_EQ(rc, -1);
+ ASSERT_EQ(errno, EINVAL);
+ TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
+ errno = 0;
+
+ /* enable dirty tracking */
+ enable_dirty_tracking(vm);
+
+ /* enabling migration mode should work now */
+ rc = __enable_migration_mode(vm);
+ ASSERT_EQ(rc, 0);
+ TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
+ errno = 0;
+
+ /* execute another ESSA instruction to see this goes fine */
+ vcpu->run->psw_addr = orig_psw;
+ vcpu_run(vcpu);
+ assert_exit_was_hypercall(vcpu);
+
+ /*
+ * With migration mode on, create a new memslot with dirty tracking off.
+ * This should turn off migration mode.
+ */
+ TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
+ vm_userspace_mem_region_add(vm,
+ VM_MEM_SRC_ANONYMOUS,
+ TEST_DATA_TWO_START_GFN << vm->page_shift,
+ TEST_DATA_TWO_MEMSLOT,
+ TEST_DATA_TWO_PAGE_COUNT,
+ 0
+ );
+ TEST_ASSERT(!is_migration_mode_on(vm),
+ "creating memslot without dirty tracking turns off migration mode"
+ );
+
+ /* ESSA instructions should still execute fine */
+ vcpu->run->psw_addr = orig_psw;
+ vcpu_run(vcpu);
+ assert_exit_was_hypercall(vcpu);
+
+ /*
+ * Turn on dirty tracking on the new memslot.
+ * It should be possible to turn migration mode back on again.
+ */
+ vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
+ rc = __enable_migration_mode(vm);
+ ASSERT_EQ(rc, 0);
+ TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
+ errno = 0;
+
+ /*
+ * Turn off dirty tracking again, this time with just a flag change.
+ * Again, migration mode should turn off.
+ */
+ TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
+ vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, 0);
+ TEST_ASSERT(!is_migration_mode_on(vm),
+ "disabling dirty tracking should turn off migration mode"
+ );
+
+ /* ESSA instructions should still execute fine */
+ vcpu->run->psw_addr = orig_psw;
+ vcpu_run(vcpu);
+ assert_exit_was_hypercall(vcpu);
+
+ kvm_vm_free(vm);
+}
+
+/**
+ * Given a VM with the MAIN and TEST_DATA memslots, assert that the CMMA
+ * attributes of every page in both memslots are dirty, and nothing else is.
+ * This has the useful side effect of ensuring nothing is CMMA dirty after this
+ * function.
+ */
+static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
+{
+ struct kvm_s390_cmma_log args;
+
+ /*
+ * First iteration - everything should be dirty.
+ * Start at the main memslot...
+ */
+ args = (struct kvm_s390_cmma_log){
+ .start_gfn = 0,
+ .count = sizeof(cmma_value_buf),
+ .flags = 0,
+ .values = (__u64)&cmma_value_buf[0]
+ };
+ memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
+ vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
+ ASSERT_EQ(args.count, MAIN_PAGE_COUNT);
+ ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT);
+ ASSERT_EQ(args.start_gfn, 0);
+
+ /* ...and then - after a hole - the TEST_DATA memslot should follow */
+ args = (struct kvm_s390_cmma_log){
+ .start_gfn = MAIN_PAGE_COUNT,
+ .count = sizeof(cmma_value_buf),
+ .flags = 0,
+ .values = (__u64)&cmma_value_buf[0]
+ };
+ memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
+ vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
+ ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT);
+ ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN);
+ ASSERT_EQ(args.remaining, 0);
+
+ /* ...and nothing else should be there */
+ args = (struct kvm_s390_cmma_log){
+ .start_gfn = TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT,
+ .count = sizeof(cmma_value_buf),
+ .flags = 0,
+ .values = (__u64)&cmma_value_buf[0]
+ };
+ memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
+ vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
+ ASSERT_EQ(args.count, 0);
+ ASSERT_EQ(args.start_gfn, 0);
+ ASSERT_EQ(args.remaining, 0);
+}
+
+/**
+ * Given a VM, assert no pages are CMMA dirty.
+ */
+static void assert_no_pages_cmma_dirty(struct kvm_vm *vm)
+{
+ struct kvm_s390_cmma_log args;
+
+ /* If we start from GFN 0 again, nothing should be dirty. */
+ args = (struct kvm_s390_cmma_log){
+ .start_gfn = 0,
+ .count = sizeof(cmma_value_buf),
+ .flags = 0,
+ .values = (__u64)&cmma_value_buf[0]
+ };
+ memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
+ vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
+ if (args.count || args.remaining || args.start_gfn)
+ TEST_FAIL("pages are still dirty start_gfn=0x%llx count=%u remaining=%llu",
+ args.start_gfn,
+ args.count,
+ args.remaining
+ );
+}
+
+static void test_get_initial_dirty(void)
+{
+ struct kvm_vm *vm = create_vm_two_memslots();
+ struct kvm_vcpu *vcpu;
+
+ enable_cmma(vm);
+ vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
+
+ /*
+ * Execute one essa instruction in the guest. Otherwise the guest will
+ * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
+ */
+ vcpu_run(vcpu);
+ assert_exit_was_hypercall(vcpu);
+
+ enable_dirty_tracking(vm);
+ enable_migration_mode(vm);
+
+ assert_all_slots_cmma_dirty(vm);
+
+ /* Start from the beginning again and make sure nothing else is dirty */
+ assert_no_pages_cmma_dirty(vm);
+
+ kvm_vm_free(vm);
+}
+
+static void query_cmma_range(struct kvm_vm *vm,
+ u64 start_gfn, u64 gfn_count,
+ struct kvm_s390_cmma_log *res_out)
+{
+ *res_out = (struct kvm_s390_cmma_log){
+ .start_gfn = start_gfn,
+ .count = gfn_count,
+ .flags = 0,
+ .values = (__u64)&cmma_value_buf[0]
+ };
+ memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
+ vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, res_out);
+}
+
+/**
+ * Assert the given cmma_log struct that was executed by query_cmma_range()
+ * indicates the first dirty gfn is at first_dirty_gfn and contains exactly
+ * dirty_gfn_count CMMA values.
+ */
+static void assert_cmma_dirty(u64 first_dirty_gfn,
+ u64 dirty_gfn_count,
+ const struct kvm_s390_cmma_log *res)
+{
+ ASSERT_EQ(res->start_gfn, first_dirty_gfn);
+ ASSERT_EQ(res->count, dirty_gfn_count);
+ for (size_t i = 0; i < dirty_gfn_count; i++)
+		ASSERT_EQ(cmma_value_buf[i], 0x0); /* stable state */
+ ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */
+}
+
+static void test_get_skip_holes(void)
+{
+ size_t gfn_offset;
+ struct kvm_vm *vm = create_vm_two_memslots();
+ struct kvm_s390_cmma_log log;
+ struct kvm_vcpu *vcpu;
+ u64 orig_psw;
+
+ enable_cmma(vm);
+ vcpu = vm_vcpu_add(vm, 1, guest_dirty_test_data);
+
+ orig_psw = vcpu->run->psw_addr;
+
+ /*
+ * Execute some essa instructions in the guest. Otherwise the guest will
+ * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
+ */
+ vcpu_run(vcpu);
+ assert_exit_was_hypercall(vcpu);
+
+ enable_dirty_tracking(vm);
+ enable_migration_mode(vm);
+
+ /* un-dirty all pages */
+ assert_all_slots_cmma_dirty(vm);
+
+ /* Then, dirty just the TEST_DATA memslot */
+ vcpu->run->psw_addr = orig_psw;
+ vcpu_run(vcpu);
+
+ gfn_offset = TEST_DATA_START_GFN;
+ /**
+ * Query CMMA attributes of one page, starting at page 0. Since the
+ * main memslot was not touched by the VM, this should yield the first
+ * page of the TEST_DATA memslot.
+ * The dirty bitmap should now look like this:
+ * 0: not dirty
+ * [0x1, 0x200): dirty
+ */
+ query_cmma_range(vm, 0, 1, &log);
+ assert_cmma_dirty(gfn_offset, 1, &log);
+ gfn_offset++;
+
+ /**
+ * Query CMMA attributes of 32 (0x20) pages past the end of the TEST_DATA
+ * memslot. This should wrap back to the beginning of the TEST_DATA
+ * memslot, page 1.
+ * The dirty bitmap should now look like this:
+ * [0, 0x21): not dirty
+ * [0x21, 0x200): dirty
+ */
+ query_cmma_range(vm, TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT, 0x20, &log);
+ assert_cmma_dirty(gfn_offset, 0x20, &log);
+ gfn_offset += 0x20;
+
+ /* Skip 32 pages */
+ gfn_offset += 0x20;
+
+ /**
+ * After skipping 32 pages, query the next 32 (0x20) pages.
+ * The dirty bitmap should now look like this:
+ * [0, 0x21): not dirty
+ * [0x21, 0x41): dirty
+ * [0x41, 0x61): not dirty
+ * [0x61, 0x200): dirty
+ */
+ query_cmma_range(vm, gfn_offset, 0x20, &log);
+ assert_cmma_dirty(gfn_offset, 0x20, &log);
+ gfn_offset += 0x20;
+
+ /**
+ * Query 1 page from the beginning of the TEST_DATA memslot. This should
+ * yield page 0x21.
+ * The dirty bitmap should now look like this:
+ * [0, 0x22): not dirty
+ * [0x22, 0x41): dirty
+ * [0x41, 0x61): not dirty
+ * [0x61, 0x200): dirty
+ */
+ query_cmma_range(vm, TEST_DATA_START_GFN, 1, &log);
+ assert_cmma_dirty(TEST_DATA_START_GFN + 0x21, 1, &log);
+ gfn_offset++;
+
+ /**
+ * Query 15 (0xF) pages from page 0x23 in TEST_DATA memslot.
+ * This should yield pages [0x23, 0x33).
+ * The dirty bitmap should now look like this:
+ * [0, 0x22): not dirty
+ * 0x22: dirty
+ * [0x23, 0x33): not dirty
+ * [0x33, 0x41): dirty
+ * [0x41, 0x61): not dirty
+ * [0x61, 0x200): dirty
+ */
+ gfn_offset = TEST_DATA_START_GFN + 0x23;
+ query_cmma_range(vm, gfn_offset, 15, &log);
+ assert_cmma_dirty(gfn_offset, 15, &log);
+
+ /**
+ * Query 17 (0x11) pages from page 0x22 in TEST_DATA memslot.
+ * This should yield page [0x22, 0x33)
+ * The dirty bitmap should now look like this:
+ * [0, 0x33): not dirty
+ * [0x33, 0x41): dirty
+ * [0x41, 0x61): not dirty
+ * [0x61, 0x200): dirty
+ */
+ gfn_offset = TEST_DATA_START_GFN + 0x22;
+ query_cmma_range(vm, gfn_offset, 17, &log);
+ assert_cmma_dirty(gfn_offset, 17, &log);
+
+ /**
+ * Query 25 (0x19) pages from page 0x40 in TEST_DATA memslot.
+ * This should yield page 0x40 and nothing more, since there are more
+ * than 16 non-dirty pages after page 0x40.
+ * The dirty bitmap should now look like this:
+ * [0, 0x33): not dirty
+ * [0x33, 0x40): dirty
+ * [0x40, 0x61): not dirty
+ * [0x61, 0x200): dirty
+ */
+ gfn_offset = TEST_DATA_START_GFN + 0x40;
+ query_cmma_range(vm, gfn_offset, 25, &log);
+ assert_cmma_dirty(gfn_offset, 1, &log);
+
+ /**
+ * Query pages [0x33, 0x40).
+ * The dirty bitmap should now look like this:
+ * [0, 0x61): not dirty
+ * [0x61, 0x200): dirty
+ */
+ gfn_offset = TEST_DATA_START_GFN + 0x33;
+ query_cmma_range(vm, gfn_offset, 0x40 - 0x33, &log);
+ assert_cmma_dirty(gfn_offset, 0x40 - 0x33, &log);
+
+ /**
+ * Query the remaining pages [0x61, 0x200).
+ */
+ gfn_offset = TEST_DATA_START_GFN;
+ query_cmma_range(vm, gfn_offset, TEST_DATA_PAGE_COUNT - 0x61, &log);
+ assert_cmma_dirty(TEST_DATA_START_GFN + 0x61, TEST_DATA_PAGE_COUNT - 0x61, &log);
+
+ assert_no_pages_cmma_dirty(vm);
+}
+
+struct testdef {
+ const char *name;
+ void (*test)(void);
+} testlist[] = {
+ { "migration mode and dirty tracking", test_migration_mode },
+ { "GET_CMMA_BITS: basic calls", test_get_cmma_basic },
+	{ "GET_CMMA_BITS: all pages are dirty initially", test_get_initial_dirty },
+ { "GET_CMMA_BITS: holes are skipped", test_get_skip_holes },
+};
+
+/**
+ * The kernel may support CMMA, but the machine may not (i.e. if running as
+ * guest-3).
+ *
+ * In this case, the CMMA capabilities are all there, but the CMMA-related
+ * ioctls fail. To find out whether the machine supports CMMA, create a
+ * temporary VM and then query the CMMA feature of the VM.
+ */
+static int machine_has_cmma(void)
+{
+ struct kvm_vm *vm = create_vm();
+ int r;
+
+ r = !__kvm_has_device_attr(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA);
+ kvm_vm_free(vm);
+
+ return r;
+}
+
+int main(int argc, char *argv[])
+{
+ int idx;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_SYNC_REGS));
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_CMMA_MIGRATION));
+ TEST_REQUIRE(machine_has_cmma());
+
+ ksft_print_header();
+
+ ksft_set_plan(ARRAY_SIZE(testlist));
+
+ for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
+ testlist[idx].test();
+ ksft_test_result_pass("%s\n", testlist[idx].name);
+ }
+
+ ksft_finished(); /* Print results and exit() accordingly */
+}
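The KVM_S390_GET_CMMA_BITS semantics the tests above rely on (KVM updates start_gfn and count, and remaining reports what is still dirty) are also how a migration loop would drain the CMMA values. A hedged sketch, reusing vm_ioctl() and cmma_value_buf from this file; drain_cmma() is an illustrative name, not part of the test:

/* Illustrative sketch only: drain all dirty CMMA values in migration mode. */
static void drain_cmma(struct kvm_vm *vm)
{
	struct kvm_s390_cmma_log args = {
		.start_gfn = 0,
		.count = sizeof(cmma_value_buf),
		.flags = 0,		/* non-peek, requires migration mode */
		.values = (__u64)&cmma_value_buf[0],
	};

	do {
		vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
		/* consume args.count values for GFNs starting at args.start_gfn */
		args.start_gfn += args.count;
		args.count = sizeof(cmma_value_buf);
	} while (args.remaining);
}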
diff --git a/tools/testing/selftests/kvm/x86_64/cpuid_test.c b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
index 2fc3ad9c887e..d3c3aa93f090 100644
--- a/tools/testing/selftests/kvm/x86_64/cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
@@ -163,6 +163,25 @@ static void set_cpuid_after_run(struct kvm_vcpu *vcpu)
ent->eax = eax;
}
+static void test_get_cpuid2(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent + 1);
+ int i, r;
+
+ vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
+ TEST_ASSERT(cpuid->nent == vcpu->cpuid->nent,
+ "KVM didn't update nent on success, wanted %u, got %u\n",
+ vcpu->cpuid->nent, cpuid->nent);
+
+ for (i = 0; i < vcpu->cpuid->nent; i++) {
+ cpuid->nent = i;
+ r = __vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid);
+ TEST_ASSERT(r && errno == E2BIG, KVM_IOCTL_ERROR(KVM_GET_CPUID2, r));
+ TEST_ASSERT(cpuid->nent == i, "KVM modified nent on failure");
+ }
+ free(cpuid);
+}
+
int main(void)
{
struct kvm_vcpu *vcpu;
@@ -183,5 +202,7 @@ int main(void)
set_cpuid_after_run(vcpu);
+ test_get_cpuid2(vcpu);
+
kvm_vm_free(vm);
}
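The E2BIG behaviour verified by test_get_cpuid2() also dictates how a caller that does not know the vCPU's entry count sizes its buffer. A sketch using the same selftest helpers as this file; the grow-and-retry loop is an assumption of this example, not the test's code:

static struct kvm_cpuid2 *get_vcpu_cpuid(struct kvm_vcpu *vcpu)
{
	int nent = 4;

	for (;;) {
		struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(nent);

		if (!__vcpu_ioctl(vcpu, KVM_GET_CPUID2, cpuid))
			return cpuid;	/* on success KVM updates cpuid->nent */

		TEST_ASSERT(errno == E2BIG, "unexpected KVM_GET_CPUID2 error");
		free(cpuid);
		nent *= 2;		/* buffer too small, retry with more room */
	}
}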
diff --git a/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
new file mode 100644
index 000000000000..beb7e2c10211
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM dirty logging page splitting test
+ *
+ * Based on dirty_log_perf.c
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ * Copyright (C) 2023, Google, Inc.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <linux/bitmap.h>
+
+#include "kvm_util.h"
+#include "test_util.h"
+#include "memstress.h"
+#include "guest_modes.h"
+
+#define VCPUS 2
+#define SLOTS 2
+#define ITERATIONS 2
+
+static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
+
+static enum vm_mem_backing_src_type backing_src = VM_MEM_SRC_ANONYMOUS_HUGETLB;
+
+static u64 dirty_log_manual_caps;
+static bool host_quit;
+static int iteration;
+static int vcpu_last_completed_iteration[KVM_MAX_VCPUS];
+
+struct kvm_page_stats {
+ uint64_t pages_4k;
+ uint64_t pages_2m;
+ uint64_t pages_1g;
+ uint64_t hugepages;
+};
+
+static void get_page_stats(struct kvm_vm *vm, struct kvm_page_stats *stats, const char *stage)
+{
+ stats->pages_4k = vm_get_stat(vm, "pages_4k");
+ stats->pages_2m = vm_get_stat(vm, "pages_2m");
+ stats->pages_1g = vm_get_stat(vm, "pages_1g");
+ stats->hugepages = stats->pages_2m + stats->pages_1g;
+
+ pr_debug("\nPage stats after %s: 4K: %ld 2M: %ld 1G: %ld huge: %ld\n",
+ stage, stats->pages_4k, stats->pages_2m, stats->pages_1g,
+ stats->hugepages);
+}
+
+static void run_vcpu_iteration(struct kvm_vm *vm)
+{
+ int i;
+
+ iteration++;
+ for (i = 0; i < VCPUS; i++) {
+ while (READ_ONCE(vcpu_last_completed_iteration[i]) !=
+ iteration)
+ ;
+ }
+}
+
+static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
+{
+ struct kvm_vcpu *vcpu = vcpu_args->vcpu;
+ int vcpu_idx = vcpu_args->vcpu_idx;
+
+ while (!READ_ONCE(host_quit)) {
+ int current_iteration = READ_ONCE(iteration);
+
+ vcpu_run(vcpu);
+
+ ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
+
+ vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
+
+ /* Wait for the start of the next iteration to be signaled. */
+ while (current_iteration == READ_ONCE(iteration) &&
+ READ_ONCE(iteration) >= 0 &&
+ !READ_ONCE(host_quit))
+ ;
+ }
+}
+
+static void run_test(enum vm_guest_mode mode, void *unused)
+{
+ struct kvm_vm *vm;
+ unsigned long **bitmaps;
+ uint64_t guest_num_pages;
+ uint64_t host_num_pages;
+ uint64_t pages_per_slot;
+ int i;
+ uint64_t total_4k_pages;
+ struct kvm_page_stats stats_populated;
+ struct kvm_page_stats stats_dirty_logging_enabled;
+ struct kvm_page_stats stats_dirty_pass[ITERATIONS];
+ struct kvm_page_stats stats_clear_pass[ITERATIONS];
+ struct kvm_page_stats stats_dirty_logging_disabled;
+ struct kvm_page_stats stats_repopulated;
+
+ vm = memstress_create_vm(mode, VCPUS, guest_percpu_mem_size,
+ SLOTS, backing_src, false);
+
+ guest_num_pages = (VCPUS * guest_percpu_mem_size) >> vm->page_shift;
+ guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);
+ host_num_pages = vm_num_host_pages(mode, guest_num_pages);
+ pages_per_slot = host_num_pages / SLOTS;
+
+ bitmaps = memstress_alloc_bitmaps(SLOTS, pages_per_slot);
+
+ if (dirty_log_manual_caps)
+ vm_enable_cap(vm, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
+ dirty_log_manual_caps);
+
+ /* Start the iterations */
+ iteration = -1;
+ host_quit = false;
+
+ for (i = 0; i < VCPUS; i++)
+ vcpu_last_completed_iteration[i] = -1;
+
+ memstress_start_vcpu_threads(VCPUS, vcpu_worker);
+
+ run_vcpu_iteration(vm);
+ get_page_stats(vm, &stats_populated, "populating memory");
+
+ /* Enable dirty logging */
+ memstress_enable_dirty_logging(vm, SLOTS);
+
+ get_page_stats(vm, &stats_dirty_logging_enabled, "enabling dirty logging");
+
+ while (iteration < ITERATIONS) {
+ run_vcpu_iteration(vm);
+ get_page_stats(vm, &stats_dirty_pass[iteration - 1],
+ "dirtying memory");
+
+ memstress_get_dirty_log(vm, bitmaps, SLOTS);
+
+ if (dirty_log_manual_caps) {
+ memstress_clear_dirty_log(vm, bitmaps, SLOTS, pages_per_slot);
+
+ get_page_stats(vm, &stats_clear_pass[iteration - 1], "clearing dirty log");
+ }
+ }
+
+ /* Disable dirty logging */
+ memstress_disable_dirty_logging(vm, SLOTS);
+
+ get_page_stats(vm, &stats_dirty_logging_disabled, "disabling dirty logging");
+
+ /* Run vCPUs again to fault pages back in. */
+ run_vcpu_iteration(vm);
+ get_page_stats(vm, &stats_repopulated, "repopulating memory");
+
+ /*
+ * Tell the vCPU threads to quit. No need to manually check that vCPUs
+ * have stopped running after disabling dirty logging, the join will
+ * wait for them to exit.
+ */
+ host_quit = true;
+ memstress_join_vcpu_threads(VCPUS);
+
+ memstress_free_bitmaps(bitmaps, SLOTS);
+ memstress_destroy_vm(vm);
+
+ /* Make assertions about the page counts. */
+ total_4k_pages = stats_populated.pages_4k;
+ total_4k_pages += stats_populated.pages_2m * 512;
+ total_4k_pages += stats_populated.pages_1g * 512 * 512;
+
+ /*
+ * Check that all huge pages were split. Since large pages can only
+ * exist in the data slot, and the vCPUs should have dirtied all pages
+ * in the data slot, there should be no huge pages left after splitting.
+ * Splitting happens at dirty log enable time without
+ * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 and after the first clear pass
+ * with that capability.
+ */
+ if (dirty_log_manual_caps) {
+ ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
+ ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
+ ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
+ } else {
+ ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
+ ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
+ }
+
+ /*
+ * Once dirty logging is disabled and the vCPUs have touched all their
+ * memory again, the page counts should be the same as they were
+ * right after initial population of memory.
+ */
+ ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
+ ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
+ ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
+}
+
+static void help(char *name)
+{
+ puts("");
+ printf("usage: %s [-h] [-b vcpu bytes] [-s mem type]\n",
+ name);
+ puts("");
+ printf(" -b: specify the size of the memory region which should be\n"
+ " dirtied by each vCPU. e.g. 10M or 3G.\n"
+ " (default: 1G)\n");
+ backing_src_help("-s");
+ puts("");
+}
+
+int main(int argc, char *argv[])
+{
+ int opt;
+
+ TEST_REQUIRE(get_kvm_param_bool("eager_page_split"));
+ TEST_REQUIRE(get_kvm_param_bool("tdp_mmu"));
+
+ while ((opt = getopt(argc, argv, "b:hs:")) != -1) {
+ switch (opt) {
+ case 'b':
+ guest_percpu_mem_size = parse_size(optarg);
+ break;
+ case 'h':
+ help(argv[0]);
+ exit(0);
+ case 's':
+ backing_src = parse_backing_src_type(optarg);
+ break;
+ default:
+ help(argv[0]);
+ exit(1);
+ }
+ }
+
+ if (!is_backing_src_hugetlb(backing_src)) {
+ pr_info("This test will only work reliably with HugeTLB memory. "
+ "It can work with THP, but that is best effort.\n");
+ }
+
+ guest_modes_append_default();
+
+ dirty_log_manual_caps = 0;
+ for_each_guest_mode(run_test, NULL);
+
+ dirty_log_manual_caps =
+ kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
+
+ if (dirty_log_manual_caps) {
+ dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
+ KVM_DIRTY_LOG_INITIALLY_SET);
+ for_each_guest_mode(run_test, NULL);
+ } else {
+ pr_info("Skipping testing with MANUAL_PROTECT as it is not supported");
+ }
+
+ return 0;
+}
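The TEST_REQUIRE(get_kvm_param_bool(...)) calls in main() gate the test on kvm module parameters. A rough stand-alone equivalent, assuming the parameters are exposed under /sys/module/kvm/parameters/ (sketch only, not the selftest helper):

static bool kvm_param_is_true(const char *name)
{
	char path[256], val = 'N';
	FILE *f;

	snprintf(path, sizeof(path), "/sys/module/kvm/parameters/%s", name);
	f = fopen(path, "r");
	if (!f)
		return false;
	if (fread(&val, 1, 1, f) != 1)
		val = 'N';
	fclose(f);
	return val == 'Y' || val == 'y' || val == '1';
}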
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
index 251794f83719..7f36c32fa760 100644
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
@@ -226,7 +226,7 @@ static void help(char *name)
puts("");
printf("usage: %s [-h] [-p period_ms] [-t token]\n", name);
puts("");
- printf(" -p: The NX reclaim period in miliseconds.\n");
+ printf(" -p: The NX reclaim period in milliseconds.\n");
printf(" -t: The magic token to indicate environment setup is done.\n");
printf(" -r: The test has reboot permissions and can disable NX huge pages.\n");
puts("");
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
index fa03c8d1ce4e..e710b6e7fb38 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c
@@ -116,29 +116,21 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_DONE();
}
-static void stable_tsc_check_supported(void)
+static bool system_has_stable_tsc(void)
{
+ bool tsc_is_stable;
FILE *fp;
char buf[4];
fp = fopen("/sys/devices/system/clocksource/clocksource0/current_clocksource", "r");
if (fp == NULL)
- goto skip_test;
+ return false;
- if (fgets(buf, sizeof(buf), fp) == NULL)
- goto close_fp;
+ tsc_is_stable = fgets(buf, sizeof(buf), fp) &&
+ !strncmp(buf, "tsc", sizeof(buf));
- if (strncmp(buf, "tsc", sizeof(buf)))
- goto close_fp;
-
- fclose(fp);
- return;
-
-close_fp:
fclose(fp);
-skip_test:
- print_skip("Kernel does not use TSC clocksource - assuming that host TSC is not stable");
- exit(KSFT_SKIP);
+ return tsc_is_stable;
}
int main(int argc, char *argv[])
@@ -156,7 +148,7 @@ int main(int argc, char *argv[])
TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
TEST_REQUIRE(kvm_has_cap(KVM_CAP_TSC_CONTROL));
- stable_tsc_check_supported();
+ TEST_REQUIRE(system_has_stable_tsc());
/*
* We set L1's scale factor to be a random number from 2 to 10.
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index d1d421ec10a3..cd3cc52c59b4 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -50,3 +50,4 @@ CONFIG_CRYPTO_SM4_GENERIC=y
CONFIG_AMT=m
CONFIG_VXLAN=m
CONFIG_IP_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config
index 6032f9b23c4c..e317c2e44dae 100644
--- a/tools/testing/selftests/net/mptcp/config
+++ b/tools/testing/selftests/net/mptcp/config
@@ -6,6 +6,7 @@ CONFIG_INET_DIAG=m
CONFIG_INET_MPTCP_DIAG=m
CONFIG_VETH=y
CONFIG_NET_SCH_NETEM=m
+CONFIG_SYN_COOKIES=y
CONFIG_NETFILTER=y
CONFIG_NETFILTER_ADVANCED=y
CONFIG_NETFILTER_NETLINK=m
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 13561e5bc0cd..bbae40882bfa 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -718,6 +718,7 @@ table inet mangle {
EOF
if [ $? -ne 0 ]; then
echo "SKIP: $msg, could not load nft ruleset"
+ mptcp_lib_fail_if_expected_feature "nft rules"
return
fi
@@ -733,6 +734,7 @@ EOF
if [ $? -ne 0 ]; then
ip netns exec "$listener_ns" nft flush ruleset
echo "SKIP: $msg, ip $r6flag rule failed"
+ mptcp_lib_fail_if_expected_feature "ip rule"
return
fi
@@ -741,6 +743,7 @@ EOF
ip netns exec "$listener_ns" nft flush ruleset
ip -net "$listener_ns" $r6flag rule del fwmark 1 lookup 100
echo "SKIP: $msg, ip route add local $local_addr failed"
+ mptcp_lib_fail_if_expected_feature "ip route"
return
fi
diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
index f295a371ff14..dc8d473fc82c 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
@@ -12,6 +12,8 @@ ksft_skip=4
timeout_poll=30
timeout_test=$((timeout_poll * 2 + 1))
mptcp_connect=""
+iptables="iptables"
+ip6tables="ip6tables"
sec=$(date +%s)
rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
@@ -25,7 +27,7 @@ add_mark_rules()
local m=$2
local t
- for t in iptables ip6tables; do
+ for t in ${iptables} ${ip6tables}; do
# just to debug: check we have multiple subflows connection requests
ip netns exec $ns $t -A OUTPUT -p tcp --syn -m mark --mark $m -j ACCEPT
@@ -95,14 +97,14 @@ if [ $? -ne 0 ];then
exit $ksft_skip
fi
-iptables -V > /dev/null 2>&1
-if [ $? -ne 0 ];then
+# Use the legacy version if available to support old kernel versions
+if iptables-legacy -V &> /dev/null; then
+ iptables="iptables-legacy"
+ ip6tables="ip6tables-legacy"
+elif ! iptables -V &> /dev/null; then
echo "SKIP: Could not run all tests without iptables tool"
exit $ksft_skip
-fi
-
-ip6tables -V > /dev/null 2>&1
-if [ $? -ne 0 ];then
+elif ! ip6tables -V &> /dev/null; then
echo "SKIP: Could not run all tests without ip6tables tool"
exit $ksft_skip
fi
@@ -112,10 +114,10 @@ check_mark()
local ns=$1
local af=$2
- local tables=iptables
+ local tables=${iptables}
if [ $af -eq 6 ];then
- tables=ip6tables
+ tables=${ip6tables}
fi
local counters values
@@ -126,6 +128,7 @@ check_mark()
for v in $values; do
if [ $v -ne 0 ]; then
echo "FAIL: got $tables $values in ns $ns , not 0 - not all expected packets marked" 1>&2
+ ret=1
return 1
fi
done
@@ -225,11 +228,11 @@ do_transfer()
fi
if [ $local_addr = "::" ];then
- check_mark $listener_ns 6
- check_mark $connector_ns 6
+ check_mark $listener_ns 6 || retc=1
+ check_mark $connector_ns 6 || retc=1
else
- check_mark $listener_ns 4
- check_mark $connector_ns 4
+ check_mark $listener_ns 4 || retc=1
+ check_mark $connector_ns 4 || retc=1
fi
check_transfer $cin $sout "file received by server"
diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
index abddf4c63e79..1887bd61bd9a 100644
--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
@@ -425,7 +425,7 @@ int dsf(int fd, int pm_family, int argc, char *argv[])
}
/* token */
- token = atoi(params[4]);
+ token = strtoul(params[4], NULL, 10);
rta = (void *)(data + off);
rta->rta_type = MPTCP_PM_ATTR_TOKEN;
rta->rta_len = RTA_LENGTH(4);
@@ -551,7 +551,7 @@ int csf(int fd, int pm_family, int argc, char *argv[])
}
/* token */
- token = atoi(params[4]);
+ token = strtoul(params[4], NULL, 10);
rta = (void *)(data + off);
rta->rta_type = MPTCP_PM_ATTR_TOKEN;
rta->rta_len = RTA_LENGTH(4);
@@ -598,7 +598,7 @@ int remove_addr(int fd, int pm_family, int argc, char *argv[])
if (++arg >= argc)
error(1, 0, " missing token value");
- token = atoi(argv[arg]);
+ token = strtoul(argv[arg], NULL, 10);
rta = (void *)(data + off);
rta->rta_type = MPTCP_PM_ATTR_TOKEN;
rta->rta_len = RTA_LENGTH(4);
@@ -710,7 +710,7 @@ int announce_addr(int fd, int pm_family, int argc, char *argv[])
if (++arg >= argc)
error(1, 0, " missing token value");
- token = atoi(argv[arg]);
+ token = strtoul(argv[arg], NULL, 10);
} else
error(1, 0, "unknown keyword %s", argv[arg]);
}
@@ -1347,7 +1347,7 @@ int set_flags(int fd, int pm_family, int argc, char *argv[])
error(1, 0, " missing token value");
/* token */
- token = atoi(argv[arg]);
+ token = strtoul(argv[arg], NULL, 10);
} else if (!strcmp(argv[arg], "flags")) {
char *tok, *str;
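The atoi() to strtoul() conversions above matter because an MPTCP token is a full 32-bit unsigned value: atoi() goes through int, so tokens above INT_MAX overflow. A small stand-alone illustration (hypothetical snippet, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *tok = "3735928559";	/* 0xdeadbeef, larger than INT_MAX */
	unsigned int via_atoi = atoi(tok);		/* overflows int first */
	unsigned int via_strtoul = strtoul(tok, NULL, 10);	/* full 32-bit range */

	printf("atoi: %u, strtoul: %u\n", via_atoi, via_strtoul);
	return 0;
}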
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 98d9e4d2d3fc..b180133a30af 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -423,6 +423,7 @@ test_remove()
stdbuf -o0 -e0 printf "[OK]\n"
else
stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
fi
# RM_ADDR using an invalid addr id should result in no action
@@ -437,6 +438,7 @@ test_remove()
stdbuf -o0 -e0 printf "[OK]\n"
else
stdbuf -o0 -e0 printf "[FAIL]\n"
+ exit 1
fi
# RM_ADDR from the client to server machine
@@ -848,7 +850,7 @@ test_prio()
local count
# Send MP_PRIO signal from client to server machine
- ip netns exec "$ns2" ./pm_nl_ctl set 10.0.1.2 port "$client4_port" flags backup token "$client4_token" rip 10.0.1.1 rport "$server4_port"
+ ip netns exec "$ns2" ./pm_nl_ctl set 10.0.1.2 port "$client4_port" flags backup token "$client4_token" rip 10.0.1.1 rport "$app4_port"
sleep 0.5
# Check TX
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 62f3b0f56e4d..d3cdc2d33d4b 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -655,4 +655,4 @@ fi
# Control buffer size: --bootargs trace_buf_size=3k
# Get trace-buffer dumps on all oopses: --bootargs ftrace_dump_on_oops
# Ditto, but dump only the oopsing CPU: --bootargs ftrace_dump_on_oops=orig_cpu
-# Heavy-handed way to also dump on warnings: --bootargs panic_on_warn
+# Heavy-handed way to also dump on warnings: --bootargs panic_on_warn=1
diff --git a/tools/testing/selftests/tty/.gitignore b/tools/testing/selftests/tty/.gitignore
new file mode 100644
index 000000000000..fe70462a4aad
--- /dev/null
+++ b/tools/testing/selftests/tty/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+tty_tstamp_update
diff --git a/tools/testing/selftests/tty/Makefile b/tools/testing/selftests/tty/Makefile
new file mode 100644
index 000000000000..50d7027b2ae3
--- /dev/null
+++ b/tools/testing/selftests/tty/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS = -O2 -Wall
+TEST_GEN_PROGS := tty_tstamp_update
+
+include ../lib.mk
diff --git a/tools/testing/selftests/tty/tty_tstamp_update.c b/tools/testing/selftests/tty/tty_tstamp_update.c
new file mode 100644
index 000000000000..0ee97943dccc
--- /dev/null
+++ b/tools/testing/selftests/tty/tty_tstamp_update.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <linux/limits.h>
+
+#include "../kselftest.h"
+
+#define MIN_TTY_PATH_LEN 8
+
+static bool tty_valid(char *tty)
+{
+ if (strlen(tty) < MIN_TTY_PATH_LEN)
+ return false;
+
+ if (strncmp(tty, "/dev/tty", MIN_TTY_PATH_LEN) == 0 ||
+ strncmp(tty, "/dev/pts", MIN_TTY_PATH_LEN) == 0)
+ return true;
+
+ return false;
+}
+
+static int write_dev_tty(void)
+{
+ FILE *f;
+ int r = 0;
+
+ f = fopen("/dev/tty", "r+");
+ if (!f)
+ return -errno;
+
+ r = fprintf(f, "hello, world!\n");
+ if (r != strlen("hello, world!\n"))
+ r = -EIO;
+
+ fclose(f);
+ return r;
+}
+
+int main(int argc, char **argv)
+{
+ int r;
+ char tty[PATH_MAX] = {};
+ struct stat st1, st2;
+
+ ksft_print_header();
+ ksft_set_plan(1);
+
+ r = readlink("/proc/self/fd/0", tty, PATH_MAX);
+ if (r < 0)
+ ksft_exit_fail_msg("readlink on /proc/self/fd/0 failed: %m\n");
+
+ if (!tty_valid(tty))
+ ksft_exit_skip("invalid tty path '%s'\n", tty);
+
+ r = stat(tty, &st1);
+ if (r < 0)
+ ksft_exit_fail_msg("stat failed on tty path '%s': %m\n", tty);
+
+ /* We need to wait at least 8 seconds in order to observe timestamp change */
+ /* https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=fbf47635315ab308c9b58a1ea0906e711a9228de */
+ sleep(10);
+
+ r = write_dev_tty();
+ if (r < 0)
+ ksft_exit_fail_msg("failed to write to /dev/tty: %s\n",
+ strerror(-r));
+
+ r = stat(tty, &st2);
+ if (r < 0)
+ ksft_exit_fail_msg("stat failed on tty path '%s': %m\n", tty);
+
+ /* We wrote to the terminal so timestamps should have been updated */
+ if (st1.st_atim.tv_sec == st2.st_atim.tv_sec &&
+ st1.st_mtim.tv_sec == st2.st_mtim.tv_sec) {
+ ksft_test_result_fail("tty timestamps not updated\n");
+ ksft_exit_fail();
+ }
+
+ ksft_test_result_pass(
+ "timestamps of terminal '%s' updated after write to /dev/tty\n", tty);
+ return EXIT_SUCCESS;
+}
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
index 69c7796c7ca9..405ff262ca93 100755
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -514,10 +514,32 @@ n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter'
n1 ping -W 1 -c 1 192.168.241.2
[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.3:1" ]]
-ip1 link del veth1
-ip1 link del veth3
-ip1 link del wg0
-ip2 link del wg0
+ip1 link del dev veth3
+ip1 link del dev wg0
+ip2 link del dev wg0
+
+# Make sure persistent keepalives are sent when an adapter comes up
+ip1 link add dev wg0 type wireguard
+n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" endpoint 10.0.0.1:1 persistent-keepalive 1
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -eq 0 ]]
+ip1 link set dev wg0 up
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -gt 0 ]]
+ip1 link del dev wg0
+# This should also happen even if the private key is set later
+ip1 link add dev wg0 type wireguard
+n1 wg set wg0 peer "$pub2" endpoint 10.0.0.1:1 persistent-keepalive 1
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -eq 0 ]]
+ip1 link set dev wg0 up
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -eq 0 ]]
+n1 wg set wg0 private-key <(echo "$key1")
+read _ _ tx_bytes < <(n1 wg show wg0 transfer)
+[[ $tx_bytes -gt 0 ]]
+ip1 link del dev veth1
+ip1 link del dev wg0
# We test that Netlink/IPC is working properly by doing things that usually cause split responses
ip0 link add dev wg0 type wireguard