author		Jiri Kosina <jkosina@suse.cz>	2021-06-30 09:02:28 +0200
committer	Jiri Kosina <jkosina@suse.cz>	2021-06-30 09:02:28 +0200
commit		5a94296bc02ac616336da7b5332b86d2ca8827f0 (patch)
tree		d13c4e7db2b4b01adb87fbf2ea5bafd2dc920b2f /tools/testing
parent		3b770932eefb7c0c6319d332023efee87eb12913 (diff)
parent		24a31ea94922d391a96a9dd0a9a830de65423817 (diff)
Merge branch 'for-5.14/amd-sfh' into for-linus
- support for Renoir and Cezanne SoCs
- support for Ambient Light Sensor
- support for Human Presence Detection sensor
all from Basavaraj Natikar
Diffstat (limited to 'tools/testing')
72 files changed, 8843 insertions, 887 deletions
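The first file below, tools/testing/ktest/examples/vmware.conf, drives a VMware guest through its virtual serial port: the CONSOLE option reads the Unix socket that VMware exposes for the serial device, and vmrun is used to power-cycle the guest. As a rough sketch of what the `ncat -U` console command amounts to (the socket path is assembled from the example's placeholder values and is illustrative, not part of the patch):

/* Connect to the Unix socket VMware creates for the guest's serial
 * port and stream it to stdout -- roughly what CONSOLE = ncat -U ...
 * does in the config below. The path is illustrative only. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	char buf[4096];
	ssize_t n;
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	strncpy(addr.sun_path, "/var/lib/vmware/Guest/ktestserialpipe",
		sizeof(addr.sun_path) - 1);
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		if (write(STDOUT_FILENO, buf, n) != n)
			break;
	close(fd);
	return 0;
}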
diff --git a/tools/testing/ktest/examples/vmware.conf b/tools/testing/ktest/examples/vmware.conf
new file mode 100644
index 000000000000..61958163d242
--- /dev/null
+++ b/tools/testing/ktest/examples/vmware.conf
@@ -0,0 +1,137 @@
+#
+# This config is an example usage of ktest.pl with a vmware guest
+#
+# VMware Setup:
+# -------------
+# - Edit the Virtual Machine ("Edit virtual machine settings")
+#   - Add a Serial Port
+#     - You almost certainly want it set "Connect at power on"
+#     - Select "Use socket (named pipe)"
+#     - Select a name that you'll recognize, like 'ktestserialpipe'
+#     - From: Server
+#     - To: A Virtual Machine
+#     - Save
+#   - Make sure you note the name, it will be in the base directory of the
+#     virtual machine (where the "disks" are stored). The default
+#     is /var/lib/vmware/<virtual machine name>/<the name you entered above>
+#
+# - Make note of the path to the VM
+# </End VMware setup>
+#
+# The guest is called 'Guest' and this would be something that
+# could be run on the host to test a virtual machine target.
+
+MACHINE = Guest
+
+# Name of the serial pipe you set in the VMware settings
+VMWARE_SERIAL_NAME = <the name you entered above>
+
+# Define a variable of the name of the VM
+# Note that this needs to be the name of the vmx file, and usually, the
+# name of the directory that it's in. If the directory and name
+# differ, change the VMWARE_VM_DIR accordingly.
+# Please omit the .vmx extension
+VMWARE_VM_NAME = <virtual machine name>
+
+# VM dir name. This is usually the same as the virtual machine's name,
+# but not always the case. Change if they differ.
+VMWARE_VM_DIR = ${VMWARE_VM_NAME}
+
+# Base directory that the virtual machine is contained in
+# /var/lib/vmware is the default on Linux
+VMWARE_VM_BASE_DIR = /var/lib/vmware/${VMWARE_VM_DIR}
+
+# Use ncat to read the Unix pipe. Anything that can read the Unix pipe
+# and output its contents to stdout will work
+CONSOLE = /usr/bin/ncat -U ${VMWARE_VM_BASE_DIR}/${VMWARE_SERIAL_NAME}
+
+# Define what version of Workstation you are using
+# This is used by vmrun to use the appropriate pieces to
+# test this. In all likelihood you want 'ws' or 'player'
+# Valid options:
+#	ws - Workstation (Windows or Linux host)
+#	fusion - Fusion (Mac host)
+#	player - Using VMware Player (Windows or Linux host)
+# Note: vmrun has to run directly on the host machine
+VMWARE_HOST_TYPE = ws
+
+# VMware provides `vmrun` to allow you to do certain things to the virtual machine
+# This should hard reset the VM and force a boot
+VMWARE_POWER_CYCLE = /usr/bin/vmrun -T ${VMWARE_HOST_TYPE} reset ${VMWARE_VM_BASE_DIR}/${VMWARE_VM_NAME}.vmx nogui
+
+#*************************************#
+# This part is the same as test.conf  #
+#*************************************#
+
+# The include files will set up the type of test to run. Just set TEST to
+# which test you want to run.
+#
+# TESTS = patchcheck, randconfig, boot, test, config-bisect, bisect, min-config
+#
+# See the include/*.conf files that define these tests
+#
+TEST := patchcheck
+
+# Some tests may have more than one test to run. Define MULTI := 1 to run
+# the extra tests.
+MULTI := 0
+
+# In case you want to differentiate which type of system you are testing
+BITS := 64
+
+# REBOOT = none, error, fail, empty
+# See include/defaults.conf
+REBOOT := empty
+
+
+# The defaults file will set up various settings that can be used by all
+# machine configs.
+INCLUDE include/defaults.conf + + +#*************************************# +# Now we are different from test.conf # +#*************************************# + + +# The example here assumes that Guest is running a Fedora release +# that uses dracut for its initfs. The POST_INSTALL will be executed +# after the install of the kernel and modules are complete. +# +POST_INSTALL = ${SSH} /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION + +# Guests sometimes get stuck on reboot. We wait 3 seconds after running +# the reboot command and then do a full power-cycle of the guest. +# This forces the guest to restart. +# +POWERCYCLE_AFTER_REBOOT = 3 + +# We do the same after the halt command, but this time we wait 20 seconds. +POWEROFF_AFTER_HALT = 20 + + +# As the defaults.conf file has a POWER_CYCLE option already defined, +# and options can not be defined in the same section more than once +# (all DEFAULTS sections are considered the same). We use the +# DEFAULTS OVERRIDE to tell ktest.pl to ignore the previous defined +# options, for the options set in the OVERRIDE section. +# +DEFAULTS OVERRIDE + +# Instead of using the default POWER_CYCLE option defined in +# defaults.conf, we use virsh to cycle it. To do so, we destroy +# the guest, wait 5 seconds, and then start it up again. +# Crude, but effective. +# +POWER_CYCLE = ${VMWARE_POWER_CYCLE} + + +DEFAULTS + +# The following files each handle a different test case. +# Having them included allows you to set up more than one machine and share +# the same tests. +INCLUDE include/patchcheck.conf +INCLUDE include/tests.conf +INCLUDE include/bisect.conf +INCLUDE include/min-config.conf diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 4e2450964517..09d1578f9d66 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -24,7 +24,7 @@ my %evals; #default opts my %default = ( - "MAILER" => "sendmail", # default mailer + "MAILER" => "sendmail", # default mailer "EMAIL_ON_ERROR" => 1, "EMAIL_WHEN_FINISHED" => 1, "EMAIL_WHEN_CANCELED" => 0, @@ -36,15 +36,15 @@ my %default = ( "CLOSE_CONSOLE_SIGNAL" => "INT", "TIMEOUT" => 120, "TMP_DIR" => "/tmp/ktest/\${MACHINE}", - "SLEEP_TIME" => 60, # sleep time between tests + "SLEEP_TIME" => 60, # sleep time between tests "BUILD_NOCLEAN" => 0, "REBOOT_ON_ERROR" => 0, "POWEROFF_ON_ERROR" => 0, "REBOOT_ON_SUCCESS" => 1, "POWEROFF_ON_SUCCESS" => 0, "BUILD_OPTIONS" => "", - "BISECT_SLEEP_TIME" => 60, # sleep time between bisects - "PATCHCHECK_SLEEP_TIME" => 60, # sleep time between patch checks + "BISECT_SLEEP_TIME" => 60, # sleep time between bisects + "PATCHCHECK_SLEEP_TIME" => 60, # sleep time between patch checks "CLEAR_LOG" => 0, "BISECT_MANUAL" => 0, "BISECT_SKIP" => 1, @@ -512,6 +512,69 @@ $config_help{"REBOOT_SCRIPT"} = << "EOF" EOF ; +# used with process_expression() +my $d = 0; + +# defined before get_test_name() +my $in_die = 0; + +# defined before process_warning_line() +my $check_build_re = ".*:.*(warning|error|Error):.*"; +my $utf8_quote = "\\x{e2}\\x{80}(\\x{98}|\\x{99})"; + +# defined before child_finished() +my $child_done; + +# config_ignore holds the configs that were set (or unset) for +# a good config and we will ignore these configs for the rest +# of a config bisect. These configs stay as they were. +my %config_ignore; + +# config_set holds what all configs were set as. +my %config_set; + +# config_off holds the set of configs that the bad config had disabled. 
+# We need to record them and set them in the .config when running +# olddefconfig, because olddefconfig keeps the defaults. +my %config_off; + +# config_off_tmp holds a set of configs to turn off for now +my @config_off_tmp; + +# config_list is the set of configs that are being tested +my %config_list; +my %null_config; + +my %dependency; + +# found above run_config_bisect() +my $pass = 1; + +# found above add_dep() + +my %depends; +my %depcount; +my $iflevel = 0; +my @ifdeps; + +# prevent recursion +my %read_kconfigs; + +# found above test_this_config() +my %min_configs; +my %keep_configs; +my %save_configs; +my %processed_configs; +my %nochange_config; + +# +# These are first defined here, main function later on +# +sub run_command; +sub start_monitor; +sub end_monitor; +sub wait_for_monitor; + sub _logit { if (defined($opt{"LOG_FILE"})) { print LOG @_; @@ -537,7 +600,7 @@ sub read_prompt { my $ans; for (;;) { - if ($cancel) { + if ($cancel) { print "$prompt [y/n/C] "; } else { print "$prompt [Y/n] "; @@ -760,7 +823,7 @@ sub process_variables { # remove the space added in the beginning $retval =~ s/ //; - return "$retval" + return "$retval"; } sub set_value { @@ -863,7 +926,6 @@ sub value_defined { defined($opt{$2}); } -my $d = 0; sub process_expression { my ($name, $val) = @_; @@ -978,7 +1040,6 @@ sub __read_config { $override = 0; if ($type eq "TEST_START") { - if ($num_tests_set) { die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n"; } @@ -1048,7 +1109,6 @@ sub __read_config { $test_num = $old_test_num; $repeat = $old_repeat; } - } elsif (/^\s*ELSE\b(.*)$/) { if (!$if) { die "$name: $.: ELSE found with out matching IF section\n$_"; @@ -1095,7 +1155,7 @@ sub __read_config { } } } - + if ( ! -r $file ) { die "$name: $.: Can't read file $file\n$_"; } @@ -1186,13 +1246,13 @@ sub __read_config { } sub get_test_case { - print "What test case would you like to run?\n"; - print " (build, install or boot)\n"; - print " Other tests are available but require editing ktest.conf\n"; - print " (see tools/testing/ktest/sample.conf)\n"; - my $ans = <STDIN>; - chomp $ans; - $default{"TEST_TYPE"} = $ans; + print "What test case would you like to run?\n"; + print " (build, install or boot)\n"; + print " Other tests are available but require editing ktest.conf\n"; + print " (see tools/testing/ktest/sample.conf)\n"; + my $ans = <STDIN>; + chomp $ans; + $default{"TEST_TYPE"} = $ans; } sub read_config { @@ -1368,11 +1428,6 @@ sub eval_option { return $option; } -sub run_command; -sub start_monitor; -sub end_monitor; -sub wait_for_monitor; - sub reboot { my ($time) = @_; my $powercycle = 0; @@ -1457,8 +1512,6 @@ sub do_not_reboot { ($test_type eq "config_bisect" && $opt{"CONFIG_BISECT_TYPE[$i]"} eq "build"); } -my $in_die = 0; - sub get_test_name() { my $name; @@ -1471,7 +1524,6 @@ sub get_test_name() { } sub dodie { - # avoid recursion return if ($in_die); $in_die = 1; @@ -1481,10 +1533,8 @@ sub dodie { doprint "CRITICAL FAILURE... 
[TEST $i] ", @_, "\n"; if ($reboot_on_error && !do_not_reboot) { - doprint "REBOOTING\n"; reboot_to_good; - } elsif ($poweroff_on_error && defined($power_off)) { doprint "POWERING OFF\n"; `$power_off`; @@ -1519,13 +1569,14 @@ sub dodie { close O; close L; } - send_email("KTEST: critical failure for test $i [$name]", - "Your test started at $script_start_time has failed with:\n@_\n", $log_file); + + send_email("KTEST: critical failure for test $i [$name]", + "Your test started at $script_start_time has failed with:\n@_\n", $log_file); } if ($monitor_cnt) { - # restore terminal settings - system("stty $stty_orig"); + # restore terminal settings + system("stty $stty_orig"); } if (defined($post_test)) { @@ -1709,81 +1760,81 @@ sub wait_for_monitor { } sub save_logs { - my ($result, $basedir) = @_; - my @t = localtime; - my $date = sprintf "%04d%02d%02d%02d%02d%02d", - 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0]; + my ($result, $basedir) = @_; + my @t = localtime; + my $date = sprintf "%04d%02d%02d%02d%02d%02d", + 1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0]; - my $type = $build_type; - if ($type =~ /useconfig/) { - $type = "useconfig"; - } + my $type = $build_type; + if ($type =~ /useconfig/) { + $type = "useconfig"; + } - my $dir = "$machine-$test_type-$type-$result-$date"; + my $dir = "$machine-$test_type-$type-$result-$date"; - $dir = "$basedir/$dir"; + $dir = "$basedir/$dir"; - if (!-d $dir) { - mkpath($dir) or - dodie "can't create $dir"; - } + if (!-d $dir) { + mkpath($dir) or + dodie "can't create $dir"; + } - my %files = ( - "config" => $output_config, - "buildlog" => $buildlog, - "dmesg" => $dmesg, - "testlog" => $testlog, - ); + my %files = ( + "config" => $output_config, + "buildlog" => $buildlog, + "dmesg" => $dmesg, + "testlog" => $testlog, + ); - while (my ($name, $source) = each(%files)) { - if (-f "$source") { - cp "$source", "$dir/$name" or - dodie "failed to copy $source"; - } + while (my ($name, $source) = each(%files)) { + if (-f "$source") { + cp "$source", "$dir/$name" or + dodie "failed to copy $source"; } + } - doprint "*** Saved info to $dir ***\n"; + doprint "*** Saved info to $dir ***\n"; } sub fail { - if ($die_on_failure) { - dodie @_; - } + if ($die_on_failure) { + dodie @_; + } - doprint "FAILED\n"; + doprint "FAILED\n"; - my $i = $iteration; + my $i = $iteration; - # no need to reboot for just building. - if (!do_not_reboot) { - doprint "REBOOTING\n"; - reboot_to_good $sleep_time; - } + # no need to reboot for just building. 
+ if (!do_not_reboot) { + doprint "REBOOTING\n"; + reboot_to_good $sleep_time; + } - my $name = ""; + my $name = ""; - if (defined($test_name)) { - $name = " ($test_name)"; - } + if (defined($test_name)) { + $name = " ($test_name)"; + } - print_times; + print_times; - doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; - doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; - doprint "KTEST RESULT: TEST $i$name Failed: ", @_, "\n"; - doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; - doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; + doprint "KTEST RESULT: TEST $i$name Failed: ", @_, "\n"; + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; + doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"; - if (defined($store_failures)) { - save_logs "fail", $store_failures; - } + if (defined($store_failures)) { + save_logs "fail", $store_failures; + } - if (defined($post_test)) { - run_command $post_test; - } + if (defined($post_test)) { + run_command $post_test; + } - return 1; + return 1; } sub run_command { @@ -1915,8 +1966,8 @@ sub _get_grub_index { my ($command, $target, $skip) = @_; return if (defined($grub_number) && defined($last_grub_menu) && - $last_grub_menu eq $grub_menu && defined($last_machine) && - $last_machine eq $machine); + $last_grub_menu eq $grub_menu && defined($last_machine) && + $last_machine eq $machine); doprint "Find $reboot_type menu ... "; $grub_number = -1; @@ -1924,8 +1975,8 @@ sub _get_grub_index { my $ssh_grub = $ssh_exec; $ssh_grub =~ s,\$SSH_COMMAND,$command,g; - open(IN, "$ssh_grub |") - or dodie "unable to execute $command"; + open(IN, "$ssh_grub |") or + dodie "unable to execute $command"; my $found = 0; @@ -1969,9 +2020,9 @@ sub get_grub_index { $target = '^menuentry.*' . $grub_menu_qt; $skip = '^menuentry\s|^submenu\s'; } elsif ($reboot_type eq "grub2bls") { - $command = $grub_bls_get; - $target = '^title=.*' . $grub_menu_qt; - $skip = '^title='; + $command = $grub_bls_get; + $target = '^title=.*' . 
$grub_menu_qt; + $skip = '^title='; } else { return; } @@ -1979,8 +2030,7 @@ sub get_grub_index { _get_grub_index($command, $target, $skip); } -sub wait_for_input -{ +sub wait_for_input { my ($fp, $time) = @_; my $start_time; my $rin; @@ -2096,7 +2146,6 @@ sub monitor { my $version_found = 0; while (!$done) { - if ($bug && defined($stop_after_failure) && $stop_after_failure >= 0) { my $time = $stop_after_failure - (time - $failure_start); @@ -2349,9 +2398,6 @@ sub start_monitor_and_install { return monitor; } -my $check_build_re = ".*:.*(warning|error|Error):.*"; -my $utf8_quote = "\\x{e2}\\x{80}(\\x{98}|\\x{99})"; - sub process_warning_line { my ($line) = @_; @@ -2394,7 +2440,7 @@ sub check_buildlog { while (<IN>) { if (/$check_build_re/) { my $warning = process_warning_line $_; - + $warnings_list{$warning} = 1; } } @@ -2571,7 +2617,6 @@ sub build { run_command "mv $outputdir/config_temp $output_config" or dodie "moving config_temp"; } - } elsif (!$noclean) { unlink "$output_config"; run_command "$make mrproper" or @@ -2594,6 +2639,9 @@ sub build { # Run old config regardless, to enforce min configurations make_oldconfig; + if (not defined($build_options)){ + $build_options = ""; + } my $build_ret = run_command "$make $build_options", $buildlog; if (defined($post_build)) { @@ -2649,14 +2697,15 @@ sub success { print_times; - doprint "\n\n*******************************************\n"; - doprint "*******************************************\n"; - doprint "KTEST RESULT: TEST $i$name SUCCESS!!!! **\n"; - doprint "*******************************************\n"; - doprint "*******************************************\n"; + doprint "\n\n"; + doprint "*******************************************\n"; + doprint "*******************************************\n"; + doprint "KTEST RESULT: TEST $i$name SUCCESS!!!! **\n"; + doprint "*******************************************\n"; + doprint "*******************************************\n"; if (defined($store_successes)) { - save_logs "success", $store_successes; + save_logs "success", $store_successes; } if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) { @@ -2698,8 +2747,6 @@ sub child_run_test { exit $run_command_status; } -my $child_done; - sub child_finished { $child_done = 1; } @@ -3031,7 +3078,6 @@ sub bisect { } if ($do_check) { - # get current HEAD my $head = get_sha1("HEAD"); @@ -3071,13 +3117,11 @@ sub bisect { run_command "git bisect replay $replay" or dodie "failed to run replay"; } else { - run_command "git bisect good $good" or dodie "could not set bisect good to $good"; run_git_bisect "git bisect bad $bad" or dodie "could not set bisect bad to $bad"; - } if (defined($start)) { @@ -3103,35 +3147,13 @@ sub bisect { success $i; } -# config_ignore holds the configs that were set (or unset) for -# a good config and we will ignore these configs for the rest -# of a config bisect. These configs stay as they were. -my %config_ignore; - -# config_set holds what all configs were set as. -my %config_set; - -# config_off holds the set of configs that the bad config had disabled. -# We need to record them and set them in the .config when running -# olddefconfig, because olddefconfig keeps the defaults. 
-my %config_off; - -# config_off_tmp holds a set of configs to turn off for now -my @config_off_tmp; - -# config_list is the set of configs that are being tested -my %config_list; -my %null_config; - -my %dependency; - sub assign_configs { my ($hash, $config) = @_; doprint "Reading configs from $config\n"; - open (IN, $config) - or dodie "Failed to read $config"; + open (IN, $config) or + dodie "Failed to read $config"; while (<IN>) { chomp; @@ -3219,8 +3241,6 @@ sub config_bisect_end { doprint "***************************************\n\n"; } -my $pass = 1; - sub run_config_bisect { my ($good, $bad, $last_result) = @_; my $reset = ""; @@ -3243,13 +3263,13 @@ sub run_config_bisect { $ret = run_config_bisect_test $config_bisect_type; if ($ret) { - doprint "NEW GOOD CONFIG ($pass)\n"; + doprint "NEW GOOD CONFIG ($pass)\n"; system("cp $output_config $tmpdir/good_config.tmp.$pass"); $pass++; # Return 3 for good config return 3; } else { - doprint "NEW BAD CONFIG ($pass)\n"; + doprint "NEW BAD CONFIG ($pass)\n"; system("cp $output_config $tmpdir/bad_config.tmp.$pass"); $pass++; # Return 4 for bad config @@ -3284,10 +3304,11 @@ sub config_bisect { if (!defined($config_bisect_exec)) { # First check the location that ktest.pl ran - my @locations = ( "$pwd/config-bisect.pl", - "$dirname/config-bisect.pl", - "$builddir/tools/testing/ktest/config-bisect.pl", - undef ); + my @locations = ( + "$pwd/config-bisect.pl", + "$dirname/config-bisect.pl", + "$builddir/tools/testing/ktest/config-bisect.pl", + undef ); foreach my $loc (@locations) { doprint "loc = $loc\n"; $config_bisect_exec = $loc; @@ -3368,7 +3389,7 @@ sub config_bisect { } while ($ret == 3 || $ret == 4); if ($ret == 2) { - config_bisect_end "$good_config.tmp", "$bad_config.tmp"; + config_bisect_end "$good_config.tmp", "$bad_config.tmp"; } return $ret if ($ret < 0); @@ -3511,14 +3532,6 @@ sub patchcheck { return 1; } -my %depends; -my %depcount; -my $iflevel = 0; -my @ifdeps; - -# prevent recursion -my %read_kconfigs; - sub add_dep { # $config depends on $dep my ($config, $dep) = @_; @@ -3548,7 +3561,6 @@ sub read_kconfig { my $cont = 0; my $line; - if (! -f $kconfig) { doprint "file $kconfig does not exist, skipping\n"; return; @@ -3630,8 +3642,8 @@ sub read_kconfig { sub read_depends { # find out which arch this is by the kconfig file - open (IN, $output_config) - or dodie "Failed to read $output_config"; + open (IN, $output_config) or + dodie "Failed to read $output_config"; my $arch; while (<IN>) { if (m,Linux/(\S+)\s+\S+\s+Kernel Configuration,) { @@ -3657,7 +3669,7 @@ sub read_depends { if (! -f $kconfig && $arch =~ /\d$/) { my $orig = $arch; - # some subarchs have numbers, truncate them + # some subarchs have numbers, truncate them $arch =~ s/\d*$//; $kconfig = "$builddir/arch/$arch/Kconfig"; if (! -f $kconfig) { @@ -3706,7 +3718,6 @@ sub get_depends { my @configs; while ($dep =~ /[$valid]/) { - if ($dep =~ /^[^$valid]*([$valid]+)/) { my $conf = "CONFIG_" . 
$1; @@ -3721,12 +3732,6 @@ sub get_depends { return @configs; } -my %min_configs; -my %keep_configs; -my %save_configs; -my %processed_configs; -my %nochange_config; - sub test_this_config { my ($config) = @_; @@ -3852,7 +3857,7 @@ sub make_min_config { foreach my $config (@config_keys) { my $kconfig = chomp_config $config; if (!defined $depcount{$kconfig}) { - $depcount{$kconfig} = 0; + $depcount{$kconfig} = 0; } } @@ -3887,7 +3892,6 @@ sub make_min_config { my $take_two = 0; while (!$done) { - my $config; my $found; @@ -3898,7 +3902,7 @@ sub make_min_config { # Sort keys by who is most dependent on @test_configs = sort { $depcount{chomp_config($b)} <=> $depcount{chomp_config($a)} } - @test_configs ; + @test_configs ; # Put configs that did not modify the config at the end. my $reset = 1; @@ -3954,13 +3958,13 @@ sub make_min_config { my $failed = 0; build "oldconfig" or $failed = 1; if (!$failed) { - start_monitor_and_install or $failed = 1; + start_monitor_and_install or $failed = 1; - if ($type eq "test" && !$failed) { - do_run_test or $failed = 1; - } + if ($type eq "test" && !$failed) { + do_run_test or $failed = 1; + } - end_monitor; + end_monitor; } $in_bisect = 0; @@ -3974,8 +3978,8 @@ sub make_min_config { # update new ignore configs if (defined($ignore_config)) { - open (OUT, ">$temp_config") - or dodie "Can't write to $temp_config"; + open (OUT, ">$temp_config") or + dodie "Can't write to $temp_config"; foreach my $config (keys %save_configs) { print OUT "$save_configs{$config}\n"; } @@ -4002,8 +4006,8 @@ sub make_min_config { } # Save off all the current mandatory configs - open (OUT, ">$temp_config") - or dodie "Can't write to $temp_config"; + open (OUT, ">$temp_config") or + dodie "Can't write to $temp_config"; foreach my $config (keys %keep_configs) { print OUT "$keep_configs{$config}\n"; } @@ -4041,7 +4045,6 @@ sub make_warnings_file { open(IN, $buildlog) or dodie "Can't open $buildlog"; while (<IN>) { - # Some compilers use UTF-8 extended for quotes # for distcc heterogeneous systems, this causes issues s/$utf8_quote/'/g; @@ -4057,98 +4060,6 @@ sub make_warnings_file { success $i; } -$#ARGV < 1 or die "ktest.pl version: $VERSION\n usage: ktest.pl [config-file]\n"; - -if ($#ARGV == 0) { - $ktest_config = $ARGV[0]; - if (! -f $ktest_config) { - print "$ktest_config does not exist.\n"; - if (!read_yn "Create it?") { - exit 0; - } - } -} - -if (! -f $ktest_config) { - $newconfig = 1; - get_test_case; - open(OUT, ">$ktest_config") or die "Can not create $ktest_config"; - print OUT << "EOF" -# Generated by ktest.pl -# - -# PWD is a ktest.pl variable that will result in the process working -# directory that ktest.pl is executed in. - -# THIS_DIR is automatically assigned the PWD of the path that generated -# the config file. It is best to use this variable when assigning other -# directory paths within this directory. This allows you to easily -# move the test cases to other locations or to other machines. -# -THIS_DIR := $variable{"PWD"} - -# Define each test with TEST_START -# The config options below it will override the defaults -TEST_START -TEST_TYPE = $default{"TEST_TYPE"} - -DEFAULTS -EOF -; - close(OUT); -} -read_config $ktest_config; - -if (defined($opt{"LOG_FILE"})) { - $opt{"LOG_FILE"} = eval_option("LOG_FILE", $opt{"LOG_FILE"}, -1); -} - -# Append any configs entered in manually to the config file. 
-my @new_configs = keys %entered_configs; -if ($#new_configs >= 0) { - print "\nAppending entered in configs to $ktest_config\n"; - open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config"; - foreach my $config (@new_configs) { - print OUT "$config = $entered_configs{$config}\n"; - $opt{$config} = process_variables($entered_configs{$config}); - } -} - -if (defined($opt{"LOG_FILE"})) { - if ($opt{"CLEAR_LOG"}) { - unlink $opt{"LOG_FILE"}; - } - open(LOG, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}"; - LOG->autoflush(1); -} - -doprint "\n\nSTARTING AUTOMATED TESTS\n\n"; - -for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) { - - if (!$i) { - doprint "DEFAULT OPTIONS:\n"; - } else { - doprint "\nTEST $i OPTIONS"; - if (defined($repeat_tests{$i})) { - $repeat = $repeat_tests{$i}; - doprint " ITERATE $repeat"; - } - doprint "\n"; - } - - foreach my $option (sort keys %opt) { - - if ($option =~ /\[(\d+)\]$/) { - next if ($i != $1); - } else { - next if ($i); - } - - doprint "$option = $opt{$option}\n"; - } -} - sub option_defined { my ($option) = @_; @@ -4261,7 +4172,6 @@ sub do_send_mail { } sub send_email { - if (defined($mailto)) { if (!defined($mailer)) { doprint "No email sent: email or mailer not specified in config.\n"; @@ -4274,12 +4184,103 @@ sub send_email { sub cancel_test { if ($email_when_canceled) { my $name = get_test_name; - send_email("KTEST: Your [$name] test was cancelled", - "Your test started at $script_start_time was cancelled: sig int"); + send_email("KTEST: Your [$name] test was cancelled", + "Your test started at $script_start_time was cancelled: sig int"); } die "\nCaught Sig Int, test interrupted: $!\n" } +$#ARGV < 1 or die "ktest.pl version: $VERSION\n usage: ktest.pl [config-file]\n"; + +if ($#ARGV == 0) { + $ktest_config = $ARGV[0]; + if (! -f $ktest_config) { + print "$ktest_config does not exist.\n"; + if (!read_yn "Create it?") { + exit 0; + } + } +} + +if (! -f $ktest_config) { + $newconfig = 1; + get_test_case; + open(OUT, ">$ktest_config") or die "Can not create $ktest_config"; + print OUT << "EOF" +# Generated by ktest.pl +# + +# PWD is a ktest.pl variable that will result in the process working +# directory that ktest.pl is executed in. + +# THIS_DIR is automatically assigned the PWD of the path that generated +# the config file. It is best to use this variable when assigning other +# directory paths within this directory. This allows you to easily +# move the test cases to other locations or to other machines. +# +THIS_DIR := $variable{"PWD"} + +# Define each test with TEST_START +# The config options below it will override the defaults +TEST_START +TEST_TYPE = $default{"TEST_TYPE"} + +DEFAULTS +EOF +; + close(OUT); +} +read_config $ktest_config; + +if (defined($opt{"LOG_FILE"})) { + $opt{"LOG_FILE"} = eval_option("LOG_FILE", $opt{"LOG_FILE"}, -1); +} + +# Append any configs entered in manually to the config file. 
+my @new_configs = keys %entered_configs; +if ($#new_configs >= 0) { + print "\nAppending entered in configs to $ktest_config\n"; + open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config"; + foreach my $config (@new_configs) { + print OUT "$config = $entered_configs{$config}\n"; + $opt{$config} = process_variables($entered_configs{$config}); + } +} + +if (defined($opt{"LOG_FILE"})) { + if ($opt{"CLEAR_LOG"}) { + unlink $opt{"LOG_FILE"}; + } + open(LOG, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}"; + LOG->autoflush(1); +} + +doprint "\n\nSTARTING AUTOMATED TESTS\n\n"; + +for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) { + + if (!$i) { + doprint "DEFAULT OPTIONS:\n"; + } else { + doprint "\nTEST $i OPTIONS"; + if (defined($repeat_tests{$i})) { + $repeat = $repeat_tests{$i}; + doprint " ITERATE $repeat"; + } + doprint "\n"; + } + + foreach my $option (sort keys %opt) { + if ($option =~ /\[(\d+)\]$/) { + next if ($i != $1); + } else { + next if ($i); + } + + doprint "$option = $opt{$option}\n"; + } +} + $SIG{INT} = qw(cancel_test); # First we need to do is the builds @@ -4323,15 +4324,15 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { # The first test may override the PRE_KTEST option if ($i == 1) { - if (defined($pre_ktest)) { - doprint "\n"; - run_command $pre_ktest; - } - if ($email_when_started) { + if (defined($pre_ktest)) { + doprint "\n"; + run_command $pre_ktest; + } + if ($email_when_started) { my $name = get_test_name; - send_email("KTEST: Your [$name] test was started", - "Your test was started on $script_start_time"); - } + send_email("KTEST: Your [$name] test was started", + "Your test was started on $script_start_time"); + } } # Any test can override the POST_KTEST option @@ -4409,7 +4410,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { my $ret = run_command $pre_test; if (!$ret && defined($pre_test_die) && $pre_test_die) { - dodie "failed to pre_test\n"; + dodie "failed to pre_test\n"; } } @@ -4503,12 +4504,11 @@ if ($opt{"POWEROFF_ON_SUCCESS"}) { run_command $switch_to_good; } - doprint "\n $successes of $opt{NUM_TESTS} tests were successful\n\n"; if ($email_when_finished) { send_email("KTEST: Your test has finished!", - "$successes of $opt{NUM_TESTS} tests started at $script_start_time were successful!"); + "$successes of $opt{NUM_TESTS} tests started at $script_start_time were successful!"); } if (defined($opt{"LOG_FILE"})) { @@ -4517,3 +4517,12 @@ if (defined($opt{"LOG_FILE"})) { } exit 0; + +## +# The following are here to standardize tabs/spaces/etc across the most likely editors +### + +# Local Variables: +# mode: perl +# End: +# vim: softtabstop=4 diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c index c62d372d426f..ed563bdd88f3 100644 --- a/tools/testing/nvdimm/test/iomap.c +++ b/tools/testing/nvdimm/test/iomap.c @@ -62,7 +62,7 @@ struct nfit_test_resource *get_nfit_res(resource_size_t resource) } EXPORT_SYMBOL(get_nfit_res); -void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size, +static void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size, void __iomem *(*fallback_fn)(resource_size_t, unsigned long)) { struct nfit_test_resource *nfit_res = get_nfit_res(offset); diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index 9b185bf82da8..54f367cbadae 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -1871,9 +1871,16 @@ static void smart_init(struct nfit_test *t) } } 
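The nvdimm test change that follows tracks an ACPI 6.4 update: the NFIT SPA range structure gained a trailing 8-byte SpaLocationCookie field, so the kernel's struct acpi_nfit_system_address is now 8 bytes larger than the pre-6.4 tables this test emulates. The sizeof_spa() helper introduced below subtracts those 8 bytes so header.length and the buffer sizing keep describing cookie-less structures. A sketch of an equivalent formulation, assuming ACPICA names the new trailing field location_cookie:

/* Equivalent to the "sizeof(*spa) - 8" in the patch, spelling out that
 * the 8 bytes are the trailing location cookie; the field name is an
 * assumption. offsetof() comes from linux/stddef.h in kernel code. */
static size_t sizeof_spa_without_cookie(struct acpi_nfit_system_address *spa)
{
	return offsetof(struct acpi_nfit_system_address, location_cookie);
}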
+static size_t sizeof_spa(struct acpi_nfit_system_address *spa) +{ + /* until spa location cookie support is added... */ + return sizeof(*spa) - 8; +} + static int nfit_test0_alloc(struct nfit_test *t) { - size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA + struct acpi_nfit_system_address *spa = NULL; + size_t nfit_size = sizeof_spa(spa) * NUM_SPA + sizeof(struct acpi_nfit_memory_map) * NUM_MEM + sizeof(struct acpi_nfit_control_region) * NUM_DCR + offsetof(struct acpi_nfit_control_region, @@ -1937,7 +1944,8 @@ static int nfit_test0_alloc(struct nfit_test *t) static int nfit_test1_alloc(struct nfit_test *t) { - size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2 + struct acpi_nfit_system_address *spa = NULL; + size_t nfit_size = sizeof_spa(spa) * 2 + sizeof(struct acpi_nfit_memory_map) * 2 + offsetof(struct acpi_nfit_control_region, window_size) * 2; int i; @@ -2000,7 +2008,7 @@ static void nfit_test0_setup(struct nfit_test *t) */ spa = nfit_buf; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); spa->range_index = 0+1; spa->address = t->spa_set_dma[0]; @@ -2014,7 +2022,7 @@ static void nfit_test0_setup(struct nfit_test *t) */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); spa->range_index = 1+1; spa->address = t->spa_set_dma[1]; @@ -2024,7 +2032,7 @@ static void nfit_test0_setup(struct nfit_test *t) /* spa2 (dcr0) dimm0 */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); spa->range_index = 2+1; spa->address = t->dcr_dma[0]; @@ -2034,7 +2042,7 @@ static void nfit_test0_setup(struct nfit_test *t) /* spa3 (dcr1) dimm1 */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); spa->range_index = 3+1; spa->address = t->dcr_dma[1]; @@ -2044,7 +2052,7 @@ static void nfit_test0_setup(struct nfit_test *t) /* spa4 (dcr2) dimm2 */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); spa->range_index = 4+1; spa->address = t->dcr_dma[2]; @@ -2054,7 +2062,7 @@ static void nfit_test0_setup(struct nfit_test *t) /* spa5 (dcr3) dimm3 */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); spa->range_index = 5+1; spa->address = t->dcr_dma[3]; @@ -2064,7 +2072,7 @@ static void nfit_test0_setup(struct nfit_test *t) /* spa6 (bdw for dcr0) dimm0 */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); spa->range_index = 6+1; spa->address = t->dimm_dma[0]; @@ -2074,7 +2082,7 @@ static void nfit_test0_setup(struct nfit_test *t) /* spa7 (bdw for dcr1) dimm1 */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; 
- spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); spa->range_index = 7+1; spa->address = t->dimm_dma[1]; @@ -2084,7 +2092,7 @@ static void nfit_test0_setup(struct nfit_test *t) /* spa8 (bdw for dcr2) dimm2 */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); spa->range_index = 8+1; spa->address = t->dimm_dma[2]; @@ -2094,7 +2102,7 @@ static void nfit_test0_setup(struct nfit_test *t) /* spa9 (bdw for dcr3) dimm3 */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); spa->range_index = 9+1; spa->address = t->dimm_dma[3]; @@ -2581,7 +2589,7 @@ static void nfit_test0_setup(struct nfit_test *t) /* spa10 (dcr4) dimm4 */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); spa->range_index = 10+1; spa->address = t->dcr_dma[4]; @@ -2595,7 +2603,7 @@ static void nfit_test0_setup(struct nfit_test *t) */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); spa->range_index = 11+1; spa->address = t->spa_set_dma[2]; @@ -2605,7 +2613,7 @@ static void nfit_test0_setup(struct nfit_test *t) /* spa12 (bdw for dcr4) dimm4 */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); spa->range_index = 12+1; spa->address = t->dimm_dma[4]; @@ -2739,7 +2747,7 @@ static void nfit_test1_setup(struct nfit_test *t) /* spa0 (flat range with no bdw aliasing) */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); spa->range_index = 0+1; spa->address = t->spa_set_dma[0]; @@ -2749,7 +2757,7 @@ static void nfit_test1_setup(struct nfit_test *t) /* virtual cd region */ spa = nfit_buf + offset; spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; - spa->header.length = sizeof(*spa); + spa->header.length = sizeof_spa(spa); memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16); spa->range_index = 0; spa->address = t->spa_set_dma[1]; diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 6c575cf34a71..bc3299a20338 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile @@ -25,6 +25,7 @@ TARGETS += ir TARGETS += kcmp TARGETS += kexec TARGETS += kvm +TARGETS += landlock TARGETS += lib TARGETS += livepatch TARGETS += lkdtm diff --git a/tools/testing/selftests/arm64/bti/test.c b/tools/testing/selftests/arm64/bti/test.c index 656b04976ccc..67b77ab83c20 100644 --- a/tools/testing/selftests/arm64/bti/test.c +++ b/tools/testing/selftests/arm64/bti/test.c @@ -6,6 +6,7 @@ #include "system.h" +#include <stddef.h> #include <linux/errno.h> #include <linux/auxvec.h> #include <linux/signal.h> diff --git a/tools/testing/selftests/bpf/network_helpers.c 
b/tools/testing/selftests/bpf/network_helpers.c index 12ee40284da0..2060bc122c53 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -40,7 +40,7 @@ struct ipv6_packet pkt_v6 = { .tcp.doff = 5, }; -static int settimeo(int fd, int timeout_ms) +int settimeo(int fd, int timeout_ms) { struct timeval timeout = { .tv_sec = 3 }; diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index 7205f8afdba1..5e0d51c07b63 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -33,6 +33,7 @@ struct ipv6_packet { } __packed; extern struct ipv6_packet pkt_v6; +int settimeo(int fd, int timeout_ms); int start_server(int family, int type, const char *addr, __u16 port, int timeout_ms); int connect_to_fd(int server_fd, int timeout_ms); diff --git a/tools/testing/selftests/bpf/prog_tests/ringbuf.c b/tools/testing/selftests/bpf/prog_tests/ringbuf.c index de78617f6550..f9a8ae331963 100644 --- a/tools/testing/selftests/bpf/prog_tests/ringbuf.c +++ b/tools/testing/selftests/bpf/prog_tests/ringbuf.c @@ -86,8 +86,9 @@ void test_ringbuf(void) const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample); pthread_t thread; long bg_ret = -1; - int err, cnt; + int err, cnt, rb_fd; int page_size = getpagesize(); + void *mmap_ptr, *tmp_ptr; skel = test_ringbuf__open(); if (CHECK(!skel, "skel_open", "skeleton open failed\n")) @@ -101,6 +102,52 @@ void test_ringbuf(void) if (CHECK(err != 0, "skel_load", "skeleton load failed\n")) goto cleanup; + rb_fd = bpf_map__fd(skel->maps.ringbuf); + /* good read/write cons_pos */ + mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0); + ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"); + tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE); + if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend")) + goto cleanup; + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect"); + ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw"); + + /* bad writeable prod_pos */ + mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size); + err = -errno; + ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos"); + ASSERT_EQ(err, -EPERM, "wr_prod_pos_err"); + + /* bad writeable data pages */ + mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size); + err = -errno; + ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one"); + ASSERT_EQ(err, -EPERM, "wr_data_page_one_err"); + mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size); + ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two"); + mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size); + ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all"); + + /* good read-only pages */ + mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0); + if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos")) + goto cleanup; + + ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect"); + ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect"); + ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap"); + ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro"); + + /* good read-only pages with initial offset */ + mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size); + if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos")) + goto cleanup; + + ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect"); + ASSERT_ERR(mprotect(mmap_ptr, 
page_size, PROT_EXEC), "exec_protect"); + ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap"); + ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro"); + /* only trigger BPF program for current process */ skel->bss->pid = getpid(); diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c index a958c22aec75..dffbcaa1ec98 100644 --- a/tools/testing/selftests/bpf/prog_tests/snprintf.c +++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c @@ -43,6 +43,8 @@ void test_snprintf_positive(void) if (!ASSERT_OK_PTR(skel, "skel_open")) return; + skel->bss->pid = getpid(); + if (!ASSERT_OK(test_snprintf__attach(skel), "skel_attach")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c new file mode 100644 index 000000000000..5703c918812b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -0,0 +1,785 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth link + * between src and dst. The netns fwd has veth links to each src and dst. The + * client is in src and server in dst. The test installs a TC BPF program to each + * host facing veth in fwd which calls into i) bpf_redirect_neigh() to perform the + * neigh addr population and redirect or ii) bpf_redirect_peer() for namespace + * switch from ingress side; it also installs a checker prog on the egress side + * to drop unexpected traffic. + */ + +#define _GNU_SOURCE + +#include <arpa/inet.h> +#include <linux/limits.h> +#include <linux/sysctl.h> +#include <linux/if_tun.h> +#include <linux/if.h> +#include <sched.h> +#include <stdbool.h> +#include <stdio.h> +#include <sys/stat.h> +#include <sys/mount.h> + +#include "test_progs.h" +#include "network_helpers.h" +#include "test_tc_neigh_fib.skel.h" +#include "test_tc_neigh.skel.h" +#include "test_tc_peer.skel.h" + +#define NS_SRC "ns_src" +#define NS_FWD "ns_fwd" +#define NS_DST "ns_dst" + +#define IP4_SRC "172.16.1.100" +#define IP4_DST "172.16.2.100" +#define IP4_TUN_SRC "172.17.1.100" +#define IP4_TUN_FWD "172.17.1.200" +#define IP4_PORT 9004 + +#define IP6_SRC "0::1:dead:beef:cafe" +#define IP6_DST "0::2:dead:beef:cafe" +#define IP6_TUN_SRC "1::1:dead:beef:cafe" +#define IP6_TUN_FWD "1::2:dead:beef:cafe" +#define IP6_PORT 9006 + +#define IP4_SLL "169.254.0.1" +#define IP4_DLL "169.254.0.2" +#define IP4_NET "169.254.0.0" + +#define MAC_DST_FWD "00:11:22:33:44:55" +#define MAC_DST "00:22:33:44:55:66" + +#define IFADDR_STR_LEN 18 +#define PING_ARGS "-i 0.2 -c 3 -w 10 -q" + +#define SRC_PROG_PIN_FILE "/sys/fs/bpf/test_tc_src" +#define DST_PROG_PIN_FILE "/sys/fs/bpf/test_tc_dst" +#define CHK_PROG_PIN_FILE "/sys/fs/bpf/test_tc_chk" + +#define TIMEOUT_MILLIS 10000 + +#define log_err(MSG, ...) 
\ + fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \ + __FILE__, __LINE__, strerror(errno), ##__VA_ARGS__) + +static const char * const namespaces[] = {NS_SRC, NS_FWD, NS_DST, NULL}; + +static int write_file(const char *path, const char *newval) +{ + FILE *f; + + f = fopen(path, "r+"); + if (!f) + return -1; + if (fwrite(newval, strlen(newval), 1, f) != 1) { + log_err("writing to %s failed", path); + fclose(f); + return -1; + } + fclose(f); + return 0; +} + +struct nstoken { + int orig_netns_fd; +}; + +static int setns_by_fd(int nsfd) +{ + int err; + + err = setns(nsfd, CLONE_NEWNET); + close(nsfd); + + if (!ASSERT_OK(err, "setns")) + return err; + + /* Switch /sys to the new namespace so that e.g. /sys/class/net + * reflects the devices in the new namespace. + */ + err = unshare(CLONE_NEWNS); + if (!ASSERT_OK(err, "unshare")) + return err; + + err = umount2("/sys", MNT_DETACH); + if (!ASSERT_OK(err, "umount2 /sys")) + return err; + + err = mount("sysfs", "/sys", "sysfs", 0, NULL); + if (!ASSERT_OK(err, "mount /sys")) + return err; + + err = mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL); + if (!ASSERT_OK(err, "mount /sys/fs/bpf")) + return err; + + return 0; +} + +/** + * open_netns() - Switch to specified network namespace by name. + * + * Returns token with which to restore the original namespace + * using close_netns(). + */ +static struct nstoken *open_netns(const char *name) +{ + int nsfd; + char nspath[PATH_MAX]; + int err; + struct nstoken *token; + + token = malloc(sizeof(struct nstoken)); + if (!ASSERT_OK_PTR(token, "malloc token")) + return NULL; + + token->orig_netns_fd = open("/proc/self/ns/net", O_RDONLY); + if (!ASSERT_GE(token->orig_netns_fd, 0, "open /proc/self/ns/net")) + goto fail; + + snprintf(nspath, sizeof(nspath), "%s/%s", "/var/run/netns", name); + nsfd = open(nspath, O_RDONLY | O_CLOEXEC); + if (!ASSERT_GE(nsfd, 0, "open netns fd")) + goto fail; + + err = setns_by_fd(nsfd); + if (!ASSERT_OK(err, "setns_by_fd")) + goto fail; + + return token; +fail: + free(token); + return NULL; +} + +static void close_netns(struct nstoken *token) +{ + ASSERT_OK(setns_by_fd(token->orig_netns_fd), "setns_by_fd"); + free(token); +} + +static int netns_setup_namespaces(const char *verb) +{ + const char * const *ns = namespaces; + char cmd[128]; + + while (*ns) { + snprintf(cmd, sizeof(cmd), "ip netns %s %s", verb, *ns); + if (!ASSERT_OK(system(cmd), cmd)) + return -1; + ns++; + } + return 0; +} + +struct netns_setup_result { + int ifindex_veth_src_fwd; + int ifindex_veth_dst_fwd; +}; + +static int get_ifaddr(const char *name, char *ifaddr) +{ + char path[PATH_MAX]; + FILE *f; + int ret; + + snprintf(path, PATH_MAX, "/sys/class/net/%s/address", name); + f = fopen(path, "r"); + if (!ASSERT_OK_PTR(f, path)) + return -1; + + ret = fread(ifaddr, 1, IFADDR_STR_LEN, f); + if (!ASSERT_EQ(ret, IFADDR_STR_LEN, "fread ifaddr")) { + fclose(f); + return -1; + } + fclose(f); + return 0; +} + +static int get_ifindex(const char *name) +{ + char path[PATH_MAX]; + char buf[32]; + FILE *f; + int ret; + + snprintf(path, PATH_MAX, "/sys/class/net/%s/ifindex", name); + f = fopen(path, "r"); + if (!ASSERT_OK_PTR(f, path)) + return -1; + + ret = fread(buf, 1, sizeof(buf), f); + if (!ASSERT_GT(ret, 0, "fread ifindex")) { + fclose(f); + return -1; + } + fclose(f); + return atoi(buf); +} + +#define SYS(fmt, ...) 
\ + ({ \ + char cmd[1024]; \ + snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + if (!ASSERT_OK(system(cmd), cmd)) \ + goto fail; \ + }) + +static int netns_setup_links_and_routes(struct netns_setup_result *result) +{ + struct nstoken *nstoken = NULL; + char veth_src_fwd_addr[IFADDR_STR_LEN+1] = {}; + + SYS("ip link add veth_src type veth peer name veth_src_fwd"); + SYS("ip link add veth_dst type veth peer name veth_dst_fwd"); + + SYS("ip link set veth_dst_fwd address " MAC_DST_FWD); + SYS("ip link set veth_dst address " MAC_DST); + + if (get_ifaddr("veth_src_fwd", veth_src_fwd_addr)) + goto fail; + + result->ifindex_veth_src_fwd = get_ifindex("veth_src_fwd"); + if (result->ifindex_veth_src_fwd < 0) + goto fail; + result->ifindex_veth_dst_fwd = get_ifindex("veth_dst_fwd"); + if (result->ifindex_veth_dst_fwd < 0) + goto fail; + + SYS("ip link set veth_src netns " NS_SRC); + SYS("ip link set veth_src_fwd netns " NS_FWD); + SYS("ip link set veth_dst_fwd netns " NS_FWD); + SYS("ip link set veth_dst netns " NS_DST); + + /** setup in 'src' namespace */ + nstoken = open_netns(NS_SRC); + if (!ASSERT_OK_PTR(nstoken, "setns src")) + goto fail; + + SYS("ip addr add " IP4_SRC "/32 dev veth_src"); + SYS("ip addr add " IP6_SRC "/128 dev veth_src nodad"); + SYS("ip link set dev veth_src up"); + + SYS("ip route add " IP4_DST "/32 dev veth_src scope global"); + SYS("ip route add " IP4_NET "/16 dev veth_src scope global"); + SYS("ip route add " IP6_DST "/128 dev veth_src scope global"); + + SYS("ip neigh add " IP4_DST " dev veth_src lladdr %s", + veth_src_fwd_addr); + SYS("ip neigh add " IP6_DST " dev veth_src lladdr %s", + veth_src_fwd_addr); + + close_netns(nstoken); + + /** setup in 'fwd' namespace */ + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns fwd")) + goto fail; + + /* The fwd netns automatically gets a v6 LL address / routes, but also + * needs v4 one in order to start ARP probing. IP4_NET route is added + * to the endpoints so that the ARP processing will reply. 
+ */ + SYS("ip addr add " IP4_SLL "/32 dev veth_src_fwd"); + SYS("ip addr add " IP4_DLL "/32 dev veth_dst_fwd"); + SYS("ip link set dev veth_src_fwd up"); + SYS("ip link set dev veth_dst_fwd up"); + + SYS("ip route add " IP4_SRC "/32 dev veth_src_fwd scope global"); + SYS("ip route add " IP6_SRC "/128 dev veth_src_fwd scope global"); + SYS("ip route add " IP4_DST "/32 dev veth_dst_fwd scope global"); + SYS("ip route add " IP6_DST "/128 dev veth_dst_fwd scope global"); + + close_netns(nstoken); + + /** setup in 'dst' namespace */ + nstoken = open_netns(NS_DST); + if (!ASSERT_OK_PTR(nstoken, "setns dst")) + goto fail; + + SYS("ip addr add " IP4_DST "/32 dev veth_dst"); + SYS("ip addr add " IP6_DST "/128 dev veth_dst nodad"); + SYS("ip link set dev veth_dst up"); + + SYS("ip route add " IP4_SRC "/32 dev veth_dst scope global"); + SYS("ip route add " IP4_NET "/16 dev veth_dst scope global"); + SYS("ip route add " IP6_SRC "/128 dev veth_dst scope global"); + + SYS("ip neigh add " IP4_SRC " dev veth_dst lladdr " MAC_DST_FWD); + SYS("ip neigh add " IP6_SRC " dev veth_dst lladdr " MAC_DST_FWD); + + close_netns(nstoken); + + return 0; +fail: + if (nstoken) + close_netns(nstoken); + return -1; +} + +static int netns_load_bpf(void) +{ + SYS("tc qdisc add dev veth_src_fwd clsact"); + SYS("tc filter add dev veth_src_fwd ingress bpf da object-pinned " + SRC_PROG_PIN_FILE); + SYS("tc filter add dev veth_src_fwd egress bpf da object-pinned " + CHK_PROG_PIN_FILE); + + SYS("tc qdisc add dev veth_dst_fwd clsact"); + SYS("tc filter add dev veth_dst_fwd ingress bpf da object-pinned " + DST_PROG_PIN_FILE); + SYS("tc filter add dev veth_dst_fwd egress bpf da object-pinned " + CHK_PROG_PIN_FILE); + + return 0; +fail: + return -1; +} + +static void test_tcp(int family, const char *addr, __u16 port) +{ + int listen_fd = -1, accept_fd = -1, client_fd = -1; + char buf[] = "testing testing"; + int n; + struct nstoken *nstoken; + + nstoken = open_netns(NS_DST); + if (!ASSERT_OK_PTR(nstoken, "setns dst")) + return; + + listen_fd = start_server(family, SOCK_STREAM, addr, port, 0); + if (!ASSERT_GE(listen_fd, 0, "listen")) + goto done; + + close_netns(nstoken); + nstoken = open_netns(NS_SRC); + if (!ASSERT_OK_PTR(nstoken, "setns src")) + goto done; + + client_fd = connect_to_fd(listen_fd, TIMEOUT_MILLIS); + if (!ASSERT_GE(client_fd, 0, "connect_to_fd")) + goto done; + + accept_fd = accept(listen_fd, NULL, NULL); + if (!ASSERT_GE(accept_fd, 0, "accept")) + goto done; + + if (!ASSERT_OK(settimeo(accept_fd, TIMEOUT_MILLIS), "settimeo")) + goto done; + + n = write(client_fd, buf, sizeof(buf)); + if (!ASSERT_EQ(n, sizeof(buf), "send to server")) + goto done; + + n = read(accept_fd, buf, sizeof(buf)); + ASSERT_EQ(n, sizeof(buf), "recv from server"); + +done: + if (nstoken) + close_netns(nstoken); + if (listen_fd >= 0) + close(listen_fd); + if (accept_fd >= 0) + close(accept_fd); + if (client_fd >= 0) + close(client_fd); +} + +static int test_ping(int family, const char *addr) +{ + const char *ping = family == AF_INET6 ? "ping6" : "ping"; + + SYS("ip netns exec " NS_SRC " %s " PING_ARGS " %s > /dev/null", ping, addr); + return 0; +fail: + return -1; +} + +static void test_connectivity(void) +{ + test_tcp(AF_INET, IP4_DST, IP4_PORT); + test_ping(AF_INET, IP4_DST); + test_tcp(AF_INET6, IP6_DST, IP6_PORT); + test_ping(AF_INET6, IP6_DST); +} + +static int set_forwarding(bool enable) +{ + int err; + + err = write_file("/proc/sys/net/ipv4/ip_forward", enable ? 
"1" : "0"); + if (!ASSERT_OK(err, "set ipv4.ip_forward=0")) + return err; + + err = write_file("/proc/sys/net/ipv6/conf/all/forwarding", enable ? "1" : "0"); + if (!ASSERT_OK(err, "set ipv6.forwarding=0")) + return err; + + return 0; +} + +static void test_tc_redirect_neigh_fib(struct netns_setup_result *setup_result) +{ + struct nstoken *nstoken = NULL; + struct test_tc_neigh_fib *skel = NULL; + int err; + + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns fwd")) + return; + + skel = test_tc_neigh_fib__open(); + if (!ASSERT_OK_PTR(skel, "test_tc_neigh_fib__open")) + goto done; + + if (!ASSERT_OK(test_tc_neigh_fib__load(skel), "test_tc_neigh_fib__load")) + goto done; + + err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE)) + goto done; + + err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE)) + goto done; + + err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE)) + goto done; + + if (netns_load_bpf()) + goto done; + + /* bpf_fib_lookup() checks if forwarding is enabled */ + if (!ASSERT_OK(set_forwarding(true), "enable forwarding")) + goto done; + + test_connectivity(); + +done: + if (skel) + test_tc_neigh_fib__destroy(skel); + close_netns(nstoken); +} + +static void test_tc_redirect_neigh(struct netns_setup_result *setup_result) +{ + struct nstoken *nstoken = NULL; + struct test_tc_neigh *skel = NULL; + int err; + + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns fwd")) + return; + + skel = test_tc_neigh__open(); + if (!ASSERT_OK_PTR(skel, "test_tc_neigh__open")) + goto done; + + skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd; + skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd; + + err = test_tc_neigh__load(skel); + if (!ASSERT_OK(err, "test_tc_neigh__load")) + goto done; + + err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE)) + goto done; + + err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE)) + goto done; + + err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE)) + goto done; + + if (netns_load_bpf()) + goto done; + + if (!ASSERT_OK(set_forwarding(false), "disable forwarding")) + goto done; + + test_connectivity(); + +done: + if (skel) + test_tc_neigh__destroy(skel); + close_netns(nstoken); +} + +static void test_tc_redirect_peer(struct netns_setup_result *setup_result) +{ + struct nstoken *nstoken; + struct test_tc_peer *skel; + int err; + + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns fwd")) + return; + + skel = test_tc_peer__open(); + if (!ASSERT_OK_PTR(skel, "test_tc_peer__open")) + goto done; + + skel->rodata->IFINDEX_SRC = setup_result->ifindex_veth_src_fwd; + skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd; + + err = test_tc_peer__load(skel); + if (!ASSERT_OK(err, "test_tc_peer__load")) + goto done; + + err = bpf_program__pin(skel->progs.tc_src, SRC_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE)) + goto done; + + err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE)) + goto done; + + err = bpf_program__pin(skel->progs.tc_dst, DST_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE)) + goto done; + + if (netns_load_bpf()) + goto done; + + if 
(!ASSERT_OK(set_forwarding(false), "disable forwarding")) + goto done; + + test_connectivity(); + +done: + if (skel) + test_tc_peer__destroy(skel); + close_netns(nstoken); +} + +static int tun_open(char *name) +{ + struct ifreq ifr; + int fd, err; + + fd = open("/dev/net/tun", O_RDWR); + if (!ASSERT_GE(fd, 0, "open /dev/net/tun")) + return -1; + + memset(&ifr, 0, sizeof(ifr)); + + ifr.ifr_flags = IFF_TUN | IFF_NO_PI; + if (*name) + strncpy(ifr.ifr_name, name, IFNAMSIZ); + + err = ioctl(fd, TUNSETIFF, &ifr); + if (!ASSERT_OK(err, "ioctl TUNSETIFF")) + goto fail; + + SYS("ip link set dev %s up", name); + + return fd; +fail: + close(fd); + return -1; +} + +#define MAX(a, b) ((a) > (b) ? (a) : (b)) +enum { + SRC_TO_TARGET = 0, + TARGET_TO_SRC = 1, +}; + +static int tun_relay_loop(int src_fd, int target_fd) +{ + fd_set rfds, wfds; + + FD_ZERO(&rfds); + FD_ZERO(&wfds); + + for (;;) { + char buf[1500]; + int direction, nread, nwrite; + + FD_SET(src_fd, &rfds); + FD_SET(target_fd, &rfds); + + if (select(1 + MAX(src_fd, target_fd), &rfds, NULL, NULL, NULL) < 0) { + log_err("select failed"); + return 1; + } + + direction = FD_ISSET(src_fd, &rfds) ? SRC_TO_TARGET : TARGET_TO_SRC; + + nread = read(direction == SRC_TO_TARGET ? src_fd : target_fd, buf, sizeof(buf)); + if (nread < 0) { + log_err("read failed"); + return 1; + } + + nwrite = write(direction == SRC_TO_TARGET ? target_fd : src_fd, buf, nread); + if (nwrite != nread) { + log_err("write failed"); + return 1; + } + } +} + +static void test_tc_redirect_peer_l3(struct netns_setup_result *setup_result) +{ + struct test_tc_peer *skel = NULL; + struct nstoken *nstoken = NULL; + int err; + int tunnel_pid = -1; + int src_fd = -1, target_fd = -1; + int ifindex; + + /* Start an L3 TUN/TAP tunnel between the src and dst namespaces. + * This test uses TUN/TAP instead of e.g. an IPIP or GRE tunnel, as those + * expose the L2 headers encapsulating the IP packet to BPF and hence + * don't have the skb in a suitable state for this test. An alternative to + * TUN/TAP would be e.g. WireGuard, which would appear as a pure L3 device + * to BPF, but that requires a much more complicated setup.
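+ * The forked tun_relay_loop() child below acts as the tunnel wire, shuttling + * raw IP packets between the two tun fds in user space.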
+ */ + nstoken = open_netns(NS_SRC); + if (!ASSERT_OK_PTR(nstoken, "setns " NS_SRC)) + return; + + src_fd = tun_open("tun_src"); + if (!ASSERT_GE(src_fd, 0, "tun_open tun_src")) + goto fail; + + close_netns(nstoken); + + nstoken = open_netns(NS_FWD); + if (!ASSERT_OK_PTR(nstoken, "setns " NS_FWD)) + goto fail; + + target_fd = tun_open("tun_fwd"); + if (!ASSERT_GE(target_fd, 0, "tun_open tun_fwd")) + goto fail; + + tunnel_pid = fork(); + if (!ASSERT_GE(tunnel_pid, 0, "fork tun_relay_loop")) + goto fail; + + if (tunnel_pid == 0) + exit(tun_relay_loop(src_fd, target_fd)); + + skel = test_tc_peer__open(); + if (!ASSERT_OK_PTR(skel, "test_tc_peer__open")) + goto fail; + + ifindex = get_ifindex("tun_fwd"); + if (!ASSERT_GE(ifindex, 0, "get_ifindex tun_fwd")) + goto fail; + + skel->rodata->IFINDEX_SRC = ifindex; + skel->rodata->IFINDEX_DST = setup_result->ifindex_veth_dst_fwd; + + err = test_tc_peer__load(skel); + if (!ASSERT_OK(err, "test_tc_peer__load")) + goto fail; + + err = bpf_program__pin(skel->progs.tc_src_l3, SRC_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " SRC_PROG_PIN_FILE)) + goto fail; + + err = bpf_program__pin(skel->progs.tc_dst_l3, DST_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " DST_PROG_PIN_FILE)) + goto fail; + + err = bpf_program__pin(skel->progs.tc_chk, CHK_PROG_PIN_FILE); + if (!ASSERT_OK(err, "pin " CHK_PROG_PIN_FILE)) + goto fail; + + /* Load "tc_src_l3" on tun_fwd ingress to redirect packets towards dst, + * "tc_dst_l3" on veth_dst_fwd ingress to redirect packets towards src, + * and "tc_chk" on veth_dst_fwd egress to drop non-redirected packets. + */ + SYS("tc qdisc add dev tun_fwd clsact"); + SYS("tc filter add dev tun_fwd ingress bpf da object-pinned " + SRC_PROG_PIN_FILE); + + SYS("tc qdisc add dev veth_dst_fwd clsact"); + SYS("tc filter add dev veth_dst_fwd ingress bpf da object-pinned " + DST_PROG_PIN_FILE); + SYS("tc filter add dev veth_dst_fwd egress bpf da object-pinned " + CHK_PROG_PIN_FILE); + + /* Setup route and neigh tables */ + SYS("ip -netns " NS_SRC " addr add dev tun_src " IP4_TUN_SRC "/24"); + SYS("ip -netns " NS_FWD " addr add dev tun_fwd " IP4_TUN_FWD "/24"); + + SYS("ip -netns " NS_SRC " addr add dev tun_src " IP6_TUN_SRC "/64 nodad"); + SYS("ip -netns " NS_FWD " addr add dev tun_fwd " IP6_TUN_FWD "/64 nodad"); + + SYS("ip -netns " NS_SRC " route del " IP4_DST "/32 dev veth_src scope global"); + SYS("ip -netns " NS_SRC " route add " IP4_DST "/32 via " IP4_TUN_FWD + " dev tun_src scope global"); + SYS("ip -netns " NS_DST " route add " IP4_TUN_SRC "/32 dev veth_dst scope global"); + SYS("ip -netns " NS_SRC " route del " IP6_DST "/128 dev veth_src scope global"); + SYS("ip -netns " NS_SRC " route add " IP6_DST "/128 via " IP6_TUN_FWD + " dev tun_src scope global"); + SYS("ip -netns " NS_DST " route add " IP6_TUN_SRC "/128 dev veth_dst scope global"); + + SYS("ip -netns " NS_DST " neigh add " IP4_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD); + SYS("ip -netns " NS_DST " neigh add " IP6_TUN_SRC " dev veth_dst lladdr " MAC_DST_FWD); + + if (!ASSERT_OK(set_forwarding(false), "disable forwarding")) + goto fail; + + test_connectivity(); + +fail: + if (tunnel_pid > 0) { + kill(tunnel_pid, SIGTERM); + waitpid(tunnel_pid, NULL, 0); + } + if (src_fd >= 0) + close(src_fd); + if (target_fd >= 0) + close(target_fd); + if (skel) + test_tc_peer__destroy(skel); + if (nstoken) + close_netns(nstoken); +} + +#define RUN_TEST(name) \ + ({ \ + struct netns_setup_result setup_result; \ + if (test__start_subtest(#name)) \ + if (ASSERT_OK(netns_setup_namespaces("add"), "setup namespaces")) { \ + if
(ASSERT_OK(netns_setup_links_and_routes(&setup_result), \ + "setup links and routes")) \ + test_ ## name(&setup_result); \ + netns_setup_namespaces("delete"); \ + } \ + }) + +static void *test_tc_redirect_run_tests(void *arg) +{ + RUN_TEST(tc_redirect_peer); + RUN_TEST(tc_redirect_peer_l3); + RUN_TEST(tc_redirect_neigh); + RUN_TEST(tc_redirect_neigh_fib); + return NULL; +} + +void test_tc_redirect(void) +{ + pthread_t test_thread; + int err; + + /* Run the tests in their own thread to isolate the namespace changes + * so they do not affect the environment of other tests. + * (specifically needed because of unshare(CLONE_NEWNS) in open_netns()) + */ + err = pthread_create(&test_thread, NULL, &test_tc_redirect_run_tests, NULL); + if (ASSERT_OK(err, "pthread_create")) + ASSERT_OK(pthread_join(test_thread, NULL), "pthread_join"); +} diff --git a/tools/testing/selftests/bpf/progs/test_snprintf.c b/tools/testing/selftests/bpf/progs/test_snprintf.c index 951a0301c553..e35129bea0a0 100644 --- a/tools/testing/selftests/bpf/progs/test_snprintf.c +++ b/tools/testing/selftests/bpf/progs/test_snprintf.c @@ -5,6 +5,8 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> +__u32 pid = 0; + char num_out[64] = {}; long num_ret = 0; @@ -42,6 +44,9 @@ int handler(const void *ctx) static const char str1[] = "str1"; static const char longstr[] = "longstr"; + if ((int)bpf_get_current_pid_tgid() != pid) + return 0; + /* Integer types */ num_ret = BPF_SNPRINTF(num_out, sizeof(num_out), "%d %u %x %li %llu %lX", diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh.c b/tools/testing/selftests/bpf/progs/test_tc_neigh.c index b985ac4e7a81..0c93d326a663 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_neigh.c +++ b/tools/testing/selftests/bpf/progs/test_tc_neigh.c @@ -33,17 +33,8 @@ a.s6_addr32[3] == b.s6_addr32[3]) #endif -enum { - dev_src, - dev_dst, -}; - -struct bpf_map_def SEC("maps") ifindex_map = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(int), - .value_size = sizeof(int), - .max_entries = 2, -}; +volatile const __u32 IFINDEX_SRC; +volatile const __u32 IFINDEX_DST; static __always_inline bool is_remote_ep_v4(struct __sk_buff *skb, __be32 addr) @@ -79,14 +70,8 @@ static __always_inline bool is_remote_ep_v6(struct __sk_buff *skb, return v6_equal(ip6h->daddr, addr); } -static __always_inline int get_dev_ifindex(int which) -{ - int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which); - - return ifindex ? *ifindex : 0; -} - -SEC("chk_egress") int tc_chk(struct __sk_buff *skb) +SEC("classifier/chk_egress") +int tc_chk(struct __sk_buff *skb) { void *data_end = ctx_ptr(skb->data_end); void *data = ctx_ptr(skb->data); @@ -98,7 +83,8 @@ SEC("chk_egress") int tc_chk(struct __sk_buff *skb) return !raw[0] && !raw[1] && !raw[2] ? 
TC_ACT_SHOT : TC_ACT_OK; } -SEC("dst_ingress") int tc_dst(struct __sk_buff *skb) +SEC("classifier/dst_ingress") +int tc_dst(struct __sk_buff *skb) { __u8 zero[ETH_ALEN * 2]; bool redirect = false; @@ -119,10 +105,11 @@ SEC("dst_ingress") int tc_dst(struct __sk_buff *skb) if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0) return TC_ACT_SHOT; - return bpf_redirect_neigh(get_dev_ifindex(dev_src), NULL, 0, 0); + return bpf_redirect_neigh(IFINDEX_SRC, NULL, 0, 0); } -SEC("src_ingress") int tc_src(struct __sk_buff *skb) +SEC("classifier/src_ingress") +int tc_src(struct __sk_buff *skb) { __u8 zero[ETH_ALEN * 2]; bool redirect = false; @@ -143,7 +130,7 @@ SEC("src_ingress") int tc_src(struct __sk_buff *skb) if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0) return TC_ACT_SHOT; - return bpf_redirect_neigh(get_dev_ifindex(dev_dst), NULL, 0, 0); + return bpf_redirect_neigh(IFINDEX_DST, NULL, 0, 0); } char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c index d82ed3457030..f7ab69cf018e 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c +++ b/tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c @@ -75,7 +75,8 @@ static __always_inline int fill_fib_params_v6(struct __sk_buff *skb, return 0; } -SEC("chk_egress") int tc_chk(struct __sk_buff *skb) +SEC("classifier/chk_egress") +int tc_chk(struct __sk_buff *skb) { void *data_end = ctx_ptr(skb->data_end); void *data = ctx_ptr(skb->data); @@ -142,12 +143,14 @@ static __always_inline int tc_redir(struct __sk_buff *skb) /* these are identical, but keep them separate for compatibility with the * section names expected by test_tc_redirect.sh */ -SEC("dst_ingress") int tc_dst(struct __sk_buff *skb) +SEC("classifier/dst_ingress") +int tc_dst(struct __sk_buff *skb) { return tc_redir(skb); } -SEC("src_ingress") int tc_src(struct __sk_buff *skb) +SEC("classifier/src_ingress") +int tc_src(struct __sk_buff *skb) { return tc_redir(skb); } diff --git a/tools/testing/selftests/bpf/progs/test_tc_peer.c b/tools/testing/selftests/bpf/progs/test_tc_peer.c index fc84a7685aa2..fe818cd5f010 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_peer.c +++ b/tools/testing/selftests/bpf/progs/test_tc_peer.c @@ -5,41 +5,59 @@ #include <linux/bpf.h> #include <linux/stddef.h> #include <linux/pkt_cls.h> +#include <linux/if_ether.h> +#include <linux/ip.h> #include <bpf/bpf_helpers.h> -enum { - dev_src, - dev_dst, -}; +volatile const __u32 IFINDEX_SRC; +volatile const __u32 IFINDEX_DST; -struct bpf_map_def SEC("maps") ifindex_map = { - .type = BPF_MAP_TYPE_ARRAY, - .key_size = sizeof(int), - .value_size = sizeof(int), - .max_entries = 2, -}; +static const __u8 src_mac[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}; +static const __u8 dst_mac[] = {0x00, 0x22, 0x33, 0x44, 0x55, 0x66}; -static __always_inline int get_dev_ifindex(int which) +SEC("classifier/chk_egress") +int tc_chk(struct __sk_buff *skb) { - int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which); + return TC_ACT_SHOT; +} - return ifindex ? 
*ifindex : 0; +SEC("classifier/dst_ingress") +int tc_dst(struct __sk_buff *skb) +{ + return bpf_redirect_peer(IFINDEX_SRC, 0); } -SEC("chk_egress") int tc_chk(struct __sk_buff *skb) +SEC("classifier/src_ingress") +int tc_src(struct __sk_buff *skb) { - return TC_ACT_SHOT; + return bpf_redirect_peer(IFINDEX_DST, 0); } -SEC("dst_ingress") int tc_dst(struct __sk_buff *skb) +SEC("classifier/dst_ingress_l3") +int tc_dst_l3(struct __sk_buff *skb) { - return bpf_redirect_peer(get_dev_ifindex(dev_src), 0); + return bpf_redirect(IFINDEX_SRC, 0); } -SEC("src_ingress") int tc_src(struct __sk_buff *skb) +SEC("classifier/src_ingress_l3") +int tc_src_l3(struct __sk_buff *skb) { - return bpf_redirect_peer(get_dev_ifindex(dev_dst), 0); + __u16 proto = skb->protocol; + + if (bpf_skb_change_head(skb, ETH_HLEN, 0) != 0) + return TC_ACT_SHOT; + + if (bpf_skb_store_bytes(skb, 0, &src_mac, ETH_ALEN, 0) != 0) + return TC_ACT_SHOT; + + if (bpf_skb_store_bytes(skb, ETH_ALEN, &dst_mac, ETH_ALEN, 0) != 0) + return TC_ACT_SHOT; + + if (bpf_skb_store_bytes(skb, ETH_ALEN + ETH_ALEN, &proto, sizeof(__u16), 0) != 0) + return TC_ACT_SHOT; + + return bpf_redirect_peer(IFINDEX_DST, 0); } char __license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_tc_redirect.sh b/tools/testing/selftests/bpf/test_tc_redirect.sh deleted file mode 100755 index 8868aa1ca902..000000000000 --- a/tools/testing/selftests/bpf/test_tc_redirect.sh +++ /dev/null @@ -1,216 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 -# -# This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth link -# between src and dst. The netns fwd has veth links to each src and dst. The -# client is in src and server in dst. The test installs a TC BPF program to each -# host facing veth in fwd which calls into i) bpf_redirect_neigh() to perform the -# neigh addr population and redirect or ii) bpf_redirect_peer() for namespace -# switch from ingress side; it also installs a checker prog on the egress side -# to drop unexpected traffic. 
- -if [[ $EUID -ne 0 ]]; then - echo "This script must be run as root" - echo "FAIL" - exit 1 -fi - -# check that needed tools are present -command -v nc >/dev/null 2>&1 || \ - { echo >&2 "nc is not available"; exit 1; } -command -v dd >/dev/null 2>&1 || \ - { echo >&2 "dd is not available"; exit 1; } -command -v timeout >/dev/null 2>&1 || \ - { echo >&2 "timeout is not available"; exit 1; } -command -v ping >/dev/null 2>&1 || \ - { echo >&2 "ping is not available"; exit 1; } -if command -v ping6 >/dev/null 2>&1; then PING6=ping6; else PING6=ping; fi -command -v perl >/dev/null 2>&1 || \ - { echo >&2 "perl is not available"; exit 1; } -command -v jq >/dev/null 2>&1 || \ - { echo >&2 "jq is not available"; exit 1; } -command -v bpftool >/dev/null 2>&1 || \ - { echo >&2 "bpftool is not available"; exit 1; } - -readonly GREEN='\033[0;92m' -readonly RED='\033[0;31m' -readonly NC='\033[0m' # No Color - -readonly PING_ARG="-c 3 -w 10 -q" - -readonly TIMEOUT=10 - -readonly NS_SRC="ns-src-$(mktemp -u XXXXXX)" -readonly NS_FWD="ns-fwd-$(mktemp -u XXXXXX)" -readonly NS_DST="ns-dst-$(mktemp -u XXXXXX)" - -readonly IP4_SRC="172.16.1.100" -readonly IP4_DST="172.16.2.100" - -readonly IP6_SRC="::1:dead:beef:cafe" -readonly IP6_DST="::2:dead:beef:cafe" - -readonly IP4_SLL="169.254.0.1" -readonly IP4_DLL="169.254.0.2" -readonly IP4_NET="169.254.0.0" - -netns_cleanup() -{ - ip netns del ${NS_SRC} - ip netns del ${NS_FWD} - ip netns del ${NS_DST} -} - -netns_setup() -{ - ip netns add "${NS_SRC}" - ip netns add "${NS_FWD}" - ip netns add "${NS_DST}" - - ip link add veth_src type veth peer name veth_src_fwd - ip link add veth_dst type veth peer name veth_dst_fwd - - ip link set veth_src netns ${NS_SRC} - ip link set veth_src_fwd netns ${NS_FWD} - - ip link set veth_dst netns ${NS_DST} - ip link set veth_dst_fwd netns ${NS_FWD} - - ip -netns ${NS_SRC} addr add ${IP4_SRC}/32 dev veth_src - ip -netns ${NS_DST} addr add ${IP4_DST}/32 dev veth_dst - - # The fwd netns automatically get a v6 LL address / routes, but also - # needs v4 one in order to start ARP probing. IP4_NET route is added - # to the endpoints so that the ARP processing will reply. 
- - ip -netns ${NS_FWD} addr add ${IP4_SLL}/32 dev veth_src_fwd - ip -netns ${NS_FWD} addr add ${IP4_DLL}/32 dev veth_dst_fwd - - ip -netns ${NS_SRC} addr add ${IP6_SRC}/128 dev veth_src nodad - ip -netns ${NS_DST} addr add ${IP6_DST}/128 dev veth_dst nodad - - ip -netns ${NS_SRC} link set dev veth_src up - ip -netns ${NS_FWD} link set dev veth_src_fwd up - - ip -netns ${NS_DST} link set dev veth_dst up - ip -netns ${NS_FWD} link set dev veth_dst_fwd up - - ip -netns ${NS_SRC} route add ${IP4_DST}/32 dev veth_src scope global - ip -netns ${NS_SRC} route add ${IP4_NET}/16 dev veth_src scope global - ip -netns ${NS_FWD} route add ${IP4_SRC}/32 dev veth_src_fwd scope global - - ip -netns ${NS_SRC} route add ${IP6_DST}/128 dev veth_src scope global - ip -netns ${NS_FWD} route add ${IP6_SRC}/128 dev veth_src_fwd scope global - - ip -netns ${NS_DST} route add ${IP4_SRC}/32 dev veth_dst scope global - ip -netns ${NS_DST} route add ${IP4_NET}/16 dev veth_dst scope global - ip -netns ${NS_FWD} route add ${IP4_DST}/32 dev veth_dst_fwd scope global - - ip -netns ${NS_DST} route add ${IP6_SRC}/128 dev veth_dst scope global - ip -netns ${NS_FWD} route add ${IP6_DST}/128 dev veth_dst_fwd scope global - - fmac_src=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_src_fwd/address) - fmac_dst=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_dst_fwd/address) - - ip -netns ${NS_SRC} neigh add ${IP4_DST} dev veth_src lladdr $fmac_src - ip -netns ${NS_DST} neigh add ${IP4_SRC} dev veth_dst lladdr $fmac_dst - - ip -netns ${NS_SRC} neigh add ${IP6_DST} dev veth_src lladdr $fmac_src - ip -netns ${NS_DST} neigh add ${IP6_SRC} dev veth_dst lladdr $fmac_dst -} - -netns_test_connectivity() -{ - set +e - - ip netns exec ${NS_DST} bash -c "nc -4 -l -p 9004 &" - ip netns exec ${NS_DST} bash -c "nc -6 -l -p 9006 &" - - TEST="TCPv4 connectivity test" - ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero bs=1000 count=100 > /dev/tcp/${IP4_DST}/9004" - if [ $? -ne 0 ]; then - echo -e "${TEST}: ${RED}FAIL${NC}" - exit 1 - fi - echo -e "${TEST}: ${GREEN}PASS${NC}" - - TEST="TCPv6 connectivity test" - ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero bs=1000 count=100 > /dev/tcp/${IP6_DST}/9006" - if [ $? -ne 0 ]; then - echo -e "${TEST}: ${RED}FAIL${NC}" - exit 1 - fi - echo -e "${TEST}: ${GREEN}PASS${NC}" - - TEST="ICMPv4 connectivity test" - ip netns exec ${NS_SRC} ping $PING_ARG ${IP4_DST} - if [ $? -ne 0 ]; then - echo -e "${TEST}: ${RED}FAIL${NC}" - exit 1 - fi - echo -e "${TEST}: ${GREEN}PASS${NC}" - - TEST="ICMPv6 connectivity test" - ip netns exec ${NS_SRC} $PING6 $PING_ARG ${IP6_DST} - if [ $? 
-ne 0 ]; then - echo -e "${TEST}: ${RED}FAIL${NC}" - exit 1 - fi - echo -e "${TEST}: ${GREEN}PASS${NC}" - - set -e -} - -hex_mem_str() -{ - perl -e 'print join(" ", unpack("(H2)8", pack("L", @ARGV)))' $1 -} - -netns_setup_bpf() -{ - local obj=$1 - local use_forwarding=${2:-0} - - ip netns exec ${NS_FWD} tc qdisc add dev veth_src_fwd clsact - ip netns exec ${NS_FWD} tc filter add dev veth_src_fwd ingress bpf da obj $obj sec src_ingress - ip netns exec ${NS_FWD} tc filter add dev veth_src_fwd egress bpf da obj $obj sec chk_egress - - ip netns exec ${NS_FWD} tc qdisc add dev veth_dst_fwd clsact - ip netns exec ${NS_FWD} tc filter add dev veth_dst_fwd ingress bpf da obj $obj sec dst_ingress - ip netns exec ${NS_FWD} tc filter add dev veth_dst_fwd egress bpf da obj $obj sec chk_egress - - if [ "$use_forwarding" -eq "1" ]; then - # bpf_fib_lookup() checks if forwarding is enabled - ip netns exec ${NS_FWD} sysctl -w net.ipv4.ip_forward=1 - ip netns exec ${NS_FWD} sysctl -w net.ipv6.conf.veth_dst_fwd.forwarding=1 - ip netns exec ${NS_FWD} sysctl -w net.ipv6.conf.veth_src_fwd.forwarding=1 - return 0 - fi - - veth_src=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_src_fwd/ifindex) - veth_dst=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_dst_fwd/ifindex) - - progs=$(ip netns exec ${NS_FWD} bpftool net --json | jq -r '.[] | .tc | map(.id) | .[]') - for prog in $progs; do - map=$(bpftool prog show id $prog --json | jq -r '.map_ids | .? | .[]') - if [ ! -z "$map" ]; then - bpftool map update id $map key hex $(hex_mem_str 0) value hex $(hex_mem_str $veth_src) - bpftool map update id $map key hex $(hex_mem_str 1) value hex $(hex_mem_str $veth_dst) - fi - done -} - -trap netns_cleanup EXIT -set -e - -netns_setup -netns_setup_bpf test_tc_neigh.o -netns_test_connectivity -netns_cleanup -netns_setup -netns_setup_bpf test_tc_neigh_fib.o 1 -netns_test_connectivity -netns_cleanup -netns_setup -netns_setup_bpf test_tc_peer.o -netns_test_connectivity diff --git a/tools/testing/selftests/bpf/verifier/stack_ptr.c b/tools/testing/selftests/bpf/verifier/stack_ptr.c index 07eaa04412ae..8ab94d65f3d5 100644 --- a/tools/testing/selftests/bpf/verifier/stack_ptr.c +++ b/tools/testing/selftests/bpf/verifier/stack_ptr.c @@ -295,8 +295,6 @@ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0), BPF_EXIT_INSN(), }, - .result_unpriv = REJECT, - .errstr_unpriv = "invalid write to stack R1 off=0 size=1", .result = ACCEPT, .retval = 42, }, diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c index e5913fd3b903..7ae2859d495c 100644 --- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c @@ -300,8 +300,6 @@ }, .fixup_map_array_48b = { 3 }, .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", .retval = 1, }, { @@ -371,8 +369,6 @@ }, .fixup_map_array_48b = { 3 }, .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", .retval = 1, }, { @@ -472,8 +468,6 @@ }, .fixup_map_array_48b = { 3 }, .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", .retval = 1, }, { @@ -766,8 +760,6 @@ }, .fixup_map_array_48b = { 3 }, .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", .retval = 1, }, { diff --git a/tools/testing/selftests/cgroup/test_kmem.c 
b/tools/testing/selftests/cgroup/test_kmem.c index 0941aa16157e..22b31ebb3513 100644 --- a/tools/testing/selftests/cgroup/test_kmem.c +++ b/tools/testing/selftests/cgroup/test_kmem.c @@ -19,12 +19,12 @@ /* - * Memory cgroup charging and vmstat data aggregation is performed using - * percpu batches 32 pages big (look at MEMCG_CHARGE_BATCH). So the maximum - * discrepancy between charge and vmstat entries is number of cpus multiplied - * by 32 pages multiplied by 2. + * Memory cgroup charging is performed using percpu batches 32 pages + * big (look at MEMCG_CHARGE_BATCH), whereas memory.stat is exact. So + * the maximum discrepancy between charge and vmstat entries is number + * of cpus multiplied by 32 pages. */ -#define MAX_VMSTAT_ERROR (4096 * 32 * 2 * get_nprocs()) +#define MAX_VMSTAT_ERROR (4096 * 32 * get_nprocs()) static int alloc_dcache(const char *cgroup, void *arg) @@ -162,7 +162,7 @@ static int cg_run_in_subcgroups(const char *parent, */ static int test_kmem_memcg_deletion(const char *root) { - long current, slab, anon, file, kernel_stack, sum; + long current, slab, anon, file, kernel_stack, pagetables, percpu, sock, sum; int ret = KSFT_FAIL; char *parent; @@ -184,11 +184,14 @@ static int test_kmem_memcg_deletion(const char *root) anon = cg_read_key_long(parent, "memory.stat", "anon "); file = cg_read_key_long(parent, "memory.stat", "file "); kernel_stack = cg_read_key_long(parent, "memory.stat", "kernel_stack "); + pagetables = cg_read_key_long(parent, "memory.stat", "pagetables "); + percpu = cg_read_key_long(parent, "memory.stat", "percpu "); + sock = cg_read_key_long(parent, "memory.stat", "sock "); if (current < 0 || slab < 0 || anon < 0 || file < 0 || - kernel_stack < 0) + kernel_stack < 0 || pagetables < 0 || percpu < 0 || sock < 0) goto cleanup; - sum = slab + anon + file + kernel_stack; + sum = slab + anon + file + kernel_stack + pagetables + percpu + sock; if (abs(sum - current) < MAX_VMSTAT_ERROR) { ret = KSFT_PASS; } else { @@ -198,6 +201,9 @@ static int test_kmem_memcg_deletion(const char *root) printf("anon = %ld\n", anon); printf("file = %ld\n", file); printf("kernel_stack = %ld\n", kernel_stack); + printf("pagetables = %ld\n", pagetables); + printf("percpu = %ld\n", percpu); + printf("sock = %ld\n", sock); } cleanup: diff --git a/tools/testing/selftests/dma/dma_map_benchmark.c b/tools/testing/selftests/dma/dma_map_benchmark.c index fb23ce9617ea..485dff51bad2 100644 --- a/tools/testing/selftests/dma/dma_map_benchmark.c +++ b/tools/testing/selftests/dma/dma_map_benchmark.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2020 Hisilicon Limited. + * Copyright (C) 2020 HiSilicon Limited. 
*/ #include <fcntl.h> @@ -40,7 +40,8 @@ struct map_benchmark { __u32 dma_bits; /* DMA addressing capability */ __u32 dma_dir; /* DMA data direction */ __u32 dma_trans_ns; /* time for DMA transmission in ns */ - __u8 expansion[80]; /* For future use */ + __u32 granule; /* how many PAGE_SIZE pages to map/unmap at a time */ + __u8 expansion[76]; /* For future use */ }; int main(int argc, char **argv) @@ -51,11 +52,13 @@ int main(int argc, char **argv) int threads = 1, seconds = 20, node = -1; /* default dma mask 32bit, bidirectional DMA */ int bits = 32, xdelay = 0, dir = DMA_MAP_BIDIRECTIONAL; + /* default granule: 1 PAGE_SIZE */ + int granule = 1; int cmd = DMA_MAP_BENCHMARK; char *p; - while ((opt = getopt(argc, argv, "t:s:n:b:d:x:")) != -1) { + while ((opt = getopt(argc, argv, "t:s:n:b:d:x:g:")) != -1) { switch (opt) { case 't': threads = atoi(optarg); @@ -75,6 +78,9 @@ int main(int argc, char **argv) case 'x': xdelay = atoi(optarg); break; + case 'g': + granule = atoi(optarg); + break; default: return -1; } @@ -110,6 +116,11 @@ int main(int argc, char **argv) exit(1); } + if (granule < 1 || granule > 1024) { + fprintf(stderr, "invalid granule size\n"); + exit(1); + } + fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR); if (fd == -1) { perror("open"); @@ -123,14 +134,15 @@ int main(int argc, char **argv) map.dma_bits = bits; map.dma_dir = dir; map.dma_trans_ns = xdelay; + map.granule = granule; if (ioctl(fd, cmd, &map)) { perror("ioctl"); exit(1); } - printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s\n", - threads, seconds, node, dir[directions]); + printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s granule: %d\n", + threads, seconds, node, dir[directions], granule); printf("average map latency(us):%.1f standard deviation:%.1f\n", map.avg_map_100ns/10.0, map.map_stddev/10.0); printf("average unmap latency(us):%.1f standard deviation:%.1f\n", diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile index cf69b2fcce59..dd61118df66e 100644 --- a/tools/testing/selftests/exec/Makefile +++ b/tools/testing/selftests/exec/Makefile @@ -28,8 +28,8 @@ $(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat cp $< $@ chmod -x $@ $(OUTPUT)/load_address_4096: load_address.c - $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie $< -o $@ + $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000 -pie -static $< -o $@ $(OUTPUT)/load_address_2097152: load_address.c - $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie $< -o $@ + $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@ $(OUTPUT)/load_address_16777216: load_address.c - $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie $< -o $@ + $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@ diff --git a/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c b/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c index ad7fabd575f9..65ede506305c 100644 --- a/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c +++ b/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c @@ -3449,4 +3449,48 @@ TEST(epoll63) close(sfd[1]); } +/* + * t0 t1 + * (ew) \ / (ew) + * e0 + * | (lt) + * s0 + */ +TEST(epoll64) +{ + pthread_t waiter[2]; + struct epoll_event e; + struct epoll_mtcontext ctx = { 0 }; + + signal(SIGUSR1, signal_handler); + + ASSERT_EQ(socketpair(AF_UNIX, SOCK_STREAM, 0, ctx.sfd), 0); + + ctx.efd[0] = epoll_create(1); + ASSERT_GE(ctx.efd[0], 0); + + e.events = EPOLLIN; +
ASSERT_EQ(epoll_ctl(ctx.efd[0], EPOLL_CTL_ADD, ctx.sfd[0], &e), 0); + + /* + * main will act as the emitter once both waiter threads are + * blocked and expects to both be awoken upon the ready event. + */ + ctx.main = pthread_self(); + ASSERT_EQ(pthread_create(&waiter[0], NULL, waiter_entry1a, &ctx), 0); + ASSERT_EQ(pthread_create(&waiter[1], NULL, waiter_entry1a, &ctx), 0); + + usleep(100000); + ASSERT_EQ(write(ctx.sfd[1], "w", 1), 1); + + ASSERT_EQ(pthread_join(waiter[0], NULL), 0); + ASSERT_EQ(pthread_join(waiter[1], NULL), 0); + + EXPECT_EQ(ctx.count, 2); + + close(ctx.efd[0]); + close(ctx.sfd[0]); + close(ctx.sfd[1]); +} + TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 7bd7e776c266..524c857a049c 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only /aarch64/get-reg-list /aarch64/get-reg-list-sve +/aarch64/vgic_init /s390x/memop /s390x/resets /s390x/sync_regs_test @@ -38,6 +39,8 @@ /dirty_log_perf_test /hardware_disable_test /kvm_create_max_vcpus +/kvm_page_table_test /memslot_modification_stress_test +/memslot_perf_test /set_memory_region_test /steal_time diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index cb95b5bace7b..daaee1888b12 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -33,7 +33,7 @@ ifeq ($(ARCH),s390) UNAME_M := s390x endif -LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c +LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c @@ -72,16 +72,20 @@ TEST_GEN_PROGS_x86_64 += dirty_log_test TEST_GEN_PROGS_x86_64 += dirty_log_perf_test TEST_GEN_PROGS_x86_64 += hardware_disable_test TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus +TEST_GEN_PROGS_x86_64 += kvm_page_table_test TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test +TEST_GEN_PROGS_x86_64 += memslot_perf_test TEST_GEN_PROGS_x86_64 += set_memory_region_test TEST_GEN_PROGS_x86_64 += steal_time TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list-sve +TEST_GEN_PROGS_aarch64 += aarch64/vgic_init TEST_GEN_PROGS_aarch64 += demand_paging_test TEST_GEN_PROGS_aarch64 += dirty_log_test TEST_GEN_PROGS_aarch64 += dirty_log_perf_test TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus +TEST_GEN_PROGS_aarch64 += kvm_page_table_test TEST_GEN_PROGS_aarch64 += set_memory_region_test TEST_GEN_PROGS_aarch64 += steal_time @@ -91,6 +95,7 @@ TEST_GEN_PROGS_s390x += s390x/sync_regs_test TEST_GEN_PROGS_s390x += demand_paging_test TEST_GEN_PROGS_s390x += dirty_log_test TEST_GEN_PROGS_s390x += kvm_create_max_vcpus +TEST_GEN_PROGS_s390x += kvm_page_table_test TEST_GEN_PROGS_s390x += set_memory_region_test TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M)) diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c new file mode 100644 index 000000000000..623f31a14326 --- /dev/null +++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: 
GPL-2.0 +/* + * vgic init sequence tests + * + * Copyright (C) 2020, Red Hat, Inc. + */ +#define _GNU_SOURCE +#include <linux/kernel.h> +#include <sys/syscall.h> +#include <asm/kvm.h> +#include <asm/kvm_para.h> + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +#define NR_VCPUS 4 + +#define REDIST_REGION_ATTR_ADDR(count, base, flags, index) (((uint64_t)(count) << 52) | \ + ((uint64_t)((base) >> 16) << 16) | ((uint64_t)(flags) << 12) | index) +#define REG_OFFSET(vcpu, offset) (((uint64_t)vcpu << 32) | offset) + +#define GICR_TYPER 0x8 + +struct vm_gic { + struct kvm_vm *vm; + int gic_fd; +}; + +static int max_ipa_bits; + +/* helper to access a redistributor register */ +static int access_redist_reg(int gicv3_fd, int vcpu, int offset, + uint32_t *val, bool write) +{ + uint64_t attr = REG_OFFSET(vcpu, offset); + + return _kvm_device_access(gicv3_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS, + attr, val, write); +} + +/* dummy guest code */ +static void guest_code(void) +{ + GUEST_SYNC(0); + GUEST_SYNC(1); + GUEST_SYNC(2); + GUEST_DONE(); +} + +/* we don't want to assert on run execution, hence that helper */ +static int run_vcpu(struct kvm_vm *vm, uint32_t vcpuid) +{ + ucall_init(vm, NULL); + int ret = _vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL); + if (ret) + return -errno; + return 0; +} + +static struct vm_gic vm_gic_create(void) +{ + struct vm_gic v; + + v.vm = vm_create_default_with_vcpus(NR_VCPUS, 0, 0, guest_code, NULL); + v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); + + return v; +} + +static void vm_gic_destroy(struct vm_gic *v) +{ + close(v->gic_fd); + kvm_vm_free(v->vm); +} + +/** + * Helper routine that performs KVM device tests in general and + * especially ARM_VGIC_V3 ones. Eventually the ARM_VGIC_V3 + * device gets created, a legacy RDIST region is set at @0x0 + * and a DIST region is set @0x60000 + */ +static void subtest_dist_rdist(struct vm_gic *v) +{ + int ret; + uint64_t addr; + + /* Check existing group/attributes */ + kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_DIST); + + kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST); + + /* check non existing attribute */ + ret = _kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, 0); + TEST_ASSERT(ret && errno == ENXIO, "attribute not supported"); + + /* misaligned DIST and REDIST address settings */ + addr = 0x1000; + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, "GICv3 dist base not 64kB aligned"); + + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, "GICv3 redist base not 64kB aligned"); + + /* out of range address */ + if (max_ipa_bits) { + addr = 1ULL << max_ipa_bits; + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true); + TEST_ASSERT(ret && errno == E2BIG, "dist address beyond IPA limit"); + + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); + TEST_ASSERT(ret && errno == E2BIG, "redist address beyond IPA limit"); + } + + /* set REDIST base address @0x0*/ + addr = 0x00000; + kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); + + /* Attempt to create a second legacy redistributor region */ + addr = 0xE0000; + ret = _kvm_device_access(v->gic_fd, 
KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); + TEST_ASSERT(ret && errno == EEXIST, "GICv3 redist base set again"); + + /* Attempt to mix legacy and new redistributor regions */ + addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 0, 0); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, "attempt to mix GICv3 REDIST and REDIST_REGION"); + + /* + * Set overlapping DIST / REDIST, cannot be detected here. Will be detected + * on first vcpu run instead. + */ + addr = 3 * 2 * 0x10000; + kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_DIST, + &addr, true); +} + +/* Test the new REDIST region API */ +static void subtest_redist_regions(struct vm_gic *v) +{ + uint64_t addr, expected_addr; + int ret; + + ret = kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST); + TEST_ASSERT(!ret, "Multiple redist regions advertised"); + + addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 2, 0); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, "redist region attr value with flags != 0"); + + addr = REDIST_REGION_ATTR_ADDR(0, 0x100000, 0, 0); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, "redist region attr value with count== 0"); + + addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 1); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, + "attempt to register the first rdist region with index != 0"); + + addr = REDIST_REGION_ATTR_ADDR(2, 0x201000, 0, 1); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, "rdist region with misaligned address"); + + addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0); + kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + + addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 1); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, "register an rdist region with already used index"); + + addr = REDIST_REGION_ATTR_ADDR(1, 0x210000, 0, 2); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, + "register an rdist region overlapping with another one"); + + addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 2); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, "register redist region with index not +1"); + + addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 1); + kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + + addr = REDIST_REGION_ATTR_ADDR(1, 1ULL << max_ipa_bits, 0, 2); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + TEST_ASSERT(ret && errno == E2BIG, + "register redist region with base address beyond IPA range"); + + addr = 0x260000; + ret = _kvm_device_access(v->gic_fd, 
KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, + "Mix KVM_VGIC_V3_ADDR_TYPE_REDIST and REDIST_REGION"); + + /* + * Now there are 2 redist regions: + * region 0 @ 0x200000 2 redists + * region 1 @ 0x240000 1 redist + * Attempt to read their characteristics + */ + + addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 0); + expected_addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, false); + TEST_ASSERT(!ret && addr == expected_addr, "read characteristics of region #0"); + + addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 1); + expected_addr = REDIST_REGION_ATTR_ADDR(1, 0x240000, 0, 1); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, false); + TEST_ASSERT(!ret && addr == expected_addr, "read characteristics of region #1"); + + addr = REDIST_REGION_ATTR_ADDR(0, 0, 0, 2); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, false); + TEST_ASSERT(ret && errno == ENOENT, "read characteristics of non existing region"); + + addr = 0x260000; + kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true); + + addr = REDIST_REGION_ATTR_ADDR(1, 0x260000, 0, 2); + ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, "register redist region colliding with dist"); +} + +/* + * VGIC KVM device is created and initialized before the secondary CPUs + * get created + */ +static void test_vgic_then_vcpus(void) +{ + struct vm_gic v; + int ret, i; + + v.vm = vm_create_default(0, 0, guest_code); + v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); + + subtest_dist_rdist(&v); + + /* Add the rest of the VCPUs */ + for (i = 1; i < NR_VCPUS; ++i) + vm_vcpu_add_default(v.vm, i, guest_code); + + ret = run_vcpu(v.vm, 3); + TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run"); + + vm_gic_destroy(&v); +} + +/* All the VCPUs are created before the VGIC KVM device gets initialized */ +static void test_vcpus_then_vgic(void) +{ + struct vm_gic v; + int ret; + + v = vm_gic_create(); + + subtest_dist_rdist(&v); + + ret = run_vcpu(v.vm, 3); + TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run"); + + vm_gic_destroy(&v); +} + +static void test_new_redist_regions(void) +{ + void *dummy = NULL; + struct vm_gic v; + uint64_t addr; + int ret; + + v = vm_gic_create(); + subtest_redist_regions(&v); + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true); + + ret = run_vcpu(v.vm, 3); + TEST_ASSERT(ret == -ENXIO, "running without sufficient number of rdists"); + vm_gic_destroy(&v); + + /* step 2 */ + + v = vm_gic_create(); + subtest_redist_regions(&v); + + addr = REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2); + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + + ret = run_vcpu(v.vm, 3); + TEST_ASSERT(ret == -EBUSY, "running without vgic explicit init"); + + vm_gic_destroy(&v); + + /* step 3 */ + + v = vm_gic_create(); + subtest_redist_regions(&v); + + ret = _kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, dummy, true); + TEST_ASSERT(ret && errno == EFAULT, + "register a third region allowing to cover the 4 vcpus"); + + addr =
REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2); + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true); + + ret = run_vcpu(v.vm, 3); + TEST_ASSERT(!ret, "vcpu run"); + + vm_gic_destroy(&v); +} + +static void test_typer_accesses(void) +{ + struct vm_gic v; + uint64_t addr; + uint32_t val; + int ret, i; + + v.vm = vm_create_default(0, 0, guest_code); + + v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); + + vm_vcpu_add_default(v.vm, 3, guest_code); + + ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); + TEST_ASSERT(ret && errno == EINVAL, "attempting to read GICR_TYPER of non created vcpu"); + + vm_vcpu_add_default(v.vm, 1, guest_code); + + ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); + TEST_ASSERT(ret && errno == EBUSY, "read GICR_TYPER before GIC initialized"); + + vm_vcpu_add_default(v.vm, 2, guest_code); + + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true); + + for (i = 0; i < NR_VCPUS ; i++) { + ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && !val, "read GICR_TYPER before rdist region setting"); + } + + addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 0); + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + + /* The 2 first rdists should be put there (vcpu 0 and 3) */ + ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && !val, "read typer of rdist #0"); + + ret = access_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x310, "read typer of rdist #1"); + + addr = REDIST_REGION_ATTR_ADDR(10, 0x100000, 0, 1); + ret = _kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + TEST_ASSERT(ret && errno == EINVAL, "collision with previous rdist region"); + + ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x100, + "no redist region attached to vcpu #1 yet, last cannot be returned"); + + ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x200, + "no redist region attached to vcpu #2, last cannot be returned"); + + addr = REDIST_REGION_ATTR_ADDR(10, 0x20000, 0, 1); + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + + ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #1"); + + ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x210, + "read typer of rdist #1, last properly returned"); + + vm_gic_destroy(&v); +} + +/** + * Test GICR_TYPER last bit with new redist regions + * rdist regions #1 and #2 are contiguous + * rdist region #0 @0x100000 2 rdist capacity + * rdists: 0, 3 (Last) + * rdist region #1 @0x240000 2 rdist capacity + * rdists: 5, 4 (Last) + * rdist region #2 @0x200000 2 rdist capacity + * rdists: 1, 2 + */ +static void test_last_bit_redist_regions(void) +{ + uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 }; + struct vm_gic v; + uint64_t addr; + uint32_t val; + int ret; + + v.vm = vm_create_default_with_vcpus(6, 0, 0, guest_code, vcpuids); + + v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); + + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + 
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true); + + addr = REDIST_REGION_ATTR_ADDR(2, 0x100000, 0, 0); + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + + addr = REDIST_REGION_ATTR_ADDR(2, 0x240000, 0, 1); + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + + addr = REDIST_REGION_ATTR_ADDR(2, 0x200000, 0, 2); + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true); + + ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x000, "read typer of rdist #0"); + + ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #1"); + + ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x200, "read typer of rdist #2"); + + ret = access_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x310, "read typer of rdist #3"); + + ret = access_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x500, "read typer of rdist #5"); + + ret = access_redist_reg(v.gic_fd, 4, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x410, "read typer of rdist #4"); + + vm_gic_destroy(&v); +} + +/* Test last bit with legacy region */ +static void test_last_bit_single_rdist(void) +{ + uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 }; + struct vm_gic v; + uint64_t addr; + uint32_t val; + int ret; + + v.vm = vm_create_default_with_vcpus(6, 0, 0, guest_code, vcpuids); + + v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); + + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true); + + addr = 0x10000; + kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true); + + ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x000, "read typer of rdist #0"); + + ret = access_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x300, "read typer of rdist #1"); + + ret = access_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x500, "read typer of rdist #2"); + + ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #3"); + + ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false); + TEST_ASSERT(!ret && val == 0x210, "read typer of rdist #3"); + + vm_gic_destroy(&v); +} + +void test_kvm_device(void) +{ + struct vm_gic v; + int ret, fd; + + v.vm = vm_create_default_with_vcpus(NR_VCPUS, 0, 0, guest_code, NULL); + + /* try to create a non existing KVM device */ + ret = _kvm_create_device(v.vm, 0, true, &fd); + TEST_ASSERT(ret && errno == ENODEV, "unsupported device"); + + /* trial mode with VGIC_V3 device */ + ret = _kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, true, &fd); + if (ret) { + print_skip("GICv3 not supported"); + exit(KSFT_SKIP); + } + v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); + + ret = _kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false, &fd); + TEST_ASSERT(ret && errno == EEXIST, "create GICv3 device twice"); + + kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, true); + + if (!_kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V2, true, &fd)) { + ret = _kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V2, false, &fd); + TEST_ASSERT(ret && errno == EINVAL, "create GICv2 while v3 
exists"); + } + + vm_gic_destroy(&v); +} + +int main(int ac, char **av) +{ + max_ipa_bits = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE); + + test_kvm_device(); + test_vcpus_then_vgic(); + test_vgic_then_vcpus(); + test_new_redist_regions(); + test_typer_accesses(); + test_last_bit_redist_regions(); + test_last_bit_single_rdist(); + + return 0; +} diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index 5f7a229c3af1..b74704305835 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -9,6 +9,7 @@ #define _GNU_SOURCE /* for pipe2 */ +#include <inttypes.h> #include <stdio.h> #include <stdlib.h> #include <time.h> @@ -38,6 +39,7 @@ static int nr_vcpus = 1; static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; +static size_t demand_paging_size; static char *guest_data_prototype; static void *vcpu_worker(void *data) @@ -71,36 +73,51 @@ static void *vcpu_worker(void *data) return NULL; } -static int handle_uffd_page_request(int uffd, uint64_t addr) +static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr) { - pid_t tid; + pid_t tid = syscall(__NR_gettid); struct timespec start; struct timespec ts_diff; - struct uffdio_copy copy; int r; - tid = syscall(__NR_gettid); + clock_gettime(CLOCK_MONOTONIC, &start); - copy.src = (uint64_t)guest_data_prototype; - copy.dst = addr; - copy.len = perf_test_args.host_page_size; - copy.mode = 0; + if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) { + struct uffdio_copy copy; - clock_gettime(CLOCK_MONOTONIC, &start); + copy.src = (uint64_t)guest_data_prototype; + copy.dst = addr; + copy.len = demand_paging_size; + copy.mode = 0; - r = ioctl(uffd, UFFDIO_COPY, ©); - if (r == -1) { - pr_info("Failed Paged in 0x%lx from thread %d with errno: %d\n", - addr, tid, errno); - return r; + r = ioctl(uffd, UFFDIO_COPY, ©); + if (r == -1) { + pr_info("Failed UFFDIO_COPY in 0x%lx from thread %d with errno: %d\n", + addr, tid, errno); + return r; + } + } else if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR) { + struct uffdio_continue cont = {0}; + + cont.range.start = addr; + cont.range.len = demand_paging_size; + + r = ioctl(uffd, UFFDIO_CONTINUE, &cont); + if (r == -1) { + pr_info("Failed UFFDIO_CONTINUE in 0x%lx from thread %d with errno: %d\n", + addr, tid, errno); + return r; + } + } else { + TEST_FAIL("Invalid uffd mode %d", uffd_mode); } ts_diff = timespec_elapsed(start); - PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid, + PER_PAGE_DEBUG("UFFD page-in %d \t%ld ns\n", tid, timespec_to_ns(ts_diff)); PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n", - perf_test_args.host_page_size, addr, tid); + demand_paging_size, addr, tid); return 0; } @@ -108,6 +125,7 @@ static int handle_uffd_page_request(int uffd, uint64_t addr) bool quit_uffd_thread; struct uffd_handler_args { + int uffd_mode; int uffd; int pipefd; useconds_t delay; @@ -169,7 +187,7 @@ static void *uffd_handler_thread_fn(void *arg) if (r == -1) { if (errno == EAGAIN) continue; - pr_info("Read of uffd gor errno %d", errno); + pr_info("Read of uffd got errno %d\n", errno); return NULL; } @@ -184,7 +202,7 @@ static void *uffd_handler_thread_fn(void *arg) if (delay) usleep(delay); addr = msg.arg.pagefault.address; - r = handle_uffd_page_request(uffd, addr); + r = handle_uffd_page_request(uffd_args->uffd_mode, uffd, addr); if (r < 0) return NULL; pages++; @@ -198,43 +216,53 @@ static void *uffd_handler_thread_fn(void *arg) return NULL; } -static int 
setup_demand_paging(struct kvm_vm *vm, - pthread_t *uffd_handler_thread, int pipefd, - useconds_t uffd_delay, - struct uffd_handler_args *uffd_args, - void *hva, uint64_t len) +static void setup_demand_paging(struct kvm_vm *vm, + pthread_t *uffd_handler_thread, int pipefd, + int uffd_mode, useconds_t uffd_delay, + struct uffd_handler_args *uffd_args, + void *hva, void *alias, uint64_t len) { + bool is_minor = (uffd_mode == UFFDIO_REGISTER_MODE_MINOR); int uffd; struct uffdio_api uffdio_api; struct uffdio_register uffdio_register; + uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY; - uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); - if (uffd == -1) { - pr_info("uffd creation failed\n"); - return -1; + PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n", + is_minor ? "MINOR" : "MISSING", + is_minor ? "UFFDIO_CONTINUE" : "UFFDIO_COPY"); + + /* In order to get minor faults, prefault via the alias. */ + if (is_minor) { + size_t p; + + expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE; + + TEST_ASSERT(alias != NULL, "Alias required for minor faults"); + for (p = 0; p < (len / demand_paging_size); ++p) { + memcpy(alias + (p * demand_paging_size), + guest_data_prototype, demand_paging_size); + } } + uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); + TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno); + uffdio_api.api = UFFD_API; uffdio_api.features = 0; - if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) { - pr_info("ioctl uffdio_api failed\n"); - return -1; - } + TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1, + "ioctl UFFDIO_API failed: %" PRIu64, + (uint64_t)uffdio_api.api); uffdio_register.range.start = (uint64_t)hva; uffdio_register.range.len = len; - uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING; - if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) { - pr_info("ioctl uffdio_register failed\n"); - return -1; - } - - if ((uffdio_register.ioctls & UFFD_API_RANGE_IOCTLS) != - UFFD_API_RANGE_IOCTLS) { - pr_info("unexpected userfaultfd ioctl set\n"); - return -1; - } + uffdio_register.mode = uffd_mode; + TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1, + "ioctl UFFDIO_REGISTER failed"); + TEST_ASSERT((uffdio_register.ioctls & expected_ioctls) == + expected_ioctls, "missing userfaultfd ioctls"); + uffd_args->uffd_mode = uffd_mode; uffd_args->uffd = uffd; uffd_args->pipefd = pipefd; uffd_args->delay = uffd_delay; @@ -243,13 +271,12 @@ static int setup_demand_paging(struct kvm_vm *vm, PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n", hva, hva + len); - - return 0; } struct test_params { - bool use_uffd; + int uffd_mode; useconds_t uffd_delay; + enum vm_mem_backing_src_type src_type; bool partition_vcpu_memory_access; }; @@ -267,14 +294,16 @@ static void run_test(enum vm_guest_mode mode, void *arg) int r; vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, - VM_MEM_SRC_ANONYMOUS); + p->src_type); perf_test_args.wr_fract = 1; - guest_data_prototype = malloc(perf_test_args.host_page_size); + demand_paging_size = get_backing_src_pagesz(p->src_type); + + guest_data_prototype = malloc(demand_paging_size); TEST_ASSERT(guest_data_prototype, "Failed to allocate buffer for guest data pattern"); - memset(guest_data_prototype, 0xAB, perf_test_args.host_page_size); + memset(guest_data_prototype, 0xAB, demand_paging_size); vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); TEST_ASSERT(vcpu_threads, "Memory allocation failed"); @@ -282,7 +311,7 @@ static void run_test(enum vm_guest_mode mode,
void *arg) perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size, p->partition_vcpu_memory_access); - if (p->use_uffd) { + if (p->uffd_mode) { uffd_handler_threads = malloc(nr_vcpus * sizeof(*uffd_handler_threads)); TEST_ASSERT(uffd_handler_threads, "Memory allocation failed"); @@ -296,6 +325,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { vm_paddr_t vcpu_gpa; void *vcpu_hva; + void *vcpu_alias; uint64_t vcpu_mem_size; @@ -310,8 +340,9 @@ static void run_test(enum vm_guest_mode mode, void *arg) PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n", vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size); - /* Cache the HVA pointer of the region */ + /* Cache the host addresses of the region */ vcpu_hva = addr_gpa2hva(vm, vcpu_gpa); + vcpu_alias = addr_gpa2alias(vm, vcpu_gpa); /* * Set up user fault fd to handle demand paging @@ -321,13 +352,11 @@ static void run_test(enum vm_guest_mode mode, void *arg) O_CLOEXEC | O_NONBLOCK); TEST_ASSERT(!r, "Failed to set up pipefd"); - r = setup_demand_paging(vm, - &uffd_handler_threads[vcpu_id], - pipefds[vcpu_id * 2], - p->uffd_delay, &uffd_args[vcpu_id], - vcpu_hva, vcpu_mem_size); - if (r < 0) - exit(-r); + setup_demand_paging(vm, &uffd_handler_threads[vcpu_id], + pipefds[vcpu_id * 2], p->uffd_mode, + p->uffd_delay, &uffd_args[vcpu_id], + vcpu_hva, vcpu_alias, + vcpu_mem_size); } } @@ -355,7 +384,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) pr_info("All vCPU threads joined\n"); - if (p->use_uffd) { + if (p->uffd_mode) { char c; /* Tell the user fault fd handler threads to quit */ @@ -377,7 +406,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) free(guest_data_prototype); free(vcpu_threads); - if (p->use_uffd) { + if (p->uffd_mode) { free(uffd_handler_threads); free(uffd_args); free(pipefds); @@ -387,17 +416,19 @@ static void run_test(enum vm_guest_mode mode, void *arg) static void help(char *name) { puts(""); - printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n" - " [-b memory] [-v vcpus] [-o]\n", name); + printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n" + " [-b memory] [-t type] [-v vcpus] [-o]\n", name); guest_modes_help(); - printf(" -u: use User Fault FD to handle vCPU page\n" - " faults.\n"); + printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n" + " UFFD registration mode: 'MISSING' or 'MINOR'.\n"); printf(" -d: add a delay in usec to the User Fault\n" " FD handler to simulate demand paging\n" " overheads. Ignored without -u.\n"); printf(" -b: specify the size of the memory region which should be\n" " demand paged by each vCPU. e.g. 10M or 3G.\n" " Default: 1G\n"); + printf(" -t: The type of backing memory to use. 
Default: anonymous\n"); + backing_src_help(); printf(" -v: specify the number of vCPUs to run.\n"); printf(" -o: Overlap guest memory accesses instead of partitioning\n" " them into a separate region of memory for each vCPU.\n"); @@ -409,19 +440,24 @@ int main(int argc, char *argv[]) { int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); struct test_params p = { + .src_type = VM_MEM_SRC_ANONYMOUS, .partition_vcpu_memory_access = true, }; int opt; guest_modes_append_default(); - while ((opt = getopt(argc, argv, "hm:ud:b:v:o")) != -1) { + while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) { switch (opt) { case 'm': guest_modes_cmdline(optarg); break; case 'u': - p.use_uffd = true; + if (!strcmp("MISSING", optarg)) + p.uffd_mode = UFFDIO_REGISTER_MODE_MISSING; + else if (!strcmp("MINOR", optarg)) + p.uffd_mode = UFFDIO_REGISTER_MODE_MINOR; + TEST_ASSERT(p.uffd_mode, "UFFD mode must be 'MISSING' or 'MINOR'."); break; case 'd': p.uffd_delay = strtoul(optarg, NULL, 0); @@ -430,6 +466,9 @@ int main(int argc, char *argv[]) case 'b': guest_percpu_mem_size = parse_size(optarg); break; + case 't': + p.src_type = parse_backing_src_type(optarg); + break; case 'v': nr_vcpus = atoi(optarg); TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus, @@ -445,6 +484,11 @@ int main(int argc, char *argv[]) } } + if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR && + !backing_src_is_shared(p.src_type)) { + TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -t"); + } + for_each_guest_mode(run_test, &p); return 0; diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c index bb2752d78fe3..81edbd23d371 100644 --- a/tools/testing/selftests/kvm/dirty_log_test.c +++ b/tools/testing/selftests/kvm/dirty_log_test.c @@ -17,6 +17,7 @@ #include <linux/bitmap.h> #include <linux/bitops.h> #include <asm/barrier.h> +#include <linux/atomic.h> #include "kvm_util.h" #include "test_util.h" @@ -137,12 +138,20 @@ static uint64_t host_clear_count; static uint64_t host_track_next_count; /* Whether dirty ring reset is requested, or finished */ -static sem_t dirty_ring_vcpu_stop; -static sem_t dirty_ring_vcpu_cont; +static sem_t sem_vcpu_stop; +static sem_t sem_vcpu_cont; +/* + * This is only set by main thread, and only cleared by vcpu thread. It is + * used to request vcpu thread to stop at the next GUEST_SYNC, since GUEST_SYNC + * is the only place that we'll guarantee both "dirty bit" and "dirty data" + * will match. E.g., SIG_IPI won't guarantee that if the vcpu is interrupted + * after setting dirty bit but before the data is written. + */ +static atomic_t vcpu_sync_stop_requested; /* * This is updated by the vcpu thread to tell the host whether it's a * ring-full event. It should only be read until a sem_wait() of - * dirty_ring_vcpu_stop and before vcpu continues to run. + * sem_vcpu_stop and before vcpu continues to run. 
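The rename from dirty_ring_* to sem_vcpu_* reflects that the semaphore pair now also implements a generic "park the vCPU at its next sync point" handshake, not just dirty-ring flow control. A stripped-down model of the pattern, with hypothetical worker/controller names and plain C11 atomics plus POSIX semaphores, may help; this is a sketch, not the test's code:

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>

static sem_t sem_stop, sem_cont;
static atomic_bool stop_requested, exit_requested;

static void *worker(void *arg)
{
	while (!atomic_load(&exit_requested)) {
		/* ... one unit of work; this is the "GUEST_SYNC" point ... */
		if (atomic_load(&stop_requested)) {
			/* Acknowledge, park, and wait to be released. */
			atomic_store(&stop_requested, false);
			sem_post(&sem_stop);
			sem_wait(&sem_cont);
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&sem_stop, 0, 0);
	sem_init(&sem_cont, 0, 0);
	pthread_create(&t, NULL, worker, NULL);

	/* Controller: request a stop, inspect shared state while the
	 * worker is parked, then release it and shut down. */
	atomic_store(&stop_requested, true);
	sem_wait(&sem_stop);
	/* ... safe to verify shared state here ... */
	atomic_store(&exit_requested, true);
	sem_post(&sem_cont);
	pthread_join(t, NULL);
	return 0;
}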
*/ static bool dirty_ring_vcpu_ring_full; /* @@ -234,6 +243,17 @@ static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot, kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages); } +/* Should only be called after a GUEST_SYNC */ +static void vcpu_handle_sync_stop(void) +{ + if (atomic_read(&vcpu_sync_stop_requested)) { + /* The main thread is sleeping and waiting for us */ + atomic_set(&vcpu_sync_stop_requested, false); + sem_post(&sem_vcpu_stop); + sem_wait_until(&sem_vcpu_cont); + } +} + static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err) { struct kvm_run *run = vcpu_state(vm, VCPU_ID); @@ -244,6 +264,8 @@ static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err) TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC, "Invalid guest sync status: exit_reason=%s\n", exit_reason_str(run->exit_reason)); + + vcpu_handle_sync_stop(); } static bool dirty_ring_supported(void) @@ -301,13 +323,13 @@ static void dirty_ring_wait_vcpu(void) { /* This makes sure that hardware PML cache flushed */ vcpu_kick(); - sem_wait_until(&dirty_ring_vcpu_stop); + sem_wait_until(&sem_vcpu_stop); } static void dirty_ring_continue_vcpu(void) { pr_info("Notifying vcpu to continue\n"); - sem_post(&dirty_ring_vcpu_cont); + sem_post(&sem_vcpu_cont); } static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot, @@ -361,11 +383,11 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err) /* Update the flag first before pause */ WRITE_ONCE(dirty_ring_vcpu_ring_full, run->exit_reason == KVM_EXIT_DIRTY_RING_FULL); - sem_post(&dirty_ring_vcpu_stop); + sem_post(&sem_vcpu_stop); pr_info("vcpu stops because %s...\n", dirty_ring_vcpu_ring_full ? "dirty ring is full" : "vcpu is kicked out"); - sem_wait_until(&dirty_ring_vcpu_cont); + sem_wait_until(&sem_vcpu_cont); pr_info("vcpu continues now.\n"); } else { TEST_ASSERT(false, "Invalid guest sync status: " @@ -377,7 +399,7 @@ static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err) static void dirty_ring_before_vcpu_join(void) { /* Kick another round of vcpu just to make sure it will quit */ - sem_post(&dirty_ring_vcpu_cont); + sem_post(&sem_vcpu_cont); } struct log_mode { @@ -505,9 +527,8 @@ static void *vcpu_worker(void *data) */ sigmask->len = 8; pthread_sigmask(0, NULL, sigset); + sigdelset(sigset, SIG_IPI); vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask); - sigaddset(sigset, SIG_IPI); - pthread_sigmask(SIG_BLOCK, sigset, NULL); sigemptyset(sigset); sigaddset(sigset, SIG_IPI); @@ -768,7 +789,25 @@ static void run_test(enum vm_guest_mode mode, void *arg) usleep(p->interval * 1000); log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX, bmap, host_num_pages); + + /* + * See vcpu_sync_stop_requested definition for details on why + * we need to stop the vcpu while verifying data. + */ + atomic_set(&vcpu_sync_stop_requested, true); + sem_wait_until(&sem_vcpu_stop); + /* + * NOTE: for dirty ring, it's possible that we didn't stop at + * GUEST_SYNC but instead we stopped because ring is full; + * that's okay too because ring full means we're only missing + * the flush of the last page, and since we handle the last + * page specially verification will succeed anyway. 
+ */ + assert(host_log_mode == LOG_MODE_DIRTY_RING || + atomic_read(&vcpu_sync_stop_requested) == false); vm_dirty_log_verify(mode, bmap); + sem_post(&sem_vcpu_cont); + iteration++; sync_global_to_guest(vm, iteration); } @@ -818,9 +857,10 @@ int main(int argc, char *argv[]) .interval = TEST_HOST_LOOP_INTERVAL, }; int opt, i; + sigset_t sigset; - sem_init(&dirty_ring_vcpu_stop, 0, 0); - sem_init(&dirty_ring_vcpu_cont, 0, 0); + sem_init(&sem_vcpu_stop, 0, 0); + sem_init(&sem_vcpu_cont, 0, 0); guest_modes_append_default(); @@ -876,6 +916,11 @@ int main(int argc, char *argv[]) srandom(time(0)); + /* Ensure that vCPU threads start with SIG_IPI blocked. */ + sigemptyset(&sigset); + sigaddset(&sigset, SIG_IPI); + pthread_sigmask(SIG_BLOCK, &sigset, NULL); + if (host_log_mode_option == LOG_MODE_ALL) { /* Run each log mode */ for (i = 0; i < LOG_MODE_NUM; i++) { diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c index 5aadf84c91c0..4b8db3bce610 100644 --- a/tools/testing/selftests/kvm/hardware_disable_test.c +++ b/tools/testing/selftests/kvm/hardware_disable_test.c @@ -132,6 +132,36 @@ static void run_test(uint32_t run) TEST_ASSERT(false, "%s: [%d] child escaped the ninja\n", __func__, run); } +void wait_for_child_setup(pid_t pid) +{ + /* + * Wait for the child to post to the semaphore, but wake up periodically + * to check if the child exited prematurely. + */ + for (;;) { + const struct timespec wait_period = { .tv_sec = 1 }; + int status; + + if (!sem_timedwait(sem, &wait_period)) + return; + + /* Child is still running, keep waiting. */ + if (pid != waitpid(pid, &status, WNOHANG)) + continue; + + /* + * Child is no longer running, which is not expected. + * + * If it exited with a non-zero status, we explicitly forward + * the child's status in case it exited with KSFT_SKIP. 
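One portability note on the loop above: POSIX defines the timespec argument of sem_timedwait() as an absolute CLOCK_REALTIME deadline, not a relative interval, so a standalone version of this poll-and-reap helper would recompute the deadline each iteration. A sketch with hypothetical names, assuming the child posts the semaphore once its setup is complete:

#include <semaphore.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>

/* Wait for 'sem' (posted by child 'pid'), reaping the child if it
 * dies before posting. */
static void wait_for_child_or_sem(sem_t *sem, pid_t pid)
{
	for (;;) {
		struct timespec deadline;
		int status;

		/* sem_timedwait() wants an absolute CLOCK_REALTIME time. */
		clock_gettime(CLOCK_REALTIME, &deadline);
		deadline.tv_sec += 1;

		if (!sem_timedwait(sem, &deadline))
			return;	/* child signalled: setup complete */

		if (waitpid(pid, &status, WNOHANG) == pid) {
			/* Child exited early: forward its status, which
			 * may legitimately be KSFT_SKIP. */
			if (WIFEXITED(status))
				exit(WEXITSTATUS(status));
			abort();
		}
	}
}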
+ */ + if (WIFEXITED(status)) + exit(WEXITSTATUS(status)); + else + TEST_ASSERT(false, "Child exited unexpectedly"); + } +} + int main(int argc, char **argv) { uint32_t i; @@ -148,7 +178,7 @@ int main(int argc, char **argv) run_test(i); /* This function always exits */ pr_debug("%s: [%d] waiting semaphore\n", __func__, i); - sem_wait(sem); + wait_for_child_setup(pid); r = (rand() % DELAY_US_MAX) + 1; pr_debug("%s: [%d] waiting %dus\n", __func__, i, r); usleep(r); diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 0f4258eaa629..fcd8e3855111 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -69,9 +69,6 @@ enum vm_guest_mode { #define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT) #define PTES_PER_MIN_PAGE ptes_per_page(MIN_PAGE_SIZE) -#define vm_guest_mode_string(m) vm_guest_mode_string[m] -extern const char * const vm_guest_mode_string[]; - struct vm_guest_mode_params { unsigned int pa_bits; unsigned int va_bits; @@ -80,11 +77,13 @@ struct vm_guest_mode_params { }; extern const struct vm_guest_mode_params vm_guest_mode_params[]; +int open_kvm_dev_path_or_exit(void); int kvm_check_cap(long cap); int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap); int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id, struct kvm_enable_cap *cap); void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size); +const char *vm_guest_mode_string(uint32_t i); struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm); void kvm_vm_free(struct kvm_vm *vmp); @@ -148,6 +147,7 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva); vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); +void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); /* * Address Guest Virtual to Guest Physical @@ -225,6 +225,15 @@ int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid, #endif void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid); +int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr); +int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr); +int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd); +int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test); +int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, + void *val, bool write); +int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, + void *val, bool write); + const char *exit_reason_str(unsigned int exit_reason); void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot); @@ -295,7 +304,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm); unsigned int vm_get_page_size(struct kvm_vm *vm); unsigned int vm_get_page_shift(struct kvm_vm *vm); -unsigned int vm_get_max_gfn(struct kvm_vm *vm); +uint64_t vm_get_max_gfn(struct kvm_vm *vm); int vm_get_fd(struct kvm_vm *vm); unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size); diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index b7f41399f22c..d79be15dd3d2 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -17,6 +17,7 @@ #include <errno.h> #include <unistd.h> #include <fcntl.h> +#include <sys/mman.h> #include "kselftest.h" static inline int _no_printf(const char *format, ...) 
{ return 0; } @@ -71,14 +72,44 @@ enum vm_mem_backing_src_type { VM_MEM_SRC_ANONYMOUS, VM_MEM_SRC_ANONYMOUS_THP, VM_MEM_SRC_ANONYMOUS_HUGETLB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB, + VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB, + VM_MEM_SRC_SHMEM, + VM_MEM_SRC_SHARED_HUGETLB, + NUM_SRC_TYPES, }; struct vm_mem_backing_src_alias { const char *name; - enum vm_mem_backing_src_type type; + uint32_t flag; }; +bool thp_configured(void); +size_t get_trans_hugepagesz(void); +size_t get_def_hugetlb_pagesz(void); +const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i); +size_t get_backing_src_pagesz(uint32_t i); void backing_src_help(void); enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name); +/* + * Whether or not the given source type is shared memory (as opposed to + * anonymous). + */ +static inline bool backing_src_is_shared(enum vm_mem_backing_src_type t) +{ + return vm_mem_backing_src_alias(t)->flag & MAP_SHARED; +} + #endif /* SELFTEST_KVM_TEST_UTIL_H */ diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c new file mode 100644 index 000000000000..1c4753fff19e --- /dev/null +++ b/tools/testing/selftests/kvm/kvm_page_table_test.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KVM page table test + * + * Copyright (C) 2021, Huawei, Inc. + * + * Make sure that THP has been enabled or enough HUGETLB pages with specific + * page size have been pre-allocated on your system, if you are planning to + * use hugepages to back the guest memory for testing. + */ + +#define _GNU_SOURCE /* for program_invocation_name */ + +#include <stdio.h> +#include <stdlib.h> +#include <time.h> +#include <pthread.h> +#include <semaphore.h> + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" +#include "guest_modes.h" + +#define TEST_MEM_SLOT_INDEX 1 + +/* Default size(1GB) of the memory for testing */ +#define DEFAULT_TEST_MEM_SIZE (1 << 30) + +/* Default guest test virtual memory offset */ +#define DEFAULT_GUEST_TEST_MEM 0xc0000000 + +/* Different guest memory accessing stages */ +enum test_stage { + KVM_BEFORE_MAPPINGS, + KVM_CREATE_MAPPINGS, + KVM_UPDATE_MAPPINGS, + KVM_ADJUST_MAPPINGS, + NUM_TEST_STAGES, +}; + +static const char * const test_stage_string[] = { + "KVM_BEFORE_MAPPINGS", + "KVM_CREATE_MAPPINGS", + "KVM_UPDATE_MAPPINGS", + "KVM_ADJUST_MAPPINGS", +}; + +struct vcpu_args { + int vcpu_id; + bool vcpu_write; +}; + +struct test_args { + struct kvm_vm *vm; + uint64_t guest_test_virt_mem; + uint64_t host_page_size; + uint64_t host_num_pages; + uint64_t large_page_size; + uint64_t large_num_pages; + uint64_t host_pages_per_lpage; + enum vm_mem_backing_src_type src_type; + struct vcpu_args vcpu_args[KVM_MAX_VCPUS]; +}; + +/* + * Guest variables. Use addr_gva2hva() if these variables need + * to be changed in host. 
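The struct change above, replacing the enum type field with an mmap() flag field, is what lets backing_src_is_shared() be a one-liner: shared versus private is read straight off the stored mapping flags. A two-entry toy table illustrating the same idea outside the selftest framework (names hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <sys/mman.h>

struct src_alias {
	const char *name;
	uint32_t flag;	/* flags passed straight to mmap() */
};

static const struct src_alias aliases[] = {
	{ "anonymous", MAP_PRIVATE | MAP_ANONYMOUS },
	{ "shmem",     MAP_SHARED },
};

/* Shared vs. private falls out of the stored mmap() flags. */
static bool src_is_shared(int i)
{
	return aliases[i].flag & MAP_SHARED;
}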
+ */ +static enum test_stage guest_test_stage; + +/* Host variables */ +static uint32_t nr_vcpus = 1; +static struct test_args test_args; +static enum test_stage *current_stage; +static bool host_quit; + +/* Whether the test stage is updated, or completed */ +static sem_t test_stage_updated; +static sem_t test_stage_completed; + +/* + * Guest physical memory offset of the testing memory slot. + * This will be set to the topmost valid physical address minus + * the test memory size. + */ +static uint64_t guest_test_phys_mem; + +/* + * Guest virtual memory offset of the testing memory slot. + * Must not conflict with identity mapped test code. + */ +static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; + +static void guest_code(int vcpu_id) +{ + struct test_args *p = &test_args; + struct vcpu_args *vcpu_args = &p->vcpu_args[vcpu_id]; + enum test_stage *current_stage = &guest_test_stage; + uint64_t addr; + int i, j; + + /* Make sure vCPU args data structure is not corrupt */ + GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id); + + while (true) { + addr = p->guest_test_virt_mem; + + switch (READ_ONCE(*current_stage)) { + /* + * All vCPU threads will be started in this stage, + * where guest code of each vCPU will do nothing. + */ + case KVM_BEFORE_MAPPINGS: + break; + + /* + * Before dirty logging, vCPUs concurrently access the first + * 8 bytes of each page (host page/large page) within the same + * memory region with different accessing types (read/write). + * Then KVM will create normal page mappings or huge block + * mappings for them. + */ + case KVM_CREATE_MAPPINGS: + for (i = 0; i < p->large_num_pages; i++) { + if (vcpu_args->vcpu_write) + *(uint64_t *)addr = 0x0123456789ABCDEF; + else + READ_ONCE(*(uint64_t *)addr); + + addr += p->large_page_size; + } + break; + + /* + * During dirty logging, KVM will only update attributes of the + * normal page mappings from RO to RW if memory backing src type + * is anonymous. In other cases, KVM will split the huge block + * mappings into normal page mappings if memory backing src type + * is THP or HUGETLB. + */ + case KVM_UPDATE_MAPPINGS: + if (p->src_type == VM_MEM_SRC_ANONYMOUS) { + for (i = 0; i < p->host_num_pages; i++) { + *(uint64_t *)addr = 0x0123456789ABCDEF; + addr += p->host_page_size; + } + break; + } + + for (i = 0; i < p->large_num_pages; i++) { + /* + * Write to the first host page in each large + * page region, and trigger the break of large pages. + */ + *(uint64_t *)addr = 0x0123456789ABCDEF; + + /* + * Access the middle host pages in each large + * page region. Since dirty logging is enabled, + * this will create new mappings at the smallest + * granularity. + */ + addr += p->large_page_size / 2; + for (j = 0; j < p->host_pages_per_lpage / 2; j++) { + READ_ONCE(*(uint64_t *)addr); + addr += p->host_page_size; + } + } + break; + + /* + * After dirty logging is stopped, vCPUs concurrently read + * from every single host page. Then KVM will coalesce the + * split page mappings back to block mappings. And a TLB + * conflict abort could occur here if TLB entries of the + * page mappings are not fully invalidated. 
+ */ + case KVM_ADJUST_MAPPINGS: + for (i = 0; i < p->host_num_pages; i++) { + READ_ONCE(*(uint64_t *)addr); + addr += p->host_page_size; + } + break; + + default: + GUEST_ASSERT(0); + } + + GUEST_SYNC(1); + } +} + +static void *vcpu_worker(void *data) +{ + int ret; + struct vcpu_args *vcpu_args = data; + struct kvm_vm *vm = test_args.vm; + int vcpu_id = vcpu_args->vcpu_id; + struct kvm_run *run; + struct timespec start; + struct timespec ts_diff; + enum test_stage stage; + + vcpu_args_set(vm, vcpu_id, 1, vcpu_id); + run = vcpu_state(vm, vcpu_id); + + while (!READ_ONCE(host_quit)) { + ret = sem_wait(&test_stage_updated); + TEST_ASSERT(ret == 0, "Error in sem_wait"); + + if (READ_ONCE(host_quit)) + return NULL; + + clock_gettime(CLOCK_MONOTONIC_RAW, &start); + ret = _vcpu_run(vm, vcpu_id); + ts_diff = timespec_elapsed(start); + + TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret); + TEST_ASSERT(get_ucall(vm, vcpu_id, NULL) == UCALL_SYNC, + "Invalid guest sync status: exit_reason=%s\n", + exit_reason_str(run->exit_reason)); + + pr_debug("Got sync event from vCPU %d\n", vcpu_id); + stage = READ_ONCE(*current_stage); + + /* + * Here we can know the execution time of every + * single vcpu running in different test stages. + */ + pr_debug("vCPU %d has completed stage %s\n" + "execution time is: %ld.%.9lds\n\n", + vcpu_id, test_stage_string[stage], + ts_diff.tv_sec, ts_diff.tv_nsec); + + ret = sem_post(&test_stage_completed); + TEST_ASSERT(ret == 0, "Error in sem_post"); + } + + return NULL; +} + +struct test_params { + uint64_t phys_offset; + uint64_t test_mem_size; + enum vm_mem_backing_src_type src_type; +}; + +static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) +{ + int ret; + struct test_params *p = arg; + struct vcpu_args *vcpu_args; + enum vm_mem_backing_src_type src_type = p->src_type; + uint64_t large_page_size = get_backing_src_pagesz(src_type); + uint64_t guest_page_size = vm_guest_mode_params[mode].page_size; + uint64_t host_page_size = getpagesize(); + uint64_t test_mem_size = p->test_mem_size; + uint64_t guest_num_pages; + uint64_t alignment; + void *host_test_mem; + struct kvm_vm *vm; + int vcpu_id; + + /* Align up the test memory size */ + alignment = max(large_page_size, guest_page_size); + test_mem_size = (test_mem_size + alignment - 1) & ~(alignment - 1); + + /* Create a VM with enough guest pages */ + guest_num_pages = test_mem_size / guest_page_size; + vm = vm_create_with_vcpus(mode, nr_vcpus, + guest_num_pages, 0, guest_code, NULL); + + /* Align down GPA of the testing memslot */ + if (!p->phys_offset) + guest_test_phys_mem = (vm_get_max_gfn(vm) - guest_num_pages) * + guest_page_size; + else + guest_test_phys_mem = p->phys_offset; +#ifdef __s390x__ + alignment = max(0x100000, alignment); +#endif + guest_test_phys_mem &= ~(alignment - 1); + + /* Set up the shared data structure test_args */ + test_args.vm = vm; + test_args.guest_test_virt_mem = guest_test_virt_mem; + test_args.host_page_size = host_page_size; + test_args.host_num_pages = test_mem_size / host_page_size; + test_args.large_page_size = large_page_size; + test_args.large_num_pages = test_mem_size / large_page_size; + test_args.host_pages_per_lpage = large_page_size / host_page_size; + test_args.src_type = src_type; + + for (vcpu_id = 0; vcpu_id < KVM_MAX_VCPUS; vcpu_id++) { + vcpu_args = &test_args.vcpu_args[vcpu_id]; + vcpu_args->vcpu_id = vcpu_id; + vcpu_args->vcpu_write = !(vcpu_id % 2); + } + + /* Add an extra memory slot with specified backing src type */ + 
vm_userspace_mem_region_add(vm, src_type, guest_test_phys_mem, + TEST_MEM_SLOT_INDEX, guest_num_pages, 0); + + /* Do mapping(GVA->GPA) for the testing memory slot */ + virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages, 0); + + /* Cache the HVA pointer of the region */ + host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem); + + /* Export shared structure test_args to guest */ + ucall_init(vm, NULL); + sync_global_to_guest(vm, test_args); + + ret = sem_init(&test_stage_updated, 0, 0); + TEST_ASSERT(ret == 0, "Error in sem_init"); + + ret = sem_init(&test_stage_completed, 0, 0); + TEST_ASSERT(ret == 0, "Error in sem_init"); + + current_stage = addr_gva2hva(vm, (vm_vaddr_t)(&guest_test_stage)); + *current_stage = NUM_TEST_STAGES; + + pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode)); + pr_info("Testing memory backing src type: %s\n", + vm_mem_backing_src_alias(src_type)->name); + pr_info("Testing memory backing src granularity: 0x%lx\n", + large_page_size); + pr_info("Testing memory size(aligned): 0x%lx\n", test_mem_size); + pr_info("Guest physical test memory offset: 0x%lx\n", + guest_test_phys_mem); + pr_info("Host virtual test memory offset: 0x%lx\n", + (uint64_t)host_test_mem); + pr_info("Number of testing vCPUs: %d\n", nr_vcpus); + + return vm; +} + +static void vcpus_complete_new_stage(enum test_stage stage) +{ + int ret; + int vcpus; + + /* Wake up all the vcpus to run new test stage */ + for (vcpus = 0; vcpus < nr_vcpus; vcpus++) { + ret = sem_post(&test_stage_updated); + TEST_ASSERT(ret == 0, "Error in sem_post"); + } + pr_debug("All vcpus have been notified to continue\n"); + + /* Wait for all the vcpus to complete new test stage */ + for (vcpus = 0; vcpus < nr_vcpus; vcpus++) { + ret = sem_wait(&test_stage_completed); + TEST_ASSERT(ret == 0, "Error in sem_wait"); + + pr_debug("%d vcpus have completed stage %s\n", + vcpus + 1, test_stage_string[stage]); + } + + pr_debug("All vcpus have completed stage %s\n", + test_stage_string[stage]); +} + +static void run_test(enum vm_guest_mode mode, void *arg) +{ + int ret; + pthread_t *vcpu_threads; + struct kvm_vm *vm; + int vcpu_id; + struct timespec start; + struct timespec ts_diff; + + /* Create VM with vCPUs and make some pre-initialization */ + vm = pre_init_before_test(mode, arg); + + vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); + TEST_ASSERT(vcpu_threads, "Memory allocation failed"); + + host_quit = false; + *current_stage = KVM_BEFORE_MAPPINGS; + + for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { + pthread_create(&vcpu_threads[vcpu_id], NULL, vcpu_worker, + &test_args.vcpu_args[vcpu_id]); + } + + vcpus_complete_new_stage(*current_stage); + pr_info("Started all vCPUs successfully\n"); + + /* Test the stage of KVM creating mappings */ + *current_stage = KVM_CREATE_MAPPINGS; + + clock_gettime(CLOCK_MONOTONIC_RAW, &start); + vcpus_complete_new_stage(*current_stage); + ts_diff = timespec_elapsed(start); + + pr_info("KVM_CREATE_MAPPINGS: total execution time: %ld.%.9lds\n\n", + ts_diff.tv_sec, ts_diff.tv_nsec); + + /* Test the stage of KVM updating mappings */ + vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX, + KVM_MEM_LOG_DIRTY_PAGES); + + *current_stage = KVM_UPDATE_MAPPINGS; + + clock_gettime(CLOCK_MONOTONIC_RAW, &start); + vcpus_complete_new_stage(*current_stage); + ts_diff = timespec_elapsed(start); + + pr_info("KVM_UPDATE_MAPPINGS: total execution time: %ld.%.9lds\n\n", + ts_diff.tv_sec, ts_diff.tv_nsec); + + /* Test the stage of KVM adjusting mappings */ + 
vm_mem_region_set_flags(vm, TEST_MEM_SLOT_INDEX, 0); + + *current_stage = KVM_ADJUST_MAPPINGS; + + clock_gettime(CLOCK_MONOTONIC_RAW, &start); + vcpus_complete_new_stage(*current_stage); + ts_diff = timespec_elapsed(start); + + pr_info("KVM_ADJUST_MAPPINGS: total execution time: %ld.%.9lds\n\n", + ts_diff.tv_sec, ts_diff.tv_nsec); + + /* Tell the vcpu thread to quit */ + host_quit = true; + for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { + ret = sem_post(&test_stage_updated); + TEST_ASSERT(ret == 0, "Error in sem_post"); + } + + for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) + pthread_join(vcpu_threads[vcpu_id], NULL); + + ret = sem_destroy(&test_stage_updated); + TEST_ASSERT(ret == 0, "Error in sem_destroy"); + + ret = sem_destroy(&test_stage_completed); + TEST_ASSERT(ret == 0, "Error in sem_destroy"); + + free(vcpu_threads); + ucall_uninit(vm); + kvm_vm_free(vm); +} + +static void help(char *name) +{ + puts(""); + printf("usage: %s [-h] [-p offset] [-m mode] " + "[-b mem-size] [-v vcpus] [-s mem-type]\n", name); + puts(""); + printf(" -p: specify guest physical test memory offset\n" + " Warning: a low offset can conflict with the loaded test code.\n"); + guest_modes_help(); + printf(" -b: specify size of the memory region for testing. e.g. 10M or 3G.\n" + " (default: 1G)\n"); + printf(" -v: specify the number of vCPUs to run\n" + " (default: 1)\n"); + printf(" -s: specify the type of memory that should be used to\n" + " back the guest data region.\n" + " (default: anonymous)\n\n"); + backing_src_help(); + puts(""); +} + +int main(int argc, char *argv[]) +{ + int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); + struct test_params p = { + .test_mem_size = DEFAULT_TEST_MEM_SIZE, + .src_type = VM_MEM_SRC_ANONYMOUS, + }; + int opt; + + guest_modes_append_default(); + + while ((opt = getopt(argc, argv, "hp:m:b:v:s:")) != -1) { + switch (opt) { + case 'p': + p.phys_offset = strtoull(optarg, NULL, 0); + break; + case 'm': + guest_modes_cmdline(optarg); + break; + case 'b': + p.test_mem_size = parse_size(optarg); + break; + case 'v': + nr_vcpus = atoi(optarg); + TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus, + "Invalid number of vcpus, must be between 1 and %d", max_vcpus); + break; + case 's': + p.src_type = parse_backing_src_type(optarg); + break; + case 'h': + default: + help(argv[0]); + exit(0); + } + } + + for_each_guest_mode(run_test, &p); + + return 0; +} diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c index 5ebbd0d6b472..71ade6100fd3 100644 --- a/tools/testing/selftests/kvm/lib/assert.c +++ b/tools/testing/selftests/kvm/lib/assert.c @@ -71,9 +71,9 @@ test_assert(bool exp, const char *exp_str, fprintf(stderr, "==== Test Assertion Failure ====\n" " %s:%u: %s\n" - " pid=%d tid=%d - %s\n", + " pid=%d tid=%d errno=%d - %s\n", file, line, exp_str, getpid(), _gettid(), - strerror(errno)); + errno, strerror(errno)); test_dump_stack(); if (fmt) { fputs(" ", stderr); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index b8849a1aca79..28e528c19d28 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -18,7 +18,6 @@ #include <unistd.h> #include <linux/kernel.h> -#define KVM_UTIL_PGS_PER_HUGEPG 512 #define KVM_UTIL_MIN_PFN 2 static int vcpu_mmap_sz(void); @@ -33,6 +32,34 @@ static void *align(void *x, size_t size) } /* + * Open KVM_DEV_PATH if available, otherwise exit the entire program. 
+ * + * Input Args: + * flags - The flags to pass when opening KVM_DEV_PATH. + * + * Return: + * The opened file descriptor of /dev/kvm. + */ +static int _open_kvm_dev_path_or_exit(int flags) +{ + int fd; + + fd = open(KVM_DEV_PATH, flags); + if (fd < 0) { + print_skip("%s not available, is KVM loaded? (errno: %d)", + KVM_DEV_PATH, errno); + exit(KSFT_SKIP); + } + + return fd; +} + +int open_kvm_dev_path_or_exit(void) +{ + return _open_kvm_dev_path_or_exit(O_RDONLY); +} + +/* * Capability * * Input Args: @@ -53,10 +80,7 @@ int kvm_check_cap(long cap) int ret; int kvm_fd; - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); - + kvm_fd = open_kvm_dev_path_or_exit(); ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap); TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n" " rc: %i errno: %i", ret, errno); @@ -129,9 +153,7 @@ void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) static void vm_open(struct kvm_vm *vm, int perm) { - vm->kvm_fd = open(KVM_DEV_PATH, perm); - if (vm->kvm_fd < 0) - exit(KSFT_SKIP); + vm->kvm_fd = _open_kvm_dev_path_or_exit(perm); if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) { print_skip("immediate_exit not available"); @@ -143,17 +165,24 @@ static void vm_open(struct kvm_vm *vm, int perm) "rc: %i errno: %i", vm->fd, errno); } -const char * const vm_guest_mode_string[] = { - "PA-bits:52, VA-bits:48, 4K pages", - "PA-bits:52, VA-bits:48, 64K pages", - "PA-bits:48, VA-bits:48, 4K pages", - "PA-bits:48, VA-bits:48, 64K pages", - "PA-bits:40, VA-bits:48, 4K pages", - "PA-bits:40, VA-bits:48, 64K pages", - "PA-bits:ANY, VA-bits:48, 4K pages", -}; -_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES, - "Missing new mode strings?"); +const char *vm_guest_mode_string(uint32_t i) +{ + static const char * const strings[] = { + [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", + [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages", + [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages", + [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages", + [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages", + [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages", + [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages", + }; + _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES, + "Missing new mode strings?"); + + TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i); + + return strings[i]; +} const struct vm_guest_mode_params vm_guest_mode_params[] = { { 52, 48, 0x1000, 12 }, @@ -197,7 +226,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) TEST_ASSERT(vm != NULL, "Insufficient Memory"); INIT_LIST_HEAD(&vm->vcpus); - INIT_LIST_HEAD(&vm->userspace_mem_regions); + vm->regions.gpa_tree = RB_ROOT; + vm->regions.hva_tree = RB_ROOT; + hash_init(vm->regions.slot_hash); vm->mode = mode; vm->type = 0; @@ -289,7 +320,7 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus, */ uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus; uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2; - uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages; + uint64_t pages = DEFAULT_GUEST_PHY_PAGES + extra_mem_pages + vcpu_pages + extra_pg_pages; struct kvm_vm *vm; int i; @@ -349,13 +380,14 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages, */ void kvm_vm_restart(struct kvm_vm *vmp, int perm) { + int ctr; struct userspace_mem_region *region; vm_open(vmp, 
perm); if (vmp->has_irqchip) vm_create_irqchip(vmp); - list_for_each_entry(region, &vmp->userspace_mem_regions, list) { + hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region); TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n" " rc: %i errno: %i\n" @@ -418,14 +450,21 @@ uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm) static struct userspace_mem_region * userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) { - struct userspace_mem_region *region; + struct rb_node *node; - list_for_each_entry(region, &vm->userspace_mem_regions, list) { + for (node = vm->regions.gpa_tree.rb_node; node; ) { + struct userspace_mem_region *region = + container_of(node, struct userspace_mem_region, gpa_node); uint64_t existing_start = region->region.guest_phys_addr; uint64_t existing_end = region->region.guest_phys_addr + region->region.memory_size - 1; if (start <= existing_end && end >= existing_start) return region; + + if (start < existing_start) + node = node->rb_left; + else + node = node->rb_right; } return NULL; @@ -514,7 +553,7 @@ static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu) ret = munmap(vcpu->state, vcpu_mmap_sz()); TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i " "errno: %i", ret, errno); - close(vcpu->fd); + ret = close(vcpu->fd); TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i " "errno: %i", ret, errno); @@ -534,17 +573,22 @@ void kvm_vm_release(struct kvm_vm *vmp) TEST_ASSERT(ret == 0, "Close of vm fd failed,\n" " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno); - close(vmp->kvm_fd); + ret = close(vmp->kvm_fd); TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n" " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno); } static void __vm_mem_region_delete(struct kvm_vm *vm, - struct userspace_mem_region *region) + struct userspace_mem_region *region, + bool unlink) { int ret; - list_del(®ion->list); + if (unlink) { + rb_erase(®ion->gpa_node, &vm->regions.gpa_tree); + rb_erase(®ion->hva_node, &vm->regions.hva_tree); + hash_del(®ion->slot_node); + } region->region.memory_size = 0; ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region); @@ -563,14 +607,16 @@ static void __vm_mem_region_delete(struct kvm_vm *vm, */ void kvm_vm_free(struct kvm_vm *vmp) { - struct userspace_mem_region *region, *tmp; + int ctr; + struct hlist_node *node; + struct userspace_mem_region *region; if (vmp == NULL) return; /* Free userspace_mem_regions. */ - list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list) - __vm_mem_region_delete(vmp, region); + hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) + __vm_mem_region_delete(vmp, region, false); /* Free sparsebit arrays. 
*/ sparsebit_free(&vmp->vpages_valid); @@ -652,13 +698,64 @@ int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len) return 0; } +static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree, + struct userspace_mem_region *region) +{ + struct rb_node **cur, *parent; + + for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) { + struct userspace_mem_region *cregion; + + cregion = container_of(*cur, typeof(*cregion), gpa_node); + parent = *cur; + if (region->region.guest_phys_addr < + cregion->region.guest_phys_addr) + cur = &(*cur)->rb_left; + else { + TEST_ASSERT(region->region.guest_phys_addr != + cregion->region.guest_phys_addr, + "Duplicate GPA in region tree"); + + cur = &(*cur)->rb_right; + } + } + + rb_link_node(®ion->gpa_node, parent, cur); + rb_insert_color(®ion->gpa_node, gpa_tree); +} + +static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree, + struct userspace_mem_region *region) +{ + struct rb_node **cur, *parent; + + for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) { + struct userspace_mem_region *cregion; + + cregion = container_of(*cur, typeof(*cregion), hva_node); + parent = *cur; + if (region->host_mem < cregion->host_mem) + cur = &(*cur)->rb_left; + else { + TEST_ASSERT(region->host_mem != + cregion->host_mem, + "Duplicate HVA in region tree"); + + cur = &(*cur)->rb_right; + } + } + + rb_link_node(®ion->hva_node, parent, cur); + rb_insert_color(®ion->hva_node, hva_tree); +} + /* * VM Userspace Memory Region Add * * Input Args: * vm - Virtual Machine - * backing_src - Storage source for this region. - * NULL to use anonymous memory. + * src_type - Storage source for this region. + * NULL to use anonymous memory. * guest_paddr - Starting guest physical address * slot - KVM region slot * npages - Number of physical pages @@ -681,7 +778,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, { int ret; struct userspace_mem_region *region; - size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size; + size_t backing_src_pagesz = get_backing_src_pagesz(src_type); size_t alignment; TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, @@ -716,7 +813,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, (uint64_t) region->region.memory_size); /* Confirm no region with the requested slot already exists. 
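The two insert helpers above follow the canonical tools/include rbtree insertion pattern: walk to a leaf while remembering the parent, then link the new node and recolor. Reduced to a skeleton with a hypothetical keyed node type, the pattern looks like this (a sketch, assuming the same tools/include headers the selftests build against):

#include <stdint.h>
#include "linux/kernel.h"	/* container_of() */
#include "linux/rbtree.h"

struct node {
	struct rb_node n;
	uint64_t key;
};

static void tree_insert(struct rb_root *root, struct node *new)
{
	struct rb_node **cur = &root->rb_node, *parent = NULL;

	while (*cur) {
		struct node *c = container_of(*cur, struct node, n);

		parent = *cur;
		cur = new->key < c->key ? &(*cur)->rb_left
					: &(*cur)->rb_right;
	}
	/* Attach at the found leaf position, then rebalance/recolor. */
	rb_link_node(&new->n, parent, cur);
	rb_insert_color(&new->n, root);
}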
*/ - list_for_each_entry(region, &vm->userspace_mem_regions, list) { + hash_for_each_possible(vm->regions.slot_hash, region, slot_node, + slot) { if (region->region.slot != slot) continue; @@ -743,17 +841,36 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, #endif if (src_type == VM_MEM_SRC_ANONYMOUS_THP) - alignment = max(huge_page_size, alignment); + alignment = max(backing_src_pagesz, alignment); /* Add enough memory to align up if necessary */ if (alignment > 1) region->mmap_size += alignment; + region->fd = -1; + if (backing_src_is_shared(src_type)) { + int memfd_flags = MFD_CLOEXEC; + + if (src_type == VM_MEM_SRC_SHARED_HUGETLB) + memfd_flags |= MFD_HUGETLB; + + region->fd = memfd_create("kvm_selftest", memfd_flags); + TEST_ASSERT(region->fd != -1, + "memfd_create failed, errno: %i", errno); + + ret = ftruncate(region->fd, region->mmap_size); + TEST_ASSERT(ret == 0, "ftruncate failed, errno: %i", errno); + + ret = fallocate(region->fd, + FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, + region->mmap_size); + TEST_ASSERT(ret == 0, "fallocate failed, errno: %i", errno); + } + region->mmap_start = mmap(NULL, region->mmap_size, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS - | (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0), - -1, 0); + vm_mem_backing_src_alias(src_type)->flag, + region->fd, 0); TEST_ASSERT(region->mmap_start != MAP_FAILED, "test_malloc failed, mmap_start: %p errno: %i", region->mmap_start, errno); @@ -762,22 +879,13 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, region->host_mem = align(region->mmap_start, alignment); /* As needed perform madvise */ - if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) { - struct stat statbuf; - - ret = stat("/sys/kernel/mm/transparent_hugepage", &statbuf); - TEST_ASSERT(ret == 0 || (ret == -1 && errno == ENOENT), - "stat /sys/kernel/mm/transparent_hugepage"); - - TEST_ASSERT(ret == 0 || src_type != VM_MEM_SRC_ANONYMOUS_THP, - "VM_MEM_SRC_ANONYMOUS_THP requires THP to be configured in the host kernel"); - - if (ret == 0) { - ret = madvise(region->host_mem, npages * vm->page_size, - src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE); - TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %x", - region->host_mem, npages * vm->page_size, src_type); - } + if ((src_type == VM_MEM_SRC_ANONYMOUS || + src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) { + ret = madvise(region->host_mem, npages * vm->page_size, + src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE); + TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s", + region->host_mem, npages * vm->page_size, + vm_mem_backing_src_alias(src_type)->name); } region->unused_phy_pages = sparsebit_alloc(); @@ -796,8 +904,23 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, ret, errno, slot, flags, guest_paddr, (uint64_t) region->region.memory_size); - /* Add to linked-list of memory regions. */ - list_add(®ion->list, &vm->userspace_mem_regions); + /* Add to quick lookup data structures */ + vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); + vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); + hash_add(vm->regions.slot_hash, ®ion->slot_node, slot); + + /* If shared memory, create an alias. 
*/ + if (region->fd >= 0) { + region->mmap_alias = mmap(NULL, region->mmap_size, + PROT_READ | PROT_WRITE, + vm_mem_backing_src_alias(src_type)->flag, + region->fd, 0); + TEST_ASSERT(region->mmap_alias != MAP_FAILED, + "mmap of alias failed, errno: %i", errno); + + /* Align host alias address */ + region->host_alias = align(region->mmap_alias, alignment); + } } /* @@ -820,10 +943,10 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot) { struct userspace_mem_region *region; - list_for_each_entry(region, &vm->userspace_mem_regions, list) { + hash_for_each_possible(vm->regions.slot_hash, region, slot_node, + memslot) if (region->region.slot == memslot) return region; - } fprintf(stderr, "No mem region with the requested slot found,\n" " requested slot: %u\n", memslot); @@ -908,7 +1031,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) */ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) { - __vm_mem_region_delete(vm, memslot2region(vm, slot)); + __vm_mem_region_delete(vm, memslot2region(vm, slot), true); } /* @@ -928,9 +1051,7 @@ static int vcpu_mmap_sz(void) { int dev_fd, ret; - dev_fd = open(KVM_DEV_PATH, O_RDONLY); - if (dev_fd < 0) - exit(KSFT_SKIP); + dev_fd = open_kvm_dev_path_or_exit(); ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); TEST_ASSERT(ret >= sizeof(struct kvm_run), @@ -1102,6 +1223,9 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); virt_pgd_alloc(vm, pgd_memslot); + vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages, + KVM_UTIL_MIN_PFN * vm->page_size, + data_memslot); /* * Find an unused range of virtual page addresses of at least @@ -1111,11 +1235,7 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, /* Map the virtual pages. 
*/ for (vm_vaddr_t vaddr = vaddr_start; pages > 0; - pages--, vaddr += vm->page_size) { - vm_paddr_t paddr; - - paddr = vm_phy_page_alloc(vm, - KVM_UTIL_MIN_PFN * vm->page_size, data_memslot); + pages--, vaddr += vm->page_size, paddr += vm->page_size) { virt_pg_map(vm, vaddr, paddr, pgd_memslot); @@ -1180,16 +1300,14 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) { struct userspace_mem_region *region; - list_for_each_entry(region, &vm->userspace_mem_regions, list) { - if ((gpa >= region->region.guest_phys_addr) - && (gpa <= (region->region.guest_phys_addr - + region->region.memory_size - 1))) - return (void *) ((uintptr_t) region->host_mem - + (gpa - region->region.guest_phys_addr)); + region = userspace_mem_region_find(vm, gpa, gpa); + if (!region) { + TEST_FAIL("No vm physical memory at 0x%lx", gpa); + return NULL; } - TEST_FAIL("No vm physical memory at 0x%lx", gpa); - return NULL; + return (void *)((uintptr_t)region->host_mem + + (gpa - region->region.guest_phys_addr)); } /* @@ -1211,15 +1329,22 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) */ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) { - struct userspace_mem_region *region; + struct rb_node *node; + + for (node = vm->regions.hva_tree.rb_node; node; ) { + struct userspace_mem_region *region = + container_of(node, struct userspace_mem_region, hva_node); + + if (hva >= region->host_mem) { + if (hva <= (region->host_mem + + region->region.memory_size - 1)) + return (vm_paddr_t)((uintptr_t) + region->region.guest_phys_addr + + (hva - (uintptr_t)region->host_mem)); - list_for_each_entry(region, &vm->userspace_mem_regions, list) { - if ((hva >= region->host_mem) - && (hva <= (region->host_mem - + region->region.memory_size - 1))) - return (vm_paddr_t) ((uintptr_t) - region->region.guest_phys_addr - + (hva - (uintptr_t) region->host_mem)); + node = node->rb_right; + } else + node = node->rb_left; } TEST_FAIL("No mapping to a guest physical address, hva: %p", hva); @@ -1227,6 +1352,42 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) } /* + * Address VM physical to Host Virtual *alias*. + * + * Input Args: + * vm - Virtual Machine + * gpa - VM physical address + * + * Output Args: None + * + * Return: + * Equivalent address within the host virtual *alias* area, or NULL + * (without failing the test) if the guest memory is not shared (so + * no alias exists). + * + * When vm_create() and related functions are called with a shared memory + * src_type, we also create a writable, shared alias mapping of the + * underlying guest memory. This allows the host to manipulate guest memory + * without mapping that memory in the guest's address space. And, for + * userfaultfd-based demand paging, we can do so without triggering userfaults. 
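As a usage sketch (not in this patch; the helper name is hypothetical), a caller can populate guest memory through the alias so that the guest-visible mapping still takes a MINOR fault on first access:

#include <string.h>

#include "kvm_util.h"
#include "test_util.h"

/* Fill [gpa, gpa + len) via the shared alias, leaving the guest
 * mapping untouched so its first access still MINOR-faults. */
static void prefault_via_alias(struct kvm_vm *vm, vm_paddr_t gpa,
			       size_t len, int pattern)
{
	void *alias = addr_gpa2alias(vm, gpa);

	TEST_ASSERT(alias != NULL, "backing source must be shared");
	memset(alias, pattern, len);
}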
+ */ +void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) +{ + struct userspace_mem_region *region; + uintptr_t offset; + + region = userspace_mem_region_find(vm, gpa, gpa); + if (!region) + return NULL; + + if (!region->host_alias) + return NULL; + + offset = gpa - region->region.guest_phys_addr; + return (void *) ((uintptr_t) region->host_alias + offset); +} + +/* * VM Create IRQ Chip * * Input Args: @@ -1734,6 +1895,81 @@ int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) } /* + * Device Ioctl + */ + +int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr) +{ + struct kvm_device_attr attribute = { + .group = group, + .attr = attr, + .flags = 0, + }; + + return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute); +} + +int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr) +{ + int ret = _kvm_device_check_attr(dev_fd, group, attr); + + TEST_ASSERT(ret >= 0, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno); + return ret; +} + +int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd) +{ + struct kvm_create_device create_dev; + int ret; + + create_dev.type = type; + create_dev.fd = -1; + create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0; + ret = ioctl(vm_get_fd(vm), KVM_CREATE_DEVICE, &create_dev); + *fd = create_dev.fd; + return ret; +} + +int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test) +{ + int fd, ret; + + ret = _kvm_create_device(vm, type, test, &fd); + + if (!test) { + TEST_ASSERT(ret >= 0, + "KVM_CREATE_DEVICE IOCTL failed, rc: %i errno: %i", ret, errno); + return fd; + } + return ret; +} + +int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, + void *val, bool write) +{ + struct kvm_device_attr kvmattr = { + .group = group, + .attr = attr, + .flags = 0, + .addr = (uintptr_t)val, + }; + int ret; + + ret = ioctl(dev_fd, write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR, + &kvmattr); + return ret; +} + +int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, + void *val, bool write) +{ + int ret = _kvm_device_access(dev_fd, group, attr, val, write); + + TEST_ASSERT(ret >= 0, "KVM_SET|GET_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno); + return ret; +} + +/* * VM Dump * * Input Args: @@ -1750,6 +1986,7 @@ int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) */ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) { + int ctr; struct userspace_mem_region *region; struct vcpu *vcpu; @@ -1757,7 +1994,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); fprintf(stream, "%*sMem Regions:\n", indent, ""); - list_for_each_entry(region, &vm->userspace_mem_regions, list) { + hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx " "host_virt: %p\n", indent + 2, "", (uint64_t) region->region.guest_phys_addr, @@ -1943,10 +2180,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm) if (vm == NULL) { /* Ensure that the KVM vendor-specific module is loaded. 
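A hedged usage sketch of the new device helpers follows; dev_type, group, and attr stand in for real KVM_DEV_* constants, and the flow mirrors the intended probe-then-create idiom (probe with the KVM_CREATE_DEVICE_TEST flag first, then create for real):

#include <stdint.h>

#include "kvm_util.h"

/* Probe for a device type, create it, then read back one attribute.
 * All constants here are placeholders. */
static int create_and_read_attr(struct kvm_vm *vm, uint64_t dev_type,
				uint32_t group, uint64_t attr,
				uint64_t *val)
{
	int fd;

	if (_kvm_create_device(vm, dev_type, true /* test only */, &fd))
		return -1;			/* type unsupported */

	fd = kvm_create_device(vm, dev_type, false);
	if (!_kvm_device_check_attr(fd, group, attr))
		kvm_device_access(fd, group, attr, val, false /* read */);
	return fd;
}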
*/ - f = fopen(KVM_DEV_PATH, "r"); - TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d", - errno); - fclose(f); + close(open_kvm_dev_path_or_exit()); } f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r"); @@ -1969,7 +2203,7 @@ unsigned int vm_get_page_shift(struct kvm_vm *vm) return vm->page_shift; } -unsigned int vm_get_max_gfn(struct kvm_vm *vm) +uint64_t vm_get_max_gfn(struct kvm_vm *vm) { return vm->max_gfn; } diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h index 91ce1b5d480b..a03febc24ba6 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h +++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h @@ -8,6 +8,9 @@ #ifndef SELFTEST_KVM_UTIL_INTERNAL_H #define SELFTEST_KVM_UTIL_INTERNAL_H +#include "linux/hashtable.h" +#include "linux/rbtree.h" + #include "sparsebit.h" struct userspace_mem_region { @@ -16,9 +19,13 @@ struct userspace_mem_region { int fd; off_t offset; void *host_mem; + void *host_alias; void *mmap_start; + void *mmap_alias; size_t mmap_size; - struct list_head list; + struct rb_node gpa_node; + struct rb_node hva_node; + struct hlist_node slot_node; }; struct vcpu { @@ -31,6 +38,12 @@ struct vcpu { uint32_t dirty_gfns_count; }; +struct userspace_mem_regions { + struct rb_root gpa_tree; + struct rb_root hva_tree; + DECLARE_HASHTABLE(slot_hash, 9); +}; + struct kvm_vm { int mode; unsigned long type; @@ -43,7 +56,7 @@ struct kvm_vm { unsigned int va_bits; uint64_t max_gfn; struct list_head vcpus; - struct list_head userspace_mem_regions; + struct userspace_mem_regions regions; struct sparsebit *vpages_valid; struct sparsebit *vpages_mapped; bool has_irqchip; diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c index 81490b9b4e32..abf381800a59 100644 --- a/tools/testing/selftests/kvm/lib/perf_test_util.c +++ b/tools/testing/selftests/kvm/lib/perf_test_util.c @@ -2,6 +2,7 @@ /* * Copyright (C) 2020, Google LLC. 
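The format-string fix in the next hunk exists because vm_get_max_gfn() now returns uint64_t; %x and %lx are only correct for that type on some ABIs, while the <inttypes.h> macros always expand to the right length modifier. A minimal reference, independent of the selftests:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_gfn = 0xfffffULL;

	/* PRIx64 expands to "lx" on LP64 hosts and "llx" on 32-bit
	 * ABIs, so the call is portable either way. */
	printf("max gfn: %" PRIx64 "\n", max_gfn);
	return 0;
}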
*/ +#include <inttypes.h> #include "kvm_util.h" #include "perf_test_util.h" @@ -80,7 +81,8 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, */ TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm), "Requested more guest memory than address space allows.\n" - " guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n", + " guest pages: %" PRIx64 " max gfn: %" PRIx64 + " vcpus: %d wss: %" PRIx64 "]\n", guest_num_pages, vm_get_max_gfn(vm), vcpus, vcpu_memory_bytes); diff --git a/tools/testing/selftests/kvm/lib/rbtree.c b/tools/testing/selftests/kvm/lib/rbtree.c new file mode 100644 index 000000000000..a703f0194ea3 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/rbtree.c @@ -0,0 +1 @@ +#include "../../../../lib/rbtree.c" diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c index 031ba3c932ed..a0d0c83d83de 100644 --- a/tools/testing/selftests/kvm/lib/sparsebit.c +++ b/tools/testing/selftests/kvm/lib/sparsebit.c @@ -1890,7 +1890,6 @@ void sparsebit_validate_internal(struct sparsebit *s) */ #include <stdlib.h> -#include <assert.h> struct range { sparsebit_idx_t first, last; diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c index 906c955384e2..6ad6c8276b2e 100644 --- a/tools/testing/selftests/kvm/lib/test_util.c +++ b/tools/testing/selftests/kvm/lib/test_util.c @@ -10,6 +10,8 @@ #include <limits.h> #include <stdlib.h> #include <time.h> +#include <sys/stat.h> +#include <linux/mman.h> #include "linux/kernel.h" #include "test_util.h" @@ -111,28 +113,188 @@ void print_skip(const char *fmt, ...) puts(", skipping test"); } -const struct vm_mem_backing_src_alias backing_src_aliases[] = { - {"anonymous", VM_MEM_SRC_ANONYMOUS,}, - {"anonymous_thp", VM_MEM_SRC_ANONYMOUS_THP,}, - {"anonymous_hugetlb", VM_MEM_SRC_ANONYMOUS_HUGETLB,}, -}; +bool thp_configured(void) +{ + int ret; + struct stat statbuf; + + ret = stat("/sys/kernel/mm/transparent_hugepage", &statbuf); + TEST_ASSERT(ret == 0 || (ret == -1 && errno == ENOENT), + "stat() failed on /sys/kernel/mm/transparent_hugepage"); + + return ret == 0; +} + +size_t get_trans_hugepagesz(void) +{ + size_t size; + FILE *f; + + TEST_ASSERT(thp_configured(), "THP is not configured in the host kernel"); + + f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r"); + TEST_ASSERT(f != NULL, "Error in opening transparent_hugepage/hpage_pmd_size"); + + fscanf(f, "%zu", &size); + fclose(f); + + return size; +} + +size_t get_def_hugetlb_pagesz(void) +{ + char buf[64]; + const char *tag = "Hugepagesize:"; + FILE *f; + + f = fopen("/proc/meminfo", "r"); + TEST_ASSERT(f != NULL, "Error in opening /proc/meminfo"); + + while (fgets(buf, sizeof(buf), f) != NULL) { + if (strstr(buf, tag) == buf) { + fclose(f); + return strtoull(buf + strlen(tag), NULL, 10) << 10; + } + } + + if (feof(f)) + TEST_FAIL("HUGETLB is not configured in the host kernel"); + else + TEST_FAIL("Error in reading /proc/meminfo"); + + fclose(f); + return 0; +} + +const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i) +{ + static const int anon_flags = MAP_PRIVATE | MAP_ANONYMOUS; + static const int anon_huge_flags = anon_flags | MAP_HUGETLB; + + static const struct vm_mem_backing_src_alias aliases[] = { + [VM_MEM_SRC_ANONYMOUS] = { + .name = "anonymous", + .flag = anon_flags, + }, + [VM_MEM_SRC_ANONYMOUS_THP] = { + .name = "anonymous_thp", + .flag = anon_flags, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB] = { + .name = "anonymous_hugetlb", + .flag = anon_huge_flags, + }, + 
[VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB] = { + .name = "anonymous_hugetlb_16kb", + .flag = anon_huge_flags | MAP_HUGE_16KB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB] = { + .name = "anonymous_hugetlb_64kb", + .flag = anon_huge_flags | MAP_HUGE_64KB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB] = { + .name = "anonymous_hugetlb_512kb", + .flag = anon_huge_flags | MAP_HUGE_512KB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB] = { + .name = "anonymous_hugetlb_1mb", + .flag = anon_huge_flags | MAP_HUGE_1MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB] = { + .name = "anonymous_hugetlb_2mb", + .flag = anon_huge_flags | MAP_HUGE_2MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB] = { + .name = "anonymous_hugetlb_8mb", + .flag = anon_huge_flags | MAP_HUGE_8MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB] = { + .name = "anonymous_hugetlb_16mb", + .flag = anon_huge_flags | MAP_HUGE_16MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB] = { + .name = "anonymous_hugetlb_32mb", + .flag = anon_huge_flags | MAP_HUGE_32MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB] = { + .name = "anonymous_hugetlb_256mb", + .flag = anon_huge_flags | MAP_HUGE_256MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB] = { + .name = "anonymous_hugetlb_512mb", + .flag = anon_huge_flags | MAP_HUGE_512MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB] = { + .name = "anonymous_hugetlb_1gb", + .flag = anon_huge_flags | MAP_HUGE_1GB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB] = { + .name = "anonymous_hugetlb_2gb", + .flag = anon_huge_flags | MAP_HUGE_2GB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB] = { + .name = "anonymous_hugetlb_16gb", + .flag = anon_huge_flags | MAP_HUGE_16GB, + }, + [VM_MEM_SRC_SHMEM] = { + .name = "shmem", + .flag = MAP_SHARED, + }, + [VM_MEM_SRC_SHARED_HUGETLB] = { + .name = "shared_hugetlb", + /* + * No MAP_HUGETLB, we use MFD_HUGETLB instead. Since + * we're using "file backed" memory, we need to specify + * this when the FD is created, not when the area is + * mapped. 
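The comment above describes the FD-backed route; standalone, it looks like the following sketch (assuming glibc's memfd_create() wrapper and a configured default hugepage pool; the 2 MiB size is only the common default):

#define _GNU_SOURCE
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* one default-size huge page */
	int fd = memfd_create("huge_demo", MFD_CLOEXEC | MFD_HUGETLB);
	void *mem;

	if (fd < 0 || ftruncate(fd, len))
		return 1;	/* most likely: no hugetlb pool configured */

	/* Hugeness comes from the memfd itself, so the mapping is a
	 * plain shared file mapping; MAP_HUGETLB is not passed here. */
	mem = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	return mem == MAP_FAILED;
}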
+ */ + .flag = MAP_SHARED, + }, + }; + _Static_assert(ARRAY_SIZE(aliases) == NUM_SRC_TYPES, + "Missing new backing src types?"); + + TEST_ASSERT(i < NUM_SRC_TYPES, "Backing src type ID %d too big", i); + + return &aliases[i]; +} + +#define MAP_HUGE_PAGE_SIZE(x) (1ULL << ((x >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK)) + +size_t get_backing_src_pagesz(uint32_t i) +{ + uint32_t flag = vm_mem_backing_src_alias(i)->flag; + + switch (i) { + case VM_MEM_SRC_ANONYMOUS: + case VM_MEM_SRC_SHMEM: + return getpagesize(); + case VM_MEM_SRC_ANONYMOUS_THP: + return get_trans_hugepagesz(); + case VM_MEM_SRC_ANONYMOUS_HUGETLB: + case VM_MEM_SRC_SHARED_HUGETLB: + return get_def_hugetlb_pagesz(); + default: + return MAP_HUGE_PAGE_SIZE(flag); + } +} void backing_src_help(void) { int i; printf("Available backing src types:\n"); - for (i = 0; i < ARRAY_SIZE(backing_src_aliases); i++) - printf("\t%s\n", backing_src_aliases[i].name); + for (i = 0; i < NUM_SRC_TYPES; i++) + printf("\t%s\n", vm_mem_backing_src_alias(i)->name); } enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name) { int i; - for (i = 0; i < ARRAY_SIZE(backing_src_aliases); i++) - if (!strcmp(type_name, backing_src_aliases[i].name)) - return backing_src_aliases[i].type; + for (i = 0; i < NUM_SRC_TYPES; i++) + if (!strcmp(type_name, vm_mem_backing_src_alias(i)->name)) + return i; backing_src_help(); TEST_FAIL("Unknown backing src type: %s", type_name); diff --git a/tools/testing/selftests/kvm/lib/x86_64/handlers.S b/tools/testing/selftests/kvm/lib/x86_64/handlers.S index aaf7bc7d2ce1..7629819734af 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/handlers.S +++ b/tools/testing/selftests/kvm/lib/x86_64/handlers.S @@ -54,9 +54,9 @@ idt_handlers: .align 8 /* Fetch current address and append it to idt_handlers. */ - current_handler = . +666 : .pushsection .rodata -.quad current_handler + .quad 666b .popsection .if ! 
\has_error diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index a8906e60a108..efe235044421 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -657,9 +657,7 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void) return cpuid; cpuid = allocate_kvm_cpuid2(); - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); + kvm_fd = open_kvm_dev_path_or_exit(); ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid); TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n", @@ -691,9 +689,7 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index) buffer.header.nmsrs = 1; buffer.entry.index = msr_index; - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); + kvm_fd = open_kvm_dev_path_or_exit(); r = ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header); TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n" @@ -986,9 +982,7 @@ struct kvm_msr_list *kvm_get_msr_index_list(void) struct kvm_msr_list *list; int nmsrs, r, kvm_fd; - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); + kvm_fd = open_kvm_dev_path_or_exit(); nmsrs = kvm_get_num_msrs_fd(kvm_fd); list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0])); @@ -1312,9 +1306,7 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void) return cpuid; cpuid = allocate_kvm_cpuid2(); - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); + kvm_fd = open_kvm_dev_path_or_exit(); ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid); TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_HV_CPUID failed %d %d\n", diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c index 6096bf0a5b34..98351ba0933c 100644 --- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c +++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c @@ -71,14 +71,22 @@ struct memslot_antagonist_args { }; static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, - uint64_t nr_modifications, uint64_t gpa) + uint64_t nr_modifications) { + const uint64_t pages = 1; + uint64_t gpa; int i; + /* + * Add the dummy memslot just below the perf_test_util memslot, which is + * at the top of the guest physical address space. + */ + gpa = guest_test_phys_mem - pages * vm_get_page_size(vm); + for (i = 0; i < nr_modifications; i++) { usleep(delay); vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, - DUMMY_MEMSLOT_INDEX, 1, 0); + DUMMY_MEMSLOT_INDEX, pages, 0); vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX); } @@ -120,11 +128,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) pr_info("Started all vCPUs\n"); add_remove_memslot(vm, p->memslot_modification_delay, - p->nr_memslot_modifications, - guest_test_phys_mem + - (guest_percpu_mem_size * nr_vcpus) + - perf_test_args.host_page_size + - perf_test_args.guest_page_size); + p->nr_memslot_modifications); run_vcpus = false; diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c new file mode 100644 index 000000000000..9307f25d8130 --- /dev/null +++ b/tools/testing/selftests/kvm/memslot_perf_test.c @@ -0,0 +1,1037 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * A memslot-related performance benchmark. + * + * Copyright (C) 2021 Oracle and/or its affiliates. + * + * Basic guest setup / host vCPU thread code lifted from set_memory_region_test. 
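+ *
+ * The benchmark repeatedly exercises the memslot move and host-side
+ * map/unmap paths (plus a plain read/write baseline) while a guest vCPU
+ * keeps touching the affected memory, and reports the best time per
+ * iteration.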
+ */ +#include <pthread.h> +#include <sched.h> +#include <semaphore.h> +#include <stdatomic.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mman.h> +#include <time.h> +#include <unistd.h> + +#include <linux/compiler.h> + +#include <test_util.h> +#include <kvm_util.h> +#include <processor.h> + +#define VCPU_ID 0 + +#define MEM_SIZE ((512U << 20) + 4096) +#define MEM_SIZE_PAGES (MEM_SIZE / 4096) +#define MEM_GPA 0x10000000UL +#define MEM_AUX_GPA MEM_GPA +#define MEM_SYNC_GPA MEM_AUX_GPA +#define MEM_TEST_GPA (MEM_AUX_GPA + 4096) +#define MEM_TEST_SIZE (MEM_SIZE - 4096) +static_assert(MEM_SIZE % 4096 == 0, "invalid mem size"); +static_assert(MEM_TEST_SIZE % 4096 == 0, "invalid mem test size"); + +/* + * 32 MiB is max size that gets well over 100 iterations on 509 slots. + * Considering that each slot needs to have at least one page up to + * 8194 slots in use can then be tested (although with slightly + * limited resolution). + */ +#define MEM_SIZE_MAP ((32U << 20) + 4096) +#define MEM_SIZE_MAP_PAGES (MEM_SIZE_MAP / 4096) +#define MEM_TEST_MAP_SIZE (MEM_SIZE_MAP - 4096) +#define MEM_TEST_MAP_SIZE_PAGES (MEM_TEST_MAP_SIZE / 4096) +static_assert(MEM_SIZE_MAP % 4096 == 0, "invalid map test region size"); +static_assert(MEM_TEST_MAP_SIZE % 4096 == 0, "invalid map test region size"); +static_assert(MEM_TEST_MAP_SIZE_PAGES % 2 == 0, "invalid map test region size"); +static_assert(MEM_TEST_MAP_SIZE_PAGES > 2, "invalid map test region size"); + +/* + * 128 MiB is min size that fills 32k slots with at least one page in each + * while at the same time gets 100+ iterations in such test + */ +#define MEM_TEST_UNMAP_SIZE (128U << 20) +#define MEM_TEST_UNMAP_SIZE_PAGES (MEM_TEST_UNMAP_SIZE / 4096) +/* 2 MiB chunk size like a typical huge page */ +#define MEM_TEST_UNMAP_CHUNK_PAGES (2U << (20 - 12)) +static_assert(MEM_TEST_UNMAP_SIZE <= MEM_TEST_SIZE, + "invalid unmap test region size"); +static_assert(MEM_TEST_UNMAP_SIZE % 4096 == 0, + "invalid unmap test region size"); +static_assert(MEM_TEST_UNMAP_SIZE_PAGES % + (2 * MEM_TEST_UNMAP_CHUNK_PAGES) == 0, + "invalid unmap test region size"); + +/* + * For the move active test the middle of the test area is placed on + * a memslot boundary: half lies in the memslot being moved, half in + * other memslot(s). + * + * When running this test with 32k memslots (32764, really) each memslot + * contains 4 pages. + * The last one additionally contains the remaining 21 pages of memory, + * for the total size of 25 pages. + * Hence, the maximum size here is 50 pages. + */ +#define MEM_TEST_MOVE_SIZE_PAGES (50) +#define MEM_TEST_MOVE_SIZE (MEM_TEST_MOVE_SIZE_PAGES * 4096) +#define MEM_TEST_MOVE_GPA_DEST (MEM_GPA + MEM_SIZE) +static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE, + "invalid move test region size"); + +#define MEM_TEST_VAL_1 0x1122334455667788 +#define MEM_TEST_VAL_2 0x99AABBCCDDEEFF00 + +struct vm_data { + struct kvm_vm *vm; + pthread_t vcpu_thread; + uint32_t nslots; + uint64_t npages; + uint64_t pages_per_slot; + void **hva_slots; + bool mmio_ok; + uint64_t mmio_gpa_min; + uint64_t mmio_gpa_max; +}; + +struct sync_area { + atomic_bool start_flag; + atomic_bool exit_flag; + atomic_bool sync_flag; + void *move_area_ptr; +}; + +/* + * Technically, we need also for the atomic bool to be address-free, which + * is recommended, but not strictly required, by C11 for lockless + * implementations. 
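+ * Address-free operation matters here because the host and the guest
+ * access the same sync structure through different virtual mappings.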
+ * However, in practice both GCC and Clang fulfill this requirement on + * all KVM-supported platforms. + */ +static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "atomic bool is not lockless"); + +static sem_t vcpu_ready; + +static bool map_unmap_verify; + +static bool verbose; +#define pr_info_v(...) \ + do { \ + if (verbose) \ + pr_info(__VA_ARGS__); \ + } while (0) + +static void *vcpu_worker(void *data) +{ + struct vm_data *vm = data; + struct kvm_run *run; + struct ucall uc; + uint64_t cmd; + + run = vcpu_state(vm->vm, VCPU_ID); + while (1) { + vcpu_run(vm->vm, VCPU_ID); + + if (run->exit_reason == KVM_EXIT_IO) { + cmd = get_ucall(vm->vm, VCPU_ID, &uc); + if (cmd != UCALL_SYNC) + break; + + sem_post(&vcpu_ready); + continue; + } + + if (run->exit_reason != KVM_EXIT_MMIO) + break; + + TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit"); + TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read"); + TEST_ASSERT(run->mmio.len == 8, + "Unexpected exit mmio size = %u", run->mmio.len); + TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min && + run->mmio.phys_addr <= vm->mmio_gpa_max, + "Unexpected exit mmio address = 0x%llx", + run->mmio.phys_addr); + } + + if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT) + TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0], + __FILE__, uc.args[1], uc.args[2]); + + return NULL; +} + +static void wait_for_vcpu(void) +{ + struct timespec ts; + + TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts), + "clock_gettime() failed: %d\n", errno); + + ts.tv_sec += 2; + TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts), + "sem_timedwait() failed: %d\n", errno); +} + +static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages) +{ + uint64_t gpage, pgoffs; + uint32_t slot, slotoffs; + void *base; + + TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate"); + TEST_ASSERT(gpa < MEM_GPA + data->npages * 4096, + "Too high gpa to translate"); + gpa -= MEM_GPA; + + gpage = gpa / 4096; + pgoffs = gpa % 4096; + slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1); + slotoffs = gpage - (slot * data->pages_per_slot); + + if (rempages) { + uint64_t slotpages; + + if (slot == data->nslots - 1) + slotpages = data->npages - slot * data->pages_per_slot; + else + slotpages = data->pages_per_slot; + + TEST_ASSERT(!pgoffs, + "Asking for remaining pages in slot but gpa not page aligned"); + *rempages = slotpages - slotoffs; + } + + base = data->hva_slots[slot]; + return (uint8_t *)base + slotoffs * 4096 + pgoffs; +} + +static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot) +{ + TEST_ASSERT(slot < data->nslots, "Too high slot number"); + + return MEM_GPA + slot * data->pages_per_slot * 4096; +} + +static struct vm_data *alloc_vm(void) +{ + struct vm_data *data; + + data = malloc(sizeof(*data)); + TEST_ASSERT(data, "malloc(vmdata) failed"); + + data->vm = NULL; + data->hva_slots = NULL; + + return data; +} + +static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, + void *guest_code, uint64_t mempages, + struct timespec *slot_runtime) +{ + uint32_t max_mem_slots; + uint64_t rempages; + uint64_t guest_addr; + uint32_t slot; + struct timespec tstart; + struct sync_area *sync; + + max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS); + TEST_ASSERT(max_mem_slots > 1, + "KVM_CAP_NR_MEMSLOTS should be greater than 1"); + TEST_ASSERT(nslots > 1 || nslots == -1, + "Slot count cap should be greater than 1"); + if (nslots != -1) + max_mem_slots = min(max_mem_slots, (uint32_t)nslots); + pr_info_v("Allowed number of memory slots: 
%"PRIu32"\n", max_mem_slots); + + TEST_ASSERT(mempages > 1, + "Can't test without any memory"); + + data->npages = mempages; + data->nslots = max_mem_slots - 1; + data->pages_per_slot = mempages / data->nslots; + if (!data->pages_per_slot) { + *maxslots = mempages + 1; + return false; + } + + rempages = mempages % data->nslots; + data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots); + TEST_ASSERT(data->hva_slots, "malloc() fail"); + + data->vm = vm_create_default(VCPU_ID, 1024, guest_code); + + pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n", + max_mem_slots - 1, data->pages_per_slot, rempages); + + clock_gettime(CLOCK_MONOTONIC, &tstart); + for (slot = 1, guest_addr = MEM_GPA; slot < max_mem_slots; slot++) { + uint64_t npages; + + npages = data->pages_per_slot; + if (slot == max_mem_slots - 1) + npages += rempages; + + vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS, + guest_addr, slot, npages, + 0); + guest_addr += npages * 4096; + } + *slot_runtime = timespec_elapsed(tstart); + + for (slot = 0, guest_addr = MEM_GPA; slot < max_mem_slots - 1; slot++) { + uint64_t npages; + uint64_t gpa; + + npages = data->pages_per_slot; + if (slot == max_mem_slots - 2) + npages += rempages; + + gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, + slot + 1); + TEST_ASSERT(gpa == guest_addr, + "vm_phy_pages_alloc() failed\n"); + + data->hva_slots[slot] = addr_gpa2hva(data->vm, guest_addr); + memset(data->hva_slots[slot], 0, npages * 4096); + + guest_addr += npages * 4096; + } + + virt_map(data->vm, MEM_GPA, MEM_GPA, mempages, 0); + + sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); + atomic_init(&sync->start_flag, false); + atomic_init(&sync->exit_flag, false); + atomic_init(&sync->sync_flag, false); + + data->mmio_ok = false; + + return true; +} + +static void launch_vm(struct vm_data *data) +{ + pr_info_v("Launching the test VM\n"); + + pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data); + + /* Ensure the guest thread is spun up. */ + wait_for_vcpu(); +} + +static void free_vm(struct vm_data *data) +{ + kvm_vm_free(data->vm); + free(data->hva_slots); + free(data); +} + +static void wait_guest_exit(struct vm_data *data) +{ + pthread_join(data->vcpu_thread, NULL); +} + +static void let_guest_run(struct sync_area *sync) +{ + atomic_store_explicit(&sync->start_flag, true, memory_order_release); +} + +static void guest_spin_until_start(void) +{ + struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; + + while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire)) + ; +} + +static void make_guest_exit(struct sync_area *sync) +{ + atomic_store_explicit(&sync->exit_flag, true, memory_order_release); +} + +static bool _guest_should_exit(void) +{ + struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; + + return atomic_load_explicit(&sync->exit_flag, memory_order_acquire); +} + +#define guest_should_exit() unlikely(_guest_should_exit()) + +/* + * noinline so we can easily see how much time the host spends waiting + * for the guest. + * For the same reason use alarm() instead of polling clock_gettime() + * to implement a wait timeout. 
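+ * If the guest never consumes the sync flag, the pending SIGALRM
+ * terminates the test instead of letting it hang.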
+ */ +static noinline void host_perform_sync(struct sync_area *sync) +{ + alarm(2); + + atomic_store_explicit(&sync->sync_flag, true, memory_order_release); + while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire)) + ; + + alarm(0); +} + +static bool guest_perform_sync(void) +{ + struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; + bool expected; + + do { + if (guest_should_exit()) + return false; + + expected = true; + } while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag, + &expected, false, + memory_order_acq_rel, + memory_order_relaxed)); + + return true; +} + +static void guest_code_test_memslot_move(void) +{ + struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; + uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr); + + GUEST_SYNC(0); + + guest_spin_until_start(); + + while (!guest_should_exit()) { + uintptr_t ptr; + + for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE; + ptr += 4096) + *(uint64_t *)ptr = MEM_TEST_VAL_1; + + /* + * No host sync here since the MMIO exits are so expensive + * that the host would spend most of its time waiting for + * the guest and so instead of measuring memslot move + * performance we would measure the performance and + * likelihood of MMIO exits + */ + } + + GUEST_DONE(); +} + +static void guest_code_test_memslot_map(void) +{ + struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; + + GUEST_SYNC(0); + + guest_spin_until_start(); + + while (1) { + uintptr_t ptr; + + for (ptr = MEM_TEST_GPA; + ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; ptr += 4096) + *(uint64_t *)ptr = MEM_TEST_VAL_1; + + if (!guest_perform_sync()) + break; + + for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; + ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE; ptr += 4096) + *(uint64_t *)ptr = MEM_TEST_VAL_2; + + if (!guest_perform_sync()) + break; + } + + GUEST_DONE(); +} + +static void guest_code_test_memslot_unmap(void) +{ + struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; + + GUEST_SYNC(0); + + guest_spin_until_start(); + + while (1) { + uintptr_t ptr = MEM_TEST_GPA; + + /* + * We can afford to access (map) just a small number of pages + * per host sync as otherwise the host will spend + * a significant amount of its time waiting for the guest + * (instead of doing unmap operations), so this will + * effectively turn this test into a map performance test. + * + * Just access a single page to be on the safe side. 
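+		 * (The host side unmaps with madvise(MADV_DONTNEED), so each
+		 * write here may fault a fresh page back in.)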
+ */ + *(uint64_t *)ptr = MEM_TEST_VAL_1; + + if (!guest_perform_sync()) + break; + + ptr += MEM_TEST_UNMAP_SIZE / 2; + *(uint64_t *)ptr = MEM_TEST_VAL_2; + + if (!guest_perform_sync()) + break; + } + + GUEST_DONE(); +} + +static void guest_code_test_memslot_rw(void) +{ + GUEST_SYNC(0); + + guest_spin_until_start(); + + while (1) { + uintptr_t ptr; + + for (ptr = MEM_TEST_GPA; + ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096) + *(uint64_t *)ptr = MEM_TEST_VAL_1; + + if (!guest_perform_sync()) + break; + + for (ptr = MEM_TEST_GPA + 4096 / 2; + ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096) { + uint64_t val = *(uint64_t *)ptr; + + GUEST_ASSERT_1(val == MEM_TEST_VAL_2, val); + *(uint64_t *)ptr = 0; + } + + if (!guest_perform_sync()) + break; + } + + GUEST_DONE(); +} + +static bool test_memslot_move_prepare(struct vm_data *data, + struct sync_area *sync, + uint64_t *maxslots, bool isactive) +{ + uint64_t movesrcgpa, movetestgpa; + + movesrcgpa = vm_slot2gpa(data, data->nslots - 1); + + if (isactive) { + uint64_t lastpages; + + vm_gpa2hva(data, movesrcgpa, &lastpages); + if (lastpages < MEM_TEST_MOVE_SIZE_PAGES / 2) { + *maxslots = 0; + return false; + } + } + + movetestgpa = movesrcgpa - (MEM_TEST_MOVE_SIZE / (isactive ? 2 : 1)); + sync->move_area_ptr = (void *)movetestgpa; + + if (isactive) { + data->mmio_ok = true; + data->mmio_gpa_min = movesrcgpa; + data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1; + } + + return true; +} + +static bool test_memslot_move_prepare_active(struct vm_data *data, + struct sync_area *sync, + uint64_t *maxslots) +{ + return test_memslot_move_prepare(data, sync, maxslots, true); +} + +static bool test_memslot_move_prepare_inactive(struct vm_data *data, + struct sync_area *sync, + uint64_t *maxslots) +{ + return test_memslot_move_prepare(data, sync, maxslots, false); +} + +static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync) +{ + uint64_t movesrcgpa; + + movesrcgpa = vm_slot2gpa(data, data->nslots - 1); + vm_mem_region_move(data->vm, data->nslots - 1 + 1, + MEM_TEST_MOVE_GPA_DEST); + vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa); +} + +static void test_memslot_do_unmap(struct vm_data *data, + uint64_t offsp, uint64_t count) +{ + uint64_t gpa, ctr; + + for (gpa = MEM_TEST_GPA + offsp * 4096, ctr = 0; ctr < count; ) { + uint64_t npages; + void *hva; + int ret; + + hva = vm_gpa2hva(data, gpa, &npages); + TEST_ASSERT(npages, "Empty memory slot at gptr 0x%"PRIx64, gpa); + npages = min(npages, count - ctr); + ret = madvise(hva, npages * 4096, MADV_DONTNEED); + TEST_ASSERT(!ret, + "madvise(%p, MADV_DONTNEED) on VM memory should not fail for gptr 0x%"PRIx64, + hva, gpa); + ctr += npages; + gpa += npages * 4096; + } + TEST_ASSERT(ctr == count, + "madvise(MADV_DONTNEED) should exactly cover all of the requested area"); +} + +static void test_memslot_map_unmap_check(struct vm_data *data, + uint64_t offsp, uint64_t valexp) +{ + uint64_t gpa; + uint64_t *val; + + if (!map_unmap_verify) + return; + + gpa = MEM_TEST_GPA + offsp * 4096; + val = (typeof(val))vm_gpa2hva(data, gpa, NULL); + TEST_ASSERT(*val == valexp, + "Guest written values should read back correctly before unmap (%"PRIu64" vs %"PRIu64" @ %"PRIx64")", + *val, valexp, gpa); + *val = 0; +} + +static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync) +{ + /* + * Unmap the second half of the test area while guest writes to (maps) + * the first half. 
+ */ + test_memslot_do_unmap(data, MEM_TEST_MAP_SIZE_PAGES / 2, + MEM_TEST_MAP_SIZE_PAGES / 2); + + /* + * Wait for the guest to finish writing the first half of the test + * area, verify the written value on the first and the last page of + * this area and then unmap it. + * Meanwhile, the guest is writing to (mapping) the second half of + * the test area. + */ + host_perform_sync(sync); + test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1); + test_memslot_map_unmap_check(data, + MEM_TEST_MAP_SIZE_PAGES / 2 - 1, + MEM_TEST_VAL_1); + test_memslot_do_unmap(data, 0, MEM_TEST_MAP_SIZE_PAGES / 2); + + + /* + * Wait for the guest to finish writing the second half of the test + * area and verify the written value on the first and the last page + * of this area. + * The area will be unmapped at the beginning of the next loop + * iteration. + * Meanwhile, the guest is writing to (mapping) the first half of + * the test area. + */ + host_perform_sync(sync); + test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES / 2, + MEM_TEST_VAL_2); + test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES - 1, + MEM_TEST_VAL_2); +} + +static void test_memslot_unmap_loop_common(struct vm_data *data, + struct sync_area *sync, + uint64_t chunk) +{ + uint64_t ctr; + + /* + * Wait for the guest to finish mapping page(s) in the first half + * of the test area, verify the written value and then perform unmap + * of this area. + * Meanwhile, the guest is writing to (mapping) page(s) in the second + * half of the test area. + */ + host_perform_sync(sync); + test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1); + for (ctr = 0; ctr < MEM_TEST_UNMAP_SIZE_PAGES / 2; ctr += chunk) + test_memslot_do_unmap(data, ctr, chunk); + + /* Likewise, but for the opposite host / guest areas */ + host_perform_sync(sync); + test_memslot_map_unmap_check(data, MEM_TEST_UNMAP_SIZE_PAGES / 2, + MEM_TEST_VAL_2); + for (ctr = MEM_TEST_UNMAP_SIZE_PAGES / 2; + ctr < MEM_TEST_UNMAP_SIZE_PAGES; ctr += chunk) + test_memslot_do_unmap(data, ctr, chunk); +} + +static void test_memslot_unmap_loop(struct vm_data *data, + struct sync_area *sync) +{ + test_memslot_unmap_loop_common(data, sync, 1); +} + +static void test_memslot_unmap_loop_chunked(struct vm_data *data, + struct sync_area *sync) +{ + test_memslot_unmap_loop_common(data, sync, MEM_TEST_UNMAP_CHUNK_PAGES); +} + +static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync) +{ + uint64_t gptr; + + for (gptr = MEM_TEST_GPA + 4096 / 2; + gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096) + *(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2; + + host_perform_sync(sync); + + for (gptr = MEM_TEST_GPA; + gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096) { + uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL); + uint64_t val = *vptr; + + TEST_ASSERT(val == MEM_TEST_VAL_1, + "Guest written values should read back correctly (is %"PRIu64" @ %"PRIx64")", + val, gptr); + *vptr = 0; + } + + host_perform_sync(sync); +} + +struct test_data { + const char *name; + uint64_t mem_size; + void (*guest_code)(void); + bool (*prepare)(struct vm_data *data, struct sync_area *sync, + uint64_t *maxslots); + void (*loop)(struct vm_data *data, struct sync_area *sync); +}; + +static bool test_execute(int nslots, uint64_t *maxslots, + unsigned int maxtime, + const struct test_data *tdata, + uint64_t *nloops, + struct timespec *slot_runtime, + struct timespec *guest_runtime) +{ + uint64_t mem_size = tdata->mem_size ? 
: MEM_SIZE_PAGES; + struct vm_data *data; + struct sync_area *sync; + struct timespec tstart; + bool ret = true; + + data = alloc_vm(); + if (!prepare_vm(data, nslots, maxslots, tdata->guest_code, + mem_size, slot_runtime)) { + ret = false; + goto exit_free; + } + + sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); + + if (tdata->prepare && + !tdata->prepare(data, sync, maxslots)) { + ret = false; + goto exit_free; + } + + launch_vm(data); + + clock_gettime(CLOCK_MONOTONIC, &tstart); + let_guest_run(sync); + + while (1) { + *guest_runtime = timespec_elapsed(tstart); + if (guest_runtime->tv_sec >= maxtime) + break; + + tdata->loop(data, sync); + + (*nloops)++; + } + + make_guest_exit(sync); + wait_guest_exit(data); + +exit_free: + free_vm(data); + + return ret; +} + +static const struct test_data tests[] = { + { + .name = "map", + .mem_size = MEM_SIZE_MAP_PAGES, + .guest_code = guest_code_test_memslot_map, + .loop = test_memslot_map_loop, + }, + { + .name = "unmap", + .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1, + .guest_code = guest_code_test_memslot_unmap, + .loop = test_memslot_unmap_loop, + }, + { + .name = "unmap chunked", + .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1, + .guest_code = guest_code_test_memslot_unmap, + .loop = test_memslot_unmap_loop_chunked, + }, + { + .name = "move active area", + .guest_code = guest_code_test_memslot_move, + .prepare = test_memslot_move_prepare_active, + .loop = test_memslot_move_loop, + }, + { + .name = "move inactive area", + .guest_code = guest_code_test_memslot_move, + .prepare = test_memslot_move_prepare_inactive, + .loop = test_memslot_move_loop, + }, + { + .name = "RW", + .guest_code = guest_code_test_memslot_rw, + .loop = test_memslot_rw_loop + }, +}; + +#define NTESTS ARRAY_SIZE(tests) + +struct test_args { + int tfirst; + int tlast; + int nslots; + int seconds; + int runs; +}; + +static void help(char *name, struct test_args *targs) +{ + int ctr; + + pr_info("usage: %s [-h] [-v] [-d] [-s slots] [-f first_test] [-e last_test] [-l test_length] [-r run_count]\n", + name); + pr_info(" -h: print this help screen.\n"); + pr_info(" -v: enable verbose mode (not for benchmarking).\n"); + pr_info(" -d: enable extra debug checks.\n"); + pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n", + targs->nslots); + pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n", + targs->tfirst, NTESTS - 1); + pr_info(" -e: specify the last test to run (currently: %i; max %zu)\n", + targs->tlast, NTESTS - 1); + pr_info(" -l: specify the test length in seconds (currently: %i)\n", + targs->seconds); + pr_info(" -r: specify the number of runs per test (currently: %i)\n", + targs->runs); + + pr_info("\nAvailable tests:\n"); + for (ctr = 0; ctr < NTESTS; ctr++) + pr_info("%d: %s\n", ctr, tests[ctr].name); +} + +static bool parse_args(int argc, char *argv[], + struct test_args *targs) +{ + int opt; + + while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) { + switch (opt) { + case 'h': + default: + help(argv[0], targs); + return false; + case 'v': + verbose = true; + break; + case 'd': + map_unmap_verify = true; + break; + case 's': + targs->nslots = atoi(optarg); + if (targs->nslots <= 0 && targs->nslots != -1) { + pr_info("Slot count cap has to be positive or -1 for no cap\n"); + return false; + } + break; + case 'f': + targs->tfirst = atoi(optarg); + if (targs->tfirst < 0) { + pr_info("First test to run has to be non-negative\n"); + return false; + } + break; + case 'e': + targs->tlast = atoi(optarg); + if (targs->tlast < 
0 || targs->tlast >= NTESTS) { + pr_info("Last test to run has to be non-negative and less than %zu\n", + NTESTS); + return false; + } + break; + case 'l': + targs->seconds = atoi(optarg); + if (targs->seconds < 0) { + pr_info("Test length in seconds has to be non-negative\n"); + return false; + } + break; + case 'r': + targs->runs = atoi(optarg); + if (targs->runs <= 0) { + pr_info("Runs per test has to be positive\n"); + return false; + } + break; + } + } + + if (optind < argc) { + help(argv[0], targs); + return false; + } + + if (targs->tfirst > targs->tlast) { + pr_info("First test to run cannot be greater than the last test to run\n"); + return false; + } + + return true; +} + +struct test_result { + struct timespec slot_runtime, guest_runtime, iter_runtime; + int64_t slottimens, runtimens; + uint64_t nloops; +}; + +static bool test_loop(const struct test_data *data, + const struct test_args *targs, + struct test_result *rbestslottime, + struct test_result *rbestruntime) +{ + uint64_t maxslots; + struct test_result result; + + result.nloops = 0; + if (!test_execute(targs->nslots, &maxslots, targs->seconds, data, + &result.nloops, + &result.slot_runtime, &result.guest_runtime)) { + if (maxslots) + pr_info("Memslot count too high for this test, decrease the cap (max is %"PRIu64")\n", + maxslots); + else + pr_info("Memslot count may be too high for this test, try adjusting the cap\n"); + + return false; + } + + pr_info("Test took %ld.%.9lds for slot setup + %ld.%.9lds all iterations\n", + result.slot_runtime.tv_sec, result.slot_runtime.tv_nsec, + result.guest_runtime.tv_sec, result.guest_runtime.tv_nsec); + if (!result.nloops) { + pr_info("No full loops done - too short test time or system too loaded?\n"); + return true; + } + + result.iter_runtime = timespec_div(result.guest_runtime, + result.nloops); + pr_info("Done %"PRIu64" iterations, avg %ld.%.9lds each\n", + result.nloops, + result.iter_runtime.tv_sec, + result.iter_runtime.tv_nsec); + result.slottimens = timespec_to_ns(result.slot_runtime); + result.runtimens = timespec_to_ns(result.iter_runtime); + + /* + * Only rank the slot setup time for tests using the whole test memory + * area so they are comparable + */ + if (!data->mem_size && + (!rbestslottime->slottimens || + result.slottimens < rbestslottime->slottimens)) + *rbestslottime = result; + if (!rbestruntime->runtimens || + result.runtimens < rbestruntime->runtimens) + *rbestruntime = result; + + return true; +} + +int main(int argc, char *argv[]) +{ + struct test_args targs = { + .tfirst = 0, + .tlast = NTESTS - 1, + .nslots = -1, + .seconds = 5, + .runs = 1, + }; + struct test_result rbestslottime; + int tctr; + + /* Tell stdout not to buffer its content */ + setbuf(stdout, NULL); + + if (!parse_args(argc, argv, &targs)) + return -1; + + rbestslottime.slottimens = 0; + for (tctr = targs.tfirst; tctr <= targs.tlast; tctr++) { + const struct test_data *data = &tests[tctr]; + unsigned int runctr; + struct test_result rbestruntime; + + if (tctr > targs.tfirst) + pr_info("\n"); + + pr_info("Testing %s performance with %i runs, %d seconds each\n", + data->name, targs.runs, targs.seconds); + + rbestruntime.runtimens = 0; + for (runctr = 0; runctr < targs.runs; runctr++) + if (!test_loop(data, &targs, + &rbestslottime, &rbestruntime)) + break; + + if (rbestruntime.runtimens) + pr_info("Best runtime result was %ld.%.9lds per iteration (with %"PRIu64" iterations)\n", + rbestruntime.iter_runtime.tv_sec, + rbestruntime.iter_runtime.tv_nsec, + rbestruntime.nloops); + } + + if 
(rbestslottime.slottimens)
+		pr_info("Best slot setup time for the whole test area was %ld.%.9lds\n",
+			rbestslottime.slot_runtime.tv_sec,
+			rbestslottime.slot_runtime.tv_nsec);
+
+	return 0;
+}
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index f127ed31dba7..978f5b5f4dc0 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -329,6 +329,22 @@ static void test_zero_memory_regions(void)
 }
 #endif /* __x86_64__ */
 
+static int test_memory_region_add(struct kvm_vm *vm, void *mem, uint32_t slot,
+				  uint32_t size, uint64_t guest_addr)
+{
+	struct kvm_userspace_memory_region region;
+	int ret;
+
+	region.slot = slot;
+	region.flags = 0;
+	region.guest_phys_addr = guest_addr;
+	region.memory_size = size;
+	region.userspace_addr = (uintptr_t) mem;
+	ret = ioctl(vm_get_fd(vm), KVM_SET_USER_MEMORY_REGION, &region);
+
+	return ret;
+}
+
 /*
  * Test that memory slots can be added up to KVM_CAP_NR_MEMSLOTS, and that
  * any attempt to add further slots should fail.
@@ -339,9 +355,15 @@ static void test_add_max_memory_regions(void)
 	struct kvm_vm *vm;
 	uint32_t max_mem_slots;
 	uint32_t slot;
-	uint64_t guest_addr = 0x0;
-	uint64_t mem_reg_npages;
-	void *mem;
+	void *mem, *mem_aligned, *mem_extra;
+	size_t alignment;
+
+#ifdef __s390x__
+	/* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
+	alignment = 0x100000;
+#else
+	alignment = 1;
+#endif
 
 	max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
 	TEST_ASSERT(max_mem_slots > 0,
@@ -350,30 +372,37 @@ static void test_add_max_memory_regions(void)
 
 	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
 
-	mem_reg_npages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, MEM_REGION_SIZE);
-
 	/* Check that memory slots can be added up to the maximum allowed */
 	pr_info("Adding slots 0..%i, each memory region with %dK size\n",
 		(max_mem_slots - 1), MEM_REGION_SIZE >> 10);
+
+	mem = mmap(NULL, MEM_REGION_SIZE * max_mem_slots + alignment,
+		   PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+	TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
+	mem_aligned = (void *)(((size_t) mem + alignment - 1) & ~(alignment - 1));
+
 	for (slot = 0; slot < max_mem_slots; slot++) {
-		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
-					    guest_addr, slot, mem_reg_npages,
-					    0);
-		guest_addr += MEM_REGION_SIZE;
+		ret = test_memory_region_add(vm, mem_aligned +
+					     ((uint64_t)slot * MEM_REGION_SIZE),
+					     slot, MEM_REGION_SIZE,
+					     (uint64_t)slot * MEM_REGION_SIZE);
+		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
+			    "  rc: %i errno: %i slot: %i\n",
+			    ret, errno, slot);
 	}
 
 	/* Check that no memory slots can be added beyond the limit */
-	mem = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
-		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-	TEST_ASSERT(mem != MAP_FAILED, "Failed to mmap() host");
+	mem_extra = mmap(NULL, MEM_REGION_SIZE, PROT_READ | PROT_WRITE,
+			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+	TEST_ASSERT(mem_extra != MAP_FAILED, "Failed to mmap() host");
 
-	ret = ioctl(vm_get_fd(vm), KVM_SET_USER_MEMORY_REGION,
-		    &(struct kvm_userspace_memory_region) {slot, 0, guest_addr,
-							   MEM_REGION_SIZE, (uint64_t) mem});
+	ret = test_memory_region_add(vm, mem_extra, max_mem_slots, MEM_REGION_SIZE,
+				     (uint64_t)max_mem_slots * MEM_REGION_SIZE);
 	TEST_ASSERT(ret == -1 && errno == EINVAL,
 		    "Adding one more memory slot should fail with EINVAL");
 
-	munmap(mem, MEM_REGION_SIZE);
+	munmap(mem, MEM_REGION_SIZE * max_mem_slots + alignment);
+	munmap(mem_extra, MEM_REGION_SIZE);
 
 	kvm_vm_free(vm);
 }
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index ca22ee6d19cb..63096cea26c6 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -18,6 +18,28 @@
 #include "vmx.h"
 
 #define VCPU_ID		5
+#define NMI_VECTOR	2
+
+static int ud_count;
+
+void enable_x2apic(void)
+{
+	uint32_t spiv_reg = APIC_BASE_MSR + (APIC_SPIV >> 4);
+
+	wrmsr(MSR_IA32_APICBASE, rdmsr(MSR_IA32_APICBASE) |
+	      MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD);
+	wrmsr(spiv_reg, rdmsr(spiv_reg) | APIC_SPIV_APIC_ENABLED);
+}
+
+static void guest_ud_handler(struct ex_regs *regs)
+{
+	ud_count++;
+	regs->rip += 3; /* VMLAUNCH */
+}
+
+static void guest_nmi_handler(struct ex_regs *regs)
+{
+}
 
 void l2_guest_code(void)
 {
@@ -25,15 +47,23 @@ void l2_guest_code(void)
 
 	GUEST_SYNC(8);
 
+	/* Forced exit to L1 upon restore */
+	GUEST_SYNC(9);
+
 	/* Done, exit to L1 and never come back. */
 	vmcall();
 }
 
-void l1_guest_code(struct vmx_pages *vmx_pages)
+void guest_code(struct vmx_pages *vmx_pages)
 {
 #define L2_GUEST_STACK_SIZE 64
 	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 
+	enable_x2apic();
+
+	GUEST_SYNC(1);
+	GUEST_SYNC(2);
+
 	enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);
 
 	GUEST_ASSERT(vmx_pages->vmcs_gpa);
@@ -55,27 +85,40 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 	current_evmcs->revision_id = EVMCS_VERSION;
 	GUEST_SYNC(6);
 
+	current_evmcs->pin_based_vm_exec_control |=
+		PIN_BASED_NMI_EXITING;
 	GUEST_ASSERT(!vmlaunch());
 	GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
-	GUEST_SYNC(9);
+
+	/*
+	 * NMI forces L2->L1 exit, resuming L2, and hopes that EVMCS is
+	 * up-to-date (RIP points where it should and not at the beginning
+	 * of l2_guest_code()). GUEST_SYNC(9) checks that.
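+	 * The NMI itself is injected from the host with KVM_SET_VCPU_EVENTS
+	 * right after the vCPU state has been restored (see inject_nmi() and
+	 * the stage 8 handling in main() below).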
+ */ GUEST_ASSERT(!vmresume()); - GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); + GUEST_SYNC(10); + + GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); + GUEST_SYNC(11); + + /* Try enlightened vmptrld with an incorrect GPA */ + evmcs_vmptrld(0xdeadbeef, vmx_pages->enlightened_vmcs); + GUEST_ASSERT(vmlaunch()); + GUEST_ASSERT(ud_count == 1); + GUEST_DONE(); } -void guest_code(struct vmx_pages *vmx_pages) +void inject_nmi(struct kvm_vm *vm) { - GUEST_SYNC(1); - GUEST_SYNC(2); + struct kvm_vcpu_events events; - if (vmx_pages) - l1_guest_code(vmx_pages); + vcpu_events_get(vm, VCPU_ID, &events); - GUEST_DONE(); + events.nmi.pending = 1; + events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING; - /* Try enlightened vmptrld with an incorrect GPA */ - evmcs_vmptrld(0xdeadbeef, vmx_pages->enlightened_vmcs); - GUEST_ASSERT(vmlaunch()); + vcpu_events_set(vm, VCPU_ID, &events); } int main(int argc, char *argv[]) @@ -109,6 +152,13 @@ int main(int argc, char *argv[]) vcpu_alloc_vmx(vm, &vmx_pages_gva); vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vm, VCPU_ID); + vm_handle_exception(vm, UD_VECTOR, guest_ud_handler); + vm_handle_exception(vm, NMI_VECTOR, guest_nmi_handler); + + pr_info("Running L1 which uses EVMCS to run L2\n"); + for (stage = 1;; stage++) { _vcpu_run(vm, VCPU_ID); TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, @@ -124,7 +174,7 @@ int main(int argc, char *argv[]) case UCALL_SYNC: break; case UCALL_DONE: - goto part1_done; + goto done; default: TEST_FAIL("Unknown ucall %lu", uc.cmd); } @@ -154,12 +204,14 @@ int main(int argc, char *argv[]) TEST_ASSERT(!memcmp(®s1, ®s2, sizeof(regs2)), "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx", (ulong) regs2.rdi, (ulong) regs2.rsi); - } -part1_done: - _vcpu_run(vm, VCPU_ID); - TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, - "Unexpected successful VMEnter with invalid eVMCS pointer!"); + /* Force immediate L2->L1 exit before resuming */ + if (stage == 8) { + pr_info("Injecting NMI into L1 before L2 had a chance to run after restore\n"); + inject_nmi(vm); + } + } +done: kvm_vm_free(vm); } diff --git a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c index 9b78e8889638..8c77537af5a1 100644 --- a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c +++ b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c @@ -19,7 +19,12 @@ struct { u32 function; u32 index; } mangled_cpuids[] = { + /* + * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR, + * which are not controlled for by this test. 
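+	 * (CPUID leaf 0xD reports XSAVE area sizes, which vary with the
+	 * state components currently enabled in XCR0 and IA32_XSS.)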
+ */ {.function = 0xd, .index = 0}, + {.function = 0xd, .index = 1}, }; static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid) diff --git a/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c b/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c index cb953df4d7d0..8aed0db1331d 100644 --- a/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c +++ b/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c @@ -37,9 +37,7 @@ static void test_get_msr_index(void) int old_res, res, kvm_fd, r; struct kvm_msr_list *list; - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); + kvm_fd = open_kvm_dev_path_or_exit(); old_res = kvm_num_index_msrs(kvm_fd, 0); TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0"); @@ -101,9 +99,7 @@ static void test_get_msr_feature(void) int res, old_res, i, kvm_fd; struct kvm_msr_list *feature_list; - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); + kvm_fd = open_kvm_dev_path_or_exit(); old_res = kvm_num_feature_msrs(kvm_fd, 0); TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0"); diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index 804ff5ff022d..1f4a0599683c 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -186,7 +186,7 @@ int main(int argc, char *argv[]) vcpu_ioctl(vm, VCPU_ID, KVM_XEN_VCPU_SET_ATTR, &st); } - struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR);; + struct vcpu_runstate_info *rs = addr_gpa2hva(vm, RUNSTATE_ADDR); rs->state = 0x5a; for (;;) { diff --git a/tools/testing/selftests/landlock/.gitignore b/tools/testing/selftests/landlock/.gitignore new file mode 100644 index 000000000000..470203a7cd73 --- /dev/null +++ b/tools/testing/selftests/landlock/.gitignore @@ -0,0 +1,2 @@ +/*_test +/true diff --git a/tools/testing/selftests/landlock/Makefile b/tools/testing/selftests/landlock/Makefile new file mode 100644 index 000000000000..a99596ca9882 --- /dev/null +++ b/tools/testing/selftests/landlock/Makefile @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0 + +CFLAGS += -Wall -O2 + +src_test := $(wildcard *_test.c) + +TEST_GEN_PROGS := $(src_test:.c=) + +TEST_GEN_PROGS_EXTENDED := true + +KSFT_KHDR_INSTALL := 1 +OVERRIDE_TARGETS := 1 +include ../lib.mk + +khdr_dir = $(top_srcdir)/usr/include + +$(khdr_dir)/linux/landlock.h: khdr + @: + +$(OUTPUT)/true: true.c + $(LINK.c) $< $(LDLIBS) -o $@ -static + +$(OUTPUT)/%_test: %_test.c $(khdr_dir)/linux/landlock.h ../kselftest_harness.h common.h + $(LINK.c) $< $(LDLIBS) -o $@ -lcap -I$(khdr_dir) diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c new file mode 100644 index 000000000000..ca40abe9daa8 --- /dev/null +++ b/tools/testing/selftests/landlock/base_test.c @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Landlock tests - Common user space base + * + * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net> + * Copyright © 2019-2020 ANSSI + */ + +#define _GNU_SOURCE +#include <errno.h> +#include <fcntl.h> +#include <linux/landlock.h> +#include <string.h> +#include <sys/prctl.h> +#include <sys/socket.h> +#include <sys/types.h> + +#include "common.h" + +#ifndef O_PATH +#define O_PATH 010000000 +#endif + +TEST(inconsistent_attr) { + const long page_size = sysconf(_SC_PAGESIZE); + char *const buf = malloc(page_size + 1); + struct landlock_ruleset_attr *const ruleset_attr = (void *)buf; + + 
ASSERT_NE(NULL, buf);
+
+	/* Checks copy_from_user(). */
+	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, 0, 0));
+	/* The size is less than sizeof(struct landlock_attr_enforce). */
+	ASSERT_EQ(EINVAL, errno);
+	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, 1, 0));
+	ASSERT_EQ(EINVAL, errno);
+
+	ASSERT_EQ(-1, landlock_create_ruleset(NULL, 1, 0));
+	/* The size is less than sizeof(struct landlock_attr_enforce). */
+	ASSERT_EQ(EFAULT, errno);
+
+	ASSERT_EQ(-1, landlock_create_ruleset(NULL,
+				sizeof(struct landlock_ruleset_attr), 0));
+	ASSERT_EQ(EFAULT, errno);
+
+	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size + 1, 0));
+	ASSERT_EQ(E2BIG, errno);
+
+	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr,
+				sizeof(struct landlock_ruleset_attr), 0));
+	ASSERT_EQ(ENOMSG, errno);
+	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size, 0));
+	ASSERT_EQ(ENOMSG, errno);
+
+	/* Checks non-zero value. */
+	buf[page_size - 2] = '.';
+	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size, 0));
+	ASSERT_EQ(E2BIG, errno);
+
+	ASSERT_EQ(-1, landlock_create_ruleset(ruleset_attr, page_size + 1, 0));
+	ASSERT_EQ(E2BIG, errno);
+
+	free(buf);
+}
+
+TEST(abi_version) {
+	const struct landlock_ruleset_attr ruleset_attr = {
+		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
+	};
+	ASSERT_EQ(1, landlock_create_ruleset(NULL, 0,
+				LANDLOCK_CREATE_RULESET_VERSION));
+
+	ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0,
+				LANDLOCK_CREATE_RULESET_VERSION));
+	ASSERT_EQ(EINVAL, errno);
+
+	ASSERT_EQ(-1, landlock_create_ruleset(NULL, sizeof(ruleset_attr),
+				LANDLOCK_CREATE_RULESET_VERSION));
+	ASSERT_EQ(EINVAL, errno);
+
+	ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr,
+				sizeof(ruleset_attr),
+				LANDLOCK_CREATE_RULESET_VERSION));
+	ASSERT_EQ(EINVAL, errno);
+
+	ASSERT_EQ(-1, landlock_create_ruleset(NULL, 0,
+				LANDLOCK_CREATE_RULESET_VERSION | 1 << 31));
+	ASSERT_EQ(EINVAL, errno);
+}
+
+TEST(inval_create_ruleset_flags) {
+	const int last_flag = LANDLOCK_CREATE_RULESET_VERSION;
+	const int invalid_flag = last_flag << 1;
+	const struct landlock_ruleset_attr ruleset_attr = {
+		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
+	};
+
+	ASSERT_EQ(-1, landlock_create_ruleset(NULL, 0, invalid_flag));
+	ASSERT_EQ(EINVAL, errno);
+
+	ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0, invalid_flag));
+	ASSERT_EQ(EINVAL, errno);
+
+	ASSERT_EQ(-1, landlock_create_ruleset(NULL, sizeof(ruleset_attr),
+				invalid_flag));
+	ASSERT_EQ(EINVAL, errno);
+
+	ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr,
+				sizeof(ruleset_attr), invalid_flag));
+	ASSERT_EQ(EINVAL, errno);
+}
+
+TEST(empty_path_beneath_attr) {
+	const struct landlock_ruleset_attr ruleset_attr = {
+		.handled_access_fs = LANDLOCK_ACCESS_FS_EXECUTE,
+	};
+	const int ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+			sizeof(ruleset_attr), 0);
+
+	ASSERT_LE(0, ruleset_fd);
+
+	/* Similar to struct landlock_path_beneath_attr.parent_fd = 0 */
+	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+				NULL, 0));
+	ASSERT_EQ(EFAULT, errno);
+	ASSERT_EQ(0, close(ruleset_fd));
+}
+
+TEST(inval_fd_enforce) {
+	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+
+	ASSERT_EQ(-1, landlock_restrict_self(-1, 0));
+	ASSERT_EQ(EBADF, errno);
+}
+
+TEST(unpriv_enforce_without_no_new_privs) {
+	int err;
+
+	drop_caps(_metadata);
+	err = landlock_restrict_self(-1, 0);
+	ASSERT_EQ(EPERM, errno);
+	ASSERT_EQ(err, -1);
+}
+
+TEST(ruleset_fd_io)
+{
+	struct landlock_ruleset_attr ruleset_attr = {
+		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE,
+	};
+	int ruleset_fd;
+	char buf;
+
+	drop_caps(_metadata);
+	ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+			sizeof(ruleset_attr), 0);
+	ASSERT_LE(0, ruleset_fd);
+
+	ASSERT_EQ(-1, write(ruleset_fd, ".", 1));
+	ASSERT_EQ(EINVAL, errno);
+	ASSERT_EQ(-1, read(ruleset_fd, &buf, 1));
+	ASSERT_EQ(EINVAL, errno);
+
+	ASSERT_EQ(0, close(ruleset_fd));
+}
+
+/* Tests enforcement of a ruleset FD transferred through a UNIX socket. */
+TEST(ruleset_fd_transfer)
+{
+	struct landlock_ruleset_attr ruleset_attr = {
+		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR,
+	};
+	struct landlock_path_beneath_attr path_beneath_attr = {
+		.allowed_access = LANDLOCK_ACCESS_FS_READ_DIR,
+	};
+	int ruleset_fd_tx, dir_fd;
+	union {
+		/* Aligned ancillary data buffer. */
+		char buf[CMSG_SPACE(sizeof(ruleset_fd_tx))];
+		struct cmsghdr _align;
+	} cmsg_tx = {};
+	char data_tx = '.';
+	struct iovec io = {
+		.iov_base = &data_tx,
+		.iov_len = sizeof(data_tx),
+	};
+	struct msghdr msg = {
+		.msg_iov = &io,
+		.msg_iovlen = 1,
+		.msg_control = &cmsg_tx.buf,
+		.msg_controllen = sizeof(cmsg_tx.buf),
+	};
+	struct cmsghdr *cmsg;
+	int socket_fds[2];
+	pid_t child;
+	int status;
+
+	drop_caps(_metadata);
+
+	/* Creates a test ruleset with a simple rule. */
+	ruleset_fd_tx = landlock_create_ruleset(&ruleset_attr,
+			sizeof(ruleset_attr), 0);
+	ASSERT_LE(0, ruleset_fd_tx);
+	path_beneath_attr.parent_fd = open("/tmp", O_PATH | O_NOFOLLOW |
+			O_DIRECTORY | O_CLOEXEC);
+	ASSERT_LE(0, path_beneath_attr.parent_fd);
+	ASSERT_EQ(0, landlock_add_rule(ruleset_fd_tx, LANDLOCK_RULE_PATH_BENEATH,
+				&path_beneath_attr, 0));
+	ASSERT_EQ(0, close(path_beneath_attr.parent_fd));
+
+	cmsg = CMSG_FIRSTHDR(&msg);
+	ASSERT_NE(NULL, cmsg);
+	cmsg->cmsg_len = CMSG_LEN(sizeof(ruleset_fd_tx));
+	cmsg->cmsg_level = SOL_SOCKET;
+	cmsg->cmsg_type = SCM_RIGHTS;
+	memcpy(CMSG_DATA(cmsg), &ruleset_fd_tx, sizeof(ruleset_fd_tx));
+
+	/* Sends the ruleset FD over a socketpair and then closes it. */
+	ASSERT_EQ(0, socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, socket_fds));
+	ASSERT_EQ(sizeof(data_tx), sendmsg(socket_fds[0], &msg, 0));
+	ASSERT_EQ(0, close(socket_fds[0]));
+	ASSERT_EQ(0, close(ruleset_fd_tx));
+
+	child = fork();
+	ASSERT_LE(0, child);
+	if (child == 0) {
+		int ruleset_fd_rx;
+
+		*(char *)msg.msg_iov->iov_base = '\0';
+		ASSERT_EQ(sizeof(data_tx), recvmsg(socket_fds[1], &msg, MSG_CMSG_CLOEXEC));
+		ASSERT_EQ('.', *(char *)msg.msg_iov->iov_base);
+		ASSERT_EQ(0, close(socket_fds[1]));
+		cmsg = CMSG_FIRSTHDR(&msg);
+		ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(ruleset_fd_tx)));
+		memcpy(&ruleset_fd_rx, CMSG_DATA(cmsg), sizeof(ruleset_fd_tx));
+
+		/* Enforces the received ruleset on the child. */
+		ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+		ASSERT_EQ(0, landlock_restrict_self(ruleset_fd_rx, 0));
+		ASSERT_EQ(0, close(ruleset_fd_rx));
+
+		/* Checks that the ruleset is enforced. */
+		ASSERT_EQ(-1, open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC));
+		ASSERT_EQ(EACCES, errno);
+		dir_fd = open("/tmp", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
+		ASSERT_LE(0, dir_fd);
+		ASSERT_EQ(0, close(dir_fd));
+		_exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
+		return;
+	}
+
+	ASSERT_EQ(0, close(socket_fds[1]));
+
+	/* Checks that the parent is unrestricted.
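+	 * landlock_restrict_self() was only applied in the forked child, so
+	 * both of the following opens must still succeed here.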
+	 */
+	dir_fd = open("/", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
+	ASSERT_LE(0, dir_fd);
+	ASSERT_EQ(0, close(dir_fd));
+	dir_fd = open("/tmp", O_RDONLY | O_DIRECTORY | O_CLOEXEC);
+	ASSERT_LE(0, dir_fd);
+	ASSERT_EQ(0, close(dir_fd));
+
+	ASSERT_EQ(child, waitpid(child, &status, 0));
+	ASSERT_EQ(1, WIFEXITED(status));
+	ASSERT_EQ(EXIT_SUCCESS, WEXITSTATUS(status));
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h
new file mode 100644
index 000000000000..20e2a9286d71
--- /dev/null
+++ b/tools/testing/selftests/landlock/common.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Landlock test helpers
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2019-2020 ANSSI
+ * Copyright © 2021 Microsoft Corporation
+ */
+
+#include <errno.h>
+#include <linux/landlock.h>
+#include <sys/capability.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "../kselftest_harness.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+/*
+ * TEST_F_FORK() is useful when a test drops privileges but the corresponding
+ * FIXTURE_TEARDOWN() requires them (e.g. to remove files from a directory
+ * where write actions are denied).  For convenience, FIXTURE_TEARDOWN() is
+ * also called when the test failed, but not when FIXTURE_SETUP() failed.  For
+ * this to be possible, we must not call abort() but instead exit smoothly
+ * (hence the step print).
+ */
+#define TEST_F_FORK(fixture_name, test_name) \
+	static void fixture_name##_##test_name##_child( \
+		struct __test_metadata *_metadata, \
+		FIXTURE_DATA(fixture_name) *self, \
+		const FIXTURE_VARIANT(fixture_name) *variant); \
+	TEST_F(fixture_name, test_name) \
+	{ \
+		int status; \
+		const pid_t child = fork(); \
+		if (child < 0) \
+			abort(); \
+		if (child == 0) { \
+			_metadata->no_print = 1; \
+			fixture_name##_##test_name##_child(_metadata, self, variant); \
+			if (_metadata->skip) \
+				_exit(255); \
+			if (_metadata->passed) \
+				_exit(0); \
+			_exit(_metadata->step); \
+		} \
+		if (child != waitpid(child, &status, 0)) \
+			abort(); \
+		if (WIFSIGNALED(status) || !WIFEXITED(status)) { \
+			_metadata->passed = 0; \
+			_metadata->step = 1; \
+			return; \
+		} \
+		switch (WEXITSTATUS(status)) { \
+		case 0: \
+			_metadata->passed = 1; \
+			break; \
+		case 255: \
+			_metadata->passed = 1; \
+			_metadata->skip = 1; \
+			break; \
+		default: \
+			_metadata->passed = 0; \
+			_metadata->step = WEXITSTATUS(status); \
+			break; \
+		} \
+	} \
+	static void fixture_name##_##test_name##_child( \
+		struct __test_metadata __attribute__((unused)) *_metadata, \
+		FIXTURE_DATA(fixture_name) __attribute__((unused)) *self, \
+		const FIXTURE_VARIANT(fixture_name) \
+			__attribute__((unused)) *variant)
+
+#ifndef landlock_create_ruleset
+static inline int landlock_create_ruleset(
+		const struct landlock_ruleset_attr *const attr,
+		const size_t size, const __u32 flags)
+{
+	return syscall(__NR_landlock_create_ruleset, attr, size, flags);
+}
+#endif
+
+#ifndef landlock_add_rule
+static inline int landlock_add_rule(const int ruleset_fd,
+		const enum landlock_rule_type rule_type,
+		const void *const rule_attr, const __u32 flags)
+{
+	return syscall(__NR_landlock_add_rule, ruleset_fd, rule_type,
+			rule_attr, flags);
+}
+#endif
+
+#ifndef landlock_restrict_self
+static inline int landlock_restrict_self(const int ruleset_fd,
+		const __u32 flags)
+{
+	return syscall(__NR_landlock_restrict_self, ruleset_fd, flags);
+}
+#endif
+
+static void _init_caps(struct __test_metadata *const _metadata, bool drop_all)
+{
+	cap_t cap_p;
+	/* Only these four capabilities are useful for the tests. */
+	const cap_value_t caps[] = {
+		CAP_DAC_OVERRIDE,
+		CAP_MKNOD,
+		CAP_SYS_ADMIN,
+		CAP_SYS_CHROOT,
+	};
+
+	cap_p = cap_get_proc();
+	EXPECT_NE(NULL, cap_p) {
+		TH_LOG("Failed to cap_get_proc: %s", strerror(errno));
+	}
+	EXPECT_NE(-1, cap_clear(cap_p)) {
+		TH_LOG("Failed to cap_clear: %s", strerror(errno));
+	}
+	if (!drop_all) {
+		EXPECT_NE(-1, cap_set_flag(cap_p, CAP_PERMITTED,
+					ARRAY_SIZE(caps), caps, CAP_SET)) {
+			TH_LOG("Failed to cap_set_flag: %s", strerror(errno));
+		}
+	}
+	EXPECT_NE(-1, cap_set_proc(cap_p)) {
+		TH_LOG("Failed to cap_set_proc: %s", strerror(errno));
+	}
+	EXPECT_NE(-1, cap_free(cap_p)) {
+		TH_LOG("Failed to cap_free: %s", strerror(errno));
+	}
+}
+
+/* We cannot put such helpers in a library because of kselftest_harness.h. */
+__attribute__((__unused__))
+static void disable_caps(struct __test_metadata *const _metadata)
+{
+	_init_caps(_metadata, false);
+}
+
+__attribute__((__unused__))
+static void drop_caps(struct __test_metadata *const _metadata)
+{
+	_init_caps(_metadata, true);
+}
+
+static void _effective_cap(struct __test_metadata *const _metadata,
+		const cap_value_t caps, const cap_flag_value_t value)
+{
+	cap_t cap_p;
+
+	cap_p = cap_get_proc();
+	EXPECT_NE(NULL, cap_p) {
+		TH_LOG("Failed to cap_get_proc: %s", strerror(errno));
+	}
+	EXPECT_NE(-1, cap_set_flag(cap_p, CAP_EFFECTIVE, 1, &caps, value)) {
+		TH_LOG("Failed to cap_set_flag: %s", strerror(errno));
+	}
+	EXPECT_NE(-1, cap_set_proc(cap_p)) {
+		TH_LOG("Failed to cap_set_proc: %s", strerror(errno));
+	}
+	EXPECT_NE(-1, cap_free(cap_p)) {
+		TH_LOG("Failed to cap_free: %s", strerror(errno));
+	}
+}
+
+__attribute__((__unused__))
+static void set_cap(struct __test_metadata *const _metadata,
+		const cap_value_t caps)
+{
+	_effective_cap(_metadata, caps, CAP_SET);
+}
+
+__attribute__((__unused__))
+static void clear_cap(struct __test_metadata *const _metadata,
+		const cap_value_t caps)
+{
+	_effective_cap(_metadata, caps, CAP_CLEAR);
+}
diff --git a/tools/testing/selftests/landlock/config b/tools/testing/selftests/landlock/config
new file mode 100644
index 000000000000..0f0a65287bac
--- /dev/null
+++ b/tools/testing/selftests/landlock/config
@@ -0,0 +1,7 @@
+CONFIG_OVERLAY_FS=y
+CONFIG_SECURITY_LANDLOCK=y
+CONFIG_SECURITY_PATH=y
+CONFIG_SECURITY=y
+CONFIG_SHMEM=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_TMPFS=y
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
new file mode 100644
index 000000000000..10c9a1e4ebd9
--- /dev/null
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -0,0 +1,2791 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Landlock tests - Filesystem
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2020 ANSSI
+ * Copyright © 2020-2021 Microsoft Corporation
+ */
+
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <linux/landlock.h>
+#include <sched.h>
+#include <string.h>
+#include <sys/capability.h>
+#include <sys/mount.h>
+#include <sys/prctl.h>
+#include <sys/sendfile.h>
+#include <sys/stat.h>
+#include <sys/sysmacros.h>
+#include <unistd.h>
+
+#include "common.h"
+
+#define TMP_DIR		"tmp"
+#define BINARY_PATH	"./true"
+
+/* Paths (sibling number and depth) */
+static const char dir_s1d1[] = TMP_DIR "/s1d1";
+static const char file1_s1d1[] = TMP_DIR "/s1d1/f1";
+static const char file2_s1d1[] = TMP_DIR "/s1d1/f2";
+static const char dir_s1d2[]
= TMP_DIR "/s1d1/s1d2"; +static const char file1_s1d2[] = TMP_DIR "/s1d1/s1d2/f1"; +static const char file2_s1d2[] = TMP_DIR "/s1d1/s1d2/f2"; +static const char dir_s1d3[] = TMP_DIR "/s1d1/s1d2/s1d3"; +static const char file1_s1d3[] = TMP_DIR "/s1d1/s1d2/s1d3/f1"; +static const char file2_s1d3[] = TMP_DIR "/s1d1/s1d2/s1d3/f2"; + +static const char dir_s2d1[] = TMP_DIR "/s2d1"; +static const char file1_s2d1[] = TMP_DIR "/s2d1/f1"; +static const char dir_s2d2[] = TMP_DIR "/s2d1/s2d2"; +static const char file1_s2d2[] = TMP_DIR "/s2d1/s2d2/f1"; +static const char dir_s2d3[] = TMP_DIR "/s2d1/s2d2/s2d3"; +static const char file1_s2d3[] = TMP_DIR "/s2d1/s2d2/s2d3/f1"; +static const char file2_s2d3[] = TMP_DIR "/s2d1/s2d2/s2d3/f2"; + +static const char dir_s3d1[] = TMP_DIR "/s3d1"; +/* dir_s3d2 is a mount point. */ +static const char dir_s3d2[] = TMP_DIR "/s3d1/s3d2"; +static const char dir_s3d3[] = TMP_DIR "/s3d1/s3d2/s3d3"; + +/* + * layout1 hierarchy: + * + * tmp + * ├── s1d1 + * │  ├── f1 + * │  ├── f2 + * │  └── s1d2 + * │  ├── f1 + * │  ├── f2 + * │  └── s1d3 + * │  ├── f1 + * │  └── f2 + * ├── s2d1 + * │  ├── f1 + * │  └── s2d2 + * │  ├── f1 + * │  └── s2d3 + * │  ├── f1 + * │  └── f2 + * └── s3d1 + * └── s3d2 + * └── s3d3 + */ + +static void mkdir_parents(struct __test_metadata *const _metadata, + const char *const path) +{ + char *walker; + const char *parent; + int i, err; + + ASSERT_NE(path[0], '\0'); + walker = strdup(path); + ASSERT_NE(NULL, walker); + parent = walker; + for (i = 1; walker[i]; i++) { + if (walker[i] != '/') + continue; + walker[i] = '\0'; + err = mkdir(parent, 0700); + ASSERT_FALSE(err && errno != EEXIST) { + TH_LOG("Failed to create directory \"%s\": %s", + parent, strerror(errno)); + } + walker[i] = '/'; + } + free(walker); +} + +static void create_directory(struct __test_metadata *const _metadata, + const char *const path) +{ + mkdir_parents(_metadata, path); + ASSERT_EQ(0, mkdir(path, 0700)) { + TH_LOG("Failed to create directory \"%s\": %s", path, + strerror(errno)); + } +} + +static void create_file(struct __test_metadata *const _metadata, + const char *const path) +{ + mkdir_parents(_metadata, path); + ASSERT_EQ(0, mknod(path, S_IFREG | 0700, 0)) { + TH_LOG("Failed to create file \"%s\": %s", path, + strerror(errno)); + } +} + +static int remove_path(const char *const path) +{ + char *walker; + int i, ret, err = 0; + + walker = strdup(path); + if (!walker) { + err = ENOMEM; + goto out; + } + if (unlink(path) && rmdir(path)) { + if (errno != ENOENT) + err = errno; + goto out; + } + for (i = strlen(walker); i > 0; i--) { + if (walker[i] != '/') + continue; + walker[i] = '\0'; + ret = rmdir(walker); + if (ret) { + if (errno != ENOTEMPTY && errno != EBUSY) + err = errno; + goto out; + } + if (strcmp(walker, TMP_DIR) == 0) + goto out; + } + +out: + free(walker); + return err; +} + +static void prepare_layout(struct __test_metadata *const _metadata) +{ + disable_caps(_metadata); + umask(0077); + create_directory(_metadata, TMP_DIR); + + /* + * Do not pollute the rest of the system: creates a private mount point + * for tests relying on pivot_root(2) and move_mount(2). 
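+	 * The unshare(CLONE_NEWNS) below, combined with MS_PRIVATE | MS_REC,
+	 * keeps the tmpfs mount (and any test mounts under it) from
+	 * propagating back to the parent mount namespace.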
+	 */
+	set_cap(_metadata, CAP_SYS_ADMIN);
+	ASSERT_EQ(0, unshare(CLONE_NEWNS));
+	ASSERT_EQ(0, mount("tmp", TMP_DIR, "tmpfs", 0, "size=4m,mode=700"));
+	ASSERT_EQ(0, mount(NULL, TMP_DIR, NULL, MS_PRIVATE | MS_REC, NULL));
+	clear_cap(_metadata, CAP_SYS_ADMIN);
+}
+
+static void cleanup_layout(struct __test_metadata *const _metadata)
+{
+	set_cap(_metadata, CAP_SYS_ADMIN);
+	EXPECT_EQ(0, umount(TMP_DIR));
+	clear_cap(_metadata, CAP_SYS_ADMIN);
+	EXPECT_EQ(0, remove_path(TMP_DIR));
+}
+
+static void create_layout1(struct __test_metadata *const _metadata)
+{
+	create_file(_metadata, file1_s1d1);
+	create_file(_metadata, file1_s1d2);
+	create_file(_metadata, file1_s1d3);
+	create_file(_metadata, file2_s1d1);
+	create_file(_metadata, file2_s1d2);
+	create_file(_metadata, file2_s1d3);
+
+	create_file(_metadata, file1_s2d1);
+	create_file(_metadata, file1_s2d2);
+	create_file(_metadata, file1_s2d3);
+	create_file(_metadata, file2_s2d3);
+
+	create_directory(_metadata, dir_s3d2);
+	set_cap(_metadata, CAP_SYS_ADMIN);
+	ASSERT_EQ(0, mount("tmp", dir_s3d2, "tmpfs", 0, "size=4m,mode=700"));
+	clear_cap(_metadata, CAP_SYS_ADMIN);
+
+	ASSERT_EQ(0, mkdir(dir_s3d3, 0700));
+}
+
+static void remove_layout1(struct __test_metadata *const _metadata)
+{
+	EXPECT_EQ(0, remove_path(file2_s1d3));
+	EXPECT_EQ(0, remove_path(file2_s1d2));
+	EXPECT_EQ(0, remove_path(file2_s1d1));
+	EXPECT_EQ(0, remove_path(file1_s1d3));
+	EXPECT_EQ(0, remove_path(file1_s1d2));
+	EXPECT_EQ(0, remove_path(file1_s1d1));
+
+	EXPECT_EQ(0, remove_path(file2_s2d3));
+	EXPECT_EQ(0, remove_path(file1_s2d3));
+	EXPECT_EQ(0, remove_path(file1_s2d2));
+	EXPECT_EQ(0, remove_path(file1_s2d1));
+
+	EXPECT_EQ(0, remove_path(dir_s3d3));
+	set_cap(_metadata, CAP_SYS_ADMIN);
+	umount(dir_s3d2);
+	clear_cap(_metadata, CAP_SYS_ADMIN);
+	EXPECT_EQ(0, remove_path(dir_s3d2));
+}
+
+FIXTURE(layout1) {
+};
+
+FIXTURE_SETUP(layout1)
+{
+	prepare_layout(_metadata);
+
+	create_layout1(_metadata);
+}
+
+FIXTURE_TEARDOWN(layout1)
+{
+	remove_layout1(_metadata);
+
+	cleanup_layout(_metadata);
+}
+
+/*
+ * This helper makes it possible to use the ASSERT_* macros from the caller,
+ * so that a failure is reported with the test's own line number.
+ */
+static int test_open_rel(const int dirfd, const char *const path, const int flags)
+{
+	int fd;
+
+	/* Works with files and directories. */
+	fd = openat(dirfd, path, flags | O_CLOEXEC);
+	if (fd < 0)
+		return errno;
+	/*
+	 * Mixing error codes from close(2) and open(2) should not lead to any
+	 * (access type) confusion for this test.
+	 */
+	if (close(fd) != 0)
+		return errno;
+	return 0;
+}
+
+static int test_open(const char *const path, const int flags)
+{
+	return test_open_rel(AT_FDCWD, path, flags);
+}
+
+TEST_F_FORK(layout1, no_restriction)
+{
+	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(file2_s1d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(file2_s1d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+
+	ASSERT_EQ(0, test_open(dir_s2d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s2d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s2d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s2d3, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s2d3, O_RDONLY));
+
+	ASSERT_EQ(0, test_open(dir_s3d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s3d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s3d3, O_RDONLY));
+}
+
+TEST_F_FORK(layout1, inval)
+{
+	struct landlock_path_beneath_attr path_beneath = {
+		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
+			LANDLOCK_ACCESS_FS_WRITE_FILE,
+		.parent_fd = -1,
+	};
+	struct landlock_ruleset_attr ruleset_attr = {
+		.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE |
+			LANDLOCK_ACCESS_FS_WRITE_FILE,
+	};
+	int ruleset_fd;
+
+	path_beneath.parent_fd = open(dir_s1d2, O_PATH | O_DIRECTORY |
+			O_CLOEXEC);
+	ASSERT_LE(0, path_beneath.parent_fd);
+
+	ruleset_fd = open(dir_s1d1, O_PATH | O_DIRECTORY | O_CLOEXEC);
+	ASSERT_LE(0, ruleset_fd);
+	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+				&path_beneath, 0));
+	/* Returns EBADF because ruleset_fd is not a landlock-ruleset FD. */
+	ASSERT_EQ(EBADF, errno);
+	ASSERT_EQ(0, close(ruleset_fd));
+
+	ruleset_fd = open(dir_s1d1, O_DIRECTORY | O_CLOEXEC);
+	ASSERT_LE(0, ruleset_fd);
+	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+				&path_beneath, 0));
+	/* Returns EBADFD because ruleset_fd is not a valid ruleset. */
+	ASSERT_EQ(EBADFD, errno);
+	ASSERT_EQ(0, close(ruleset_fd));
+
+	/* Gets a real ruleset. */
+	ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+			sizeof(ruleset_attr), 0);
+	ASSERT_LE(0, ruleset_fd);
+	ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+				&path_beneath, 0));
+	ASSERT_EQ(0, close(path_beneath.parent_fd));
+
+	/* Tests without O_PATH. */
+	path_beneath.parent_fd = open(dir_s1d2, O_DIRECTORY | O_CLOEXEC);
+	ASSERT_LE(0, path_beneath.parent_fd);
+	ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+				&path_beneath, 0));
+	ASSERT_EQ(0, close(path_beneath.parent_fd));
+
+	/* Tests with a ruleset FD. */
+	path_beneath.parent_fd = ruleset_fd;
+	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+				&path_beneath, 0));
+	ASSERT_EQ(EBADFD, errno);
+
+	/* Checks unhandled allowed_access. */
+	path_beneath.parent_fd = open(dir_s1d2, O_PATH | O_DIRECTORY |
+			O_CLOEXEC);
+	ASSERT_LE(0, path_beneath.parent_fd);
+
+	/* Tests with a legitimate, but unhandled, access right. */
+	path_beneath.allowed_access |= LANDLOCK_ACCESS_FS_EXECUTE;
+	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+				&path_beneath, 0));
+	ASSERT_EQ(EINVAL, errno);
+	path_beneath.allowed_access &= ~LANDLOCK_ACCESS_FS_EXECUTE;
+
+	/* Tests with an unknown (64-bit) access value.
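+	 * Bit 60 is not defined by this Landlock ABI (ACCESS_LAST, i.e.
+	 * LANDLOCK_ACCESS_FS_MAKE_SYM, is the highest valid bit), so the
+	 * kernel must reject it with EINVAL instead of silently ignoring it.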
+	 */
+	path_beneath.allowed_access |= (1ULL << 60);
+	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+				&path_beneath, 0));
+	ASSERT_EQ(EINVAL, errno);
+	path_beneath.allowed_access &= ~(1ULL << 60);
+
+	/* Tests with no access. */
+	path_beneath.allowed_access = 0;
+	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+				&path_beneath, 0));
+	ASSERT_EQ(ENOMSG, errno);
+
+	ASSERT_EQ(0, close(path_beneath.parent_fd));
+
+	/* Enforces the ruleset. */
+	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+	ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
+
+	ASSERT_EQ(0, close(ruleset_fd));
+}
+
+#define ACCESS_FILE ( \
+	LANDLOCK_ACCESS_FS_EXECUTE | \
+	LANDLOCK_ACCESS_FS_WRITE_FILE | \
+	LANDLOCK_ACCESS_FS_READ_FILE)
+
+#define ACCESS_LAST LANDLOCK_ACCESS_FS_MAKE_SYM
+
+#define ACCESS_ALL ( \
+	ACCESS_FILE | \
+	LANDLOCK_ACCESS_FS_READ_DIR | \
+	LANDLOCK_ACCESS_FS_REMOVE_DIR | \
+	LANDLOCK_ACCESS_FS_REMOVE_FILE | \
+	LANDLOCK_ACCESS_FS_MAKE_CHAR | \
+	LANDLOCK_ACCESS_FS_MAKE_DIR | \
+	LANDLOCK_ACCESS_FS_MAKE_REG | \
+	LANDLOCK_ACCESS_FS_MAKE_SOCK | \
+	LANDLOCK_ACCESS_FS_MAKE_FIFO | \
+	LANDLOCK_ACCESS_FS_MAKE_BLOCK | \
+	ACCESS_LAST)
+
+TEST_F_FORK(layout1, file_access_rights)
+{
+	__u64 access;
+	int err;
+	struct landlock_path_beneath_attr path_beneath = {};
+	struct landlock_ruleset_attr ruleset_attr = {
+		.handled_access_fs = ACCESS_ALL,
+	};
+	const int ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+			sizeof(ruleset_attr), 0);
+
+	ASSERT_LE(0, ruleset_fd);
+
+	/* Tests access rights for files. */
+	path_beneath.parent_fd = open(file1_s1d2, O_PATH | O_CLOEXEC);
+	ASSERT_LE(0, path_beneath.parent_fd);
+	for (access = 1; access <= ACCESS_LAST; access <<= 1) {
+		path_beneath.allowed_access = access;
+		err = landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+				&path_beneath, 0);
+		if ((access | ACCESS_FILE) == ACCESS_FILE) {
+			ASSERT_EQ(0, err);
+		} else {
+			ASSERT_EQ(-1, err);
+			ASSERT_EQ(EINVAL, errno);
+		}
+	}
+	ASSERT_EQ(0, close(path_beneath.parent_fd));
+}
+
+static void add_path_beneath(struct __test_metadata *const _metadata,
+		const int ruleset_fd, const __u64 allowed_access,
+		const char *const path)
+{
+	struct landlock_path_beneath_attr path_beneath = {
+		.allowed_access = allowed_access,
+	};
+
+	path_beneath.parent_fd = open(path, O_PATH | O_CLOEXEC);
+	ASSERT_LE(0, path_beneath.parent_fd) {
+		TH_LOG("Failed to open directory \"%s\": %s", path,
+				strerror(errno));
+	}
+	ASSERT_EQ(0, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+				&path_beneath, 0)) {
+		TH_LOG("Failed to update the ruleset with \"%s\": %s", path,
+				strerror(errno));
+	}
+	ASSERT_EQ(0, close(path_beneath.parent_fd));
+}
+
+struct rule {
+	const char *path;
+	__u64 access;
+};
+
+#define ACCESS_RO ( \
+	LANDLOCK_ACCESS_FS_READ_FILE | \
+	LANDLOCK_ACCESS_FS_READ_DIR)
+
+#define ACCESS_RW ( \
+	ACCESS_RO | \
+	LANDLOCK_ACCESS_FS_WRITE_FILE)
+
+static int create_ruleset(struct __test_metadata *const _metadata,
+		const __u64 handled_access_fs, const struct rule rules[])
+{
+	int ruleset_fd, i;
+	struct landlock_ruleset_attr ruleset_attr = {
+		.handled_access_fs = handled_access_fs,
+	};
+
+	ASSERT_NE(NULL, rules) {
+		TH_LOG("No rule list");
+	}
+	ASSERT_NE(NULL, rules[0].path) {
+		TH_LOG("Empty rule list");
+	}
+
+	ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+			sizeof(ruleset_attr), 0);
+	ASSERT_LE(0, ruleset_fd) {
+		TH_LOG("Failed to create a ruleset: %s", strerror(errno));
+	}
+
+	for (i = 0; rules[i].path; i++) {
+		add_path_beneath(_metadata, ruleset_fd, rules[i].access,
+				rules[i].path);
+	}
+	return ruleset_fd;
+}
+
+static void enforce_ruleset(struct __test_metadata *const _metadata,
+		const int ruleset_fd)
+{
+	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+	ASSERT_EQ(0, landlock_restrict_self(ruleset_fd, 0)) {
+		TH_LOG("Failed to enforce ruleset: %s", strerror(errno));
+	}
+}
+
+TEST_F_FORK(layout1, proc_nsfs)
+{
+	const struct rule rules[] = {
+		{
+			.path = "/dev/null",
+			.access = LANDLOCK_ACCESS_FS_READ_FILE |
+				LANDLOCK_ACCESS_FS_WRITE_FILE,
+		},
+		{}
+	};
+	struct landlock_path_beneath_attr path_beneath;
+	const int ruleset_fd = create_ruleset(_metadata, rules[0].access |
+			LANDLOCK_ACCESS_FS_READ_DIR, rules);
+
+	ASSERT_LE(0, ruleset_fd);
+	ASSERT_EQ(0, test_open("/proc/self/ns/mnt", O_RDONLY));
+
+	enforce_ruleset(_metadata, ruleset_fd);
+
+	ASSERT_EQ(EACCES, test_open("/", O_RDONLY));
+	ASSERT_EQ(EACCES, test_open("/dev", O_RDONLY));
+	ASSERT_EQ(0, test_open("/dev/null", O_RDONLY));
+	ASSERT_EQ(EACCES, test_open("/dev/full", O_RDONLY));
+
+	ASSERT_EQ(EACCES, test_open("/proc", O_RDONLY));
+	ASSERT_EQ(EACCES, test_open("/proc/self", O_RDONLY));
+	ASSERT_EQ(EACCES, test_open("/proc/self/ns", O_RDONLY));
+	/*
+	 * Because nsfs is an internal filesystem, /proc/self/ns/mnt is a
+	 * disconnected path.  Such a path cannot be identified and must
+	 * therefore be allowed.
+	 */
+	ASSERT_EQ(0, test_open("/proc/self/ns/mnt", O_RDONLY));
+
+	/*
+	 * Checks that it is not possible to add nsfs-like filesystem
+	 * references to a ruleset.
+	 */
+	path_beneath.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE |
+		LANDLOCK_ACCESS_FS_WRITE_FILE;
+	path_beneath.parent_fd = open("/proc/self/ns/mnt", O_PATH | O_CLOEXEC);
+	ASSERT_LE(0, path_beneath.parent_fd);
+	ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+				&path_beneath, 0));
+	ASSERT_EQ(EBADFD, errno);
+	ASSERT_EQ(0, close(path_beneath.parent_fd));
+}
+
+TEST_F_FORK(layout1, unpriv)
+{
+	const struct rule rules[] = {
+		{
+			.path = dir_s1d2,
+			.access = ACCESS_RO,
+		},
+		{}
+	};
+	int ruleset_fd;
+
+	drop_caps(_metadata);
+
+	ruleset_fd = create_ruleset(_metadata, ACCESS_RO, rules);
+	ASSERT_LE(0, ruleset_fd);
+	ASSERT_EQ(-1, landlock_restrict_self(ruleset_fd, 0));
+	ASSERT_EQ(EPERM, errno);
+
+	/* enforce_ruleset() calls prctl(no_new_privs). */
+	enforce_ruleset(_metadata, ruleset_fd);
+	ASSERT_EQ(0, close(ruleset_fd));
+}
+
+TEST_F_FORK(layout1, effective_access)
+{
+	const struct rule rules[] = {
+		{
+			.path = dir_s1d2,
+			.access = ACCESS_RO,
+		},
+		{
+			.path = file1_s2d2,
+			.access = LANDLOCK_ACCESS_FS_READ_FILE |
+				LANDLOCK_ACCESS_FS_WRITE_FILE,
+		},
+		{}
+	};
+	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+	char buf;
+	int reg_fd;
+
+	ASSERT_LE(0, ruleset_fd);
+	enforce_ruleset(_metadata, ruleset_fd);
+	ASSERT_EQ(0, close(ruleset_fd));
+
+	/* Tests on a directory. */
+	ASSERT_EQ(EACCES, test_open("/", O_RDONLY));
+	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY));
+	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+
+	/* Tests on a file. */
+	ASSERT_EQ(EACCES, test_open(dir_s2d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s2d2, O_RDONLY));
+
+	/* Checks effective read and write actions.
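+	 * (writes one byte, seeks back to the start, and reads it again
+	 * through the very same file descriptor).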
*/ + reg_fd = open(file1_s2d2, O_RDWR | O_CLOEXEC); + ASSERT_LE(0, reg_fd); + ASSERT_EQ(1, write(reg_fd, ".", 1)); + ASSERT_LE(0, lseek(reg_fd, 0, SEEK_SET)); + ASSERT_EQ(1, read(reg_fd, &buf, 1)); + ASSERT_EQ('.', buf); + ASSERT_EQ(0, close(reg_fd)); + + /* Just in case, double-checks effective actions. */ + reg_fd = open(file1_s2d2, O_RDONLY | O_CLOEXEC); + ASSERT_LE(0, reg_fd); + ASSERT_EQ(-1, write(reg_fd, &buf, 1)); + ASSERT_EQ(EBADF, errno); + ASSERT_EQ(0, close(reg_fd)); +} + +TEST_F_FORK(layout1, unhandled_access) +{ + const struct rule rules[] = { + { + .path = dir_s1d2, + .access = ACCESS_RO, + }, + {} + }; + /* Here, we only handle read accesses, not write accesses. */ + const int ruleset_fd = create_ruleset(_metadata, ACCESS_RO, rules); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* + * Because the policy does not handle LANDLOCK_ACCESS_FS_WRITE_FILE, + * opening for write-only should be allowed, but not read-write. + */ + ASSERT_EQ(0, test_open(file1_s1d1, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR)); + + ASSERT_EQ(0, test_open(file1_s1d2, O_WRONLY)); + ASSERT_EQ(0, test_open(file1_s1d2, O_RDWR)); +} + +TEST_F_FORK(layout1, ruleset_overlap) +{ + const struct rule rules[] = { + /* These rules should be ORed among them. */ + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_READ_DIR, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks s1d1 hierarchy. */ + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); + + /* Checks s1d2 hierarchy. */ + ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY)); + ASSERT_EQ(0, test_open(file1_s1d2, O_WRONLY)); + ASSERT_EQ(0, test_open(file1_s1d2, O_RDWR)); + ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY)); + + /* Checks s1d3 hierarchy. */ + ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY)); + ASSERT_EQ(0, test_open(file1_s1d3, O_WRONLY)); + ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR)); + ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY)); +} + +TEST_F_FORK(layout1, non_overlapping_accesses) +{ + const struct rule layer1[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_MAKE_REG, + }, + {} + }; + const struct rule layer2[] = { + { + .path = dir_s1d3, + .access = LANDLOCK_ACCESS_FS_REMOVE_FILE, + }, + {} + }; + int ruleset_fd; + + ASSERT_EQ(0, unlink(file1_s1d1)); + ASSERT_EQ(0, unlink(file1_s1d2)); + + ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_MAKE_REG, + layer1); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(-1, mknod(file1_s1d1, S_IFREG | 0700, 0)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(0, mknod(file1_s1d2, S_IFREG | 0700, 0)); + ASSERT_EQ(0, unlink(file1_s1d2)); + + ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_REMOVE_FILE, + layer2); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Unchanged accesses for file creation. 
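+	 * Layer 2 only handles LANDLOCK_ACCESS_FS_REMOVE_FILE, so layer 1's
+	 * verdicts on file creation still apply as before:
+	 *     mknod(file1_s1d1) -> EACCES (no layer 1 rule covers dir_s1d1);
+	 *     mknod(file1_s1d2) -> 0 (allowed by the layer 1 rule on dir_s1d2).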
*/ + ASSERT_EQ(-1, mknod(file1_s1d1, S_IFREG | 0700, 0)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(0, mknod(file1_s1d2, S_IFREG | 0700, 0)); + + /* Checks file removing. */ + ASSERT_EQ(-1, unlink(file1_s1d2)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(0, unlink(file1_s1d3)); +} + +TEST_F_FORK(layout1, interleaved_masked_accesses) +{ + /* + * Checks overly restrictive rules: + * layer 1: allows R s1d1/s1d2/s1d3/file1 + * layer 2: allows RW s1d1/s1d2/s1d3 + * allows W s1d1/s1d2 + * denies R s1d1/s1d2 + * layer 3: allows R s1d1 + * layer 4: allows R s1d1/s1d2 + * denies W s1d1/s1d2 + * layer 5: allows R s1d1/s1d2 + * layer 6: allows X ---- + * layer 7: allows W s1d1/s1d2 + * denies R s1d1/s1d2 + */ + const struct rule layer1_read[] = { + /* Allows read access to file1_s1d3 with the first layer. */ + { + .path = file1_s1d3, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + {} + }; + /* First rule with write restrictions. */ + const struct rule layer2_read_write[] = { + /* Start by granting read-write access via its parent directory... */ + { + .path = dir_s1d3, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + /* ...but also denies read access via its grandparent directory. */ + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + {} + }; + const struct rule layer3_read[] = { + /* Allows read access via its great-grandparent directory. */ + { + .path = dir_s1d1, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + {} + }; + const struct rule layer4_read_write[] = { + /* + * Try to confuse the deny access by denying write (but not + * read) access via its grandparent directory. + */ + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + {} + }; + const struct rule layer5_read[] = { + /* + * Try to override layer2's deny read access by explicitly + * allowing read access via file1_s1d3's grandparent. + */ + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + {} + }; + const struct rule layer6_execute[] = { + /* + * Restricts an unrelated file hierarchy with a new access + * (non-overlapping) type. + */ + { + .path = dir_s2d1, + .access = LANDLOCK_ACCESS_FS_EXECUTE, + }, + {} + }; + const struct rule layer7_read_write[] = { + /* + * Finally, denies read access to file1_s1d3 via its + * grandparent. + */ + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + {} + }; + int ruleset_fd; + + ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE, + layer1_read); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks that read access is granted for file1_s1d3 with layer 1. */ + ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR)); + ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY)); + ASSERT_EQ(0, test_open(file2_s1d3, O_WRONLY)); + + ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, layer2_read_write); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks that previous access rights are unchanged with layer 2. */ + ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR)); + ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY)); + ASSERT_EQ(0, test_open(file2_s1d3, O_WRONLY)); + + ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE, + layer3_read); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks that previous access rights are unchanged with layer 3. 
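+	 * A new layer can only mask accesses, never grant new ones: the
+	 * effective access is the intersection (AND) of all enforced layers,
+	 * while rules within one layer are combined with a union (OR).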
*/ + ASSERT_EQ(0, test_open(file1_s1d3, O_RDWR)); + ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY)); + ASSERT_EQ(0, test_open(file2_s1d3, O_WRONLY)); + + /* This time, denies write access for the file hierarchy. */ + ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, layer4_read_write); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* + * Checks that the only change with layer 4 is that write access is + * denied. + */ + ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY)); + + ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE, + layer5_read); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks that previous access rights are unchanged with layer 5. */ + ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY)); + + ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_EXECUTE, + layer6_execute); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks that previous access rights are unchanged with layer 6. */ + ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY)); + + ruleset_fd = create_ruleset(_metadata, LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, layer7_read_write); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks read access is now denied with layer 7. */ + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file2_s1d3, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(file2_s1d3, O_RDONLY)); +} + +TEST_F_FORK(layout1, inherit_subset) +{ + const struct rule rules[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_READ_DIR, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); + + /* Write access is forbidden. */ + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY)); + /* Readdir access is allowed. */ + ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY)); + + /* Write access is forbidden. */ + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY)); + /* Readdir access is allowed. */ + ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY)); + + /* + * Tests shared rule extension: the following rules should not grant + * any new access, only remove some. Once enforced, these rules are + * ANDed with the previous ones. + */ + add_path_beneath(_metadata, ruleset_fd, LANDLOCK_ACCESS_FS_WRITE_FILE, + dir_s1d2); + /* + * According to ruleset_fd, dir_s1d2 should now have the + * LANDLOCK_ACCESS_FS_READ_FILE and LANDLOCK_ACCESS_FS_WRITE_FILE + * access rights (even if this directory is opened a second time). 
+	 * However, when enforcing this updated ruleset, the ruleset tied to
+	 * the current process (i.e. its domain) will still only grant
+	 * LANDLOCK_ACCESS_FS_READ_FILE and LANDLOCK_ACCESS_FS_READ_DIR on
+	 * dir_s1d2: LANDLOCK_ACCESS_FS_WRITE_FILE must not become allowed,
+	 * because that would be a privilege escalation.
+	 */
+	enforce_ruleset(_metadata, ruleset_fd);
+
+	/* Same tests and results as above. */
+	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
+	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
+
+	/* It is still forbidden to write in file1_s1d2. */
+	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
+	/* Readdir access is still allowed. */
+	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+
+	/* It is still forbidden to write in file1_s1d3. */
+	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+	/* Readdir access is still allowed. */
+	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+
+	/*
+	 * Tries to get more privileges by adding new access rights to the
+	 * parent directory: dir_s1d1.
+	 */
+	add_path_beneath(_metadata, ruleset_fd, ACCESS_RW, dir_s1d1);
+	enforce_ruleset(_metadata, ruleset_fd);
+
+	/* Same tests and results as above. */
+	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
+	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
+
+	/* It is still forbidden to write in file1_s1d2. */
+	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
+	/* Readdir access is still allowed. */
+	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+
+	/* It is still forbidden to write in file1_s1d3. */
+	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+	/* Readdir access is still allowed. */
+	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+
+	/*
+	 * Now, dir_s1d3 gets a new rule tied to it, only allowing
+	 * LANDLOCK_ACCESS_FS_WRITE_FILE.  The (kernel internal) difference is
+	 * that there was no rule tied to it before.
+	 */
+	add_path_beneath(_metadata, ruleset_fd, LANDLOCK_ACCESS_FS_WRITE_FILE,
+			dir_s1d3);
+	enforce_ruleset(_metadata, ruleset_fd);
+	ASSERT_EQ(0, close(ruleset_fd));
+
+	/*
+	 * Same tests and results as above.  The new rule on dir_s1d3 does not
+	 * mask the rule previously inherited from dir_s1d2: within a single
+	 * layer, rules tied to the same hierarchy are ORed together.
+	 */
+	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY));
+	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY));
+
+	/* It is still forbidden to write in file1_s1d2. */
+	ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY));
+	/* Readdir access is still allowed. */
+	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+
+	/* It is still forbidden to write in file1_s1d3. */
+	ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY));
+	/*
+	 * Readdir of dir_s1d3 is still allowed because of the OR policy inside
+	 * the same layer.
+	 */
+	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+}
+
+TEST_F_FORK(layout1, inherit_superset)
+{
+	const struct rule rules[] = {
+		{
+			.path = dir_s1d3,
+			.access = ACCESS_RO,
+		},
+		{}
+	};
+	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+	ASSERT_LE(0, ruleset_fd);
+	enforce_ruleset(_metadata, ruleset_fd);
+
+	/* Readdir access is denied for dir_s1d2. */
+	ASSERT_EQ(EACCES, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+	/* Readdir access is allowed for dir_s1d3. */
+	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+	/* File access is allowed for file1_s1d3.
+	 */
+	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+
+	/* Now dir_s1d2, parent of dir_s1d3, gets a new rule tied to it. */
+	add_path_beneath(_metadata, ruleset_fd, LANDLOCK_ACCESS_FS_READ_FILE |
+			LANDLOCK_ACCESS_FS_READ_DIR, dir_s1d2);
+	enforce_ruleset(_metadata, ruleset_fd);
+	ASSERT_EQ(0, close(ruleset_fd));
+
+	/* Readdir access is still denied for dir_s1d2. */
+	ASSERT_EQ(EACCES, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY));
+	/* Readdir access is still allowed for dir_s1d3. */
+	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY));
+	/* File access is still allowed for file1_s1d3. */
+	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+}
+
+TEST_F_FORK(layout1, max_layers)
+{
+	int i, err;
+	const struct rule rules[] = {
+		{
+			.path = dir_s1d2,
+			.access = ACCESS_RO,
+		},
+		{}
+	};
+	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+	ASSERT_LE(0, ruleset_fd);
+	for (i = 0; i < 64; i++)
+		enforce_ruleset(_metadata, ruleset_fd);
+
+	for (i = 0; i < 2; i++) {
+		err = landlock_restrict_self(ruleset_fd, 0);
+		ASSERT_EQ(-1, err);
+		ASSERT_EQ(E2BIG, errno);
+	}
+	ASSERT_EQ(0, close(ruleset_fd));
+}
+
+TEST_F_FORK(layout1, empty_or_same_ruleset)
+{
+	struct landlock_ruleset_attr ruleset_attr = {};
+	int ruleset_fd;
+
+	/* Tests empty handled_access_fs. */
+	ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+			sizeof(ruleset_attr), 0);
+	ASSERT_EQ(-1, ruleset_fd);
+	ASSERT_EQ(ENOMSG, errno);
+
+	/* Enforces a policy which denies read access to all files. */
+	ruleset_attr.handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE;
+	ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+			sizeof(ruleset_attr), 0);
+	ASSERT_LE(0, ruleset_fd);
+	enforce_ruleset(_metadata, ruleset_fd);
+	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
+
+	/* Nests a policy which denies read access to all directories. */
+	ruleset_attr.handled_access_fs = LANDLOCK_ACCESS_FS_READ_DIR;
+	ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+			sizeof(ruleset_attr), 0);
+	ASSERT_LE(0, ruleset_fd);
+	enforce_ruleset(_metadata, ruleset_fd);
+	ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY));
+	ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY));
+
+	/* Enforces a second time with the same ruleset. */
+	enforce_ruleset(_metadata, ruleset_fd);
+	ASSERT_EQ(0, close(ruleset_fd));
+}
+
+TEST_F_FORK(layout1, rule_on_mountpoint)
+{
+	const struct rule rules[] = {
+		{
+			.path = dir_s1d1,
+			.access = ACCESS_RO,
+		},
+		{
+			/* dir_s3d2 is a mount point. */
+			.path = dir_s3d2,
+			.access = ACCESS_RO,
+		},
+		{}
+	};
+	const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules);
+
+	ASSERT_LE(0, ruleset_fd);
+	enforce_ruleset(_metadata, ruleset_fd);
+	ASSERT_EQ(0, close(ruleset_fd));
+
+	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
+
+	ASSERT_EQ(EACCES, test_open(dir_s2d1, O_RDONLY));
+
+	ASSERT_EQ(EACCES, test_open(dir_s3d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s3d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s3d3, O_RDONLY));
+}
+
+TEST_F_FORK(layout1, rule_over_mountpoint)
+{
+	const struct rule rules[] = {
+		{
+			.path = dir_s1d1,
+			.access = ACCESS_RO,
+		},
+		{
+			/* dir_s3d2 is a mount point.
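+			 * A rule on dir_s3d1 is therefore added above a
+			 * mount point: it must also apply to the whole tree
+			 * mounted beneath it (dir_s3d2 and dir_s3d3).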
*/ + .path = dir_s3d1, + .access = ACCESS_RO, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY)); + + ASSERT_EQ(EACCES, test_open(dir_s2d1, O_RDONLY)); + + ASSERT_EQ(0, test_open(dir_s3d1, O_RDONLY)); + ASSERT_EQ(0, test_open(dir_s3d2, O_RDONLY)); + ASSERT_EQ(0, test_open(dir_s3d3, O_RDONLY)); +} + +/* + * This test verifies that we can apply a landlock rule on the root directory + * (which might require special handling). + */ +TEST_F_FORK(layout1, rule_over_root_allow_then_deny) +{ + struct rule rules[] = { + { + .path = "/", + .access = ACCESS_RO, + }, + {} + }; + int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks allowed access. */ + ASSERT_EQ(0, test_open("/", O_RDONLY)); + ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY)); + + rules[0].access = LANDLOCK_ACCESS_FS_READ_FILE; + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks denied access (on a directory). */ + ASSERT_EQ(EACCES, test_open("/", O_RDONLY)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY)); +} + +TEST_F_FORK(layout1, rule_over_root_deny) +{ + const struct rule rules[] = { + { + .path = "/", + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks denied access (on a directory). 
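+	 * The ruleset handles LANDLOCK_ACCESS_FS_READ_DIR but the rule on "/"
+	 * only grants LANDLOCK_ACCESS_FS_READ_FILE, so even the root
+	 * directory itself cannot be opened for reading.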
*/ + ASSERT_EQ(EACCES, test_open("/", O_RDONLY)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY)); +} + +TEST_F_FORK(layout1, rule_inside_mount_ns) +{ + const struct rule rules[] = { + { + .path = "s3d3", + .access = ACCESS_RO, + }, + {} + }; + int ruleset_fd; + + set_cap(_metadata, CAP_SYS_ADMIN); + ASSERT_EQ(0, syscall(SYS_pivot_root, dir_s3d2, dir_s3d3)) { + TH_LOG("Failed to pivot root: %s", strerror(errno)); + }; + ASSERT_EQ(0, chdir("/")); + clear_cap(_metadata, CAP_SYS_ADMIN); + + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(0, test_open("s3d3", O_RDONLY)); + ASSERT_EQ(EACCES, test_open("/", O_RDONLY)); +} + +TEST_F_FORK(layout1, mount_and_pivot) +{ + const struct rule rules[] = { + { + .path = dir_s3d2, + .access = ACCESS_RO, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + set_cap(_metadata, CAP_SYS_ADMIN); + ASSERT_EQ(-1, mount(NULL, dir_s3d2, NULL, MS_RDONLY, NULL)); + ASSERT_EQ(EPERM, errno); + ASSERT_EQ(-1, syscall(SYS_pivot_root, dir_s3d2, dir_s3d3)); + ASSERT_EQ(EPERM, errno); + clear_cap(_metadata, CAP_SYS_ADMIN); +} + +TEST_F_FORK(layout1, move_mount) +{ + const struct rule rules[] = { + { + .path = dir_s3d2, + .access = ACCESS_RO, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); + + ASSERT_LE(0, ruleset_fd); + + set_cap(_metadata, CAP_SYS_ADMIN); + ASSERT_EQ(0, syscall(SYS_move_mount, AT_FDCWD, dir_s3d2, AT_FDCWD, + dir_s1d2, 0)) { + TH_LOG("Failed to move mount: %s", strerror(errno)); + } + + ASSERT_EQ(0, syscall(SYS_move_mount, AT_FDCWD, dir_s1d2, AT_FDCWD, + dir_s3d2, 0)); + clear_cap(_metadata, CAP_SYS_ADMIN); + + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + set_cap(_metadata, CAP_SYS_ADMIN); + ASSERT_EQ(-1, syscall(SYS_move_mount, AT_FDCWD, dir_s3d2, AT_FDCWD, + dir_s1d2, 0)); + ASSERT_EQ(EPERM, errno); + clear_cap(_metadata, CAP_SYS_ADMIN); +} + +TEST_F_FORK(layout1, release_inodes) +{ + const struct rule rules[] = { + { + .path = dir_s1d1, + .access = ACCESS_RO, + }, + { + .path = dir_s3d2, + .access = ACCESS_RO, + }, + { + .path = dir_s3d3, + .access = ACCESS_RO, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, ACCESS_RW, rules); + + ASSERT_LE(0, ruleset_fd); + /* Unmount a file hierarchy while it is being used by a ruleset. */ + set_cap(_metadata, CAP_SYS_ADMIN); + ASSERT_EQ(0, umount(dir_s3d2)); + clear_cap(_metadata, CAP_SYS_ADMIN); + + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(dir_s3d2, O_RDONLY)); + /* This dir_s3d3 would not be allowed and does not exist anyway. */ + ASSERT_EQ(ENOENT, test_open(dir_s3d3, O_RDONLY)); +} + +enum relative_access { + REL_OPEN, + REL_CHDIR, + REL_CHROOT_ONLY, + REL_CHROOT_CHDIR, +}; + +static void test_relative_path(struct __test_metadata *const _metadata, + const enum relative_access rel) +{ + /* + * Common layer to check that chroot doesn't ignore it (i.e. a chroot + * is not a disconnected root directory). 
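+	 * Paths walked from the new root or the current working directory
+	 * must still be matched against this base layer.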
+ */ + const struct rule layer1_base[] = { + { + .path = TMP_DIR, + .access = ACCESS_RO, + }, + {} + }; + const struct rule layer2_subs[] = { + { + .path = dir_s1d2, + .access = ACCESS_RO, + }, + { + .path = dir_s2d2, + .access = ACCESS_RO, + }, + {} + }; + int dirfd, ruleset_fd; + + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1_base); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2_subs); + + ASSERT_LE(0, ruleset_fd); + switch (rel) { + case REL_OPEN: + case REL_CHDIR: + break; + case REL_CHROOT_ONLY: + ASSERT_EQ(0, chdir(dir_s2d2)); + break; + case REL_CHROOT_CHDIR: + ASSERT_EQ(0, chdir(dir_s1d2)); + break; + default: + ASSERT_TRUE(false); + return; + } + + set_cap(_metadata, CAP_SYS_CHROOT); + enforce_ruleset(_metadata, ruleset_fd); + + switch (rel) { + case REL_OPEN: + dirfd = open(dir_s1d2, O_DIRECTORY); + ASSERT_LE(0, dirfd); + break; + case REL_CHDIR: + ASSERT_EQ(0, chdir(dir_s1d2)); + dirfd = AT_FDCWD; + break; + case REL_CHROOT_ONLY: + /* Do chroot into dir_s1d2 (relative to dir_s2d2). */ + ASSERT_EQ(0, chroot("../../s1d1/s1d2")) { + TH_LOG("Failed to chroot: %s", strerror(errno)); + } + dirfd = AT_FDCWD; + break; + case REL_CHROOT_CHDIR: + /* Do chroot into dir_s1d2. */ + ASSERT_EQ(0, chroot(".")) { + TH_LOG("Failed to chroot: %s", strerror(errno)); + } + dirfd = AT_FDCWD; + break; + } + + ASSERT_EQ((rel == REL_CHROOT_CHDIR) ? 0 : EACCES, + test_open_rel(dirfd, "..", O_RDONLY)); + ASSERT_EQ(0, test_open_rel(dirfd, ".", O_RDONLY)); + + if (rel == REL_CHROOT_ONLY) { + /* The current directory is dir_s2d2. */ + ASSERT_EQ(0, test_open_rel(dirfd, "./s2d3", O_RDONLY)); + } else { + /* The current directory is dir_s1d2. */ + ASSERT_EQ(0, test_open_rel(dirfd, "./s1d3", O_RDONLY)); + } + + if (rel == REL_CHROOT_ONLY || rel == REL_CHROOT_CHDIR) { + /* Checks the root dir_s1d2. 
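+		 * After the chroot, "/" resolves to dir_s1d2 and "/.." loops
+		 * back to the same root, so both layers allow these lookups.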
*/ + ASSERT_EQ(0, test_open_rel(dirfd, "/..", O_RDONLY)); + ASSERT_EQ(0, test_open_rel(dirfd, "/", O_RDONLY)); + ASSERT_EQ(0, test_open_rel(dirfd, "/f1", O_RDONLY)); + ASSERT_EQ(0, test_open_rel(dirfd, "/s1d3", O_RDONLY)); + } + + if (rel != REL_CHROOT_CHDIR) { + ASSERT_EQ(EACCES, test_open_rel(dirfd, "../../s1d1", O_RDONLY)); + ASSERT_EQ(0, test_open_rel(dirfd, "../../s1d1/s1d2", O_RDONLY)); + ASSERT_EQ(0, test_open_rel(dirfd, "../../s1d1/s1d2/s1d3", O_RDONLY)); + + ASSERT_EQ(EACCES, test_open_rel(dirfd, "../../s2d1", O_RDONLY)); + ASSERT_EQ(0, test_open_rel(dirfd, "../../s2d1/s2d2", O_RDONLY)); + ASSERT_EQ(0, test_open_rel(dirfd, "../../s2d1/s2d2/s2d3", O_RDONLY)); + } + + if (rel == REL_OPEN) + ASSERT_EQ(0, close(dirfd)); + ASSERT_EQ(0, close(ruleset_fd)); +} + +TEST_F_FORK(layout1, relative_open) +{ + test_relative_path(_metadata, REL_OPEN); +} + +TEST_F_FORK(layout1, relative_chdir) +{ + test_relative_path(_metadata, REL_CHDIR); +} + +TEST_F_FORK(layout1, relative_chroot_only) +{ + test_relative_path(_metadata, REL_CHROOT_ONLY); +} + +TEST_F_FORK(layout1, relative_chroot_chdir) +{ + test_relative_path(_metadata, REL_CHROOT_CHDIR); +} + +static void copy_binary(struct __test_metadata *const _metadata, + const char *const dst_path) +{ + int dst_fd, src_fd; + struct stat statbuf; + + dst_fd = open(dst_path, O_WRONLY | O_TRUNC | O_CLOEXEC); + ASSERT_LE(0, dst_fd) { + TH_LOG("Failed to open \"%s\": %s", dst_path, + strerror(errno)); + } + src_fd = open(BINARY_PATH, O_RDONLY | O_CLOEXEC); + ASSERT_LE(0, src_fd) { + TH_LOG("Failed to open \"" BINARY_PATH "\": %s", + strerror(errno)); + } + ASSERT_EQ(0, fstat(src_fd, &statbuf)); + ASSERT_EQ(statbuf.st_size, sendfile(dst_fd, src_fd, 0, + statbuf.st_size)); + ASSERT_EQ(0, close(src_fd)); + ASSERT_EQ(0, close(dst_fd)); +} + +static void test_execute(struct __test_metadata *const _metadata, + const int err, const char *const path) +{ + int status; + char *const argv[] = {(char *)path, NULL}; + const pid_t child = fork(); + + ASSERT_LE(0, child); + if (child == 0) { + ASSERT_EQ(err ? -1 : 0, execve(path, argv, NULL)) { + TH_LOG("Failed to execute \"%s\": %s", path, + strerror(errno)); + }; + ASSERT_EQ(err, errno); + _exit(_metadata->passed ? 2 : 1); + return; + } + ASSERT_EQ(child, waitpid(child, &status, 0)); + ASSERT_EQ(1, WIFEXITED(status)); + ASSERT_EQ(err ? 
2 : 0, WEXITSTATUS(status)) { + TH_LOG("Unexpected return code for \"%s\": %s", path, + strerror(errno)); + }; +} + +TEST_F_FORK(layout1, execute) +{ + const struct rule rules[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_EXECUTE, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, rules[0].access, + rules); + + ASSERT_LE(0, ruleset_fd); + copy_binary(_metadata, file1_s1d1); + copy_binary(_metadata, file1_s1d2); + copy_binary(_metadata, file1_s1d3); + + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY)); + ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY)); + test_execute(_metadata, EACCES, file1_s1d1); + + ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY)); + ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY)); + test_execute(_metadata, 0, file1_s1d2); + + ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY)); + ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY)); + test_execute(_metadata, 0, file1_s1d3); +} + +TEST_F_FORK(layout1, link) +{ + const struct rule rules[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_MAKE_REG, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, rules[0].access, + rules); + + ASSERT_LE(0, ruleset_fd); + + ASSERT_EQ(0, unlink(file1_s1d1)); + ASSERT_EQ(0, unlink(file1_s1d2)); + ASSERT_EQ(0, unlink(file1_s1d3)); + + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(-1, link(file2_s1d1, file1_s1d1)); + ASSERT_EQ(EACCES, errno); + /* Denies linking because of reparenting. */ + ASSERT_EQ(-1, link(file1_s2d1, file1_s1d2)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, link(file2_s1d2, file1_s1d3)); + ASSERT_EQ(EXDEV, errno); + + ASSERT_EQ(0, link(file2_s1d2, file1_s1d2)); + ASSERT_EQ(0, link(file2_s1d3, file1_s1d3)); +} + +TEST_F_FORK(layout1, rename_file) +{ + const struct rule rules[] = { + { + .path = dir_s1d3, + .access = LANDLOCK_ACCESS_FS_REMOVE_FILE, + }, + { + .path = dir_s2d2, + .access = LANDLOCK_ACCESS_FS_REMOVE_FILE, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, rules[0].access, + rules); + + ASSERT_LE(0, ruleset_fd); + + ASSERT_EQ(0, unlink(file1_s1d1)); + ASSERT_EQ(0, unlink(file1_s1d2)); + + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* + * Tries to replace a file, from a directory that allows file removal, + * but to a different directory (which also allows file removal). + */ + ASSERT_EQ(-1, rename(file1_s2d3, file1_s1d3)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d3, AT_FDCWD, file1_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d3, AT_FDCWD, dir_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EXDEV, errno); + + /* + * Tries to replace a file, from a directory that denies file removal, + * to a different directory (which allows file removal). + */ + ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d3)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, file1_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d2, AT_FDCWD, file1_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EXDEV, errno); + + /* Exchanges files and directories that partially allow removal. */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d2, AT_FDCWD, file1_s2d1, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, dir_s2d2, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + + /* Renames files with different parents. 
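+	 * Reparenting is still refused with EXDEV, whatever the removal
+	 * rights on the two parent directories, as with link(2) above.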
*/ + ASSERT_EQ(-1, rename(file1_s2d2, file1_s1d2)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(0, unlink(file1_s1d3)); + ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d3)); + ASSERT_EQ(EXDEV, errno); + + /* Exchanges and renames files with same parent. */ + ASSERT_EQ(0, renameat2(AT_FDCWD, file2_s2d3, AT_FDCWD, file1_s2d3, + RENAME_EXCHANGE)); + ASSERT_EQ(0, rename(file2_s2d3, file1_s2d3)); + + /* Exchanges files and directories with same parent, twice. */ + ASSERT_EQ(0, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s2d3, + RENAME_EXCHANGE)); + ASSERT_EQ(0, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s2d3, + RENAME_EXCHANGE)); +} + +TEST_F_FORK(layout1, rename_dir) +{ + const struct rule rules[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_REMOVE_DIR, + }, + { + .path = dir_s2d1, + .access = LANDLOCK_ACCESS_FS_REMOVE_DIR, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, rules[0].access, + rules); + + ASSERT_LE(0, ruleset_fd); + + /* Empties dir_s1d3 to allow renaming. */ + ASSERT_EQ(0, unlink(file1_s1d3)); + ASSERT_EQ(0, unlink(file2_s1d3)); + + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Exchanges and renames directory to a different parent. */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d3, AT_FDCWD, dir_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, rename(dir_s2d3, dir_s1d3)); + ASSERT_EQ(EXDEV, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d2, AT_FDCWD, dir_s1d3, + RENAME_EXCHANGE)); + ASSERT_EQ(EXDEV, errno); + + /* + * Exchanges directory to the same parent, which doesn't allow + * directory removal. + */ + ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s1d1, AT_FDCWD, dir_s2d1, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s1d1, AT_FDCWD, dir_s1d2, + RENAME_EXCHANGE)); + ASSERT_EQ(EACCES, errno); + + /* + * Exchanges and renames directory to the same parent, which allows + * directory removal. + */ + ASSERT_EQ(0, renameat2(AT_FDCWD, dir_s1d3, AT_FDCWD, file1_s1d2, + RENAME_EXCHANGE)); + ASSERT_EQ(0, unlink(dir_s1d3)); + ASSERT_EQ(0, mkdir(dir_s1d3, 0700)); + ASSERT_EQ(0, rename(file1_s1d2, dir_s1d3)); + ASSERT_EQ(0, rmdir(dir_s1d3)); +} + +TEST_F_FORK(layout1, remove_dir) +{ + const struct rule rules[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_REMOVE_DIR, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, rules[0].access, + rules); + + ASSERT_LE(0, ruleset_fd); + + ASSERT_EQ(0, unlink(file1_s1d1)); + ASSERT_EQ(0, unlink(file1_s1d2)); + ASSERT_EQ(0, unlink(file1_s1d3)); + ASSERT_EQ(0, unlink(file2_s1d3)); + + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(0, rmdir(dir_s1d3)); + ASSERT_EQ(0, mkdir(dir_s1d3, 0700)); + ASSERT_EQ(0, unlinkat(AT_FDCWD, dir_s1d3, AT_REMOVEDIR)); + + /* dir_s1d2 itself cannot be removed. 
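+	 * Removal rights are checked against the directory containing the
+	 * removed entry: dir_s1d3 (beneath dir_s1d2) could be removed above,
+	 * but dir_s1d2 and dir_s1d1 live in directories the rule does not
+	 * cover.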
*/ + ASSERT_EQ(-1, rmdir(dir_s1d2)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, unlinkat(AT_FDCWD, dir_s1d2, AT_REMOVEDIR)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, rmdir(dir_s1d1)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, unlinkat(AT_FDCWD, dir_s1d1, AT_REMOVEDIR)); + ASSERT_EQ(EACCES, errno); +} + +TEST_F_FORK(layout1, remove_file) +{ + const struct rule rules[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_REMOVE_FILE, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, rules[0].access, + rules); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(-1, unlink(file1_s1d1)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, unlinkat(AT_FDCWD, file1_s1d1, 0)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(0, unlink(file1_s1d2)); + ASSERT_EQ(0, unlinkat(AT_FDCWD, file1_s1d3, 0)); +} + +static void test_make_file(struct __test_metadata *const _metadata, + const __u64 access, const mode_t mode, const dev_t dev) +{ + const struct rule rules[] = { + { + .path = dir_s1d2, + .access = access, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, access, rules); + + ASSERT_LE(0, ruleset_fd); + + ASSERT_EQ(0, unlink(file1_s1d1)); + ASSERT_EQ(0, unlink(file2_s1d1)); + ASSERT_EQ(0, mknod(file2_s1d1, mode | 0400, dev)) { + TH_LOG("Failed to make file \"%s\": %s", + file2_s1d1, strerror(errno)); + }; + + ASSERT_EQ(0, unlink(file1_s1d2)); + ASSERT_EQ(0, unlink(file2_s1d2)); + + ASSERT_EQ(0, unlink(file1_s1d3)); + ASSERT_EQ(0, unlink(file2_s1d3)); + + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(-1, mknod(file1_s1d1, mode | 0400, dev)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, link(file2_s1d1, file1_s1d1)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, rename(file2_s1d1, file1_s1d1)); + ASSERT_EQ(EACCES, errno); + + ASSERT_EQ(0, mknod(file1_s1d2, mode | 0400, dev)) { + TH_LOG("Failed to make file \"%s\": %s", + file1_s1d2, strerror(errno)); + }; + ASSERT_EQ(0, link(file1_s1d2, file2_s1d2)); + ASSERT_EQ(0, unlink(file2_s1d2)); + ASSERT_EQ(0, rename(file1_s1d2, file2_s1d2)); + + ASSERT_EQ(0, mknod(file1_s1d3, mode | 0400, dev)); + ASSERT_EQ(0, link(file1_s1d3, file2_s1d3)); + ASSERT_EQ(0, unlink(file2_s1d3)); + ASSERT_EQ(0, rename(file1_s1d3, file2_s1d3)); +} + +TEST_F_FORK(layout1, make_char) +{ + /* Creates a /dev/null device. */ + set_cap(_metadata, CAP_MKNOD); + test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_CHAR, S_IFCHR, + makedev(1, 3)); +} + +TEST_F_FORK(layout1, make_block) +{ + /* Creates a /dev/loop0 device. 
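+	 * (block device, major 7, minor 0; mknod(2) needs the CAP_MKNOD
+	 * capability set just above).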
*/ + set_cap(_metadata, CAP_MKNOD); + test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_BLOCK, S_IFBLK, + makedev(7, 0)); +} + +TEST_F_FORK(layout1, make_reg_1) +{ + test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_REG, S_IFREG, 0); +} + +TEST_F_FORK(layout1, make_reg_2) +{ + test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_REG, 0, 0); +} + +TEST_F_FORK(layout1, make_sock) +{ + test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_SOCK, S_IFSOCK, 0); +} + +TEST_F_FORK(layout1, make_fifo) +{ + test_make_file(_metadata, LANDLOCK_ACCESS_FS_MAKE_FIFO, S_IFIFO, 0); +} + +TEST_F_FORK(layout1, make_sym) +{ + const struct rule rules[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_MAKE_SYM, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, rules[0].access, + rules); + + ASSERT_LE(0, ruleset_fd); + + ASSERT_EQ(0, unlink(file1_s1d1)); + ASSERT_EQ(0, unlink(file2_s1d1)); + ASSERT_EQ(0, symlink("none", file2_s1d1)); + + ASSERT_EQ(0, unlink(file1_s1d2)); + ASSERT_EQ(0, unlink(file2_s1d2)); + + ASSERT_EQ(0, unlink(file1_s1d3)); + ASSERT_EQ(0, unlink(file2_s1d3)); + + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(-1, symlink("none", file1_s1d1)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, link(file2_s1d1, file1_s1d1)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(-1, rename(file2_s1d1, file1_s1d1)); + ASSERT_EQ(EACCES, errno); + + ASSERT_EQ(0, symlink("none", file1_s1d2)); + ASSERT_EQ(0, link(file1_s1d2, file2_s1d2)); + ASSERT_EQ(0, unlink(file2_s1d2)); + ASSERT_EQ(0, rename(file1_s1d2, file2_s1d2)); + + ASSERT_EQ(0, symlink("none", file1_s1d3)); + ASSERT_EQ(0, link(file1_s1d3, file2_s1d3)); + ASSERT_EQ(0, unlink(file2_s1d3)); + ASSERT_EQ(0, rename(file1_s1d3, file2_s1d3)); +} + +TEST_F_FORK(layout1, make_dir) +{ + const struct rule rules[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_MAKE_DIR, + }, + {} + }; + const int ruleset_fd = create_ruleset(_metadata, rules[0].access, + rules); + + ASSERT_LE(0, ruleset_fd); + + ASSERT_EQ(0, unlink(file1_s1d1)); + ASSERT_EQ(0, unlink(file1_s1d2)); + ASSERT_EQ(0, unlink(file1_s1d3)); + + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Uses file_* as directory names. 
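+	 * (the f1 files were unlinked above, so their paths are free to be
+	 * reused by mkdir).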
*/ + ASSERT_EQ(-1, mkdir(file1_s1d1, 0700)); + ASSERT_EQ(EACCES, errno); + ASSERT_EQ(0, mkdir(file1_s1d2, 0700)); + ASSERT_EQ(0, mkdir(file1_s1d3, 0700)); +} + +static int open_proc_fd(struct __test_metadata *const _metadata, const int fd, + const int open_flags) +{ + static const char path_template[] = "/proc/self/fd/%d"; + char procfd_path[sizeof(path_template) + 10]; + const int procfd_path_size = snprintf(procfd_path, sizeof(procfd_path), + path_template, fd); + + ASSERT_LT(procfd_path_size, sizeof(procfd_path)); + return open(procfd_path, open_flags); +} + +TEST_F_FORK(layout1, proc_unlinked_file) +{ + const struct rule rules[] = { + { + .path = file1_s1d2, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + {} + }; + int reg_fd, proc_fd; + const int ruleset_fd = create_ruleset(_metadata, + LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, rules); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDWR)); + ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY)); + reg_fd = open(file1_s1d2, O_RDONLY | O_CLOEXEC); + ASSERT_LE(0, reg_fd); + ASSERT_EQ(0, unlink(file1_s1d2)); + + proc_fd = open_proc_fd(_metadata, reg_fd, O_RDONLY | O_CLOEXEC); + ASSERT_LE(0, proc_fd); + ASSERT_EQ(0, close(proc_fd)); + + proc_fd = open_proc_fd(_metadata, reg_fd, O_RDWR | O_CLOEXEC); + ASSERT_EQ(-1, proc_fd) { + TH_LOG("Successfully opened /proc/self/fd/%d: %s", + reg_fd, strerror(errno)); + } + ASSERT_EQ(EACCES, errno); + + ASSERT_EQ(0, close(reg_fd)); +} + +TEST_F_FORK(layout1, proc_pipe) +{ + int proc_fd; + int pipe_fds[2]; + char buf = '\0'; + const struct rule rules[] = { + { + .path = dir_s1d2, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + {} + }; + /* Limits read and write access to files tied to the filesystem. */ + const int ruleset_fd = create_ruleset(_metadata, rules[0].access, + rules); + + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks enforcement for normal files. */ + ASSERT_EQ(0, test_open(file1_s1d2, O_RDWR)); + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDWR)); + + /* Checks access to pipes through FD. */ + ASSERT_EQ(0, pipe2(pipe_fds, O_CLOEXEC)); + ASSERT_EQ(1, write(pipe_fds[1], ".", 1)) { + TH_LOG("Failed to write in pipe: %s", strerror(errno)); + } + ASSERT_EQ(1, read(pipe_fds[0], &buf, 1)); + ASSERT_EQ('.', buf); + + /* Checks write access to pipe through /proc/self/fd . */ + proc_fd = open_proc_fd(_metadata, pipe_fds[1], O_WRONLY | O_CLOEXEC); + ASSERT_LE(0, proc_fd); + ASSERT_EQ(1, write(proc_fd, ".", 1)) { + TH_LOG("Failed to write through /proc/self/fd/%d: %s", + pipe_fds[1], strerror(errno)); + } + ASSERT_EQ(0, close(proc_fd)); + + /* Checks read access to pipe through /proc/self/fd . 
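+	 * Pipes are not tied to the filesystem, so Landlock does not
+	 * restrict them, even when they are reopened through procfs.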
+	 */
+	proc_fd = open_proc_fd(_metadata, pipe_fds[0], O_RDONLY | O_CLOEXEC);
+	ASSERT_LE(0, proc_fd);
+	buf = '\0';
+	ASSERT_EQ(1, read(proc_fd, &buf, 1)) {
+		TH_LOG("Failed to read through /proc/self/fd/%d: %s",
+				pipe_fds[0], strerror(errno));
+	}
+	ASSERT_EQ(0, close(proc_fd));
+
+	ASSERT_EQ(0, close(pipe_fds[0]));
+	ASSERT_EQ(0, close(pipe_fds[1]));
+}
+
+FIXTURE(layout1_bind) {
+};
+
+FIXTURE_SETUP(layout1_bind)
+{
+	prepare_layout(_metadata);
+
+	create_layout1(_metadata);
+
+	set_cap(_metadata, CAP_SYS_ADMIN);
+	ASSERT_EQ(0, mount(dir_s1d2, dir_s2d2, NULL, MS_BIND, NULL));
+	clear_cap(_metadata, CAP_SYS_ADMIN);
+}
+
+FIXTURE_TEARDOWN(layout1_bind)
+{
+	set_cap(_metadata, CAP_SYS_ADMIN);
+	EXPECT_EQ(0, umount(dir_s2d2));
+	clear_cap(_metadata, CAP_SYS_ADMIN);
+
+	remove_layout1(_metadata);
+
+	cleanup_layout(_metadata);
+}
+
+static const char bind_dir_s1d3[] = TMP_DIR "/s2d1/s2d2/s1d3";
+static const char bind_file1_s1d3[] = TMP_DIR "/s2d1/s2d2/s1d3/f1";
+
+/*
+ * layout1_bind hierarchy:
+ *
+ * tmp
+ * ├── s1d1
+ * │   ├── f1
+ * │   ├── f2
+ * │   └── s1d2
+ * │       ├── f1
+ * │       ├── f2
+ * │       └── s1d3
+ * │           ├── f1
+ * │           └── f2
+ * ├── s2d1
+ * │   ├── f1
+ * │   └── s2d2
+ * │       ├── f1
+ * │       ├── f2
+ * │       └── s1d3
+ * │           ├── f1
+ * │           └── f2
+ * └── s3d1
+ *     └── s3d2
+ *         └── s3d3
+ */
+
+TEST_F_FORK(layout1_bind, no_restriction)
+{
+	ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s1d3, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY));
+
+	ASSERT_EQ(0, test_open(dir_s2d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s2d1, O_RDONLY));
+	ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY));
+	ASSERT_EQ(0, test_open(file1_s2d2, O_RDONLY));
+	ASSERT_EQ(ENOENT, test_open(dir_s2d3, O_RDONLY));
+	ASSERT_EQ(ENOENT, test_open(file1_s2d3, O_RDONLY));
+
+	ASSERT_EQ(0, test_open(bind_dir_s1d3, O_RDONLY));
+	ASSERT_EQ(0, test_open(bind_file1_s1d3, O_RDONLY));
+
+	ASSERT_EQ(0, test_open(dir_s3d1, O_RDONLY));
+}
+
+TEST_F_FORK(layout1_bind, same_content_same_file)
+{
+	/*
+	 * Sets access rights on parent directories of both source and
+	 * destination mount points.
+	 */
+	const struct rule layer1_parent[] = {
+		{
+			.path = dir_s1d1,
+			.access = ACCESS_RO,
+		},
+		{
+			.path = dir_s2d1,
+			.access = ACCESS_RW,
+		},
+		{}
+	};
+	/*
+	 * Sets access rights on the same bind-mounted directories.  Because
+	 * the underlying directory is the same, the result should be
+	 * ACCESS_RW for both mount points, but not for both hierarchies,
+	 * because of the first layer.
+	 */
+	const struct rule layer2_mount_point[] = {
+		{
+			.path = dir_s1d2,
+			.access = LANDLOCK_ACCESS_FS_READ_FILE,
+		},
+		{
+			.path = dir_s2d2,
+			.access = ACCESS_RW,
+		},
+		{}
+	};
+	/* Only allows read access to the s1d3 hierarchies. */
+	const struct rule layer3_source[] = {
+		{
+			.path = dir_s1d3,
+			.access = LANDLOCK_ACCESS_FS_READ_FILE,
+		},
+		{}
+	};
+	/* Removes all effective access rights (combined with layer 3). */
+	const struct rule layer4_destination[] = {
+		{
+			.path = bind_file1_s1d3,
+			.access = LANDLOCK_ACCESS_FS_WRITE_FILE,
+		},
+		{}
+	};
+	int ruleset_fd;
+
+	/* Sets rules for the parent directories. */
+	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1_parent);
+	ASSERT_LE(0, ruleset_fd);
+	enforce_ruleset(_metadata, ruleset_fd);
+	ASSERT_EQ(0, close(ruleset_fd));
+
+	/* Checks source hierarchy.
*/ + ASSERT_EQ(0, test_open(file1_s1d1, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY)); + ASSERT_EQ(0, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); + + ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY)); + ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY)); + + /* Checks destination hierarchy. */ + ASSERT_EQ(0, test_open(file1_s2d1, O_RDWR)); + ASSERT_EQ(0, test_open(dir_s2d1, O_RDONLY | O_DIRECTORY)); + + ASSERT_EQ(0, test_open(file1_s2d2, O_RDWR)); + ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY | O_DIRECTORY)); + + /* Sets rules for the mount points. */ + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2_mount_point); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks source hierarchy. */ + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d1, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(dir_s1d1, O_RDONLY | O_DIRECTORY)); + + ASSERT_EQ(0, test_open(file1_s1d2, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY)); + ASSERT_EQ(0, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY)); + + /* Checks destination hierarchy. */ + ASSERT_EQ(EACCES, test_open(file1_s2d1, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s2d1, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(dir_s2d1, O_RDONLY | O_DIRECTORY)); + + ASSERT_EQ(0, test_open(file1_s2d2, O_RDWR)); + ASSERT_EQ(0, test_open(dir_s2d2, O_RDONLY | O_DIRECTORY)); + ASSERT_EQ(0, test_open(bind_dir_s1d3, O_RDONLY | O_DIRECTORY)); + + /* Sets a (shared) rule only on the source. */ + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer3_source); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks source hierarchy. */ + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d2, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(dir_s1d2, O_RDONLY | O_DIRECTORY)); + + ASSERT_EQ(0, test_open(file1_s1d3, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(dir_s1d3, O_RDONLY | O_DIRECTORY)); + + /* Checks destination hierarchy. */ + ASSERT_EQ(EACCES, test_open(file1_s2d2, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s2d2, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(dir_s2d2, O_RDONLY | O_DIRECTORY)); + + ASSERT_EQ(0, test_open(bind_file1_s1d3, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(bind_file1_s1d3, O_WRONLY)); + ASSERT_EQ(EACCES, test_open(bind_dir_s1d3, O_RDONLY | O_DIRECTORY)); + + /* Sets a (shared) rule only on the destination. */ + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer4_destination); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks source hierarchy. */ + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(file1_s1d3, O_WRONLY)); + + /* Checks destination hierarchy. 
*/ + ASSERT_EQ(EACCES, test_open(bind_file1_s1d3, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(bind_file1_s1d3, O_WRONLY)); +} + +#define LOWER_BASE TMP_DIR "/lower" +#define LOWER_DATA LOWER_BASE "/data" +static const char lower_fl1[] = LOWER_DATA "/fl1"; +static const char lower_dl1[] = LOWER_DATA "/dl1"; +static const char lower_dl1_fl2[] = LOWER_DATA "/dl1/fl2"; +static const char lower_fo1[] = LOWER_DATA "/fo1"; +static const char lower_do1[] = LOWER_DATA "/do1"; +static const char lower_do1_fo2[] = LOWER_DATA "/do1/fo2"; +static const char lower_do1_fl3[] = LOWER_DATA "/do1/fl3"; + +static const char (*lower_base_files[])[] = { + &lower_fl1, + &lower_fo1, + NULL +}; +static const char (*lower_base_directories[])[] = { + &lower_dl1, + &lower_do1, + NULL +}; +static const char (*lower_sub_files[])[] = { + &lower_dl1_fl2, + &lower_do1_fo2, + &lower_do1_fl3, + NULL +}; + +#define UPPER_BASE TMP_DIR "/upper" +#define UPPER_DATA UPPER_BASE "/data" +#define UPPER_WORK UPPER_BASE "/work" +static const char upper_fu1[] = UPPER_DATA "/fu1"; +static const char upper_du1[] = UPPER_DATA "/du1"; +static const char upper_du1_fu2[] = UPPER_DATA "/du1/fu2"; +static const char upper_fo1[] = UPPER_DATA "/fo1"; +static const char upper_do1[] = UPPER_DATA "/do1"; +static const char upper_do1_fo2[] = UPPER_DATA "/do1/fo2"; +static const char upper_do1_fu3[] = UPPER_DATA "/do1/fu3"; + +static const char (*upper_base_files[])[] = { + &upper_fu1, + &upper_fo1, + NULL +}; +static const char (*upper_base_directories[])[] = { + &upper_du1, + &upper_do1, + NULL +}; +static const char (*upper_sub_files[])[] = { + &upper_du1_fu2, + &upper_do1_fo2, + &upper_do1_fu3, + NULL +}; + +#define MERGE_BASE TMP_DIR "/merge" +#define MERGE_DATA MERGE_BASE "/data" +static const char merge_fl1[] = MERGE_DATA "/fl1"; +static const char merge_dl1[] = MERGE_DATA "/dl1"; +static const char merge_dl1_fl2[] = MERGE_DATA "/dl1/fl2"; +static const char merge_fu1[] = MERGE_DATA "/fu1"; +static const char merge_du1[] = MERGE_DATA "/du1"; +static const char merge_du1_fu2[] = MERGE_DATA "/du1/fu2"; +static const char merge_fo1[] = MERGE_DATA "/fo1"; +static const char merge_do1[] = MERGE_DATA "/do1"; +static const char merge_do1_fo2[] = MERGE_DATA "/do1/fo2"; +static const char merge_do1_fl3[] = MERGE_DATA "/do1/fl3"; +static const char merge_do1_fu3[] = MERGE_DATA "/do1/fu3"; + +static const char (*merge_base_files[])[] = { + &merge_fl1, + &merge_fu1, + &merge_fo1, + NULL +}; +static const char (*merge_base_directories[])[] = { + &merge_dl1, + &merge_du1, + &merge_do1, + NULL +}; +static const char (*merge_sub_files[])[] = { + &merge_dl1_fl2, + &merge_du1_fu2, + &merge_do1_fo2, + &merge_do1_fl3, + &merge_do1_fu3, + NULL +}; + +/* + * layout2_overlay hierarchy: + * + * tmp + * ├── lower + * │  └── data + * │  ├── dl1 + * │  │  └── fl2 + * │  ├── do1 + * │  │  ├── fl3 + * │  │  └── fo2 + * │  ├── fl1 + * │  └── fo1 + * ├── merge + * │  └── data + * │  ├── dl1 + * │  │  └── fl2 + * │  ├── do1 + * │  │  ├── fl3 + * │  │  ├── fo2 + * │  │  └── fu3 + * │  ├── du1 + * │  │  └── fu2 + * │  ├── fl1 + * │  ├── fo1 + * │  └── fu1 + * └── upper + * ├── data + * │  ├── do1 + * │  │  ├── fo2 + * │  │  └── fu3 + * │  ├── du1 + * │  │  └── fu2 + * │  ├── fo1 + * │  └── fu1 + * └── work + * └── work + */ + +FIXTURE(layout2_overlay) { +}; + +FIXTURE_SETUP(layout2_overlay) +{ + prepare_layout(_metadata); + + create_directory(_metadata, LOWER_BASE); + set_cap(_metadata, CAP_SYS_ADMIN); + /* Creates tmpfs mount points to get deterministic overlayfs. 
*/ + ASSERT_EQ(0, mount("tmp", LOWER_BASE, "tmpfs", 0, "size=4m,mode=700")); + clear_cap(_metadata, CAP_SYS_ADMIN); + create_file(_metadata, lower_fl1); + create_file(_metadata, lower_dl1_fl2); + create_file(_metadata, lower_fo1); + create_file(_metadata, lower_do1_fo2); + create_file(_metadata, lower_do1_fl3); + + create_directory(_metadata, UPPER_BASE); + set_cap(_metadata, CAP_SYS_ADMIN); + ASSERT_EQ(0, mount("tmp", UPPER_BASE, "tmpfs", 0, "size=4m,mode=700")); + clear_cap(_metadata, CAP_SYS_ADMIN); + create_file(_metadata, upper_fu1); + create_file(_metadata, upper_du1_fu2); + create_file(_metadata, upper_fo1); + create_file(_metadata, upper_do1_fo2); + create_file(_metadata, upper_do1_fu3); + ASSERT_EQ(0, mkdir(UPPER_WORK, 0700)); + + create_directory(_metadata, MERGE_DATA); + set_cap(_metadata, CAP_SYS_ADMIN); + set_cap(_metadata, CAP_DAC_OVERRIDE); + ASSERT_EQ(0, mount("overlay", MERGE_DATA, "overlay", 0, + "lowerdir=" LOWER_DATA + ",upperdir=" UPPER_DATA + ",workdir=" UPPER_WORK)); + clear_cap(_metadata, CAP_DAC_OVERRIDE); + clear_cap(_metadata, CAP_SYS_ADMIN); +} + +FIXTURE_TEARDOWN(layout2_overlay) +{ + EXPECT_EQ(0, remove_path(lower_do1_fl3)); + EXPECT_EQ(0, remove_path(lower_dl1_fl2)); + EXPECT_EQ(0, remove_path(lower_fl1)); + EXPECT_EQ(0, remove_path(lower_do1_fo2)); + EXPECT_EQ(0, remove_path(lower_fo1)); + set_cap(_metadata, CAP_SYS_ADMIN); + EXPECT_EQ(0, umount(LOWER_BASE)); + clear_cap(_metadata, CAP_SYS_ADMIN); + EXPECT_EQ(0, remove_path(LOWER_BASE)); + + EXPECT_EQ(0, remove_path(upper_do1_fu3)); + EXPECT_EQ(0, remove_path(upper_du1_fu2)); + EXPECT_EQ(0, remove_path(upper_fu1)); + EXPECT_EQ(0, remove_path(upper_do1_fo2)); + EXPECT_EQ(0, remove_path(upper_fo1)); + EXPECT_EQ(0, remove_path(UPPER_WORK "/work")); + set_cap(_metadata, CAP_SYS_ADMIN); + EXPECT_EQ(0, umount(UPPER_BASE)); + clear_cap(_metadata, CAP_SYS_ADMIN); + EXPECT_EQ(0, remove_path(UPPER_BASE)); + + set_cap(_metadata, CAP_SYS_ADMIN); + EXPECT_EQ(0, umount(MERGE_DATA)); + clear_cap(_metadata, CAP_SYS_ADMIN); + EXPECT_EQ(0, remove_path(MERGE_DATA)); + + cleanup_layout(_metadata); +} + +TEST_F_FORK(layout2_overlay, no_restriction) +{ + ASSERT_EQ(0, test_open(lower_fl1, O_RDONLY)); + ASSERT_EQ(0, test_open(lower_dl1, O_RDONLY)); + ASSERT_EQ(0, test_open(lower_dl1_fl2, O_RDONLY)); + ASSERT_EQ(0, test_open(lower_fo1, O_RDONLY)); + ASSERT_EQ(0, test_open(lower_do1, O_RDONLY)); + ASSERT_EQ(0, test_open(lower_do1_fo2, O_RDONLY)); + ASSERT_EQ(0, test_open(lower_do1_fl3, O_RDONLY)); + + ASSERT_EQ(0, test_open(upper_fu1, O_RDONLY)); + ASSERT_EQ(0, test_open(upper_du1, O_RDONLY)); + ASSERT_EQ(0, test_open(upper_du1_fu2, O_RDONLY)); + ASSERT_EQ(0, test_open(upper_fo1, O_RDONLY)); + ASSERT_EQ(0, test_open(upper_do1, O_RDONLY)); + ASSERT_EQ(0, test_open(upper_do1_fo2, O_RDONLY)); + ASSERT_EQ(0, test_open(upper_do1_fu3, O_RDONLY)); + + ASSERT_EQ(0, test_open(merge_fl1, O_RDONLY)); + ASSERT_EQ(0, test_open(merge_dl1, O_RDONLY)); + ASSERT_EQ(0, test_open(merge_dl1_fl2, O_RDONLY)); + ASSERT_EQ(0, test_open(merge_fu1, O_RDONLY)); + ASSERT_EQ(0, test_open(merge_du1, O_RDONLY)); + ASSERT_EQ(0, test_open(merge_du1_fu2, O_RDONLY)); + ASSERT_EQ(0, test_open(merge_fo1, O_RDONLY)); + ASSERT_EQ(0, test_open(merge_do1, O_RDONLY)); + ASSERT_EQ(0, test_open(merge_do1_fo2, O_RDONLY)); + ASSERT_EQ(0, test_open(merge_do1_fl3, O_RDONLY)); + ASSERT_EQ(0, test_open(merge_do1_fu3, O_RDONLY)); +} + +#define for_each_path(path_list, path_entry, i) \ + for (i = 0, path_entry = *path_list[i]; path_list[i]; \ + path_entry = *path_list[++i]) + 
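[Editor's note] The for_each_path() macro above walks the NULL-terminated
lists of pointers to path arrays declared earlier (lower_*, upper_* and
merge_*). A minimal, self-contained usage sketch follows; the demo_files
list is hypothetical and not part of the patch, while the macro body is
copied verbatim from the hunk above:

	#include <stdio.h>

	static const char fa[] = "/tmp/a";
	static const char fb[] = "/tmp/b";

	/* Same shape as lower_base_files & co.: a NULL-terminated array
	 * of pointers to char arrays. */
	static const char (*demo_files[])[] = {
		&fa,
		&fb,
		NULL
	};

	#define for_each_path(path_list, path_entry, i)			\
		for (i = 0, path_entry = *path_list[i]; path_list[i];	\
			path_entry = *path_list[++i])

	int main(void)
	{
		size_t i;
		const char *path_entry;

		/* Prints /tmp/a then /tmp/b; the loop condition stops at
		 * the NULL terminator. The dereference in the increment
		 * step only performs array-to-pointer decay, so it does
		 * not read memory through the NULL slot. */
		for_each_path(demo_files, path_entry, i)
			printf("%s\n", path_entry);
		return 0;
	}

The tests below use exactly this pattern to apply the same assertion to
every file or directory of a given overlay layer.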
+TEST_F_FORK(layout2_overlay, same_content_different_file) +{ + /* Sets access right on parent directories of both layers. */ + const struct rule layer1_base[] = { + { + .path = LOWER_BASE, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = UPPER_BASE, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = MERGE_BASE, + .access = ACCESS_RW, + }, + {} + }; + const struct rule layer2_data[] = { + { + .path = LOWER_DATA, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = UPPER_DATA, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = MERGE_DATA, + .access = ACCESS_RW, + }, + {} + }; + /* Sets access right on directories inside both layers. */ + const struct rule layer3_subdirs[] = { + { + .path = lower_dl1, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = lower_do1, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = upper_du1, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = upper_do1, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = merge_dl1, + .access = ACCESS_RW, + }, + { + .path = merge_du1, + .access = ACCESS_RW, + }, + { + .path = merge_do1, + .access = ACCESS_RW, + }, + {} + }; + /* Tighten access rights to the files. */ + const struct rule layer4_files[] = { + { + .path = lower_dl1_fl2, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = lower_do1_fo2, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = lower_do1_fl3, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = upper_du1_fu2, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = upper_do1_fo2, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = upper_do1_fu3, + .access = LANDLOCK_ACCESS_FS_READ_FILE, + }, + { + .path = merge_dl1_fl2, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + { + .path = merge_du1_fu2, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + { + .path = merge_do1_fo2, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + { + .path = merge_do1_fl3, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + { + .path = merge_do1_fu3, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + {} + }; + const struct rule layer5_merge_only[] = { + { + .path = MERGE_DATA, + .access = LANDLOCK_ACCESS_FS_READ_FILE | + LANDLOCK_ACCESS_FS_WRITE_FILE, + }, + {} + }; + int ruleset_fd; + size_t i; + const char *path_entry; + + /* Sets rules on base directories (i.e. outside overlay scope). */ + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer1_base); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks lower layer. */ + for_each_path(lower_base_files, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY)); + } + for_each_path(lower_base_directories, path_entry, i) { + ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY | O_DIRECTORY)); + } + for_each_path(lower_sub_files, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY)); + } + /* Checks upper layer. 
*/ + for_each_path(upper_base_files, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY)); + } + for_each_path(upper_base_directories, path_entry, i) { + ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY | O_DIRECTORY)); + } + for_each_path(upper_sub_files, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY)); + } + /* + * Checks that access rights are independent from the lower and upper + * layers: write access to upper files viewed through the merge point + * is still allowed, and write access to lower file viewed (and copied) + * through the merge point is still allowed. + */ + for_each_path(merge_base_files, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDWR)); + } + for_each_path(merge_base_directories, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDONLY | O_DIRECTORY)); + } + for_each_path(merge_sub_files, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDWR)); + } + + /* Sets rules on data directories (i.e. inside overlay scope). */ + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer2_data); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks merge. */ + for_each_path(merge_base_files, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDWR)); + } + for_each_path(merge_base_directories, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDONLY | O_DIRECTORY)); + } + for_each_path(merge_sub_files, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDWR)); + } + + /* Same checks with tighter rules. */ + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer3_subdirs); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks changes for lower layer. */ + for_each_path(lower_base_files, path_entry, i) { + ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY)); + } + /* Checks changes for upper layer. */ + for_each_path(upper_base_files, path_entry, i) { + ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY)); + } + /* Checks all merge accesses. */ + for_each_path(merge_base_files, path_entry, i) { + ASSERT_EQ(EACCES, test_open(path_entry, O_RDWR)); + } + for_each_path(merge_base_directories, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDONLY | O_DIRECTORY)); + } + for_each_path(merge_sub_files, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDWR)); + } + + /* Sets rules directly on overlayed files. */ + ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer4_files); + ASSERT_LE(0, ruleset_fd); + enforce_ruleset(_metadata, ruleset_fd); + ASSERT_EQ(0, close(ruleset_fd)); + + /* Checks unchanged accesses on lower layer. */ + for_each_path(lower_sub_files, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY)); + } + /* Checks unchanged accesses on upper layer. */ + for_each_path(upper_sub_files, path_entry, i) { + ASSERT_EQ(0, test_open(path_entry, O_RDONLY)); + ASSERT_EQ(EACCES, test_open(path_entry, O_WRONLY)); + } + /* Checks all merge accesses. 
*/
+	for_each_path(merge_base_files, path_entry, i) {
+		ASSERT_EQ(EACCES, test_open(path_entry, O_RDWR));
+	}
+	for_each_path(merge_base_directories, path_entry, i) {
+		ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY | O_DIRECTORY));
+	}
+	for_each_path(merge_sub_files, path_entry, i) {
+		ASSERT_EQ(0, test_open(path_entry, O_RDWR));
+	}
+
+	/* Only allows access to the merge hierarchy. */
+	ruleset_fd = create_ruleset(_metadata, ACCESS_RW, layer5_merge_only);
+	ASSERT_LE(0, ruleset_fd);
+	enforce_ruleset(_metadata, ruleset_fd);
+	ASSERT_EQ(0, close(ruleset_fd));
+
+	/* Checks new accesses on lower layer. */
+	for_each_path(lower_sub_files, path_entry, i) {
+		ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY));
+	}
+	/* Checks new accesses on upper layer. */
+	for_each_path(upper_sub_files, path_entry, i) {
+		ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY));
+	}
+	/* Checks all merge accesses. */
+	for_each_path(merge_base_files, path_entry, i) {
+		ASSERT_EQ(EACCES, test_open(path_entry, O_RDWR));
+	}
+	for_each_path(merge_base_directories, path_entry, i) {
+		ASSERT_EQ(EACCES, test_open(path_entry, O_RDONLY | O_DIRECTORY));
+	}
+	for_each_path(merge_sub_files, path_entry, i) {
+		ASSERT_EQ(0, test_open(path_entry, O_RDWR));
+	}
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/landlock/ptrace_test.c b/tools/testing/selftests/landlock/ptrace_test.c
new file mode 100644
index 000000000000..15fbef9cc849
--- /dev/null
+++ b/tools/testing/selftests/landlock/ptrace_test.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Landlock tests - Ptrace
+ *
+ * Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
+ * Copyright © 2019-2020 ANSSI
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/landlock.h>
+#include <signal.h>
+#include <sys/prctl.h>
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "common.h"
+
+static void create_domain(struct __test_metadata *const _metadata)
+{
+	int ruleset_fd;
+	struct landlock_ruleset_attr ruleset_attr = {
+		.handled_access_fs = LANDLOCK_ACCESS_FS_MAKE_BLOCK,
+	};
+
+	ruleset_fd = landlock_create_ruleset(&ruleset_attr,
+			sizeof(ruleset_attr), 0);
+	EXPECT_LE(0, ruleset_fd) {
+		TH_LOG("Failed to create a ruleset: %s", strerror(errno));
+	}
+	EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
+	EXPECT_EQ(0, landlock_restrict_self(ruleset_fd, 0));
+	EXPECT_EQ(0, close(ruleset_fd));
+}
+
+static int test_ptrace_read(const pid_t pid)
+{
+	static const char path_template[] = "/proc/%d/environ";
+	char procenv_path[sizeof(path_template) + 10];
+	int procenv_path_size, fd;
+
+	procenv_path_size = snprintf(procenv_path, sizeof(procenv_path),
+			path_template, pid);
+	if (procenv_path_size >= sizeof(procenv_path))
+		return E2BIG;
+
+	fd = open(procenv_path, O_RDONLY | O_CLOEXEC);
+	if (fd < 0)
+		return errno;
+	/*
+	 * Mixing error codes from close(2) and open(2) should not lead to any
+	 * (access type) confusion for this test.
+	 */
+	if (close(fd) != 0)
+		return errno;
+	return 0;
+}
+
+FIXTURE(hierarchy) { };
+
+FIXTURE_VARIANT(hierarchy) {
+	const bool domain_both;
+	const bool domain_parent;
+	const bool domain_child;
+};
+
+/*
+ * Test multiple tracing combinations between a parent process P1 and a child
+ * process P2.
+ *
+ * Yama's scoped ptrace is presumed disabled. If enabled, this optional
+ * restriction is enforced in addition to any Landlock check, which means that
+ * all P2 requests to trace P1 would be denied.
+ */ + +/* + * No domain + * + * P1-. P1 -> P2 : allow + * \ P2 -> P1 : allow + * 'P2 + */ +FIXTURE_VARIANT_ADD(hierarchy, allow_without_domain) { + .domain_both = false, + .domain_parent = false, + .domain_child = false, +}; + +/* + * Child domain + * + * P1--. P1 -> P2 : allow + * \ P2 -> P1 : deny + * .'-----. + * | P2 | + * '------' + */ +FIXTURE_VARIANT_ADD(hierarchy, allow_with_one_domain) { + .domain_both = false, + .domain_parent = false, + .domain_child = true, +}; + +/* + * Parent domain + * .------. + * | P1 --. P1 -> P2 : deny + * '------' \ P2 -> P1 : allow + * ' + * P2 + */ +FIXTURE_VARIANT_ADD(hierarchy, deny_with_parent_domain) { + .domain_both = false, + .domain_parent = true, + .domain_child = false, +}; + +/* + * Parent + child domain (siblings) + * .------. + * | P1 ---. P1 -> P2 : deny + * '------' \ P2 -> P1 : deny + * .---'--. + * | P2 | + * '------' + */ +FIXTURE_VARIANT_ADD(hierarchy, deny_with_sibling_domain) { + .domain_both = false, + .domain_parent = true, + .domain_child = true, +}; + +/* + * Same domain (inherited) + * .-------------. + * | P1----. | P1 -> P2 : allow + * | \ | P2 -> P1 : allow + * | ' | + * | P2 | + * '-------------' + */ +FIXTURE_VARIANT_ADD(hierarchy, allow_sibling_domain) { + .domain_both = true, + .domain_parent = false, + .domain_child = false, +}; + +/* + * Inherited + child domain + * .-----------------. + * | P1----. | P1 -> P2 : allow + * | \ | P2 -> P1 : deny + * | .-'----. | + * | | P2 | | + * | '------' | + * '-----------------' + */ +FIXTURE_VARIANT_ADD(hierarchy, allow_with_nested_domain) { + .domain_both = true, + .domain_parent = false, + .domain_child = true, +}; + +/* + * Inherited + parent domain + * .-----------------. + * |.------. | P1 -> P2 : deny + * || P1 ----. | P2 -> P1 : allow + * |'------' \ | + * | ' | + * | P2 | + * '-----------------' + */ +FIXTURE_VARIANT_ADD(hierarchy, deny_with_nested_and_parent_domain) { + .domain_both = true, + .domain_parent = true, + .domain_child = false, +}; + +/* + * Inherited + parent and child domain (siblings) + * .-----------------. + * | .------. | P1 -> P2 : deny + * | | P1 . | P2 -> P1 : deny + * | '------'\ | + * | \ | + * | .--'---. | + * | | P2 | | + * | '------' | + * '-----------------' + */ +FIXTURE_VARIANT_ADD(hierarchy, deny_with_forked_domain) { + .domain_both = true, + .domain_parent = true, + .domain_child = true, +}; + +FIXTURE_SETUP(hierarchy) +{ } + +FIXTURE_TEARDOWN(hierarchy) +{ } + +/* Test PTRACE_TRACEME and PTRACE_ATTACH for parent and child. */ +TEST_F(hierarchy, trace) +{ + pid_t child, parent; + int status, err_proc_read; + int pipe_child[2], pipe_parent[2]; + char buf_parent; + long ret; + + /* + * Removes all effective and permitted capabilities to not interfere + * with cap_ptrace_access_check() in case of PTRACE_MODE_FSCREDS. + */ + drop_caps(_metadata); + + parent = getpid(); + ASSERT_EQ(0, pipe2(pipe_child, O_CLOEXEC)); + ASSERT_EQ(0, pipe2(pipe_parent, O_CLOEXEC)); + if (variant->domain_both) { + create_domain(_metadata); + if (!_metadata->passed) + /* Aborts before forking. */ + return; + } + + child = fork(); + ASSERT_LE(0, child); + if (child == 0) { + char buf_child; + + ASSERT_EQ(0, close(pipe_parent[1])); + ASSERT_EQ(0, close(pipe_child[0])); + if (variant->domain_child) + create_domain(_metadata); + + /* Waits for the parent to be in a domain, if any. */ + ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1)); + + /* Tests PTRACE_ATTACH and PTRACE_MODE_READ on the parent. 
*/ + err_proc_read = test_ptrace_read(parent); + ret = ptrace(PTRACE_ATTACH, parent, NULL, 0); + if (variant->domain_child) { + EXPECT_EQ(-1, ret); + EXPECT_EQ(EPERM, errno); + EXPECT_EQ(EACCES, err_proc_read); + } else { + EXPECT_EQ(0, ret); + EXPECT_EQ(0, err_proc_read); + } + if (ret == 0) { + ASSERT_EQ(parent, waitpid(parent, &status, 0)); + ASSERT_EQ(1, WIFSTOPPED(status)); + ASSERT_EQ(0, ptrace(PTRACE_DETACH, parent, NULL, 0)); + } + + /* Tests child PTRACE_TRACEME. */ + ret = ptrace(PTRACE_TRACEME); + if (variant->domain_parent) { + EXPECT_EQ(-1, ret); + EXPECT_EQ(EPERM, errno); + } else { + EXPECT_EQ(0, ret); + } + + /* + * Signals that the PTRACE_ATTACH test is done and the + * PTRACE_TRACEME test is ongoing. + */ + ASSERT_EQ(1, write(pipe_child[1], ".", 1)); + + if (!variant->domain_parent) { + ASSERT_EQ(0, raise(SIGSTOP)); + } + + /* Waits for the parent PTRACE_ATTACH test. */ + ASSERT_EQ(1, read(pipe_parent[0], &buf_child, 1)); + _exit(_metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); + return; + } + + ASSERT_EQ(0, close(pipe_child[1])); + ASSERT_EQ(0, close(pipe_parent[0])); + if (variant->domain_parent) + create_domain(_metadata); + + /* Signals that the parent is in a domain, if any. */ + ASSERT_EQ(1, write(pipe_parent[1], ".", 1)); + + /* + * Waits for the child to test PTRACE_ATTACH on the parent and start + * testing PTRACE_TRACEME. + */ + ASSERT_EQ(1, read(pipe_child[0], &buf_parent, 1)); + + /* Tests child PTRACE_TRACEME. */ + if (!variant->domain_parent) { + ASSERT_EQ(child, waitpid(child, &status, 0)); + ASSERT_EQ(1, WIFSTOPPED(status)); + ASSERT_EQ(0, ptrace(PTRACE_DETACH, child, NULL, 0)); + } else { + /* The child should not be traced by the parent. */ + EXPECT_EQ(-1, ptrace(PTRACE_DETACH, child, NULL, 0)); + EXPECT_EQ(ESRCH, errno); + } + + /* Tests PTRACE_ATTACH and PTRACE_MODE_READ on the child. */ + err_proc_read = test_ptrace_read(child); + ret = ptrace(PTRACE_ATTACH, child, NULL, 0); + if (variant->domain_parent) { + EXPECT_EQ(-1, ret); + EXPECT_EQ(EPERM, errno); + EXPECT_EQ(EACCES, err_proc_read); + } else { + EXPECT_EQ(0, ret); + EXPECT_EQ(0, err_proc_read); + } + if (ret == 0) { + ASSERT_EQ(child, waitpid(child, &status, 0)); + ASSERT_EQ(1, WIFSTOPPED(status)); + ASSERT_EQ(0, ptrace(PTRACE_DETACH, child, NULL, 0)); + } + + /* Signals that the parent PTRACE_ATTACH test is done. 
*/ + ASSERT_EQ(1, write(pipe_parent[1], ".", 1)); + ASSERT_EQ(child, waitpid(child, &status, 0)); + if (WIFSIGNALED(status) || !WIFEXITED(status) || + WEXITSTATUS(status) != EXIT_SUCCESS) + _metadata->passed = 0; +} + +TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/landlock/true.c b/tools/testing/selftests/landlock/true.c new file mode 100644 index 000000000000..3f9ccbf52783 --- /dev/null +++ b/tools/testing/selftests/landlock/true.c @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 +int main(void) +{ + return 0; +} diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c index 5a1e85ff5d32..e54106643337 100644 --- a/tools/testing/selftests/mincore/mincore_selftest.c +++ b/tools/testing/selftests/mincore/mincore_selftest.c @@ -14,7 +14,6 @@ #include <sys/mman.h> #include <string.h> #include <fcntl.h> -#include <string.h> #include "../kselftest.h" #include "../kselftest_harness.h" diff --git a/tools/testing/selftests/nci/.gitignore b/tools/testing/selftests/nci/.gitignore new file mode 100644 index 000000000000..448eeb4590fc --- /dev/null +++ b/tools/testing/selftests/nci/.gitignore @@ -0,0 +1 @@ +/nci_dev diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh index 9236609731b1..3c4cb72ed8a4 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh @@ -274,7 +274,7 @@ check_mptcp_disabled() ip netns exec ${disabled_ns} sysctl -q net.mptcp.enabled=0 local err=0 - LANG=C ip netns exec ${disabled_ns} ./mptcp_connect -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \ + LC_ALL=C ip netns exec ${disabled_ns} ./mptcp_connect -p 10000 -s MPTCP 127.0.0.1 < "$cin" 2>&1 | \ grep -q "^socket: Protocol not available$" && err=1 ip netns delete ${disabled_ns} diff --git a/tools/testing/selftests/perf_events/sigtrap_threads.c b/tools/testing/selftests/perf_events/sigtrap_threads.c index 78ddf5e11625..8e83cf91513a 100644 --- a/tools/testing/selftests/perf_events/sigtrap_threads.c +++ b/tools/testing/selftests/perf_events/sigtrap_threads.c @@ -43,7 +43,7 @@ static struct { siginfo_t first_siginfo; /* First observed siginfo_t. */ } ctx; -/* Unique value to check si_perf is correctly set from perf_event_attr::sig_data. */ +/* Unique value to check si_perf_data is correctly set from perf_event_attr::sig_data. */ #define TEST_SIG_DATA(addr) (~(unsigned long)(addr)) static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr) @@ -164,8 +164,8 @@ TEST_F(sigtrap_threads, enable_event) EXPECT_EQ(ctx.signal_count, NUM_THREADS); EXPECT_EQ(ctx.tids_want_signal, 0); EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on); - EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT); - EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on)); + EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT); + EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on)); /* Check enabled for parent. 
*/ ctx.iterate_on = 0; @@ -183,8 +183,8 @@ TEST_F(sigtrap_threads, modify_and_enable_event) EXPECT_EQ(ctx.signal_count, NUM_THREADS); EXPECT_EQ(ctx.tids_want_signal, 0); EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on); - EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT); - EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on)); + EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT); + EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on)); /* Check enabled for parent. */ ctx.iterate_on = 0; @@ -203,8 +203,8 @@ TEST_F(sigtrap_threads, signal_stress) EXPECT_EQ(ctx.signal_count, NUM_THREADS * ctx.iterate_on); EXPECT_EQ(ctx.tids_want_signal, 0); EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on); - EXPECT_EQ(ctx.first_siginfo.si_errno, PERF_TYPE_BREAKPOINT); - EXPECT_EQ(ctx.first_siginfo.si_perf, TEST_SIG_DATA(&ctx.iterate_on)); + EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT); + EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on)); } TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/powerpc/mm/tlbie_test.c b/tools/testing/selftests/powerpc/mm/tlbie_test.c index f85a0938ab25..48344a74b212 100644 --- a/tools/testing/selftests/powerpc/mm/tlbie_test.c +++ b/tools/testing/selftests/powerpc/mm/tlbie_test.c @@ -33,7 +33,6 @@ #include <sched.h> #include <time.h> #include <stdarg.h> -#include <sched.h> #include <pthread.h> #include <signal.h> #include <sys/prctl.h> diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile index 8be8a03d2973..1054e40a499a 100644 --- a/tools/testing/selftests/proc/Makefile +++ b/tools/testing/selftests/proc/Makefile @@ -12,6 +12,7 @@ TEST_GEN_PROGS += proc-self-map-files-001 TEST_GEN_PROGS += proc-self-map-files-002 TEST_GEN_PROGS += proc-self-syscall TEST_GEN_PROGS += proc-self-wchan +TEST_GEN_PROGS += proc-subset-pid TEST_GEN_PROGS += proc-uptime-001 TEST_GEN_PROGS += proc-uptime-002 TEST_GEN_PROGS += read diff --git a/tools/testing/selftests/proc/proc-subset-pid.c b/tools/testing/selftests/proc/proc-subset-pid.c new file mode 100644 index 000000000000..d1052bcab039 --- /dev/null +++ b/tools/testing/selftests/proc/proc-subset-pid.c @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2021 Alexey Dobriyan <adobriyan@gmail.com> + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/* + * Test that "mount -t proc -o subset=pid" hides everything but pids, + * /proc/self and /proc/thread-self. 
+ */ +#undef NDEBUG +#include <assert.h> +#include <errno.h> +#include <sched.h> +#include <stdbool.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mount.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <dirent.h> +#include <unistd.h> +#include <stdio.h> + +static inline bool streq(const char *a, const char *b) +{ + return strcmp(a, b) == 0; +} + +static void make_private_proc(void) +{ + if (unshare(CLONE_NEWNS) == -1) { + if (errno == ENOSYS || errno == EPERM) { + exit(4); + } + exit(1); + } + if (mount(NULL, "/", NULL, MS_PRIVATE|MS_REC, NULL) == -1) { + exit(1); + } + if (mount(NULL, "/proc", "proc", 0, "subset=pid") == -1) { + exit(1); + } +} + +static bool string_is_pid(const char *s) +{ + while (1) { + switch (*s++) { + case '0':case '1':case '2':case '3':case '4': + case '5':case '6':case '7':case '8':case '9': + continue; + + case '\0': + return true; + + default: + return false; + } + } +} + +int main(void) +{ + make_private_proc(); + + DIR *d = opendir("/proc"); + assert(d); + + struct dirent *de; + + bool dot = false; + bool dot_dot = false; + bool self = false; + bool thread_self = false; + + while ((de = readdir(d))) { + if (streq(de->d_name, ".")) { + assert(!dot); + dot = true; + assert(de->d_type == DT_DIR); + } else if (streq(de->d_name, "..")) { + assert(!dot_dot); + dot_dot = true; + assert(de->d_type == DT_DIR); + } else if (streq(de->d_name, "self")) { + assert(!self); + self = true; + assert(de->d_type == DT_LNK); + } else if (streq(de->d_name, "thread-self")) { + assert(!thread_self); + thread_self = true; + assert(de->d_type == DT_LNK); + } else { + if (!string_is_pid(de->d_name)) { + fprintf(stderr, "d_name '%s'\n", de->d_name); + assert(0); + } + assert(de->d_type == DT_DIR); + } + } + + char c; + int rv = readlink("/proc/cpuinfo", &c, 1); + assert(rv == -1 && errno == ENOENT); + + int fd = open("/proc/cpuinfo", O_RDONLY); + assert(fd == -1 && errno == ENOENT); + + return 0; +} diff --git a/tools/testing/selftests/proc/read.c b/tools/testing/selftests/proc/read.c index b3ef9e14d6cc..35ee78dff144 100644 --- a/tools/testing/selftests/proc/read.c +++ b/tools/testing/selftests/proc/read.c @@ -14,7 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ // Test -// 1) read of every file in /proc +// 1) read and lseek on every file in /proc // 2) readlink of every symlink in /proc // 3) recursively (1) + (2) for every directory in /proc // 4) write to /proc/*/clear_refs and /proc/*/task/*/clear_refs @@ -45,6 +45,8 @@ static void f_reg(DIR *d, const char *filename) fd = openat(dirfd(d), filename, O_RDONLY|O_NONBLOCK); if (fd == -1) return; + /* struct proc_ops::proc_lseek is mandatory if file is seekable. */ + (void)lseek(fd, 0, SEEK_SET); rv = read(fd, buf, sizeof(buf)); assert((0 <= rv && rv <= sizeof(buf)) || rv == -1); close(fd); diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 98c3b647f54d..e3d5c77a8612 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -1753,16 +1753,25 @@ TEST_F(TRACE_poke, getpid_runs_normally) # define SYSCALL_RET_SET(_regs, _val) \ do { \ typeof(_val) _result = (_val); \ - /* \ - * A syscall error is signaled by CR0 SO bit \ - * and the code is stored as a positive value. 
\ - */ \ - if (_result < 0) { \ - SYSCALL_RET(_regs) = -_result; \ - (_regs).ccr |= 0x10000000; \ - } else { \ + if ((_regs.trap & 0xfff0) == 0x3000) { \ + /* \ + * scv 0 system call uses -ve result \ + * for error, so no need to adjust. \ + */ \ SYSCALL_RET(_regs) = _result; \ - (_regs).ccr &= ~0x10000000; \ + } else { \ + /* \ + * A syscall error is signaled by the \ + * CR0 SO bit and the code is stored as \ + * a positive value. \ + */ \ + if (_result < 0) { \ + SYSCALL_RET(_regs) = -_result; \ + (_regs).ccr |= 0x10000000; \ + } else { \ + SYSCALL_RET(_regs) = _result; \ + (_regs).ccr &= ~0x10000000; \ + } \ } \ } while (0) # define SYSCALL_RET_SET_ON_PTRACE_EXIT diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json index 1cda2e11b3ad..773c5027553d 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json +++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json @@ -9,11 +9,11 @@ "setup": [ "$IP link add dev $DUMMY type dummy || /bin/true" ], - "cmdUnderTest": "$TC qdisc add dev $DUMMY root fq_pie flows 65536", - "expExitCode": "2", + "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_pie flows 65536", + "expExitCode": "0", "verifyCmd": "$TC qdisc show dev $DUMMY", - "matchPattern": "qdisc", - "matchCount": "0", + "matchPattern": "qdisc fq_pie 1: root refcnt 2 limit 10240p flows 65536", + "matchCount": "1", "teardown": [ "$IP link del dev $DUMMY" ] diff --git a/tools/testing/selftests/vm/.gitignore b/tools/testing/selftests/vm/.gitignore index 9a35c3f6a557..1f651e85ed60 100644 --- a/tools/testing/selftests/vm/.gitignore +++ b/tools/testing/selftests/vm/.gitignore @@ -22,3 +22,4 @@ map_fixed_noreplace write_to_hugetlbfs hmm-tests local_config.* +split_huge_page_test diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 8b0cd421ebd3..73e1cc96d7c2 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -42,6 +42,7 @@ TEST_GEN_FILES += on-fault-limit TEST_GEN_FILES += thuge-gen TEST_GEN_FILES += transhuge-stress TEST_GEN_FILES += userfaultfd +TEST_GEN_FILES += split_huge_page_test ifeq ($(MACHINE),x86_64) CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh $(CC) ../x86/trivial_32bit_program.c -m32) diff --git a/tools/testing/selftests/vm/gup_test.c b/tools/testing/selftests/vm/gup_test.c index 6c6336dd3b7f..1e662d59c502 100644 --- a/tools/testing/selftests/vm/gup_test.c +++ b/tools/testing/selftests/vm/gup_test.c @@ -13,6 +13,7 @@ /* Just the flags we need, copied from mm.h: */ #define FOLL_WRITE 0x01 /* check pte is writable */ +#define FOLL_TOUCH 0x02 /* mark page accessed */ static char *cmd_to_str(unsigned long cmd) { @@ -37,13 +38,13 @@ int main(int argc, char **argv) { struct gup_test gup = { 0 }; unsigned long size = 128 * MB; - int i, fd, filed, opt, nr_pages = 1, thp = -1, repeats = 1, write = 0; + int i, fd, filed, opt, nr_pages = 1, thp = -1, repeats = 1, write = 1; unsigned long cmd = GUP_FAST_BENCHMARK; - int flags = MAP_PRIVATE; + int flags = MAP_PRIVATE, touch = 0; char *file = "/dev/zero"; char *p; - while ((opt = getopt(argc, argv, "m:r:n:F:f:abctTLUuwSH")) != -1) { + while ((opt = getopt(argc, argv, "m:r:n:F:f:abctTLUuwWSHpz")) != -1) { switch (opt) { case 'a': cmd = PIN_FAST_BENCHMARK; @@ -65,9 +66,13 @@ int main(int argc, char **argv) */ gup.which_pages[0] = 1; break; + case 'p': + /* works only with DUMP_USER_PAGES_TEST */ + gup.test_flags |= GUP_TEST_FLAG_DUMP_PAGES_USE_PIN; 
+ break; case 'F': /* strtol, so you can pass flags in hex form */ - gup.flags = strtol(optarg, 0, 0); + gup.gup_flags = strtol(optarg, 0, 0); break; case 'm': size = atoi(optarg) * MB; @@ -93,6 +98,9 @@ int main(int argc, char **argv) case 'w': write = 1; break; + case 'W': + write = 0; + break; case 'f': file = optarg; break; @@ -103,6 +111,10 @@ int main(int argc, char **argv) case 'H': flags |= (MAP_HUGETLB | MAP_ANONYMOUS); break; + case 'z': + /* fault pages in gup, do not fault in userland */ + touch = 1; + break; default: return -1; } @@ -140,7 +152,7 @@ int main(int argc, char **argv) gup.nr_pages_per_call = nr_pages; if (write) - gup.flags |= FOLL_WRITE; + gup.gup_flags |= FOLL_WRITE; fd = open("/sys/kernel/debug/gup_test", O_RDWR); if (fd == -1) { @@ -160,8 +172,18 @@ int main(int argc, char **argv) else if (thp == 0) madvise(p, size, MADV_NOHUGEPAGE); - for (; (unsigned long)p < gup.addr + size; p += PAGE_SIZE) - p[0] = 0; + /* + * FOLL_TOUCH, in gup_test, is used as an either/or case: either + * fault pages in from the kernel via FOLL_TOUCH, or fault them + * in here, from user space. This allows comparison of performance + * between those two cases. + */ + if (touch) { + gup.gup_flags |= FOLL_TOUCH; + } else { + for (; (unsigned long)p < gup.addr + size; p += PAGE_SIZE) + p[0] = 0; + } /* Only report timing information on the *_BENCHMARK commands: */ if ((cmd == PIN_FAST_BENCHMARK) || (cmd == GUP_FAST_BENCHMARK) || diff --git a/tools/testing/selftests/vm/mremap_dontunmap.c b/tools/testing/selftests/vm/mremap_dontunmap.c index 3a7b5ef0b0c6..f01dc4a85b0b 100644 --- a/tools/testing/selftests/vm/mremap_dontunmap.c +++ b/tools/testing/selftests/vm/mremap_dontunmap.c @@ -127,6 +127,57 @@ static void mremap_dontunmap_simple() "unable to unmap source mapping"); } +// This test validates that MREMAP_DONTUNMAP on a shared mapping works as expected. +static void mremap_dontunmap_simple_shmem() +{ + unsigned long num_pages = 5; + + int mem_fd = memfd_create("memfd", MFD_CLOEXEC); + BUG_ON(mem_fd < 0, "memfd_create"); + + BUG_ON(ftruncate(mem_fd, num_pages * page_size) < 0, + "ftruncate"); + + void *source_mapping = + mmap(NULL, num_pages * page_size, PROT_READ | PROT_WRITE, + MAP_FILE | MAP_SHARED, mem_fd, 0); + BUG_ON(source_mapping == MAP_FAILED, "mmap"); + + BUG_ON(close(mem_fd) < 0, "close"); + + memset(source_mapping, 'a', num_pages * page_size); + + // Try to just move the whole mapping anywhere (not fixed). + void *dest_mapping = + mremap(source_mapping, num_pages * page_size, num_pages * page_size, + MREMAP_DONTUNMAP | MREMAP_MAYMOVE, NULL); + if (dest_mapping == MAP_FAILED && errno == EINVAL) { + // Old kernel which doesn't support MREMAP_DONTUNMAP on shmem. + BUG_ON(munmap(source_mapping, num_pages * page_size) == -1, + "unable to unmap source mapping"); + return; + } + + BUG_ON(dest_mapping == MAP_FAILED, "mremap"); + + // Validate that the pages have been moved, we know they were moved if + // the dest_mapping contains a's. + BUG_ON(check_region_contains_byte + (dest_mapping, num_pages * page_size, 'a') != 0, + "pages did not migrate"); + + // Because the region is backed by shmem, we will actually see the same + // memory at the source location still. 
+	BUG_ON(check_region_contains_byte
+	       (source_mapping, num_pages * page_size, 'a') != 0,
+	       "source data should remain visible through the shmem mapping");
+
+	BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1,
+	       "unable to unmap destination mapping");
+	BUG_ON(munmap(source_mapping, num_pages * page_size) == -1,
+	       "unable to unmap source mapping");
+}
+
 // This test validates MREMAP_DONTUNMAP will move page tables to a specific
 // destination using MREMAP_FIXED, also while validating that the source
 // remains intact.
@@ -300,6 +351,7 @@ int main(void)
 	BUG_ON(page_buffer == MAP_FAILED, "unable to mmap a page.");
 
 	mremap_dontunmap_simple();
+	mremap_dontunmap_simple_shmem();
 	mremap_dontunmap_simple_fixed();
 	mremap_dontunmap_partial_mapping();
 	mremap_dontunmap_partial_mapping_overwrite();
diff --git a/tools/testing/selftests/vm/split_huge_page_test.c b/tools/testing/selftests/vm/split_huge_page_test.c
new file mode 100644
index 000000000000..1af16d2c2a0a
--- /dev/null
+++ b/tools/testing/selftests/vm/split_huge_page_test.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A test of splitting PMD THPs and PTE-mapped THPs from a specified virtual
+ * address range in a process via <debugfs>/split_huge_pages interface.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <malloc.h>
+#include <stdbool.h>
+
+uint64_t pagesize;
+unsigned int pageshift;
+uint64_t pmd_pagesize;
+
+#define PMD_SIZE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size"
+#define SPLIT_DEBUGFS "/sys/kernel/debug/split_huge_pages"
+#define SMAP_PATH "/proc/self/smaps"
+#define INPUT_MAX 80
+
+#define PID_FMT "%d,0x%lx,0x%lx"
+#define PATH_FMT "%s,0x%lx,0x%lx"
+
+#define PFN_MASK ((1UL<<55)-1)
+#define KPF_THP (1UL<<22)
+
+int is_backed_by_thp(char *vaddr, int pagemap_file, int kpageflags_file)
+{
+	uint64_t paddr;
+	uint64_t page_flags;
+
+	if (pagemap_file) {
+		pread(pagemap_file, &paddr, sizeof(paddr),
+			((long)vaddr >> pageshift) * sizeof(paddr));
+
+		if (kpageflags_file) {
+			pread(kpageflags_file, &page_flags, sizeof(page_flags),
+				(paddr & PFN_MASK) * sizeof(page_flags));
+
+			return !!(page_flags & KPF_THP);
+		}
+	}
+	return 0;
+}
+
+
+static uint64_t read_pmd_pagesize(void)
+{
+	int fd;
+	char buf[20];
+	ssize_t num_read;
+
+	fd = open(PMD_SIZE_PATH, O_RDONLY);
+	if (fd == -1) {
+		perror("Open hpage_pmd_size failed");
+		exit(EXIT_FAILURE);
+	}
+	num_read = read(fd, buf, 19);
+	if (num_read < 1) {
+		close(fd);
+		perror("Read hpage_pmd_size failed");
+		exit(EXIT_FAILURE);
+	}
+	buf[num_read] = '\0';
+	close(fd);
+
+	return strtoul(buf, NULL, 10);
+}
+
+static int write_file(const char *path, const char *buf, size_t buflen)
+{
+	int fd;
+	ssize_t numwritten;
+
+	fd = open(path, O_WRONLY);
+	if (fd == -1)
+		return 0;
+
+	numwritten = write(fd, buf, buflen - 1);
+	close(fd);
+	if (numwritten < 1)
+		return 0;
+
+	return (unsigned int) numwritten;
+}
+
+static void write_debugfs(const char *fmt, ...)
+{ + char input[INPUT_MAX]; + int ret; + va_list argp; + + va_start(argp, fmt); + ret = vsnprintf(input, INPUT_MAX, fmt, argp); + va_end(argp); + + if (ret >= INPUT_MAX) { + printf("%s: Debugfs input is too long\n", __func__); + exit(EXIT_FAILURE); + } + + if (!write_file(SPLIT_DEBUGFS, input, ret + 1)) { + perror(SPLIT_DEBUGFS); + exit(EXIT_FAILURE); + } +} + +#define MAX_LINE_LENGTH 500 + +static bool check_for_pattern(FILE *fp, const char *pattern, char *buf) +{ + while (fgets(buf, MAX_LINE_LENGTH, fp) != NULL) { + if (!strncmp(buf, pattern, strlen(pattern))) + return true; + } + return false; +} + +static uint64_t check_huge(void *addr) +{ + uint64_t thp = 0; + int ret; + FILE *fp; + char buffer[MAX_LINE_LENGTH]; + char addr_pattern[MAX_LINE_LENGTH]; + + ret = snprintf(addr_pattern, MAX_LINE_LENGTH, "%08lx-", + (unsigned long) addr); + if (ret >= MAX_LINE_LENGTH) { + printf("%s: Pattern is too long\n", __func__); + exit(EXIT_FAILURE); + } + + + fp = fopen(SMAP_PATH, "r"); + if (!fp) { + printf("%s: Failed to open file %s\n", __func__, SMAP_PATH); + exit(EXIT_FAILURE); + } + if (!check_for_pattern(fp, addr_pattern, buffer)) + goto err_out; + + /* + * Fetch the AnonHugePages: in the same block and check the number of + * hugepages. + */ + if (!check_for_pattern(fp, "AnonHugePages:", buffer)) + goto err_out; + + if (sscanf(buffer, "AnonHugePages:%10ld kB", &thp) != 1) { + printf("Reading smap error\n"); + exit(EXIT_FAILURE); + } + +err_out: + fclose(fp); + return thp; +} + +void split_pmd_thp(void) +{ + char *one_page; + size_t len = 4 * pmd_pagesize; + uint64_t thp_size; + size_t i; + + one_page = memalign(pmd_pagesize, len); + + if (!one_page) { + printf("Fail to allocate memory\n"); + exit(EXIT_FAILURE); + } + + madvise(one_page, len, MADV_HUGEPAGE); + + for (i = 0; i < len; i++) + one_page[i] = (char)i; + + thp_size = check_huge(one_page); + if (!thp_size) { + printf("No THP is allocated\n"); + exit(EXIT_FAILURE); + } + + /* split all THPs */ + write_debugfs(PID_FMT, getpid(), (uint64_t)one_page, + (uint64_t)one_page + len); + + for (i = 0; i < len; i++) + if (one_page[i] != (char)i) { + printf("%ld byte corrupted\n", i); + exit(EXIT_FAILURE); + } + + + thp_size = check_huge(one_page); + if (thp_size) { + printf("Still %ld kB AnonHugePages not split\n", thp_size); + exit(EXIT_FAILURE); + } + + printf("Split huge pages successful\n"); + free(one_page); +} + +void split_pte_mapped_thp(void) +{ + char *one_page, *pte_mapped, *pte_mapped2; + size_t len = 4 * pmd_pagesize; + uint64_t thp_size; + size_t i; + const char *pagemap_template = "/proc/%d/pagemap"; + const char *kpageflags_proc = "/proc/kpageflags"; + char pagemap_proc[255]; + int pagemap_fd; + int kpageflags_fd; + + if (snprintf(pagemap_proc, 255, pagemap_template, getpid()) < 0) { + perror("get pagemap proc error"); + exit(EXIT_FAILURE); + } + pagemap_fd = open(pagemap_proc, O_RDONLY); + + if (pagemap_fd == -1) { + perror("read pagemap:"); + exit(EXIT_FAILURE); + } + + kpageflags_fd = open(kpageflags_proc, O_RDONLY); + + if (kpageflags_fd == -1) { + perror("read kpageflags:"); + exit(EXIT_FAILURE); + } + + one_page = mmap((void *)(1UL << 30), len, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + + madvise(one_page, len, MADV_HUGEPAGE); + + for (i = 0; i < len; i++) + one_page[i] = (char)i; + + thp_size = check_huge(one_page); + if (!thp_size) { + printf("No THP is allocated\n"); + exit(EXIT_FAILURE); + } + + /* remap the first pagesize of first THP */ + pte_mapped = mremap(one_page, pagesize, pagesize, 
MREMAP_MAYMOVE);
+
+	/* remap the Nth pagesize of Nth THP */
+	for (i = 1; i < 4; i++) {
+		pte_mapped2 = mremap(one_page + pmd_pagesize * i + pagesize * i,
+				     pagesize, pagesize,
+				     MREMAP_MAYMOVE|MREMAP_FIXED,
+				     pte_mapped + pagesize * i);
+		if (pte_mapped2 == (char *)-1) {
+			perror("mremap failed");
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	/* smap does not show THPs after mremap, use kpageflags instead */
+	thp_size = 0;
+	for (i = 0; i < pagesize * 4; i++)
+		if (i % pagesize == 0 &&
+		    is_backed_by_thp(&pte_mapped[i], pagemap_fd, kpageflags_fd))
+			thp_size++;
+
+	if (thp_size != 4) {
+		printf("Some THPs are missing during mremap\n");
+		exit(EXIT_FAILURE);
+	}
+
+	/* split all remapped THPs */
+	write_debugfs(PID_FMT, getpid(), (uint64_t)pte_mapped,
+		      (uint64_t)pte_mapped + pagesize * 4);
+
+	/* smap does not show THPs after mremap, use kpageflags instead */
+	thp_size = 0;
+	for (i = 0; i < pagesize * 4; i++) {
+		if (pte_mapped[i] != (char)i) {
+			printf("%ld byte corrupted\n", i);
+			exit(EXIT_FAILURE);
+		}
+		if (i % pagesize == 0 &&
+		    is_backed_by_thp(&pte_mapped[i], pagemap_fd, kpageflags_fd))
+			thp_size++;
+	}
+
+	if (thp_size) {
+		printf("Still %ld THPs not split\n", thp_size);
+		exit(EXIT_FAILURE);
+	}
+
+	printf("Split PTE-mapped huge pages successful\n");
+	munmap(one_page, len);
+	close(pagemap_fd);
+	close(kpageflags_fd);
+}
+
+void split_file_backed_thp(void)
+{
+	int status;
+	int fd;
+	ssize_t num_written;
+	char tmpfs_template[] = "/tmp/thp_split_XXXXXX";
+	const char *tmpfs_loc = mkdtemp(tmpfs_template);
+	char testfile[INPUT_MAX];
+	uint64_t pgoff_start = 0, pgoff_end = 1024;
+
+	printf("Please enable pr_debug in split_huge_pages_in_file() if you need more info.\n");
+
+	status = mount("tmpfs", tmpfs_loc, "tmpfs", 0, "huge=always,size=4m");
+
+	if (status) {
+		printf("Unable to create a tmpfs for testing\n");
+		exit(EXIT_FAILURE);
+	}
+
+	status = snprintf(testfile, INPUT_MAX, "%s/thp_file", tmpfs_loc);
+	if (status >= INPUT_MAX) {
+		printf("Fail to create file-backed THP split testing file\n");
+		goto cleanup;
+	}
+
+	fd = open(testfile, O_CREAT|O_WRONLY, 0664);
+	if (fd == -1) {
+		perror("Cannot open testing file");
+		goto cleanup;
+	}
+
+	/* write something to the file, so a file-backed THP can be allocated */
+	num_written = write(fd, tmpfs_loc, strlen(tmpfs_loc) + 1);
+	close(fd);
+
+	if (num_written < 1) {
+		printf("Fail to write data to testing file\n");
+		goto cleanup;
+	}
+
+	/* split the file-backed THP */
+	write_debugfs(PATH_FMT, testfile, pgoff_start, pgoff_end);
+
+	status = unlink(testfile);
+	if (status)
+		perror("Cannot remove testing file");
+
+cleanup:
+	status = umount(tmpfs_loc);
+	if (status) {
+		printf("Unable to umount %s\n", tmpfs_loc);
+		exit(EXIT_FAILURE);
+	}
+	status = rmdir(tmpfs_loc);
+	if (status) {
+		perror("cannot remove tmp dir");
+		exit(EXIT_FAILURE);
+	}
+
+	printf("file-backed THP split test done, please check dmesg for more information\n");
+}
+
+int main(int argc, char **argv)
+{
+	if (geteuid() != 0) {
+		printf("Please run the test as root\n");
+		exit(EXIT_FAILURE);
+	}
+
+	pagesize = getpagesize();
+	pageshift = ffs(pagesize) - 1;
+	pmd_pagesize = read_pmd_pagesize();
+
+	split_pmd_thp();
+	split_pte_mapped_thp();
+	split_file_backed_thp();
+
+	return 0;
+}
diff --git a/tools/testing/selftests/vm/test_vmalloc.sh b/tools/testing/selftests/vm/test_vmalloc.sh
index 06d2bb109f06..d73b846736f1 100755
--- a/tools/testing/selftests/vm/test_vmalloc.sh
+++ b/tools/testing/selftests/vm/test_vmalloc.sh
@@ -11,6 +11,7 @@
 TEST_NAME="vmalloc"
DRIVER="test_${TEST_NAME}" +NUM_CPUS=`grep -c ^processor /proc/cpuinfo` # 1 if fails exitcode=1 @@ -22,9 +23,9 @@ ksft_skip=4 # Static templates for performance, stressing and smoke tests. # Also it is possible to pass any supported parameters manualy. # -PERF_PARAM="single_cpu_test=1 sequential_test_order=1 test_repeat_count=3" -SMOKE_PARAM="single_cpu_test=1 test_loop_count=10000 test_repeat_count=10" -STRESS_PARAM="test_repeat_count=20" +PERF_PARAM="sequential_test_order=1 test_repeat_count=3" +SMOKE_PARAM="test_loop_count=10000 test_repeat_count=10" +STRESS_PARAM="nr_threads=$NUM_CPUS test_repeat_count=20" check_test_requirements() { @@ -58,8 +59,8 @@ run_perfformance_check() run_stability_check() { - echo "Run stability tests. In order to stress vmalloc subsystem we run" - echo "all available test cases on all available CPUs simultaneously." + echo "Run stability tests. In order to stress vmalloc subsystem all" + echo "available test cases are run by NUM_CPUS workers simultaneously." echo "It will take time, so be patient." modprobe $DRIVER $STRESS_PARAM > /dev/null 2>&1 @@ -92,17 +93,17 @@ usage() echo "# Shows help message" echo "./${DRIVER}.sh" echo - echo "# Runs 1 test(id_1), repeats it 5 times on all online CPUs" - echo "./${DRIVER}.sh run_test_mask=1 test_repeat_count=5" + echo "# Runs 1 test(id_1), repeats it 5 times by NUM_CPUS workers" + echo "./${DRIVER}.sh nr_threads=$NUM_CPUS run_test_mask=1 test_repeat_count=5" echo echo -n "# Runs 4 tests(id_1|id_2|id_4|id_16) on one CPU with " echo "sequential order" - echo -n "./${DRIVER}.sh single_cpu_test=1 sequential_test_order=1 " + echo -n "./${DRIVER}.sh sequential_test_order=1 " echo "run_test_mask=23" echo - echo -n "# Runs all tests on all online CPUs, shuffled order, repeats " + echo -n "# Runs all tests by NUM_CPUS workers, shuffled order, repeats " echo "20 times" - echo "./${DRIVER}.sh test_repeat_count=20" + echo "./${DRIVER}.sh nr_threads=$NUM_CPUS test_repeat_count=20" echo echo "# Performance analysis" echo "./${DRIVER}.sh performance" diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index 92b8ec423201..f5ab5e0312e7 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c @@ -81,6 +81,8 @@ static volatile bool test_uffdio_copy_eexist = true; static volatile bool test_uffdio_zeropage_eexist = true; /* Whether to test uffd write-protection */ static bool test_uffdio_wp = false; +/* Whether to test uffd minor faults */ +static bool test_uffdio_minor = false; static bool map_shared; static int huge_fd; @@ -96,6 +98,7 @@ struct uffd_stats { int cpu; unsigned long missing_faults; unsigned long wp_faults; + unsigned long minor_faults; }; /* pthread_mutex_t starts at page offset 0 */ @@ -153,17 +156,19 @@ static void uffd_stats_reset(struct uffd_stats *uffd_stats, uffd_stats[i].cpu = i; uffd_stats[i].missing_faults = 0; uffd_stats[i].wp_faults = 0; + uffd_stats[i].minor_faults = 0; } } static void uffd_stats_report(struct uffd_stats *stats, int n_cpus) { int i; - unsigned long long miss_total = 0, wp_total = 0; + unsigned long long miss_total = 0, wp_total = 0, minor_total = 0; for (i = 0; i < n_cpus; i++) { miss_total += stats[i].missing_faults; wp_total += stats[i].wp_faults; + minor_total += stats[i].minor_faults; } printf("userfaults: %llu missing (", miss_total); @@ -172,6 +177,9 @@ static void uffd_stats_report(struct uffd_stats *stats, int n_cpus) printf("\b), %llu wp (", wp_total); for (i = 0; i < n_cpus; i++) printf("%lu+", 
stats[i].wp_faults); + printf("\b), %llu minor (", minor_total); + for (i = 0; i < n_cpus; i++) + printf("%lu+", stats[i].minor_faults); printf("\b)\n"); } @@ -328,7 +336,7 @@ static struct uffd_test_ops shmem_uffd_test_ops = { }; static struct uffd_test_ops hugetlb_uffd_test_ops = { - .expected_ioctls = UFFD_API_RANGE_IOCTLS_BASIC, + .expected_ioctls = UFFD_API_RANGE_IOCTLS_BASIC & ~(1 << _UFFDIO_CONTINUE), .allocate_area = hugetlb_allocate_area, .release_pages = hugetlb_release_pages, .alias_mapping = hugetlb_alias_mapping, @@ -362,6 +370,22 @@ static void wp_range(int ufd, __u64 start, __u64 len, bool wp) } } +static void continue_range(int ufd, __u64 start, __u64 len) +{ + struct uffdio_continue req; + + req.range.start = start; + req.range.len = len; + req.mode = 0; + + if (ioctl(ufd, UFFDIO_CONTINUE, &req)) { + fprintf(stderr, + "UFFDIO_CONTINUE failed for address 0x%" PRIx64 "\n", + (uint64_t)start); + exit(1); + } +} + static void *locking_thread(void *arg) { unsigned long cpu = (unsigned long) arg; @@ -569,8 +593,32 @@ static void uffd_handle_page_fault(struct uffd_msg *msg, } if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WP) { + /* Write protect page faults */ wp_range(uffd, msg->arg.pagefault.address, page_size, false); stats->wp_faults++; + } else if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR) { + uint8_t *area; + int b; + + /* + * Minor page faults + * + * To prove we can modify the original range for testing + * purposes, we're going to bit flip this range before + * continuing. + * + * Note that this requires all minor page fault tests operate on + * area_dst (non-UFFD-registered) and area_dst_alias + * (UFFD-registered). + */ + + area = (uint8_t *)(area_dst + + ((char *)msg->arg.pagefault.address - + area_dst_alias)); + for (b = 0; b < page_size; ++b) + area[b] = ~area[b]; + continue_range(uffd, msg->arg.pagefault.address, page_size); + stats->minor_faults++; } else { /* Missing page faults */ if (bounces & BOUNCE_VERIFY && @@ -779,7 +827,7 @@ static int stress(struct uffd_stats *uffd_stats) return 0; } -static int userfaultfd_open(int features) +static int userfaultfd_open_ext(uint64_t *features) { struct uffdio_api uffdio_api; @@ -792,7 +840,7 @@ static int userfaultfd_open(int features) uffd_flags = fcntl(uffd, F_GETFD, NULL); uffdio_api.api = UFFD_API; - uffdio_api.features = features; + uffdio_api.features = *features; if (ioctl(uffd, UFFDIO_API, &uffdio_api)) { fprintf(stderr, "UFFDIO_API failed.\nPlease make sure to " "run with either root or ptrace capability.\n"); @@ -804,9 +852,15 @@ static int userfaultfd_open(int features) return 1; } + *features = uffdio_api.features; return 0; } +static int userfaultfd_open(uint64_t features) +{ + return userfaultfd_open_ext(&features); +} + sigjmp_buf jbuf, *sigbuf; static void sighndl(int sig, siginfo_t *siginfo, void *ptr) @@ -1112,7 +1166,7 @@ static int userfaultfd_events_test(void) } if (!pid) - return faulting_process(0); + exit(faulting_process(0)); waitpid(pid, &err, 0); if (err) { @@ -1215,6 +1269,102 @@ static int userfaultfd_sig_test(void) return userfaults != 0; } +static int userfaultfd_minor_test(void) +{ + struct uffdio_register uffdio_register; + unsigned long expected_ioctls; + unsigned long p; + pthread_t uffd_mon; + uint8_t expected_byte; + void *expected_page; + char c; + struct uffd_stats stats = { 0 }; + uint64_t features = UFFD_FEATURE_MINOR_HUGETLBFS; + + if (!test_uffdio_minor) + return 0; + + printf("testing minor faults: "); + fflush(stdout); + + if 
(uffd_test_ops->release_pages(area_dst)) + return 1; + + if (userfaultfd_open_ext(&features)) + return 1; + /* If kernel reports the feature isn't supported, skip the test. */ + if (!(features & UFFD_FEATURE_MINOR_HUGETLBFS)) { + printf("skipping test due to lack of feature support\n"); + fflush(stdout); + return 0; + } + + uffdio_register.range.start = (unsigned long)area_dst_alias; + uffdio_register.range.len = nr_pages * page_size; + uffdio_register.mode = UFFDIO_REGISTER_MODE_MINOR; + if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) { + fprintf(stderr, "register failure\n"); + exit(1); + } + + expected_ioctls = uffd_test_ops->expected_ioctls; + expected_ioctls |= 1 << _UFFDIO_CONTINUE; + if ((uffdio_register.ioctls & expected_ioctls) != expected_ioctls) { + fprintf(stderr, "unexpected missing ioctl(s)\n"); + exit(1); + } + + /* + * After registering with UFFD, populate the non-UFFD-registered side of + * the shared mapping. This should *not* trigger any UFFD minor faults. + */ + for (p = 0; p < nr_pages; ++p) { + memset(area_dst + (p * page_size), p % ((uint8_t)-1), + page_size); + } + + if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, &stats)) { + perror("uffd_poll_thread create"); + exit(1); + } + + /* + * Read each of the pages back using the UFFD-registered mapping. We + * expect that the first time we touch a page, it will result in a minor + * fault. uffd_poll_thread will resolve the fault by bit-flipping the + * page's contents, and then issuing a CONTINUE ioctl. + */ + + if (posix_memalign(&expected_page, page_size, page_size)) { + fprintf(stderr, "out of memory\n"); + return 1; + } + + for (p = 0; p < nr_pages; ++p) { + expected_byte = ~((uint8_t)(p % ((uint8_t)-1))); + memset(expected_page, expected_byte, page_size); + if (my_bcmp(expected_page, area_dst_alias + (p * page_size), + page_size)) { + fprintf(stderr, + "unexpected page contents after minor fault\n"); + exit(1); + } + } + + if (write(pipefd[1], &c, sizeof(c)) != sizeof(c)) { + perror("pipe write"); + exit(1); + } + if (pthread_join(uffd_mon, NULL)) + return 1; + + close(uffd); + + uffd_stats_report(&stats, 1); + + return stats.missing_faults != 0 || stats.minor_faults != nr_pages; +} + static int userfaultfd_stress(void) { void *area; @@ -1413,7 +1563,7 @@ static int userfaultfd_stress(void) close(uffd); return userfaultfd_zeropage_test() || userfaultfd_sig_test() - || userfaultfd_events_test(); + || userfaultfd_events_test() || userfaultfd_minor_test(); } /* @@ -1454,6 +1604,8 @@ static void set_test_type(const char *type) map_shared = true; test_type = TEST_HUGETLB; uffd_test_ops = &hugetlb_uffd_test_ops; + /* Minor faults require shared hugetlb; only enable here. */ + test_uffdio_minor = true; } else if (!strcmp(type, "shmem")) { map_shared = true; test_type = TEST_SHMEM; |
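[Editor's note] The userfaultfd additions above register an alias mapping
for minor faults and resolve them with UFFDIO_CONTINUE instead of copying
pages in. A minimal sketch of that register/continue pair follows; it
assumes Linux 5.13+ UAPI headers (UFFD_FEATURE_MINOR_HUGETLBFS era) and an
already-created userfaultfd descriptor, and the helper names are
hypothetical, not part of the patch:

	#include <err.h>
	#include <linux/userfaultfd.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* Ask for minor faults on a range whose pages already exist in
	 * the page cache but are not yet mapped at this address. */
	static void register_minor(int uffd, void *alias_start, size_t len)
	{
		struct uffdio_register reg;

		memset(&reg, 0, sizeof(reg));
		reg.range.start = (unsigned long)alias_start;
		reg.range.len = len;
		reg.mode = UFFDIO_REGISTER_MODE_MINOR;
		if (ioctl(uffd, UFFDIO_REGISTER, &reg))
			err(1, "UFFDIO_REGISTER");
	}

	/* Resolve one minor fault (UFFD_PAGEFAULT_FLAG_MINOR): after the
	 * monitor has optionally edited the page through another mapping,
	 * UFFDIO_CONTINUE maps the existing page-cache page at the
	 * faulting address and wakes the faulting thread. */
	static void resolve_minor(int uffd, unsigned long fault_addr,
				  size_t page_size)
	{
		struct uffdio_continue cont;

		memset(&cont, 0, sizeof(cont));
		cont.range.start = fault_addr & ~(page_size - 1);
		cont.range.len = page_size;
		cont.mode = 0;
		if (ioctl(uffd, UFFDIO_CONTINUE, &cont))
			err(1, "UFFDIO_CONTINUE");
	}

In the selftest itself, the fault address arrives in
msg->arg.pagefault.address on the uffd read path, the page is bit-flipped
through the non-registered area_dst mapping, and continue_range() then
issues the same UFFDIO_CONTINUE ioctl before counting the event in
stats->minor_faults.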