author:    Paolo Bonzini <pbonzini@redhat.com>  2022-12-06 12:29:06 -0500
committer: Paolo Bonzini <pbonzini@redhat.com>  2022-12-12 15:54:07 -0500
commit:    9352e7470a1b4edd2fa9d235420ecc7bc3971bdc
tree:      141ccdb777f2ee36764f2067ab43f23da114e08a /tools/testing/selftests/kvm/dirty_log_test.c
parent:    2afc1fbbdab2aee831561f09f859989dcd5ed648
parent:    5656374b168c98377b6feee8d7500993eebda230
Merge remote-tracking branch 'kvm/queue' into HEAD
x86 Xen-for-KVM:

* Allow the Xen runstate information to cross a page boundary
* Allow XEN_RUNSTATE_UPDATE flag behaviour to be configured
* Add support for 32-bit guests in SCHEDOP_poll

x86 fixes:

* One-off fixes for various emulation flows (SGX, VMXON, NRIPS=0).
* Reinstate IBPB on emulated VM-Exit that was incorrectly dropped a few years back when eliminating unnecessary barriers when switching between vmcs01 and vmcs02.
* Clean up the MSR filter docs.
* Clean up vmread_error_trampoline() to make it more obvious that params must be passed on the stack, even for x86-64.
* Let userspace set all supported bits in MSR_IA32_FEAT_CTL irrespective of the current guest CPUID.
* Fudge around a race with TSC refinement that results in KVM incorrectly thinking a guest needs TSC scaling when running on a CPU with a constant TSC, but no hardware-enumerated TSC frequency.
* Advertise (on AMD) that the SMM_CTL MSR is not supported
* Remove unnecessary exports

Selftests:

* Fix an inverted check in the access tracking perf test, and restore support for asserting that there aren't too many idle pages when running on bare metal.
* Fix an ordering issue in the AMX test introduced by recent conversions to use kvm_cpu_has(), and harden the code to guard against similar bugs in the future. Anything that triggers caching of KVM's supported CPUID, kvm_cpu_has() in this case, effectively hides opt-in XSAVE features if the caching occurs before the test opts in via prctl().
* Fix build errors that occur in certain setups (unsure exactly what is unique about the problematic setup) due to glibc overriding static_assert() to a variant that requires a custom message.
* Introduce actual atomics for clear/set_bit() in selftests

Documentation:

* Remove deleted ioctls from documentation
* Various fixes
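The selftests item about atomics is what drives the rename in the diff below: helpers spelled __set_bit()/__clear_bit() follow the kernel convention of being non-atomic, while the plain set_bit()/clear_bit() names are reserved for atomic operations, and dirty_log_test.c is switched to the __-prefixed variants at call sites that do not need atomicity. A minimal userspace sketch of that naming convention (illustrative only, not the selftest implementation; assumes a GCC/Clang compiler for the __atomic builtin):

/*
 * Sketch: the double-underscore helper is a plain read-modify-write, safe
 * only when there are no concurrent writers; the atomic variant uses a
 * compiler builtin so concurrent setters cannot lose updates.
 */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Non-atomic: fine when a single thread owns the bitmap. */
static inline void __set_bit(unsigned long nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Atomic: concurrent set_bit() calls on the same word cannot clobber each other. */
static inline void set_bit(unsigned long nr, unsigned long *addr)
{
	__atomic_fetch_or(&addr[nr / BITS_PER_LONG],
			  1UL << (nr % BITS_PER_LONG), __ATOMIC_RELAXED);
}

int main(void)
{
	unsigned long bitmap[2] = { 0 };

	__set_bit(3, bitmap);	/* single-writer path */
	set_bit(70, bitmap);	/* safe even with concurrent setters */
	printf("%#lx %#lx\n", bitmap[0], bitmap[1]);
	return 0;
}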
Diffstat (limited to 'tools/testing/selftests/kvm/dirty_log_test.c')
-rw-r--r--   tools/testing/selftests/kvm/dirty_log_test.c   34
1 file changed, 17 insertions, 17 deletions
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 9d4c50c4e72e..936f3a8d1b83 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -47,20 +47,20 @@
# define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
# define test_bit_le(nr, addr) \
test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
-# define set_bit_le(nr, addr) \
- set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
-# define clear_bit_le(nr, addr) \
- clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
-# define test_and_set_bit_le(nr, addr) \
- test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
-# define test_and_clear_bit_le(nr, addr) \
- test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define __set_bit_le(nr, addr) \
+ __set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define __clear_bit_le(nr, addr) \
+ __clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define __test_and_set_bit_le(nr, addr) \
+ __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
+# define __test_and_clear_bit_le(nr, addr) \
+ __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
#else
-# define test_bit_le test_bit
-# define set_bit_le set_bit
-# define clear_bit_le clear_bit
-# define test_and_set_bit_le test_and_set_bit
-# define test_and_clear_bit_le test_and_clear_bit
+# define test_bit_le test_bit
+# define __set_bit_le __set_bit
+# define __clear_bit_le __clear_bit
+# define __test_and_set_bit_le __test_and_set_bit
+# define __test_and_clear_bit_le __test_and_clear_bit
#endif
#define TEST_DIRTY_RING_COUNT 65536
@@ -328,7 +328,7 @@ static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
"0x%llx >= 0x%x", cur->offset, num_pages);
//pr_info("fetch 0x%x page %llu\n", *fetch_index, cur->offset);
- set_bit_le(cur->offset, bitmap);
+ __set_bit_le(cur->offset, bitmap);
dirty_ring_last_page = cur->offset;
dirty_gfn_set_collected(cur);
(*fetch_index)++;
@@ -585,7 +585,7 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
value_ptr = host_test_mem + page * host_page_size;
/* If this is a special page that we were tracking... */
- if (test_and_clear_bit_le(page, host_bmap_track)) {
+ if (__test_and_clear_bit_le(page, host_bmap_track)) {
host_track_next_count++;
TEST_ASSERT(test_bit_le(page, bmap),
"Page %"PRIu64" should have its dirty bit "
@@ -593,7 +593,7 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
page);
}
- if (test_and_clear_bit_le(page, bmap)) {
+ if (__test_and_clear_bit_le(page, bmap)) {
bool matched;
host_dirty_count++;
@@ -686,7 +686,7 @@ static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
* should report its dirtyness in the
* next run
*/
- set_bit_le(page, host_bmap_track);
+ __set_bit_le(page, host_bmap_track);
}
}
}
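In the first hunk, the BITOP_LE_SWIZZLE XOR is what lets the *_le() helpers address a little-endian bitmap from a big-endian host: (BITS_PER_LONG-1) & ~0x7 is 0x38 with 64-bit longs, so XOR-ing the bit number mirrors the byte index within each 8-byte word while leaving the bit offset inside the byte untouched. A small stand-alone sketch of just that arithmetic (assumes 64-bit longs; the program itself is illustrative, not part of the test):

#include <stdio.h>

#define BITS_PER_LONG 64
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG - 1) & ~0x7)	/* 0x38 on 64-bit */

int main(void)
{
	/* Show how the XOR mirrors the byte index within one 8-byte word. */
	for (unsigned int nr = 0; nr < 16; nr++) {
		unsigned int native = nr ^ BITOP_LE_SWIZZLE;

		printf("LE bit %2u -> native bit %2u\n", nr, native);
	}
	return 0;
}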