path: root/tools
author    Linus Torvalds <torvalds@linux-foundation.org>  2021-10-01 11:08:07 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-10-01 11:08:07 -0700
commit    b2626f1e3245ddd810b69df86514774d6cb655ee (patch)
tree      6bf8c29e62f3c6bfb5269c6f971942ba1766b0ff /tools
parent    24f67d82c43c9c594821ee1bc4367a23d89d9f8b (diff)
parent    7b0035eaa7dab9fd33d6658ad6a755024bdce26c (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more kvm fixes from Paolo Bonzini:
 "Small x86 fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: selftests: Ensure all migrations are performed when test is affined
  KVM: x86: Swap order of CPUID entry "index" vs. "significant flag" checks
  ptp: Fix ptp_kvm_getcrosststamp issue for x86 ptp_kvm
  x86/kvmclock: Move this_cpu_pvti into kvmclock.h
  selftests: KVM: Don't clobber XMM register when read
  KVM: VMX: Fix a TSX_CTRL_CPUID_CLEAR field mask issue
Diffstat (limited to 'tools')
-rw-r--r--  tools/testing/selftests/kvm/include/x86_64/processor.h |  2
-rw-r--r--  tools/testing/selftests/kvm/rseq_test.c                | 69
2 files changed, 60 insertions, 11 deletions
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index eba8bd08293e..05e65ca1c30c 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -315,7 +315,7 @@ static inline void set_xmm(int n, unsigned long val)
#define GET_XMM(__xmm) \
({ \
unsigned long __val; \
- asm volatile("movq %%"#__xmm", %0" : "=r"(__val) : : #__xmm); \
+ asm volatile("movq %%"#__xmm", %0" : "=r"(__val)); \
__val; \
})
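The hunk above is the "selftests: KVM: Don't clobber XMM register when read" fix: a movq that reads an XMM register into a GPR only writes the output operand, so the register being read does not belong in the asm clobber list. A minimal standalone sketch of the corrected macro follows; the main() driver is illustrative only and not part of the patch.

/* Standalone sketch assuming an x86-64 GCC/Clang build; not from the selftest. */
#include <stdio.h>

#define GET_XMM(__xmm)							\
({									\
	unsigned long __val;						\
	asm volatile("movq %%"#__xmm", %0" : "=r"(__val));		\
	__val;								\
})

int main(void)
{
	unsigned long in = 0x1234abcdUL;

	/* Load a known value into %xmm0 (this write really does clobber xmm0). */
	asm volatile("movq %0, %%xmm0" : : "r"(in) : "xmm0");

	/* Read it back; only the GPR output is written, nothing is clobbered. */
	printf("xmm0 = 0x%lx\n", GET_XMM(xmm0));
	return 0;
}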
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index c5e0dd664a7b..4158da0da2bb 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -10,6 +10,7 @@
#include <signal.h>
#include <syscall.h>
#include <sys/ioctl.h>
+#include <sys/sysinfo.h>
#include <asm/barrier.h>
#include <linux/atomic.h>
#include <linux/rseq.h>
@@ -39,6 +40,7 @@ static __thread volatile struct rseq __rseq = {
static pthread_t migration_thread;
static cpu_set_t possible_mask;
+static int min_cpu, max_cpu;
static bool done;
static atomic_t seq_cnt;
@@ -57,20 +59,37 @@ static void sys_rseq(int flags)
TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
}
+static int next_cpu(int cpu)
+{
+ /*
+ * Advance to the next CPU, skipping those that weren't in the original
+ * affinity set. Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
+ * data storage is considered opaque. Note, if this task is pinned
+ * to a small set of discontiguous CPUs, e.g. 2 and 1023, this loop will
+ * burn a lot of cycles and the test will take longer than normal to
+ * complete.
+ */
+ do {
+ cpu++;
+ if (cpu > max_cpu) {
+ cpu = min_cpu;
+ TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
+ "Min CPU = %d must always be usable", cpu);
+ break;
+ }
+ } while (!CPU_ISSET(cpu, &possible_mask));
+
+ return cpu;
+}
+
static void *migration_worker(void *ign)
{
cpu_set_t allowed_mask;
- int r, i, nr_cpus, cpu;
+ int r, i, cpu;
CPU_ZERO(&allowed_mask);
- nr_cpus = CPU_COUNT(&possible_mask);
-
- for (i = 0; i < NR_TASK_MIGRATIONS; i++) {
- cpu = i % nr_cpus;
- if (!CPU_ISSET(cpu, &possible_mask))
- continue;
-
+ for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
CPU_SET(cpu, &allowed_mask);
/*
@@ -154,6 +173,36 @@ static void *migration_worker(void *ign)
return NULL;
}
+static int calc_min_max_cpu(void)
+{
+ int i, cnt, nproc;
+
+ if (CPU_COUNT(&possible_mask) < 2)
+ return -EINVAL;
+
+ /*
+ * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
+ * this task is affined to in order to reduce the time spent querying
+ * unusable CPUs, e.g. if this task is pinned to a small percentage of
+ * total CPUs.
+ */
+ nproc = get_nprocs_conf();
+ min_cpu = -1;
+ max_cpu = -1;
+ cnt = 0;
+
+ for (i = 0; i < nproc; i++) {
+ if (!CPU_ISSET(i, &possible_mask))
+ continue;
+ if (min_cpu == -1)
+ min_cpu = i;
+ max_cpu = i;
+ cnt++;
+ }
+
+ return (cnt < 2) ? -EINVAL : 0;
+}
+
int main(int argc, char *argv[])
{
int r, i, snapshot;
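The new calc_min_max_cpu() above has no CPU_SET_FOR_EACH helper to lean on, so it probes every configured CPU index with CPU_ISSET() and records the first and last hit. Standalone, the same scan looks roughly like the following sketch, assuming glibc's sched_getaffinity() and get_nprocs_conf(); the driver program is illustrative, not from the patch.

/* Sketch: print the lowest and highest CPU the current thread is affined to. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	cpu_set_t mask;
	int i, min_cpu = -1, max_cpu = -1;

	if (sched_getaffinity(0, sizeof(mask), &mask))
		return 1;

	/* Probe each possible CPU index; cpu_set_t's storage is opaque. */
	for (i = 0; i < get_nprocs_conf(); i++) {
		if (!CPU_ISSET(i, &mask))
			continue;
		if (min_cpu == -1)
			min_cpu = i;
		max_cpu = i;
	}

	printf("affined CPU range: %d..%d\n", min_cpu, max_cpu);
	return 0;
}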
@@ -167,8 +216,8 @@ int main(int argc, char *argv[])
TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
strerror(errno));
- if (CPU_COUNT(&possible_mask) < 2) {
- print_skip("Only one CPU, task migration not possible\n");
+ if (calc_min_max_cpu()) {
+ print_skip("Only one usable CPU, task migration not possible");
exit(KSFT_SKIP);
}
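For context on the rewritten migration loop in migration_worker(): each iteration rebuilds allowed_mask around a single usable CPU before applying it. The call that applies the mask is elided by the hunk above; a typical way to do it is sched_setaffinity(), sketched below. The migrate_self_to() helper name is hypothetical, not from the selftest.

/* Sketch, assuming Linux and _GNU_SOURCE: pin the calling thread to one CPU. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int migrate_self_to(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	/* pid 0 == the calling thread; the kernel migrates it onto 'cpu'. */
	return sched_setaffinity(0, sizeof(mask), &mask);
}

int main(void)
{
	int i;

	/* Bounce between CPU 0 and CPU 1 a few times. */
	for (i = 0; i < 8; i++) {
		if (migrate_self_to(i & 1))
			perror("sched_setaffinity");
	}
	return 0;
}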