From 515bbc8ab488e4bc16fe26df097502c04d3649d4 Mon Sep 17 00:00:00 2001
From: "Naveen N. Rao"
Date: Wed, 3 Jul 2019 22:33:54 +0530
Subject: powerpc/pseries: Use macros for referring to the DTL enable mask

Introduce macros to encode the DTL enable mask fields and use those
instead of hardcoding numbers.

Acked-by: Nathan Lynch
Signed-off-by: Naveen N. Rao
Signed-off-by: Michael Ellerman
---
 arch/powerpc/platforms/pseries/lpar.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/powerpc/platforms/pseries/lpar.c')

diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 1034ef1fe2b4..23f2ac6793b7 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -126,7 +126,7 @@ void vpa_init(int cpu)
 			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
 			       "failed with %ld\n", smp_processor_id(),
 			       hwcpu, ret);
-		lppaca_of(cpu).dtl_enable_mask = 2;
+		lppaca_of(cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
 	}
 }
-- cgit

From 1c85a2a1945cbafcd2e7cebc6e23d0e206aeda3d Mon Sep 17 00:00:00 2001
From: "Naveen N. Rao"
Date: Wed, 3 Jul 2019 22:33:56 +0530
Subject: powerpc/pseries: Factor out DTL buffer allocation and registration routines

Introduce new helpers for DTL buffer allocation and registration and
have the existing code use those.

Signed-off-by: Naveen N. Rao
[mpe: Don't split error messages across lines, for grepability]
Signed-off-by: Michael Ellerman
---
 arch/powerpc/platforms/pseries/lpar.c | 67 +++++++++++++++++++++++++----------
 1 file changed, 49 insertions(+), 18 deletions(-)
(limited to 'arch/powerpc/platforms/pseries/lpar.c')

diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 23f2ac6793b7..da7e7c243864 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -65,13 +65,59 @@ EXPORT_SYMBOL(plpar_hcall);
 EXPORT_SYMBOL(plpar_hcall9);
 EXPORT_SYMBOL(plpar_hcall_norets);
 
+void alloc_dtl_buffers(void)
+{
+	int cpu;
+	struct paca_struct *pp;
+	struct dtl_entry *dtl;
+
+	for_each_possible_cpu(cpu) {
+		pp = paca_ptrs[cpu];
+		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
+		if (!dtl) {
+			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
+				cpu);
+			pr_warn("Stolen time statistics will be unreliable\n");
+			break;
+		}
+
+		pp->dtl_ridx = 0;
+		pp->dispatch_log = dtl;
+		pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
+		pp->dtl_curr = dtl;
+	}
+}
+
+void register_dtl_buffer(int cpu)
+{
+	long ret;
+	struct paca_struct *pp;
+	struct dtl_entry *dtl;
+	int hwcpu = get_hard_smp_processor_id(cpu);
+
+	pp = paca_ptrs[cpu];
+	dtl = pp->dispatch_log;
+	if (dtl) {
+		pp->dtl_ridx = 0;
+		pp->dtl_curr = dtl;
+		lppaca_of(cpu).dtl_idx = 0;
+
+		/* hypervisor reads buffer length from this field */
+		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
+		ret = register_dtl(hwcpu, __pa(dtl));
+		if (ret)
+			pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n",
+			       cpu, hwcpu, ret);
+
+		lppaca_of(cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
+	}
+}
+
 void vpa_init(int cpu)
 {
 	int hwcpu = get_hard_smp_processor_id(cpu);
 	unsigned long addr;
 	long ret;
-	struct paca_struct *pp;
-	struct dtl_entry *dtl;
 
 	/*
 	 * The spec says it "may be problematic" if CPU x registers the VPA of
@@ -112,22 +158,7 @@
 	/*
 	 * Register dispatch trace log, if one has been allocated.
 	 */
-	pp = paca_ptrs[cpu];
-	dtl = pp->dispatch_log;
-	if (dtl) {
-		pp->dtl_ridx = 0;
-		pp->dtl_curr = dtl;
-		lppaca_of(cpu).dtl_idx = 0;
-
-		/* hypervisor reads buffer length from this field */
-		dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
-		ret = register_dtl(hwcpu, __pa(dtl));
-		if (ret)
-			pr_err("WARNING: DTL registration of cpu %d (hw %d) "
-			       "failed with %ld\n", smp_processor_id(),
-			       hwcpu, ret);
-		lppaca_of(cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
-	}
+	register_dtl_buffer(cpu);
 }
 
 #ifdef CONFIG_PPC_BOOK3S_64
-- cgit

From 06220d78f24a20549757be1014e57c382406cc92 Mon Sep 17 00:00:00 2001
From: "Naveen N. Rao"
Date: Wed, 3 Jul 2019 22:33:57 +0530
Subject: powerpc/pseries: Introduce rwlock to gatekeep DTL buffer usage

Since we will be introducing a new user of the DTL buffer in a
subsequent patch, we need a way to gatekeep use of the DTL buffer.

The current debugfs interface for DTL allows registering and opening
cpu-specific DTL buffers. CPU-specific files are exposed under the
debugfs 'powerpc/dtl/' directory, and changing 'dtl_event_mask' in the
same directory controls the event mask used when registering the DTL
buffer for a particular cpu.

Subsequently, we will be introducing a user of the DTL buffers that
registers the DTL buffers across all cpus with the same event mask. To
ensure these two users do not step on each other, we introduce a
rwlock to gatekeep DTL buffer access. This fits the requirements of
both users: the current debugfs interface wants to allow multiple
independent cpu-specific users (read lock), while the subsequent user
wants exclusive access (write lock).

Suggested-by: Michael Ellerman
Signed-off-by: Naveen N. Rao
Signed-off-by: Michael Ellerman
---
 arch/powerpc/platforms/pseries/lpar.c | 4 ++++
 1 file changed, 4 insertions(+)
(limited to 'arch/powerpc/platforms/pseries/lpar.c')

diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index da7e7c243864..7869121ab431 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -113,6 +113,10 @@ void register_dtl_buffer(int cpu)
 	}
 }
 
+#ifdef CONFIG_PPC_SPLPAR
+DEFINE_RWLOCK(dtl_access_lock);
+#endif /* CONFIG_PPC_SPLPAR */
+
 void vpa_init(int cpu)
 {
 	int hwcpu = get_hard_smp_processor_id(cpu);
 	unsigned long addr;
 	long ret;
-- cgit

From d62c8deeb6e69cd7815c21171a218301822e4a06 Mon Sep 17 00:00:00 2001
From: "Naveen N. Rao"
Date: Wed, 3 Jul 2019 22:34:00 +0530
Subject: powerpc/pseries: Provide vcpu dispatch statistics

For Shared Processor LPARs, the POWER Hypervisor maintains a relatively
static mapping of the LPAR processors (vcpus) to physical processor
chips (representing the "home" node) and tries to always dispatch vcpus
on their associated physical processor chip. However, under certain
scenarios, vcpus may be dispatched on a different processor chip (away
from their home node).

The actual physical processor number on which a certain vcpu is
dispatched is available to the guest in the 'processor_id' field of
each DTL entry. The guest can discover the home node of each vcpu
through the H_HOME_NODE_ASSOCIATIVITY(flags=1) hcall. The guest can
also discover the associativity of physical processors, as represented
in the DTL entry, through the H_HOME_NODE_ASSOCIATIVITY(flags=2) hcall.

These can then be compared to determine if the vcpu was dispatched on
its home node or not. If the vcpu was not dispatched on the home node,
it is possible to determine if the vcpu was dispatched on a different
chip, socket or drawer.
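
To make the comparison concrete, here is a minimal sketch of how two
such associativity arrays could be compared. This helper is
illustrative only and is not code from this series; it assumes the
conventional PAPR layout, where the first word holds the number of
domain identifiers that follow, ordered from the broadest domain down
to the most specific. The series itself relies on the existing
cpu_distance() helper and firmware-provided reference points instead.

/* Count how many reported domain levels two cpus do not share. */
static int assoc_distance(const __be32 *a, const __be32 *b)
{
	int la = be32_to_cpu(a[0]), lb = be32_to_cpu(b[0]);
	int levels = la < lb ? la : lb;
	int i, shared = 0;

	/* Domains are ordered broadest-first; stop at the first mismatch. */
	for (i = 1; i <= levels; i++) {
		if (a[i] != b[i])
			break;
		shared++;
	}

	return levels - shared;
}

A result of 0 against the vcpu's home-node associativity would count as
a home-node dispatch, with 1 and 2 indicating progressively more remote
dispatches, mirroring the switch statements in update_vcpu_disp_stat()
below.
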
Introduce a procfs file /proc/powerpc/vcpudispatch_stats that can be
used to obtain these statistics. Writing '1' to this file enables
collecting the statistics, while writing '0' disables collection. The
statistics themselves are available by reading the procfs file. By
default, the DTL buffer for each vcpu is processed 50 times a second so
as not to miss any entries. This processing frequency can be changed
through /proc/powerpc/vcpudispatch_stats_freq.

Signed-off-by: Naveen N. Rao
Signed-off-by: Michael Ellerman
---
 arch/powerpc/platforms/pseries/lpar.c | 525 +++++++++++++++++++++++++++++++++-
 1 file changed, 523 insertions(+), 2 deletions(-)
(limited to 'arch/powerpc/platforms/pseries/lpar.c')

diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 7869121ab431..ec5a7893f71b 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -30,6 +30,10 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include
 #include
 #include
 #include
@@ -65,6 +69,12 @@ EXPORT_SYMBOL(plpar_hcall);
 EXPORT_SYMBOL(plpar_hcall9);
 EXPORT_SYMBOL(plpar_hcall_norets);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+static u8 dtl_mask = DTL_LOG_PREEMPT;
+#else
+static u8 dtl_mask;
+#endif
+
 void alloc_dtl_buffers(void)
 {
 	int cpu;
@@ -73,11 +83,15 @@ void alloc_dtl_buffers(void)
 
 	for_each_possible_cpu(cpu) {
 		pp = paca_ptrs[cpu];
+		if (pp->dispatch_log)
+			continue;
 		dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL);
 		if (!dtl) {
 			pr_warn("Failed to allocate dispatch trace log for cpu %d\n",
 				cpu);
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 			pr_warn("Stolen time statistics will be unreliable\n");
+#endif
 			break;
 		}
@@ -97,7 +111,7 @@ void register_dtl_buffer(int cpu)
 
 	pp = paca_ptrs[cpu];
 	dtl = pp->dispatch_log;
-	if (dtl) {
+	if (dtl && dtl_mask) {
 		pp->dtl_ridx = 0;
 		pp->dtl_curr = dtl;
 		lppaca_of(cpu).dtl_idx = 0;
@@ -109,12 +123,519 @@ void register_dtl_buffer(int cpu)
 			pr_err("WARNING: DTL registration of cpu %d (hw %d) failed with %ld\n",
 			       cpu, hwcpu, ret);
 
-		lppaca_of(cpu).dtl_enable_mask = DTL_LOG_PREEMPT;
+		lppaca_of(cpu).dtl_enable_mask = dtl_mask;
 	}
 }
 
 #ifdef CONFIG_PPC_SPLPAR
+struct dtl_worker {
+	struct delayed_work work;
+	int cpu;
+};
+
+struct vcpu_dispatch_data {
+	int last_disp_cpu;
+
+	int total_disp;
+
+	int same_cpu_disp;
+	int same_chip_disp;
+	int diff_chip_disp;
+	int far_chip_disp;
+
+	int numa_home_disp;
+	int numa_remote_disp;
+	int numa_far_disp;
+};
+
+/*
+ * This represents the number of cpus in the hypervisor. Since there is no
+ * architected way to discover the number of processors in the host, we
+ * provision for dealing with NR_CPUS. This is currently 2048 by default, and
+ * is sufficient for our purposes. This will need to be tweaked if
+ * CONFIG_NR_CPUS is changed.
+ */ +#define NR_CPUS_H NR_CPUS + DEFINE_RWLOCK(dtl_access_lock); +static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data); +static DEFINE_PER_CPU(u64, dtl_entry_ridx); +static DEFINE_PER_CPU(struct dtl_worker, dtl_workers); +static enum cpuhp_state dtl_worker_state; +static DEFINE_MUTEX(dtl_enable_mutex); +static int vcpudispatch_stats_on __read_mostly; +static int vcpudispatch_stats_freq = 50; +static __be32 *vcpu_associativity, *pcpu_associativity; + + +static void free_dtl_buffers(void) +{ +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + int cpu; + struct paca_struct *pp; + + for_each_possible_cpu(cpu) { + pp = paca_ptrs[cpu]; + if (!pp->dispatch_log) + continue; + kmem_cache_free(dtl_cache, pp->dispatch_log); + pp->dtl_ridx = 0; + pp->dispatch_log = 0; + pp->dispatch_log_end = 0; + pp->dtl_curr = 0; + } +#endif +} + +static int init_cpu_associativity(void) +{ + vcpu_associativity = kcalloc(num_possible_cpus() / threads_per_core, + VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL); + pcpu_associativity = kcalloc(NR_CPUS_H / threads_per_core, + VPHN_ASSOC_BUFSIZE * sizeof(__be32), GFP_KERNEL); + + if (!vcpu_associativity || !pcpu_associativity) { + pr_err("error allocating memory for associativity information\n"); + return -ENOMEM; + } + + return 0; +} + +static void destroy_cpu_associativity(void) +{ + kfree(vcpu_associativity); + kfree(pcpu_associativity); + vcpu_associativity = pcpu_associativity = 0; +} + +static __be32 *__get_cpu_associativity(int cpu, __be32 *cpu_assoc, int flag) +{ + __be32 *assoc; + int rc = 0; + + assoc = &cpu_assoc[(int)(cpu / threads_per_core) * VPHN_ASSOC_BUFSIZE]; + if (!assoc[0]) { + rc = hcall_vphn(cpu, flag, &assoc[0]); + if (rc) + return NULL; + } + + return assoc; +} + +static __be32 *get_pcpu_associativity(int cpu) +{ + return __get_cpu_associativity(cpu, pcpu_associativity, VPHN_FLAG_PCPU); +} + +static __be32 *get_vcpu_associativity(int cpu) +{ + return __get_cpu_associativity(cpu, vcpu_associativity, VPHN_FLAG_VCPU); +} + +static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu) +{ + __be32 *last_disp_cpu_assoc, *cur_disp_cpu_assoc; + + if (last_disp_cpu >= NR_CPUS_H || cur_disp_cpu >= NR_CPUS_H) + return -EINVAL; + + last_disp_cpu_assoc = get_pcpu_associativity(last_disp_cpu); + cur_disp_cpu_assoc = get_pcpu_associativity(cur_disp_cpu); + + if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc) + return -EIO; + + return cpu_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc); +} + +static int cpu_home_node_dispatch_distance(int disp_cpu) +{ + __be32 *disp_cpu_assoc, *vcpu_assoc; + int vcpu_id = smp_processor_id(); + + if (disp_cpu >= NR_CPUS_H) { + pr_debug_ratelimited("vcpu dispatch cpu %d > %d\n", + disp_cpu, NR_CPUS_H); + return -EINVAL; + } + + disp_cpu_assoc = get_pcpu_associativity(disp_cpu); + vcpu_assoc = get_vcpu_associativity(vcpu_id); + + if (!disp_cpu_assoc || !vcpu_assoc) + return -EIO; + + return cpu_distance(disp_cpu_assoc, vcpu_assoc); +} + +static void update_vcpu_disp_stat(int disp_cpu) +{ + struct vcpu_dispatch_data *disp; + int distance; + + disp = this_cpu_ptr(&vcpu_disp_data); + if (disp->last_disp_cpu == -1) { + disp->last_disp_cpu = disp_cpu; + return; + } + + disp->total_disp++; + + if (disp->last_disp_cpu == disp_cpu || + (cpu_first_thread_sibling(disp->last_disp_cpu) == + cpu_first_thread_sibling(disp_cpu))) + disp->same_cpu_disp++; + else { + distance = cpu_relative_dispatch_distance(disp->last_disp_cpu, + disp_cpu); + if (distance < 0) + pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error 
determining associativity\n", + smp_processor_id()); + else { + switch (distance) { + case 0: + disp->same_chip_disp++; + break; + case 1: + disp->diff_chip_disp++; + break; + case 2: + disp->far_chip_disp++; + break; + default: + pr_debug_ratelimited("vcpudispatch_stats: cpu %d (%d -> %d): unexpected relative dispatch distance %d\n", + smp_processor_id(), + disp->last_disp_cpu, + disp_cpu, + distance); + } + } + } + + distance = cpu_home_node_dispatch_distance(disp_cpu); + if (distance < 0) + pr_debug_ratelimited("vcpudispatch_stats: cpu %d: error determining associativity\n", + smp_processor_id()); + else { + switch (distance) { + case 0: + disp->numa_home_disp++; + break; + case 1: + disp->numa_remote_disp++; + break; + case 2: + disp->numa_far_disp++; + break; + default: + pr_debug_ratelimited("vcpudispatch_stats: cpu %d on %d: unexpected numa dispatch distance %d\n", + smp_processor_id(), + disp_cpu, + distance); + } + } + + disp->last_disp_cpu = disp_cpu; +} + +static void process_dtl_buffer(struct work_struct *work) +{ + struct dtl_entry dtle; + u64 i = __this_cpu_read(dtl_entry_ridx); + struct dtl_entry *dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); + struct dtl_entry *dtl_end = local_paca->dispatch_log_end; + struct lppaca *vpa = local_paca->lppaca_ptr; + struct dtl_worker *d = container_of(work, struct dtl_worker, work.work); + + if (!local_paca->dispatch_log) + return; + + /* if we have been migrated away, we cancel ourself */ + if (d->cpu != smp_processor_id()) { + pr_debug("vcpudispatch_stats: cpu %d worker migrated -- canceling worker\n", + smp_processor_id()); + return; + } + + if (i == be64_to_cpu(vpa->dtl_idx)) + goto out; + + while (i < be64_to_cpu(vpa->dtl_idx)) { + dtle = *dtl; + barrier(); + if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) { + /* buffer has overflowed */ + pr_debug_ratelimited("vcpudispatch_stats: cpu %d lost %lld DTL samples\n", + d->cpu, + be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG - i); + i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG; + dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); + continue; + } + update_vcpu_disp_stat(be16_to_cpu(dtle.processor_id)); + ++i; + ++dtl; + if (dtl == dtl_end) + dtl = local_paca->dispatch_log; + } + + __this_cpu_write(dtl_entry_ridx, i); + +out: + schedule_delayed_work_on(d->cpu, to_delayed_work(work), + HZ / vcpudispatch_stats_freq); +} + +static int dtl_worker_online(unsigned int cpu) +{ + struct dtl_worker *d = &per_cpu(dtl_workers, cpu); + + memset(d, 0, sizeof(*d)); + INIT_DELAYED_WORK(&d->work, process_dtl_buffer); + d->cpu = cpu; + +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + per_cpu(dtl_entry_ridx, cpu) = 0; + register_dtl_buffer(cpu); +#else + per_cpu(dtl_entry_ridx, cpu) = be64_to_cpu(lppaca_of(cpu).dtl_idx); +#endif + + schedule_delayed_work_on(cpu, &d->work, HZ / vcpudispatch_stats_freq); + return 0; +} + +static int dtl_worker_offline(unsigned int cpu) +{ + struct dtl_worker *d = &per_cpu(dtl_workers, cpu); + + cancel_delayed_work_sync(&d->work); + +#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + unregister_dtl(get_hard_smp_processor_id(cpu)); +#endif + + return 0; +} + +static void set_global_dtl_mask(u8 mask) +{ + int cpu; + + dtl_mask = mask; + for_each_present_cpu(cpu) + lppaca_of(cpu).dtl_enable_mask = dtl_mask; +} + +static void reset_global_dtl_mask(void) +{ + int cpu; + +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE + dtl_mask = DTL_LOG_PREEMPT; +#else + dtl_mask = 0; +#endif + for_each_present_cpu(cpu) + lppaca_of(cpu).dtl_enable_mask = dtl_mask; +} + +static int 
dtl_worker_enable(void) +{ + int rc = 0, state; + + if (!write_trylock(&dtl_access_lock)) { + rc = -EBUSY; + goto out; + } + + set_global_dtl_mask(DTL_LOG_ALL); + + /* Setup dtl buffers and register those */ + alloc_dtl_buffers(); + + state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online", + dtl_worker_online, dtl_worker_offline); + if (state < 0) { + pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n"); + free_dtl_buffers(); + reset_global_dtl_mask(); + write_unlock(&dtl_access_lock); + rc = -EINVAL; + goto out; + } + dtl_worker_state = state; + +out: + return rc; +} + +static void dtl_worker_disable(void) +{ + cpuhp_remove_state(dtl_worker_state); + free_dtl_buffers(); + reset_global_dtl_mask(); + write_unlock(&dtl_access_lock); +} + +static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p, + size_t count, loff_t *ppos) +{ + struct vcpu_dispatch_data *disp; + int rc, cmd, cpu; + char buf[16]; + + if (count > 15) + return -EINVAL; + + if (copy_from_user(buf, p, count)) + return -EFAULT; + + buf[count] = 0; + rc = kstrtoint(buf, 0, &cmd); + if (rc || cmd < 0 || cmd > 1) { + pr_err("vcpudispatch_stats: please use 0 to disable or 1 to enable dispatch statistics\n"); + return rc ? rc : -EINVAL; + } + + mutex_lock(&dtl_enable_mutex); + + if ((cmd == 0 && !vcpudispatch_stats_on) || + (cmd == 1 && vcpudispatch_stats_on)) + goto out; + + if (cmd) { + rc = init_cpu_associativity(); + if (rc) + goto out; + + for_each_possible_cpu(cpu) { + disp = per_cpu_ptr(&vcpu_disp_data, cpu); + memset(disp, 0, sizeof(*disp)); + disp->last_disp_cpu = -1; + } + + rc = dtl_worker_enable(); + if (rc) { + destroy_cpu_associativity(); + goto out; + } + } else { + dtl_worker_disable(); + destroy_cpu_associativity(); + } + + vcpudispatch_stats_on = cmd; + +out: + mutex_unlock(&dtl_enable_mutex); + if (rc) + return rc; + return count; +} + +static int vcpudispatch_stats_display(struct seq_file *p, void *v) +{ + int cpu; + struct vcpu_dispatch_data *disp; + + if (!vcpudispatch_stats_on) { + seq_puts(p, "off\n"); + return 0; + } + + for_each_online_cpu(cpu) { + disp = per_cpu_ptr(&vcpu_disp_data, cpu); + seq_printf(p, "cpu%d", cpu); + seq_put_decimal_ull(p, " ", disp->total_disp); + seq_put_decimal_ull(p, " ", disp->same_cpu_disp); + seq_put_decimal_ull(p, " ", disp->same_chip_disp); + seq_put_decimal_ull(p, " ", disp->diff_chip_disp); + seq_put_decimal_ull(p, " ", disp->far_chip_disp); + seq_put_decimal_ull(p, " ", disp->numa_home_disp); + seq_put_decimal_ull(p, " ", disp->numa_remote_disp); + seq_put_decimal_ull(p, " ", disp->numa_far_disp); + seq_puts(p, "\n"); + } + + return 0; +} + +static int vcpudispatch_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, vcpudispatch_stats_display, NULL); +} + +static const struct file_operations vcpudispatch_stats_proc_ops = { + .open = vcpudispatch_stats_open, + .read = seq_read, + .write = vcpudispatch_stats_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t vcpudispatch_stats_freq_write(struct file *file, + const char __user *p, size_t count, loff_t *ppos) +{ + int rc, freq; + char buf[16]; + + if (count > 15) + return -EINVAL; + + if (copy_from_user(buf, p, count)) + return -EFAULT; + + buf[count] = 0; + rc = kstrtoint(buf, 0, &freq); + if (rc || freq < 1 || freq > HZ) { + pr_err("vcpudispatch_stats_freq: please specify a frequency between 1 and %d\n", + HZ); + return rc ? 
rc : -EINVAL; + } + + vcpudispatch_stats_freq = freq; + + return count; +} + +static int vcpudispatch_stats_freq_display(struct seq_file *p, void *v) +{ + seq_printf(p, "%d\n", vcpudispatch_stats_freq); + return 0; +} + +static int vcpudispatch_stats_freq_open(struct inode *inode, struct file *file) +{ + return single_open(file, vcpudispatch_stats_freq_display, NULL); +} + +static const struct file_operations vcpudispatch_stats_freq_proc_ops = { + .open = vcpudispatch_stats_freq_open, + .read = seq_read, + .write = vcpudispatch_stats_freq_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init vcpudispatch_stats_procfs_init(void) +{ + if (!lppaca_shared_proc(get_lppaca())) + return 0; + + if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL, + &vcpudispatch_stats_proc_ops)) + pr_err("vcpudispatch_stats: error creating procfs file\n"); + else if (!proc_create("powerpc/vcpudispatch_stats_freq", 0600, NULL, + &vcpudispatch_stats_freq_proc_ops)) + pr_err("vcpudispatch_stats_freq: error creating procfs file\n"); + + return 0; +} + +machine_device_initcall(pseries, vcpudispatch_stats_procfs_init); #endif /* CONFIG_PPC_SPLPAR */ void vpa_init(int cpu) -- cgit From 18a593c8b5426f6a08bcb353636d761a83f78d9e Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Wed, 3 Jul 2019 22:34:01 +0530 Subject: powerpc/pseries: Protect against hogging the cpu while setting up the stats When enabling or disabling the vcpu dispatch statistics, we do a lot of work including allocating/deallocating memory across all possible cpus for the DTL buffer. In order to guard against hogging the cpu for too long, track the time we're taking and yield the processor if necessary. Signed-off-by: Naveen N. Rao Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/lpar.c | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) (limited to 'arch/powerpc/platforms/pseries/lpar.c') diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index ec5a7893f71b..ae7040f59564 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -75,7 +75,7 @@ static u8 dtl_mask = DTL_LOG_PREEMPT; static u8 dtl_mask; #endif -void alloc_dtl_buffers(void) +void alloc_dtl_buffers(unsigned long *time_limit) { int cpu; struct paca_struct *pp; @@ -99,6 +99,11 @@ void alloc_dtl_buffers(void) pp->dispatch_log = dtl; pp->dispatch_log_end = dtl + N_DISPATCH_LOG; pp->dtl_curr = dtl; + + if (time_limit && time_after(jiffies, *time_limit)) { + cond_resched(); + *time_limit = jiffies + HZ; + } } } @@ -168,7 +173,7 @@ static int vcpudispatch_stats_freq = 50; static __be32 *vcpu_associativity, *pcpu_associativity; -static void free_dtl_buffers(void) +static void free_dtl_buffers(unsigned long *time_limit) { #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE int cpu; @@ -183,6 +188,11 @@ static void free_dtl_buffers(void) pp->dispatch_log = 0; pp->dispatch_log_end = 0; pp->dtl_curr = 0; + + if (time_limit && time_after(jiffies, *time_limit)) { + cond_resched(); + *time_limit = jiffies + HZ; + } } #endif } @@ -442,7 +452,7 @@ static void reset_global_dtl_mask(void) lppaca_of(cpu).dtl_enable_mask = dtl_mask; } -static int dtl_worker_enable(void) +static int dtl_worker_enable(unsigned long *time_limit) { int rc = 0, state; @@ -454,13 +464,13 @@ static int dtl_worker_enable(void) set_global_dtl_mask(DTL_LOG_ALL); /* Setup dtl buffers and register those */ - alloc_dtl_buffers(); + alloc_dtl_buffers(time_limit); state = 
cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/dtl:online", dtl_worker_online, dtl_worker_offline); if (state < 0) { pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n"); - free_dtl_buffers(); + free_dtl_buffers(time_limit); reset_global_dtl_mask(); write_unlock(&dtl_access_lock); rc = -EINVAL; @@ -472,10 +482,10 @@ out: return rc; } -static void dtl_worker_disable(void) +static void dtl_worker_disable(unsigned long *time_limit) { cpuhp_remove_state(dtl_worker_state); - free_dtl_buffers(); + free_dtl_buffers(time_limit); reset_global_dtl_mask(); write_unlock(&dtl_access_lock); } @@ -483,6 +493,7 @@ static void dtl_worker_disable(void) static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p, size_t count, loff_t *ppos) { + unsigned long time_limit = jiffies + HZ; struct vcpu_dispatch_data *disp; int rc, cmd, cpu; char buf[16]; @@ -517,13 +528,13 @@ static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p, disp->last_disp_cpu = -1; } - rc = dtl_worker_enable(); + rc = dtl_worker_enable(&time_limit); if (rc) { destroy_cpu_associativity(); goto out; } } else { - dtl_worker_disable(); + dtl_worker_disable(&time_limit); destroy_cpu_associativity(); } -- cgit
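
As a closing illustration, here is a minimal userspace sketch (not part
of the series) that exercises the procfs interface described above. It
assumes a pseries guest running in shared processor mode and root
privileges, since the files are created with mode 0600; error handling
is abbreviated.

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char line[256];
	FILE *f;

	/* Writing '1' enables collection; '0' disables it. */
	f = fopen("/proc/powerpc/vcpudispatch_stats", "w");
	if (!f)
		return 1;
	fputs("1", f);
	fclose(f);

	sleep(10);	/* let the DTL workers accumulate samples */

	/*
	 * Each line reads "cpuN" followed by the eight counters kept in
	 * struct vcpu_dispatch_data: total, same-cpu, same-chip, diff-chip
	 * and far-chip dispatches, then the numa home/remote/far counts.
	 */
	f = fopen("/proc/powerpc/vcpudispatch_stats", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	/* Turn collection back off. */
	f = fopen("/proc/powerpc/vcpudispatch_stats", "w");
	if (f) {
		fputs("0", f);
		fclose(f);
	}

	return 0;
}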