Diffstat (limited to 'arch')
-rw-r--r--   arch/riscv/include/asm/tlbflush.h   16
-rw-r--r--   arch/riscv/kernel/cpu.c              8
-rw-r--r--   arch/riscv/kernel/head.S             4
-rw-r--r--   arch/riscv/kernel/setup.c            6
-rw-r--r--   arch/riscv/kernel/smp.c             24
-rw-r--r--   arch/riscv/kernel/smpboot.c         25
6 files changed, 58 insertions, 25 deletions
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 85c2d8bae957..54fee0cadb1e 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -16,6 +16,7 @@
 #define _ASM_RISCV_TLBFLUSH_H
 
 #include <linux/mm_types.h>
+#include <asm/smp.h>
 
 /*
  * Flush entire local TLB.  'sfence.vma' implicitly fences with the instruction
@@ -49,13 +50,22 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 
 #include <asm/sbi.h>
 
+static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
+				     unsigned long size)
+{
+	struct cpumask hmask;
+
+	cpumask_clear(&hmask);
+	riscv_cpuid_to_hartid_mask(cmask, &hmask);
+	sbi_remote_sfence_vma(hmask.bits, start, size);
+}
+
 #define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
 #define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
 #define flush_tlb_range(vma, start, end) \
-	sbi_remote_sfence_vma(mm_cpumask((vma)->vm_mm)->bits, \
-			      start, (end) - (start))
+	remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
 #define flush_tlb_mm(mm) \
-	sbi_remote_sfence_vma(mm_cpumask(mm)->bits, 0, -1)
+	remote_sfence_vma(mm_cpumask(mm), 0, -1)
 
 #endif /* CONFIG_SMP */
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 4723e235dcaa..cccc6f61c538 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/seq_file.h>
 #include <linux/of.h>
+#include <asm/smp.h>
 
 /*
  * Returns the hart ID of the given device tree node, or -1 if the device tree
@@ -138,11 +139,12 @@ static void c_stop(struct seq_file *m, void *v)
 
 static int c_show(struct seq_file *m, void *v)
 {
-	unsigned long hart_id = (unsigned long)v - 1;
-	struct device_node *node = of_get_cpu_node(hart_id, NULL);
+	unsigned long cpu_id = (unsigned long)v - 1;
+	struct device_node *node = of_get_cpu_node(cpuid_to_hartid_map(cpu_id),
+						   NULL);
 	const char *compat, *isa, *mmu;
 
-	seq_printf(m, "hart\t: %lu\n", hart_id);
+	seq_printf(m, "hart\t: %lu\n", cpu_id);
 	if (!of_property_read_string(node, "riscv,isa", &isa))
 		print_isa(m, isa);
 	if (!of_property_read_string(node, "mmu-type", &mmu))
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index c4d2c63f9a29..711190d473d4 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -47,6 +47,8 @@ ENTRY(_start)
 	/* Save hart ID and DTB physical address */
 	mv s0, a0
 	mv s1, a1
+	la a2, boot_cpu_hartid
+	REG_S a0, (a2)
 
 	/* Initialize page tables and relocate to virtual addresses */
 	la sp, init_thread_union + THREAD_SIZE
@@ -55,7 +57,7 @@
 
 	/* Restore C environment */
 	la tp, init_task
-	sw s0, TASK_TI_CPU(tp)
+	sw zero, TASK_TI_CPU(tp)
 
 	la sp, init_thread_union
 	li a0, ASM_THREAD_SIZE
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index d5d8611066d5..5e9e6f934cc0 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -81,11 +81,17 @@ EXPORT_SYMBOL(empty_zero_page);
 
 /* The lucky hart to first increment this variable will boot the other cores */
 atomic_t hart_lottery;
+unsigned long boot_cpu_hartid;
 
 unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
 	[0 ... NR_CPUS-1] = INVALID_HARTID
 };
 
+void __init smp_setup_processor_id(void)
+{
+	cpuid_to_hartid_map(0) = boot_cpu_hartid;
+}
+
 #ifdef CONFIG_BLK_DEV_INITRD
 static void __init setup_initrd(void)
 {
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 0bd48935f886..4eac0094f47e 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -97,14 +97,18 @@ void riscv_software_interrupt(void)
 static void
 send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
 {
-	int i;
+	int cpuid, hartid;
+	struct cpumask hartid_mask;
 
+	cpumask_clear(&hartid_mask);
 	mb();
-	for_each_cpu(i, to_whom)
-		set_bit(operation, &ipi_data[i].bits);
-
+	for_each_cpu(cpuid, to_whom) {
+		set_bit(operation, &ipi_data[cpuid].bits);
+		hartid = cpuid_to_hartid_map(cpuid);
+		cpumask_set_cpu(hartid, &hartid_mask);
+	}
 	mb();
-	sbi_send_ipi(cpumask_bits(to_whom));
+	sbi_send_ipi(cpumask_bits(&hartid_mask));
 }
 
 void arch_send_call_function_ipi_mask(struct cpumask *mask)
@@ -146,7 +150,7 @@ void smp_send_reschedule(int cpu)
 void flush_icache_mm(struct mm_struct *mm, bool local)
 {
 	unsigned int cpu;
-	cpumask_t others, *mask;
+	cpumask_t others, hmask, *mask;
 
 	preempt_disable();
 
@@ -164,9 +168,11 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 	 */
 	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
 	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local)
-		sbi_remote_fence_i(others.bits);
-	else {
+	if (mm != current->active_mm || !local) {
+		cpumask_clear(&hmask);
+		riscv_cpuid_to_hartid_mask(&others, &hmask);
+		sbi_remote_fence_i(hmask.bits);
+	} else {
 		/*
 		 * It's assumed that at least one strongly ordered operation is
 		 * performed on this hart between setting a hart's cpumask bit
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 1e478615017c..18cda0e8cf94 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -53,17 +53,23 @@ void __init setup_smp(void)
 	struct device_node *dn = NULL;
 	int hart;
 	bool found_boot_cpu = false;
+	int cpuid = 1;
 
 	while ((dn = of_find_node_by_type(dn, "cpu"))) {
 		hart = riscv_of_processor_hartid(dn);
-		if (hart >= 0) {
-			set_cpu_possible(hart, true);
-			set_cpu_present(hart, true);
-			if (hart == smp_processor_id()) {
-				BUG_ON(found_boot_cpu);
-				found_boot_cpu = true;
-			}
+		if (hart < 0)
+			continue;
+
+		if (hart == cpuid_to_hartid_map(0)) {
+			BUG_ON(found_boot_cpu);
+			found_boot_cpu = 1;
+			continue;
 		}
+
+		cpuid_to_hartid_map(cpuid) = hart;
+		set_cpu_possible(cpuid, true);
+		set_cpu_present(cpuid, true);
+		cpuid++;
 	}
 
 	BUG_ON(!found_boot_cpu);
@@ -71,6 +77,7 @@ void __init setup_smp(void)
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
+	int hartid = cpuid_to_hartid_map(cpu);
 	tidle->thread_info.cpu = cpu;
 
 	/*
@@ -81,9 +88,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 	 * the spinning harts that they can continue the boot process.
	 */
 	smp_mb();
-	WRITE_ONCE(__cpu_up_stack_pointer[cpu],
+	WRITE_ONCE(__cpu_up_stack_pointer[hartid],
 		  task_stack_page(tidle) + THREAD_SIZE);
-	WRITE_ONCE(__cpu_up_task_pointer[cpu], tidle);
+	WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
 
 	while (!cpu_online(cpu))
 		cpu_relax();
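
Note: the hunks above call cpuid_to_hartid_map() and riscv_cpuid_to_hartid_mask(), whose definitions are not part of the files shown in this diffstat. The following is a minimal sketch only, not part of the patch, showing one conventional way such helpers could be built on top of the __cpuid_to_hartid_map[] table declared in setup.c above.

/*
 * Sketch only -- not taken from the patch above.  Assumes the
 * __cpuid_to_hartid_map[] table declared in arch/riscv/kernel/setup.c.
 */
#include <linux/cpumask.h>

extern unsigned long __cpuid_to_hartid_map[NR_CPUS];

/* Logical CPU id -> hart id; writable so setup/smpboot code can fill it. */
#define cpuid_to_hartid_map(cpu)	__cpuid_to_hartid_map[(cpu)]

/* Translate a mask of logical CPU ids into a mask of hart ids for the SBI. */
static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
					      struct cpumask *out)
{
	int cpu;

	for_each_cpu(cpu, in)
		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}

The underlying design choice in the patch is to keep Linux bookkeeping (cpumasks, per-CPU data, __cpu_up) keyed by dense logical CPU ids and translate to hart ids only at the SBI boundary and in the early boot path, so sparse or non-zero-based hart ids no longer dictate cpumask layout.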
