From 214f33fcf656bf1be3f9f03d58fda067cdf7eecc Mon Sep 17 00:00:00 2001 From: Gautam Menghani Date: Tue, 14 May 2024 18:54:55 +0530 Subject: powerpc/pseries: Remove unused cede related functions Remove extended_cede_processor() and its helpers as extended_cede_processor() has no callers since commit 48f6e7f6d948("powerpc/pseries: remove cede offline state for CPUs") Signed-off-by: Gautam Menghani Acked-by: Naveen N Rao Signed-off-by: Michael Ellerman Link: https://msgid.link/20240514132457.292865-1-gautam@linux.ibm.com --- arch/powerpc/include/asm/plpar_wrappers.h | 28 ---------------------------- 1 file changed, 28 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h index b3ee44a40c2f..71648c126970 100644 --- a/arch/powerpc/include/asm/plpar_wrappers.h +++ b/arch/powerpc/include/asm/plpar_wrappers.h @@ -18,16 +18,6 @@ static inline long poll_pending(void) return plpar_hcall_norets(H_POLL_PENDING); } -static inline u8 get_cede_latency_hint(void) -{ - return get_lppaca()->cede_latency_hint; -} - -static inline void set_cede_latency_hint(u8 latency_hint) -{ - get_lppaca()->cede_latency_hint = latency_hint; -} - static inline long cede_processor(void) { /* @@ -37,24 +27,6 @@ static inline long cede_processor(void) return plpar_hcall_norets_notrace(H_CEDE); } -static inline long extended_cede_processor(unsigned long latency_hint) -{ - long rc; - u8 old_latency_hint = get_cede_latency_hint(); - - set_cede_latency_hint(latency_hint); - - rc = cede_processor(); - - /* Ensure that H_CEDE returns with IRQs on */ - if (WARN_ON(IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && !(mfmsr() & MSR_EE))) - __hard_irq_enable(); - - set_cede_latency_hint(old_latency_hint); - - return rc; -} - static inline long vpa_call(unsigned long flags, unsigned long cpu, unsigned long vpa) { -- cgit From e1f288d2f9c69bb8965db9fb99a19b58231a00dd Mon Sep 17 00:00:00 2001 From: Gautam Menghani Date: Mon, 20 May 2024 23:27:40 +0530 Subject: KVM: PPC: Book3S HV nestedv2: Add support for reading VPA counters for pseries guests PAPR hypervisor has introduced three new counters in the VPA area of LPAR CPUs for KVM L2 guest (see [1] for terminology) observability - two for context switches from host to guest and vice versa, and one counter for getting the total time spent inside the KVM guest. Add a tracepoint that enables reading the counters for use by ftrace/perf. Note that this tracepoint is only available for nestedv2 API (i.e, KVM on PowerVM). [1] Terminology: a. L1 refers to the VM (LPAR) booted on top of PAPR hypervisor b. L2 refers to the KVM guest booted on top of L1. 
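As a minimal illustration (not part of the patch), this is how a consumer on the L1 side might read one of the new counters, assuming the lppaca field names added in the diff below; converting raw timebase ticks to microseconds is an assumed presentation choice:

  /* Illustrative sketch: total time this CPU's vCPU spent inside the L2
   * guest, in microseconds. l2_runtime_tb comes from the lppaca layout
   * below; tb_ticks_per_usec is the usual powerpc timebase scale factor. */
  static u64 l2_runtime_us(void)
  {
          u64 tb = be64_to_cpu(get_lppaca()->l2_runtime_tb);

          return tb / tb_ticks_per_usec;
  }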
Reviewed-by: Nicholas Piggin Acked-by: Naveen N Rao Signed-off-by: Vaibhav Jain Signed-off-by: Gautam Menghani Signed-off-by: Michael Ellerman Link: https://msgid.link/20240520175742.196329-1-gautam@linux.ibm.com --- arch/powerpc/include/asm/kvm_book3s_64.h | 5 +++++ arch/powerpc/include/asm/lppaca.h | 11 ++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index d8729ec81ca0..2ef9a5f4e5d1 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h @@ -684,6 +684,11 @@ int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1); int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu); int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa); +int kmvhv_counters_tracepoint_regfunc(void); +void kmvhv_counters_tracepoint_unregfunc(void); +int kvmhv_get_l2_counters_status(void); +void kvmhv_set_l2_counters_status(int cpu, bool status); + #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ #endif /* __ASM_KVM_BOOK3S_64_H__ */ diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 61ec2447dabf..f40a646bee3c 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h @@ -62,7 +62,8 @@ struct lppaca { u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */ u8 fpregs_in_use; u8 pmcregs_in_use; - u8 reserved8[28]; + u8 l2_counters_enable; /* Enable usage of counters for KVM guest */ + u8 reserved8[27]; __be64 wait_state_cycles; /* Wait cycles for this proc */ u8 reserved9[28]; __be16 slb_count; /* # of SLBs to maintain */ @@ -92,9 +93,13 @@ struct lppaca { /* cacheline 4-5 */ __be32 page_ins; /* CMO Hint - # page ins by OS */ - u8 reserved12[148]; + u8 reserved12[28]; + volatile __be64 l1_to_l2_cs_tb; + volatile __be64 l2_to_l1_cs_tb; + volatile __be64 l2_runtime_tb; + u8 reserved13[96]; volatile __be64 dtl_idx; /* Dispatch Trace Log head index */ - u8 reserved13[96]; + u8 reserved14[96]; } ____cacheline_aligned; #define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr) -- cgit From ff5163bb7000a0254ffdd7b50cb6df43add94f33 Mon Sep 17 00:00:00 2001 From: Gaurav Batra Date: Mon, 13 May 2024 20:46:08 -0500 Subject: powerpc/pseries/iommu: Split Dynamic DMA Window to be used in Hybrid mode Dynamic DMA Window (DDW) supports TCEs that are backed by 2MB page size. In most configurations, DDW is big enough to pre-map all of LPAR memory for IO. Pre-mapping of memory for DMA results in improvements in IO performance. Persistent memory, vPMEM, can be assigned to an LPAR as well. vPMEM is not contiguous with LPAR memory and usually is assigned at high memory addresses. This makes is not possible to pre-map both vPMEM and LPAR memory in the same DDW. For a dedicated adapter this limitation is not an issue. Dedicated adapters can have both Default DMA window, which is backed by 4K page size and a DDW backed by 2MB page size TCEs. In this scenario, LPAR memory is pre-mapped in the DDW. Any DMA going to the vPMEM is routed via dynamically allocated TCEs in the default window. The issue arises with SR-IOV adapters. There is only one DMA window - either Default or DDW. If an LPAR has vPMEM assigned, memory is not pre-mapped in the DDW since TCEs needs to be allocated for vPMEM as well. In this case, DDW is created and TCEs are dynamically allocated for both vPMEM and LPAR memory. Today, DDW is only used in single mode - direct mapped TCEs or dynamically mapped TCEs. 
This enhancement breaks a single DDW in 2 regions - 1. First region to pre-map LPAR memory 2. Second region to dynamically allocate TCEs for IO to vPMEM The DDW is split only if it is big enough to pre-map complete LPAR memory and still have some space left to dynamically map vPMEM. Maximum size possible DDW is created as permitted by the Hypervisor. Signed-off-by: Gaurav Batra Reviewed-by: Brian King Signed-off-by: Michael Ellerman Link: https://msgid.link/20240514014608.35537-1-gbatra@linux.ibm.com --- arch/powerpc/include/asm/iommu.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 026695943550..bb252a15cd4c 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -31,6 +31,8 @@ #define DIRECT64_PROPNAME "linux,direct64-ddr-window-info" #define DMA64_PROPNAME "linux,dma64-ddr-window-info" +#define MIN_DDW_VPMEM_DMA_WINDOW SZ_2G + /* Boot time flags */ extern int iommu_is_off; extern int iommu_force_on; -- cgit From 55dfb8bed6fe8bda390cc71cca878d11a9407099 Mon Sep 17 00:00:00 2001 From: Gautam Menghani Date: Wed, 5 Jun 2024 17:09:09 +0530 Subject: KVM: PPC: Book3S HV nestedv2: Add DPDES support in helper library for Guest state buffer Add support for using DPDES in the library for using guest state buffers. DPDES support is needed for enabling usage of doorbells in a L2 KVM on PAPR guest. Fixes: 6ccbbc33f06a ("KVM: PPC: Add helper library for Guest State Buffers") Cc: stable@vger.kernel.org # v6.7+ Signed-off-by: Gautam Menghani Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://msgid.link/20240605113913.83715-2-gautam@linux.ibm.com --- arch/powerpc/include/asm/guest-state-buffer.h | 3 ++- arch/powerpc/include/asm/kvm_book3s.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/guest-state-buffer.h b/arch/powerpc/include/asm/guest-state-buffer.h index 808149f31576..d107abe1468f 100644 --- a/arch/powerpc/include/asm/guest-state-buffer.h +++ b/arch/powerpc/include/asm/guest-state-buffer.h @@ -81,6 +81,7 @@ #define KVMPPC_GSID_HASHKEYR 0x1050 #define KVMPPC_GSID_HASHPKEYR 0x1051 #define KVMPPC_GSID_CTRL 0x1052 +#define KVMPPC_GSID_DPDES 0x1053 #define KVMPPC_GSID_CR 0x2000 #define KVMPPC_GSID_PIDR 0x2001 @@ -110,7 +111,7 @@ #define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1) #define KVMPPC_GSE_DW_REGS_START KVMPPC_GSID_GPR(0) -#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_CTRL +#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_DPDES #define KVMPPC_GSE_DW_REGS_COUNT \ (KVMPPC_GSE_DW_REGS_END - KVMPPC_GSE_DW_REGS_START + 1) diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 3e1e2a698c9e..10618622d7ef 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -594,6 +594,7 @@ static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \ KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB) +KVMPPC_BOOK3S_VCORE_ACCESSOR(dpdes, 64, KVMPPC_GSID_DPDES) KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR) KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR) KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(tb_offset, 64, KVMPPC_GSID_TB_OFFSET) -- cgit From 1a1e6865f516696adcf6e94f286c7a0f84d78df3 Mon Sep 17 00:00:00 2001 From: Shivaprasad G Bhat Date: Wed, 5 Jun 2024 13:06:42 +0000 Subject: KVM: PPC: Book3S HV: Add one-reg interface for DEXCR 
register The patch adds a one-reg register identifier which can be used to read and set the DEXCR for the guest during enter/exit with KVM_REG_PPC_DEXCR. The specific SPR KVM API documentation too updated. Signed-off-by: Shivaprasad G Bhat Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://msgid.link/171759279613.1480.12873911783530175699.stgit@linux.ibm.com --- arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/include/uapi/asm/kvm.h | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 8abac532146e..1e2fdcbecffd 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -599,6 +599,7 @@ struct kvm_vcpu_arch { ulong dawrx0; ulong dawr1; ulong dawrx1; + ulong dexcr; ulong ciabr; ulong cfar; ulong ppr; diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 1691297a766a..fcb947f65667 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -645,6 +645,7 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3) #define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4) #define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5) +#define KVM_REG_PPC_DEXCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc6) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs -- cgit From e9eb790b25577a15d3f450ed585c59048e4e6c44 Mon Sep 17 00:00:00 2001 From: Shivaprasad G Bhat Date: Wed, 5 Jun 2024 13:07:15 +0000 Subject: KVM: PPC: Book3S HV: Add one-reg interface for HASHKEYR register The patch adds a one-reg register identifier which can be used to read and set the virtual HASHKEYR for the guest during enter/exit with KVM_REG_PPC_HASHKEYR. The specific SPR KVM API documentation too updated. 
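As a hedged illustration of how userspace (for example a VMM) could exercise the new identifier through the standard one-reg ioctls; vcpu_fd is assumed to be an open vCPU file descriptor, and the usual <linux/kvm.h> and <sys/ioctl.h> includes are taken as given:

  /* Sketch: read the guest HASHKEYR via the one-reg interface. */
  __u64 hashkeyr = 0;
  struct kvm_one_reg reg = {
          .id   = KVM_REG_PPC_HASHKEYR,
          .addr = (__u64)(uintptr_t)&hashkeyr,
  };

  if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
          perror("KVM_GET_ONE_REG");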
Signed-off-by: Shivaprasad G Bhat Reviewed-by: Nicholas Piggin Signed-off-by: Michael Ellerman Link: https://msgid.link/171759283170.1480.12904332463112769129.stgit@linux.ibm.com --- arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/include/uapi/asm/kvm.h | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 1e2fdcbecffd..a0cd9dbf534f 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -600,6 +600,7 @@ struct kvm_vcpu_arch { ulong dawr1; ulong dawrx1; ulong dexcr; + ulong hashkeyr; ulong ciabr; ulong cfar; ulong ppr; diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index fcb947f65667..23a0af739c78 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -646,6 +646,7 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4) #define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5) #define KVM_REG_PPC_DEXCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc6) +#define KVM_REG_PPC_HASHKEYR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc7) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs -- cgit From 9a0d2f4995ddde3022c54e43f9ece4f71f76f6e8 Mon Sep 17 00:00:00 2001 From: Shivaprasad G Bhat Date: Wed, 5 Jun 2024 13:07:39 +0000 Subject: KVM: PPC: Book3S HV: Add one-reg interface for HASHPKEYR register The patch adds a one-reg register identifier which can be used to read and set the virtual HASHPKEYR for the guest during enter/exit with KVM_REG_PPC_HASHPKEYR. The specific SPR KVM API documentation too updated. Signed-off-by: Shivaprasad G Bhat Signed-off-by: Michael Ellerman Link: https://msgid.link/171759285547.1480.12374595786792346073.stgit@linux.ibm.com --- arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/include/uapi/asm/kvm.h | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index a0cd9dbf534f..6a0c771d3ce8 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -601,6 +601,7 @@ struct kvm_vcpu_arch { ulong dawrx1; ulong dexcr; ulong hashkeyr; + ulong hashpkeyr; ulong ciabr; ulong cfar; ulong ppr; diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 23a0af739c78..eaeda001784e 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -647,6 +647,7 @@ struct kvm_ppc_cpu_char { #define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5) #define KVM_REG_PPC_DEXCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc6) #define KVM_REG_PPC_HASHKEYR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc7) +#define KVM_REG_PPC_HASHPKEYR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc8) /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs -- cgit From 0300a92e96cb393a1891d3b4a0f00b28dde8643b Mon Sep 17 00:00:00 2001 From: Anjali K Date: Tue, 28 May 2024 09:33:56 +0530 Subject: powerpc/perf: Set cpumode flags using sample address Currently in some cases, when the sampled instruction address register latches to a specific address during sampling, the privilege bits captured in the sampled event register are incorrect. For example, a snippet from the perf report on a power10 system is: Overhead Address Command Shared Object Symbol ........ .................. 
............ ................. ....................... 2.41% 0x7fff9f94a02c null_syscall [unknown] [k] 0x00007fff9f94a02c 2.20% 0x7fff9f94a02c null_syscall libc.so.6 [.] syscall perf_get_misc_flags() function looks at the privilege bits to return the corresponding flags to be used for the address symbol and these privilege bit details are read from the sampled event register. In the above snippet, address "0x00007fff9f94a02c" is shown as "k" (kernel) due to the incorrect privilege bits captured in the sampled event register. To address this case check whether the sampled address is in the kernel area. Since this is specific to the latest platform, a new pmu flag is added called "PPMU_P10" and is used to contain the proposed fix. PPMU_P10_DD1 marked events are also included under PPMU_P10, hence remove the code specific to PPMU_P10_DD1 marked events. Signed-off-by: Anjali K Reviewed-by: Athira Rajeev > Signed-off-by: Michael Ellerman Link: https://msgid.link/20240528040356.2722275-1-anjalik@linux.ibm.com --- arch/powerpc/include/asm/perf_event_server.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index e2221d29fdf9..5995614e9062 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h @@ -89,7 +89,8 @@ struct power_pmu { #define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */ #define PPMU_ARCH_31 0x00000200 /* Has MMCR3, SIER2 and SIER3 */ #define PPMU_P10_DD1 0x00000400 /* Is power10 DD1 processor version */ -#define PPMU_HAS_ATTR_CONFIG1 0x00000800 /* Using config1 attribute */ +#define PPMU_P10 0x00000800 /* For power10 pmu */ +#define PPMU_HAS_ATTR_CONFIG1 0x00001000 /* Using config1 attribute */ /* * Values for flags to get_alternatives() -- cgit From 0d3ff067331ef84e7e7f49537d768881042ed5ba Mon Sep 17 00:00:00 2001 From: Sourabh Jain Date: Fri, 10 May 2024 15:52:34 +0530 Subject: powerpc/kexec_file: fix extra size calculation for kexec FDT While setting up the FDT for kexec, CPU nodes that are added after the system boots and reserved memory ranges are incorporated into the initial_boot_params (base FDT). However, they are not taken into account when determining the additional size needed for the kexec FDT. As a result, kexec fails to load, generating the following error: [1116.774451] Error updating memory reserve map: FDT_ERR_NOSPACE kexec_file_load failed: No such process Therefore, consider the extra size for CPU nodes added post-system boot and reserved memory ranges while preparing the kexec FDT. While adding a new parameter to the setup_new_fdt_ppc64 function, it was noticed that there were a couple of unused parameters, so they were removed. 
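For illustration only, the kind of accounting the fix implies, assuming the crash_mem range list carried by the new rmem parameter shown in the diff below; base_size and the per-range overhead are simplifications, not the kernel's exact arithmetic:

  /* Sketch: budget one FDT reserve-map entry per reserved range on top of
   * the base estimate, so later fdt_add_mem_rsv() calls cannot run out of
   * space when the kexec FDT is built. */
  unsigned int extra_size = base_size;
  unsigned int i;

  for (i = 0; i < rmem->nr_ranges; i++)
          extra_size += sizeof(struct fdt_reserve_entry);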
Signed-off-by: Sourabh Jain Signed-off-by: Michael Ellerman Link: https://msgid.link/20240510102235.2269496-2-sourabhjain@linux.ibm.com --- arch/powerpc/include/asm/kexec.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 95a98b390d62..270ee93a0f7d 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h @@ -103,10 +103,8 @@ int load_crashdump_segments_ppc64(struct kimage *image, int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, const void *fdt, unsigned long kernel_load_addr, unsigned long fdt_load_addr); -unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image); -int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, - unsigned long initrd_load_addr, - unsigned long initrd_len, const char *cmdline); +unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image, struct crash_mem *rmem); +int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem *rmem); #endif /* CONFIG_PPC64 */ #endif /* CONFIG_KEXEC_FILE */ -- cgit From b09c031d9433dda3186190e5845ba0d720212567 Mon Sep 17 00:00:00 2001 From: Shivaprasad G Bhat Date: Mon, 24 Jun 2024 12:38:21 +0000 Subject: powerpc/iommu: Move pSeries specific functions to pseries/iommu.c The PowerNV specific table_group_ops are defined in powernv/pci-ioda.c. The pSeries specific table_group_ops are sitting in the generic powerpc file. Move it to where it actually belong(pseries/iommu.c). The functions are currently defined even for CONFIG_PPC_POWERNV which are unused on PowerNV. Only code movement, no functional changes intended. Signed-off-by: Shivaprasad G Bhat Signed-off-by: Michael Ellerman Link: https://msgid.link/171923269701.1397.15758640002786937132.stgit@linux.ibm.com --- arch/powerpc/include/asm/iommu.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index bb252a15cd4c..f49157effcee 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -158,6 +158,9 @@ extern int iommu_tce_table_put(struct iommu_table *tbl); extern struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, unsigned long res_start, unsigned long res_end); bool iommu_table_in_use(struct iommu_table *tbl); +extern void iommu_table_reserve_pages(struct iommu_table *tbl, + unsigned long res_start, unsigned long res_end); +extern void iommu_table_clear(struct iommu_table *tbl); #define IOMMU_TABLE_GROUP_MAX_TABLES 2 @@ -220,7 +223,6 @@ extern long iommu_tce_xchg_no_kill(struct mm_struct *mm, extern void iommu_tce_kill(struct iommu_table *tbl, unsigned long entry, unsigned long pages); -extern struct iommu_table_group_ops spapr_tce_table_group_ops; #else static inline void iommu_register_group(struct iommu_table_group *table_group, int pci_domain_number, -- cgit From 35146eadcb81d72153a1621f3cc0d5588cae19d3 Mon Sep 17 00:00:00 2001 From: Shivaprasad G Bhat Date: Mon, 24 Jun 2024 12:39:10 +0000 Subject: powerpc/iommu: Move dev_has_iommu_table() to iommu.c Move function dev_has_iommu_table() to powerpc/kernel/iommu.c as it is going to be used by machine specific iommu code as well in subsequent patches. 
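As a hedged usage sketch (pdev and the debug message are illustrative, not from this patch), the (struct device *, void *) prototype in the diff below matches the device_for_each_child() callback signature, which is what lets machine-specific code reuse the helper:

  /* Sketch: walk the children of a PCI device and stop at the first one
   * that already has an iommu table; device_for_each_child() propagates
   * the callback's non-zero return value. */
  struct pci_dev *cdev = NULL;

  if (device_for_each_child(&pdev->dev, &cdev, dev_has_iommu_table))
          pr_debug("%s: a child device already has an iommu table\n",
                   pci_name(pdev));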
Signed-off-by: Shivaprasad G Bhat Signed-off-by: Michael Ellerman Link: https://msgid.link/171923274748.1397.6274953248403106679.stgit@linux.ibm.com --- arch/powerpc/include/asm/iommu.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index f49157effcee..6dd5c25c7503 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -222,6 +222,7 @@ extern long iommu_tce_xchg_no_kill(struct mm_struct *mm, enum dma_data_direction *direction); extern void iommu_tce_kill(struct iommu_table *tbl, unsigned long entry, unsigned long pages); +int dev_has_iommu_table(struct device *dev, void *data); #else static inline void iommu_register_group(struct iommu_table_group *table_group, @@ -235,6 +236,11 @@ static inline int iommu_add_device(struct iommu_table_group *table_group, { return 0; } + +static inline int dev_has_iommu_table(struct device *dev, void *data) +{ + return 0; +} #endif /* !CONFIG_IOMMU_API */ u64 dma_iommu_get_required_mask(struct device *dev); -- cgit From f431a8cde7f102fce412546db6e62fdbde1131a7 Mon Sep 17 00:00:00 2001 From: Shivaprasad G Bhat Date: Mon, 24 Jun 2024 12:39:23 +0000 Subject: powerpc/iommu: Reimplement the iommu_table_group_ops for pSeries PPC64 IOMMU API defines iommu_table_group_ops which handles DMA windows for PEs, their ownership transfer, create/set/unset the TCE tables for the Dynamic DMA wundows(DDW). VFIOS uses these APIs for support on POWER. The commit 9d67c9433509 ("powerpc/iommu: Add "borrowing" iommu_table_group_ops") implemented partial support for this API with "borrow" mechanism wherein the DMA windows if created already by the host driver, they would be available for VFIO to use. Also, it didn't have the support to control/modify the window size or the IO page size. The current patch implements all the necessary iommu_table_group_ops APIs there by avoiding the "borrrowing". So, just the way it is on the PowerNV platform, with this patch the iommu table group ownership is transferred to the VFIO PPC subdriver, the iommu table, DMA windows creation/deletion all driven through the APIs. The pSeries uses the query-pe-dma-window, create-pe-dma-window and reset-pe-dma-window RTAS calls for DMA window creation, deletion and reset to defaul. The RTAs calls do show some minor differences to the way things are to be handled on the pSeries which are listed below. * On pSeries, the default DMA window size is "fixed" cannot be custom sized as requested by the user. For non-SRIOV VFs, It is fixed at 2GB and for SRIOV VFs, its variable sized based on the capacity assigned to it during the VF assignment to the LPAR. So, for the default DMA window alone the size if requested less than tce32_size, the smaller size is enforced using the iommu table->it_size. * The DMA start address for 32-bit window is 0, and for the 64-bit window in case of PowerNV is hardcoded to TVE select (bit 59) at 512PiB offset. This address is returned at the time of create_table() API call (even before the window is created), the subsequent set_window() call actually opens the DMA window. On pSeries, the DMA start address for 32-bit window is known from the 'ibm,dma-window' DT property. However, the 64-bit window start address is not known until the create-pe-dma RTAS call is made. 
So, the create_table() which returns the DMA window start address actually opens the DMA window and returns the DMA start address as returned by the Hypervisor for the create-pe-dma RTAS call. * The reset-pe-dma RTAS call resets the DMA windows and restores the default DMA window, however it does not clear the TCE table entries if there are any. In case of ownership transfer from platform domain which used direct mapping, the patch chooses remove-pe-dma instead of reset-pe for the 64-bit window intentionally so that the clear_dma_window() is called. Other than the DMA window management changes mentioned above, the patch also brings back the userspace view for the single level TCE as it existed before commit 090bad39b237a ("powerpc/powernv: Add indirect levels to it_userspace") along with the relavent refactoring. Signed-off-by: Shivaprasad G Bhat Signed-off-by: Michael Ellerman Link: https://msgid.link/171923275958.1397.907964437142542242.stgit@linux.ibm.com --- arch/powerpc/include/asm/iommu.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h index 6dd5c25c7503..04072b5f8962 100644 --- a/arch/powerpc/include/asm/iommu.h +++ b/arch/powerpc/include/asm/iommu.h @@ -183,9 +183,9 @@ struct iommu_table_group_ops { long (*unset_window)(struct iommu_table_group *table_group, int num); /* Switch ownership from platform code to external user (e.g. VFIO) */ - long (*take_ownership)(struct iommu_table_group *table_group); + long (*take_ownership)(struct iommu_table_group *table_group, struct device *dev); /* Switch ownership from external user (e.g. VFIO) back to core */ - void (*release_ownership)(struct iommu_table_group *table_group); + void (*release_ownership)(struct iommu_table_group *table_group, struct device *dev); }; struct iommu_table_group_link { -- cgit From 732b32daef80567a7ef5be3d87ae79b6bfd9d82d Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 28 Jun 2024 22:11:58 +1000 Subject: powerpc: Remove core support for 40x Now that 40x platforms have gone, remove support for 40x in the core of powerpc arch. 
Signed-off-by: Christophe Leroy Signed-off-by: Michael Ellerman Link: https://msgid.link/20240628121201.130802-4-mpe@ellerman.id.au --- arch/powerpc/include/asm/cputable.h | 7 -- arch/powerpc/include/asm/mmu.h | 7 -- arch/powerpc/include/asm/nohash/32/mmu-40x.h | 68 ---------------- arch/powerpc/include/asm/nohash/32/pgtable.h | 4 +- arch/powerpc/include/asm/nohash/32/pte-40x.h | 73 ----------------- arch/powerpc/include/asm/nohash/mmu.h | 5 +- arch/powerpc/include/asm/reg.h | 25 +----- arch/powerpc/include/asm/reg_booke.h | 113 +-------------------------- arch/powerpc/include/asm/time.h | 7 +- arch/powerpc/include/asm/udbg.h | 1 - 10 files changed, 6 insertions(+), 304 deletions(-) delete mode 100644 arch/powerpc/include/asm/nohash/32/mmu-40x.h delete mode 100644 arch/powerpc/include/asm/nohash/32/pte-40x.h (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 07a204d21034..201218faed61 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h @@ -353,7 +353,6 @@ static inline void cpu_feature_keys_init(void) { } CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE | CPU_FTR_NOEXECUTE) #define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON) #define CPU_FTRS_8XX (CPU_FTR_NOEXECUTE) -#define CPU_FTRS_40X (CPU_FTR_NOEXECUTE) #define CPU_FTRS_44X (CPU_FTR_NOEXECUTE) #define CPU_FTRS_440x6 (CPU_FTR_NOEXECUTE | \ CPU_FTR_INDEXED_DCR) @@ -507,9 +506,6 @@ enum { #ifdef CONFIG_PPC_8xx CPU_FTRS_8XX | #endif -#ifdef CONFIG_40x - CPU_FTRS_40X | -#endif #ifdef CONFIG_PPC_47x CPU_FTRS_47X | CPU_FTR_476_DD2 | #elif defined(CONFIG_44x) @@ -582,9 +578,6 @@ enum { #ifdef CONFIG_PPC_8xx CPU_FTRS_8XX & #endif -#ifdef CONFIG_40x - CPU_FTRS_40X & -#endif #ifdef CONFIG_PPC_47x CPU_FTRS_47X & #elif defined(CONFIG_44x) diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 8a27b046c6a2..009c45044a5a 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -16,7 +16,6 @@ */ #define MMU_FTR_HPTE_TABLE ASM_CONST(0x00000001) #define MMU_FTR_TYPE_8xx ASM_CONST(0x00000002) -#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004) #define MMU_FTR_TYPE_44x ASM_CONST(0x00000008) #define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010) #define MMU_FTR_TYPE_47x ASM_CONST(0x00000020) @@ -153,9 +152,6 @@ enum { #ifdef CONFIG_PPC_8xx MMU_FTR_TYPE_8xx | #endif -#ifdef CONFIG_40x - MMU_FTR_TYPE_40x | -#endif #ifdef CONFIG_PPC_47x MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL | #elif defined(CONFIG_44x) @@ -202,9 +198,6 @@ enum { #ifdef CONFIG_PPC_8xx #define MMU_FTRS_ALWAYS MMU_FTR_TYPE_8xx #endif -#ifdef CONFIG_40x -#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_40x -#endif #ifdef CONFIG_PPC_47x #define MMU_FTRS_ALWAYS MMU_FTR_TYPE_47x #elif defined(CONFIG_44x) diff --git a/arch/powerpc/include/asm/nohash/32/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h deleted file mode 100644 index 8a8f13a22cf4..000000000000 --- a/arch/powerpc/include/asm/nohash/32/mmu-40x.h +++ /dev/null @@ -1,68 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_MMU_40X_H_ -#define _ASM_POWERPC_MMU_40X_H_ - -/* - * PPC40x support - */ - -#define PPC40X_TLB_SIZE 64 - -/* - * TLB entries are defined by a "high" tag portion and a "low" data - * portion. On all architectures, the data portion is 32-bits. - * - * TLB entries are managed entirely under software control by reading, - * writing, and searchoing using the 4xx-specific tlbre, tlbwr, and tlbsx - * instructions. 
- */ - -#define TLB_LO 1 -#define TLB_HI 0 - -#define TLB_DATA TLB_LO -#define TLB_TAG TLB_HI - -/* Tag portion */ - -#define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */ -#define TLB_PAGESZ_MASK 0x00000380 -#define TLB_PAGESZ(x) (((x) & 0x7) << 7) -#define PAGESZ_1K 0 -#define PAGESZ_4K 1 -#define PAGESZ_16K 2 -#define PAGESZ_64K 3 -#define PAGESZ_256K 4 -#define PAGESZ_1M 5 -#define PAGESZ_4M 6 -#define PAGESZ_16M 7 -#define TLB_VALID 0x00000040 /* Entry is valid */ - -/* Data portion */ - -#define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */ -#define TLB_PERM_MASK 0x00000300 -#define TLB_EX 0x00000200 /* Instruction execution allowed */ -#define TLB_WR 0x00000100 /* Writes permitted */ -#define TLB_ZSEL_MASK 0x000000F0 -#define TLB_ZSEL(x) (((x) & 0xF) << 4) -#define TLB_ATTR_MASK 0x0000000F -#define TLB_W 0x00000008 /* Caching is write-through */ -#define TLB_I 0x00000004 /* Caching is inhibited */ -#define TLB_M 0x00000002 /* Memory is coherent */ -#define TLB_G 0x00000001 /* Memory is guarded from prefetch */ - -#ifndef __ASSEMBLY__ - -typedef struct { - unsigned int id; - unsigned int active; - void __user *vdso; -} mm_context_t; - -#endif /* !__ASSEMBLY__ */ - -#define mmu_virtual_psize MMU_PAGE_4K -#define mmu_linear_psize MMU_PAGE_256M - -#endif /* _ASM_POWERPC_MMU_40X_H_ */ diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index 9164a9e41b02..9508399dd036 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -118,9 +118,7 @@ * (hardware-defined) PowerPC PTE as closely as possible. */ -#if defined(CONFIG_40x) -#include -#elif defined(CONFIG_44x) +#if defined(CONFIG_44x) #include #elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT) #include diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h deleted file mode 100644 index d759cfd74754..000000000000 --- a/arch/powerpc/include/asm/nohash/32/pte-40x.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H -#define _ASM_POWERPC_NOHASH_32_PTE_40x_H -#ifdef __KERNEL__ - -/* - * At present, all PowerPC 400-class processors share a similar TLB - * architecture. The instruction and data sides share a unified, - * 64-entry, fully-associative TLB which is maintained totally under - * software control. In addition, the instruction side has a - * hardware-managed, 4-entry, fully-associative TLB which serves as a - * first level to the shared TLB. These two TLBs are known as the UTLB - * and ITLB, respectively (see "mmu.h" for definitions). - * - * There are several potential gotchas here. The 40x hardware TLBLO - * field looks like this: - * - * 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31 - * RPN..................... 0 0 EX WR ZSEL....... W I M G - * - * Where possible we make the Linux PTE bits match up with this - * - * - bits 20 and 21 must be cleared, because we use 4k pages (40x can - * support down to 1k pages), this is done in the TLBMiss exception - * handler. - * - We use only zones 0 (for kernel pages) and 1 (for user pages) - * of the 16 available. Bit 24-26 of the TLB are cleared in the TLB - * miss handler. Bit 27 is PAGE_USER, thus selecting the correct - * zone. - * - PRESENT *must* be in the bottom two bits because swap PTEs - * use the top 30 bits. Because 40x doesn't support SMP anyway, M is - * irrelevant so we borrow it for PAGE_PRESENT. 
Bit 30 - * is cleared in the TLB miss handler before the TLB entry is loaded. - * - All other bits of the PTE are loaded into TLBLO without - * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for - * software PTE bits. We actually use bits 21, 24, 25, and - * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and - * PRESENT. - */ - -#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ -#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */ -#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ -#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ -#define _PAGE_READ 0x010 /* software: read permission */ -#define _PAGE_SPECIAL 0x020 /* software: Special page */ -#define _PAGE_DIRTY 0x080 /* software: dirty page */ -#define _PAGE_WRITE 0x100 /* hardware: WR, anded with dirty in exception */ -#define _PAGE_EXEC 0x200 /* hardware: EX permission */ -#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */ - -/* No page size encoding in the linux PTE */ -#define _PAGE_PSIZE 0 - -/* cache related flags non existing on 40x */ -#define _PAGE_COHERENT 0 - -#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */ -#define _PMD_PRESENT_MASK _PMD_PRESENT -#define _PMD_BAD 0x802 -#define _PMD_SIZE_4M 0x0c0 -#define _PMD_SIZE_16M 0x0e0 -#define _PMD_USER 0 - -#define _PTE_NONE_MASK 0 - -#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) -#define _PAGE_BASE (_PAGE_BASE_NC) - -#include - -#endif /* __KERNEL__ */ -#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */ diff --git a/arch/powerpc/include/asm/nohash/mmu.h b/arch/powerpc/include/asm/nohash/mmu.h index e264be219fdb..4cc795044103 100644 --- a/arch/powerpc/include/asm/nohash/mmu.h +++ b/arch/powerpc/include/asm/nohash/mmu.h @@ -2,10 +2,7 @@ #ifndef _ASM_POWERPC_NOHASH_MMU_H_ #define _ASM_POWERPC_NOHASH_MMU_H_ -#if defined(CONFIG_40x) -/* 40x-style software loaded TLB */ -#include -#elif defined(CONFIG_44x) +#if defined(CONFIG_44x) /* 44x-style software loaded TLB */ #include #elif defined(CONFIG_PPC_E500) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index eed33cb916d0..76c1490f5c97 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -233,14 +233,10 @@ /* Special Purpose Registers (SPRNs)*/ -#ifdef CONFIG_40x -#define SPRN_PID 0x3B1 /* Process ID */ -#else #define SPRN_PID 0x030 /* Process ID */ #ifdef CONFIG_BOOKE #define SPRN_PID0 SPRN_PID/* Process ID Register 0 */ #endif -#endif #define SPRN_CTR 0x009 /* Count Register */ #define SPRN_DSCR 0x11 @@ -527,7 +523,7 @@ #define SPRN_TSCR 0x399 /* Thread Switch Control Register */ #define SPRN_DEC 0x016 /* Decrement Register */ -#define SPRN_PIT 0x3DB /* Programmable Interval Timer (40x/BOOKE) */ +#define SPRN_PIT 0x3DB /* Programmable Interval Timer (BOOKE) */ #define SPRN_DER 0x095 /* Debug Enable Register */ #define DER_RSTE 0x40000000 /* Reset Interrupt */ @@ -1116,15 +1112,6 @@ * - SPRG2 indicator that we are in RTAS * - SPRG4 (603 only) pseudo TLB LRU data * - * 32-bit 40x: - * - SPRG0 scratch for exception vectors - * - SPRG1 scratch for exception vectors - * - SPRG2 scratch for exception vectors - * - SPRG4 scratch for exception vectors (not 403) - * - SPRG5 scratch for exception vectors (not 403) - * - SPRG6 scratch for exception vectors (not 403) - * - SPRG7 scratch for exception vectors (not 403) - * * 32-bit 440 and FSL BookE: * - SPRG0 scratch for exception vectors * - SPRG1 scratch for exception vectors (*) @@ -1216,16 +1203,6 @@ #define 
SPRN_SPRG_603_LRU SPRN_SPRG4 #endif -#ifdef CONFIG_40x -#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0 -#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 -#define SPRN_SPRG_SCRATCH2 SPRN_SPRG2 -#define SPRN_SPRG_SCRATCH3 SPRN_SPRG4 -#define SPRN_SPRG_SCRATCH4 SPRN_SPRG5 -#define SPRN_SPRG_SCRATCH5 SPRN_SPRG6 -#define SPRN_SPRG_SCRATCH6 SPRN_SPRG7 -#endif - #ifdef CONFIG_BOOKE #define SPRN_SPRG_RSCRATCH0 SPRN_SPRG0 #define SPRN_SPRG_WSCRATCH0 SPRN_SPRG0 diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index af56980b6cdb..656bfaf91526 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -1,10 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Contains register definitions common to the Book E PowerPC - * specification. Notice that while the IBM-40x series of CPUs - * are not true Book E PowerPCs, they borrowed a number of features - * before Book E was finalized, and are included here as well. Unfortunately, - * they sometimes used different locations than true Book E CPUs did. + * specification. * * Copyright 2009-2010 Freescale Semiconductor, Inc. */ @@ -42,9 +39,6 @@ #define MSR_KERNEL (MSR_ | MSR_64BIT) #define MSR_USER32 (MSR_ | MSR_PR | MSR_EE) #define MSR_USER64 (MSR_USER32 | MSR_64BIT) -#elif defined (CONFIG_40x) -#define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE) -#define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) #else #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_CE) #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) @@ -157,7 +151,6 @@ #define SPRN_TLB3CFG 0x2B3 /* TLB 3 Config Register */ #define SPRN_EPR 0x2BE /* External Proxy Register */ #define SPRN_CCR1 0x378 /* Core Configuration Register 1 */ -#define SPRN_ZPR 0x3B0 /* Zone Protection Register (40x) */ #define SPRN_MAS7 0x3B0 /* MMU Assist Register 7 */ #define SPRN_MMUCR 0x3B2 /* MMU Control Register */ #define SPRN_CCR0 0x3B3 /* Core Configuration Register 0 */ @@ -166,7 +159,6 @@ #define SPRN_SGR 0x3B9 /* Storage Guarded Register */ #define SPRN_DCWR 0x3BA /* Data Cache Write-thru Register */ #define SPRN_SLER 0x3BB /* Little-endian real mode */ -#define SPRN_SU0R 0x3BC /* "User 0" real mode (40x) */ #define SPRN_DCMP 0x3D1 /* Data TLB Compare Register */ #define SPRN_ICDBDR 0x3D3 /* Instruction Cache Debug Data Register */ #define SPRN_EVPR 0x3D6 /* Exception Vector Prefix Register */ @@ -183,10 +175,8 @@ #define SPRN_SVR 0x3FF /* System Version Register */ /* - * SPRs which have conflicting definitions on true Book E versus classic, - * or IBM 40x. + * SPRs which have conflicting definitions on true Book E versus classic. 
*/ -#ifdef CONFIG_BOOKE #define SPRN_CSRR0 0x03A /* Critical Save and Restore Register 0 */ #define SPRN_CSRR1 0x03B /* Critical Save and Restore Register 1 */ #define SPRN_DEAR 0x03D /* Data Error Address Register */ @@ -201,22 +191,6 @@ #define SPRN_DAC2 0x13D /* Data Address Compare 2 */ #define SPRN_TSR 0x150 /* Timer Status Register */ #define SPRN_TCR 0x154 /* Timer Control Register */ -#endif /* Book E */ -#ifdef CONFIG_40x -#define SPRN_DBCR1 0x3BD /* Debug Control Register 1 */ -#define SPRN_ESR 0x3D4 /* Exception Syndrome Register */ -#define SPRN_DEAR 0x3D5 /* Data Error Address Register */ -#define SPRN_TSR 0x3D8 /* Timer Status Register */ -#define SPRN_TCR 0x3DA /* Timer Control Register */ -#define SPRN_SRR2 0x3DE /* Save/Restore Register 2 */ -#define SPRN_SRR3 0x3DF /* Save/Restore Register 3 */ -#define SPRN_DBSR 0x3F0 /* Debug Status Register */ -#define SPRN_DBCR0 0x3F2 /* Debug Control Register 0 */ -#define SPRN_DAC1 0x3F6 /* Data Address Compare 1 */ -#define SPRN_DAC2 0x3F7 /* Data Address Compare 2 */ -#define SPRN_CSRR0 SPRN_SRR2 /* Critical Save and Restore Register 0 */ -#define SPRN_CSRR1 SPRN_SRR3 /* Critical Save and Restore Register 1 */ -#endif #define SPRN_HACOP 0x15F /* Hypervisor Available Coprocessor Register */ /* Bit definitions for CCR1. */ @@ -296,10 +270,6 @@ #endif /* Bit definitions for the DBSR. */ -/* - * DBSR bits which have conflicting definitions on true Book E versus IBM 40x. - */ -#ifdef CONFIG_BOOKE #define DBSR_IDE 0x80000000 /* Imprecise Debug Event */ #define DBSR_MRR 0x30000000 /* Most Recent Reset */ #define DBSR_IC 0x08000000 /* Instruction Completion */ @@ -319,21 +289,6 @@ #define DBSR_CRET 0x00000020 /* Critical Return Debug Event */ #define DBSR_IAC12ATS 0x00000002 /* Instr Address Compare 1/2 Toggle */ #define DBSR_IAC34ATS 0x00000001 /* Instr Address Compare 3/4 Toggle */ -#endif -#ifdef CONFIG_40x -#define DBSR_IC 0x80000000 /* Instruction Completion */ -#define DBSR_BT 0x40000000 /* Branch taken */ -#define DBSR_IRPT 0x20000000 /* Exception Debug Event */ -#define DBSR_TIE 0x10000000 /* Trap Instruction debug Event */ -#define DBSR_IAC1 0x04000000 /* Instruction Address Compare 1 Event */ -#define DBSR_IAC2 0x02000000 /* Instruction Address Compare 2 Event */ -#define DBSR_IAC3 0x00080000 /* Instruction Address Compare 3 Event */ -#define DBSR_IAC4 0x00040000 /* Instruction Address Compare 4 Event */ -#define DBSR_DAC1R 0x01000000 /* Data Address Compare 1 Read Event */ -#define DBSR_DAC1W 0x00800000 /* Data Address Compare 1 Write Event */ -#define DBSR_DAC2R 0x00400000 /* Data Address Compare 2 Read Event */ -#define DBSR_DAC2W 0x00200000 /* Data Address Compare 2 Write Event */ -#endif /* Bit definitions related to the ESR. */ #define ESR_MCI 0x80000000 /* Machine Check - Instruction */ @@ -355,69 +310,6 @@ #define ESR_SPV 0x00000080 /* Signal Processing operation */ /* Bit definitions related to the DBCR0. 
*/ -#if defined(CONFIG_40x) -#define DBCR0_EDM 0x80000000 /* External Debug Mode */ -#define DBCR0_IDM 0x40000000 /* Internal Debug Mode */ -#define DBCR0_RST 0x30000000 /* all the bits in the RST field */ -#define DBCR0_RST_SYSTEM 0x30000000 /* System Reset */ -#define DBCR0_RST_CHIP 0x20000000 /* Chip Reset */ -#define DBCR0_RST_CORE 0x10000000 /* Core Reset */ -#define DBCR0_RST_NONE 0x00000000 /* No Reset */ -#define DBCR0_IC 0x08000000 /* Instruction Completion */ -#define DBCR0_ICMP DBCR0_IC -#define DBCR0_BT 0x04000000 /* Branch Taken */ -#define DBCR0_BRT DBCR0_BT -#define DBCR0_EDE 0x02000000 /* Exception Debug Event */ -#define DBCR0_IRPT DBCR0_EDE -#define DBCR0_TDE 0x01000000 /* TRAP Debug Event */ -#define DBCR0_IA1 0x00800000 /* Instr Addr compare 1 enable */ -#define DBCR0_IAC1 DBCR0_IA1 -#define DBCR0_IA2 0x00400000 /* Instr Addr compare 2 enable */ -#define DBCR0_IAC2 DBCR0_IA2 -#define DBCR0_IA12 0x00200000 /* Instr Addr 1-2 range enable */ -#define DBCR0_IA12X 0x00100000 /* Instr Addr 1-2 range eXclusive */ -#define DBCR0_IA3 0x00080000 /* Instr Addr compare 3 enable */ -#define DBCR0_IAC3 DBCR0_IA3 -#define DBCR0_IA4 0x00040000 /* Instr Addr compare 4 enable */ -#define DBCR0_IAC4 DBCR0_IA4 -#define DBCR0_IA34 0x00020000 /* Instr Addr 3-4 range Enable */ -#define DBCR0_IA34X 0x00010000 /* Instr Addr 3-4 range eXclusive */ -#define DBCR0_IA12T 0x00008000 /* Instr Addr 1-2 range Toggle */ -#define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */ -#define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ - -#define dbcr_iac_range(task) ((task)->thread.debug.dbcr0) -#define DBCR_IAC12I DBCR0_IA12 /* Range Inclusive */ -#define DBCR_IAC12X (DBCR0_IA12 | DBCR0_IA12X) /* Range Exclusive */ -#define DBCR_IAC12MODE (DBCR0_IA12 | DBCR0_IA12X) /* IAC 1-2 Mode Bits */ -#define DBCR_IAC34I DBCR0_IA34 /* Range Inclusive */ -#define DBCR_IAC34X (DBCR0_IA34 | DBCR0_IA34X) /* Range Exclusive */ -#define DBCR_IAC34MODE (DBCR0_IA34 | DBCR0_IA34X) /* IAC 3-4 Mode Bits */ - -/* Bit definitions related to the DBCR1. */ -#define DBCR1_DAC1R 0x80000000 /* DAC1 Read Debug Event */ -#define DBCR1_DAC2R 0x40000000 /* DAC2 Read Debug Event */ -#define DBCR1_DAC1W 0x20000000 /* DAC1 Write Debug Event */ -#define DBCR1_DAC2W 0x10000000 /* DAC2 Write Debug Event */ - -#define dbcr_dac(task) ((task)->thread.debug.dbcr1) -#define DBCR_DAC1R DBCR1_DAC1R -#define DBCR_DAC1W DBCR1_DAC1W -#define DBCR_DAC2R DBCR1_DAC2R -#define DBCR_DAC2W DBCR1_DAC2W - -/* - * Are there any active Debug Events represented in the - * Debug Control Registers? - */ -#define DBCR0_ACTIVE_EVENTS (DBCR0_ICMP | DBCR0_IAC1 | DBCR0_IAC2 | \ - DBCR0_IAC3 | DBCR0_IAC4) -#define DBCR1_ACTIVE_EVENTS (DBCR1_DAC1R | DBCR1_DAC2R | \ - DBCR1_DAC1W | DBCR1_DAC2W) -#define DBCR_ACTIVE_EVENTS(dbcr0, dbcr1) (((dbcr0) & DBCR0_ACTIVE_EVENTS) || \ - ((dbcr1) & DBCR1_ACTIVE_EVENTS)) - -#elif defined(CONFIG_BOOKE) #define DBCR0_EDM 0x80000000 /* External Debug Mode */ #define DBCR0_IDM 0x40000000 /* Internal Debug Mode */ #define DBCR0_RST 0x30000000 /* all the bits in the RST field */ @@ -518,7 +410,6 @@ #define DBCR_ACTIVE_EVENTS(dbcr0, dbcr1) (((dbcr0) & DBCR0_ACTIVE_EVENTS) || \ ((dbcr1) & DBCR1_ACTIVE_EVENTS)) -#endif /* #elif defined(CONFIG_BOOKE) */ /* Bit definitions related to the TCR. 
*/ #define TCR_WP(x) (((x)&0x3)<<30) /* WDT Period */ diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index 9f50766c4623..221c8f8ff89b 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h @@ -58,9 +58,6 @@ static inline u64 get_vtb(void) */ static inline u64 get_dec(void) { - if (IS_ENABLED(CONFIG_40x)) - return mfspr(SPRN_PIT); - return mfspr(SPRN_DEC); } @@ -71,9 +68,7 @@ static inline u64 get_dec(void) */ static inline void set_dec(u64 val) { - if (IS_ENABLED(CONFIG_40x)) - mtspr(SPRN_PIT, (u32)val); - else if (IS_ENABLED(CONFIG_BOOKE)) + if (IS_ENABLED(CONFIG_BOOKE)) mtspr(SPRN_DEC, val); else mtspr(SPRN_DEC, val - 1); diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index b1f094728b35..289023f7a656 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h @@ -44,7 +44,6 @@ void __init udbg_init_rtas_panel(void); void __init udbg_init_rtas_console(void); void __init udbg_init_btext(void); void __init udbg_init_44x_as1(void); -void __init udbg_init_40x_realmode(void); void __init udbg_init_cpm(void); void __init udbg_init_usbgecko(void); void __init udbg_init_memcons(void); -- cgit From 002b27a51b364a59eac99b8d080afe3924c2e064 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 28 Jun 2024 22:11:59 +1000 Subject: powerpc/4xx: Remove CONFIG_BOOKE_OR_40x Now that 40x is gone, replace CONFIG_BOOKE_OR_40x by CONFIG_BOOKE. Signed-off-by: Michael Ellerman Link: https://msgid.link/20240628121201.130802-5-mpe@ellerman.id.au --- arch/powerpc/include/asm/hw_irq.h | 8 ++++---- arch/powerpc/include/asm/irq.h | 2 +- arch/powerpc/include/asm/kup.h | 2 +- arch/powerpc/include/asm/processor.h | 2 +- arch/powerpc/include/asm/ptrace.h | 2 +- arch/powerpc/include/asm/reg.h | 2 +- 6 files changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index 317659fdeacf..569ac1165b06 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -63,7 +63,7 @@ static inline void __hard_irq_enable(void) { - if (IS_ENABLED(CONFIG_BOOKE_OR_40x)) + if (IS_ENABLED(CONFIG_BOOKE)) wrtee(MSR_EE); else if (IS_ENABLED(CONFIG_PPC_8xx)) wrtspr(SPRN_EIE); @@ -75,7 +75,7 @@ static inline void __hard_irq_enable(void) static inline void __hard_irq_disable(void) { - if (IS_ENABLED(CONFIG_BOOKE_OR_40x)) + if (IS_ENABLED(CONFIG_BOOKE)) wrtee(0); else if (IS_ENABLED(CONFIG_PPC_8xx)) wrtspr(SPRN_EID); @@ -87,7 +87,7 @@ static inline void __hard_irq_disable(void) static inline void __hard_EE_RI_disable(void) { - if (IS_ENABLED(CONFIG_BOOKE_OR_40x)) + if (IS_ENABLED(CONFIG_BOOKE)) wrtee(0); else if (IS_ENABLED(CONFIG_PPC_8xx)) wrtspr(SPRN_NRI); @@ -99,7 +99,7 @@ static inline void __hard_EE_RI_disable(void) static inline void __hard_RI_enable(void) { - if (IS_ENABLED(CONFIG_BOOKE_OR_40x)) + if (IS_ENABLED(CONFIG_BOOKE)) return; if (IS_ENABLED(CONFIG_PPC_8xx)) diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index ba1a5974e714..aa3751960ffd 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -33,7 +33,7 @@ extern int distribute_irqs; struct pt_regs; -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE /* * Per-cpu stacks for handling critical, debug and machine check * level interrupts. 
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h index ad7e8c5aec3f..2bb03d941e3e 100644 --- a/arch/powerpc/include/asm/kup.h +++ b/arch/powerpc/include/asm/kup.h @@ -20,7 +20,7 @@ static __always_inline bool kuap_is_disabled(void); #include #endif -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE #include #endif diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index e44cac0da346..6b94de17201c 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -159,7 +159,7 @@ struct thread_struct { unsigned long sr0; #endif #endif /* CONFIG_PPC32 */ -#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP) +#if defined(CONFIG_BOOKE) && defined(CONFIG_PPC_KUAP) unsigned long pid; /* value written in PID reg. at interrupt exit */ #endif /* Debug Registers */ diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index ea8f91fbc62f..7b9350756875 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -310,7 +310,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) static inline bool cpu_has_msr_ri(void) { - return !IS_ENABLED(CONFIG_BOOKE_OR_40x); + return !IS_ENABLED(CONFIG_BOOKE); } static inline bool regs_is_unrecoverable(struct pt_regs *regs) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 76c1490f5c97..0228c90bbcc7 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -18,7 +18,7 @@ #include /* Pickup Book E specific registers. */ -#ifdef CONFIG_BOOKE_OR_40x +#ifdef CONFIG_BOOKE #include #endif -- cgit From 7bf5f0562b62ae94b4da577994b7b0e04e71d37b Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 28 Jun 2024 22:12:00 +1000 Subject: powerpc: Replace CONFIG_4xx with CONFIG_44x Replace 4xx usage with 44x, and replace 4xx_SOC with 44x. Also, as pointed out by Christophe, if 44x || BOOKE can be simplified to just test BOOKE, because 44x always selects BOOKE. Retain the CONFIG_4xx symbol, as there are drivers that use it to mean 4xx || 44x, those will need updating before CONFIG_4xx can be removed. Signed-off-by: Michael Ellerman Link: https://msgid.link/20240628121201.130802-6-mpe@ellerman.id.au --- arch/powerpc/include/asm/cacheflush.h | 2 +- arch/powerpc/include/asm/ppc_asm.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h index ef7d2de33b89..f2656774aaa9 100644 --- a/arch/powerpc/include/asm/cacheflush.h +++ b/arch/powerpc/include/asm/cacheflush.h @@ -121,7 +121,7 @@ static inline void invalidate_dcache_range(unsigned long start, mb(); /* sync */ } -#ifdef CONFIG_4xx +#ifdef CONFIG_44x static inline void flush_instruction_cache(void) { iccci((void *)KERNELBASE); diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 1d1018c1e482..02897f4b0dbf 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h @@ -482,7 +482,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96) * and they must be used. 
*/ -#if !defined(CONFIG_4xx) && !defined(CONFIG_PPC_8xx) +#if !defined(CONFIG_44x) && !defined(CONFIG_PPC_8xx) #define tlbia \ li r4,1024; \ mtctr r4; \ -- cgit From 353d7a84c214f184d5a6b62acdec8b4424159b7c Mon Sep 17 00:00:00 2001 From: Hari Bathini Date: Mon, 1 Jul 2024 18:30:21 +0530 Subject: powerpc/64s/radix/kfence: map __kfence_pool at page granularity When KFENCE is enabled, total system memory is mapped at page level granularity. But in radix MMU mode, ~3GB additional memory is needed to map 100GB of system memory at page level granularity when compared to using 2MB direct mapping.This is not desired considering KFENCE is designed to be enabled in production kernels [1]. Mapping only the memory allocated for KFENCE pool at page granularity is sufficient to enable KFENCE support. So, allocate __kfence_pool during bootup and map it at page granularity instead of mapping all system memory at page granularity. Without patch: # cat /proc/meminfo MemTotal: 101201920 kB With patch: # cat /proc/meminfo MemTotal: 104483904 kB Note that enabling KFENCE at runtime is disabled for radix MMU for now, as it depends on the ability to split page table mappings and such APIs are not currently implemented for radix MMU. All kfence_test.c testcases passed with this patch. [1] https://lore.kernel.org/all/20201103175841.3495947-2-elver@google.com/ Signed-off-by: Hari Bathini Signed-off-by: Michael Ellerman Link: https://msgid.link/20240701130021.578240-1-hbathini@linux.ibm.com --- arch/powerpc/include/asm/kfence.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h index 424ceef82ae6..fab124ada1c7 100644 --- a/arch/powerpc/include/asm/kfence.h +++ b/arch/powerpc/include/asm/kfence.h @@ -15,10 +15,19 @@ #define ARCH_FUNC_PREFIX "." #endif +#ifdef CONFIG_KFENCE +extern bool kfence_disabled; + +static inline void disable_kfence(void) +{ + kfence_disabled = true; +} + static inline bool arch_kfence_init_pool(void) { - return true; + return !kfence_disabled; } +#endif #ifdef CONFIG_PPC64 static inline bool kfence_protect_page(unsigned long addr, bool protect) -- cgit From 717756c9c8ddad9f28389185bfb161d4d88e01a4 Mon Sep 17 00:00:00 2001 From: Artem Savkov Date: Fri, 17 May 2024 09:56:48 +0200 Subject: powerpc64/bpf: jit support for sign extended load Add jit support for sign extended load. Tested using test_bpf module. 
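A hedged sketch of the mapping this enables, using the new lwa encoding from the diff below; EMIT(), dst_reg, src_reg and off name the usual powerpc BPF JIT operands and are shown for illustration only:

  /* BPF_LDX | BPF_MEMSX | BPF_W is the sign-extended 32-bit load of the
   * BPF v4 ISA. lwa is a DS-form instruction, so a real implementation
   * must also handle offsets that are not a multiple of 4. */
  case BPF_LDX | BPF_MEMSX | BPF_W:
          EMIT(PPC_RAW_LWA(dst_reg, src_reg, off));
          break;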
Signed-off-by: Artem Savkov Signed-off-by: Michael Ellerman Link: https://msgid.link/20240517075650.248801-4-asavkov@redhat.com --- arch/powerpc/include/asm/ppc-opcode.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 076ae60b4a55..76cc9a2d8206 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -471,6 +471,7 @@ #define PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb) \ (0x10000006 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21) #define PPC_RAW_LD(r, base, i) (0xe8000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i)) +#define PPC_RAW_LWA(r, base, i) (0xe8000002 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i)) #define PPC_RAW_LWZ(r, base, i) (0x80000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i)) #define PPC_RAW_LWZX(t, a, b) (0x7c00002e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_STD(r, base, i) (0xf8000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_DS(i)) -- cgit From fde318326daa48a4bb3ca8ee229bac4d14b5bc2a Mon Sep 17 00:00:00 2001 From: Artem Savkov Date: Fri, 17 May 2024 09:56:50 +0200 Subject: powerpc64/bpf: jit support for signed division and modulo Add jit support for sign division and modulo. Tested using test_bpf module. Signed-off-by: Artem Savkov Signed-off-by: Michael Ellerman Link: https://msgid.link/20240517075650.248801-6-asavkov@redhat.com --- arch/powerpc/include/asm/ppc-opcode.h | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 76cc9a2d8206..b98a9e982c03 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -536,6 +536,7 @@ #define PPC_RAW_MULI(d, a, i) (0x1c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i)) #define PPC_RAW_DIVW(d, a, b) (0x7c0003d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_DIVWU(d, a, b) (0x7c000396 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) +#define PPC_RAW_DIVD(d, a, b) (0x7c0003d2 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_DIVDU(d, a, b) (0x7c000392 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_DIVDE(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b)) #define PPC_RAW_DIVDE_DOT(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1) -- cgit From 489116d784bebec7e441f400715fbfe6edbce66c Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 9 May 2024 22:12:46 +1000 Subject: powerpc: Drop clang workaround for builtin constant checks The CPU/MMU feature code has build-time checks that the feature value is a builtin constant. Back when the code was added clang wasn't able to compile the checks, so an ifdef was added to avoid the checks for clang builds. See commit b5fa0f7f88ed ("powerpc: Fix build failure with clang due to BUILD_BUG_ON()") These days clang 13 and later are able to build the checks successfully, so drop the workaround. 
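As a small usage note (CPU_FTR_ARCH_300 and do_something() are just stand-ins), the check constrains callers to compile-time-constant feature masks:

  if (cpu_has_feature(CPU_FTR_ARCH_300))      /* constant: builds fine */
          do_something();

  /* Passing a runtime-computed value instead would trip
   * BUILD_BUG_ON(!__builtin_constant_p(feature)), now on clang as well
   * as GCC. */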
Signed-off-by: Michael Ellerman Link: https://msgid.link/20240509121248.270878-1-mpe@ellerman.id.au --- arch/powerpc/include/asm/cpu_has_feature.h | 2 -- arch/powerpc/include/asm/mmu.h | 2 -- 2 files changed, 4 deletions(-) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h index 0efabccd820c..92e24e979954 100644 --- a/arch/powerpc/include/asm/cpu_has_feature.h +++ b/arch/powerpc/include/asm/cpu_has_feature.h @@ -24,9 +24,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature) { int i; -#ifndef __clang__ /* clang can't cope with this */ BUILD_BUG_ON(!__builtin_constant_p(feature)); -#endif #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG if (!static_key_feature_checks_initialized) { diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 009c45044a5a..1d1395b769a8 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -239,9 +239,7 @@ static __always_inline bool mmu_has_feature(unsigned long feature) { int i; -#ifndef __clang__ /* clang can't cope with this */ BUILD_BUG_ON(!__builtin_constant_p(feature)); -#endif #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG if (!static_key_feature_checks_initialized) { -- cgit From db25a9625dbc3aa6613c0347f574689c248a3d0b Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 9 May 2024 22:12:48 +1000 Subject: powerpc: Check only single values are passed to CPU/MMU feature checks cpu_has_feature()/mmu_has_feature() are only able to check a single feature at a time, but there is no enforcement of that. In fact, as fixed in the previous commit, there was code that was passing multiple values to cpu_has_feature(). So add a check that only a single feature is passed using popcount. Note that the test allows 0 or 1 bits to be set, because some code relies on cpu_has_feature(0) being false, the check with CPU_FTRS_POSSIBLE ensures that. See for example CPU_FTR_PPC_LE. Signed-off-by: Michael Ellerman Link: https://msgid.link/20240509121248.270878-3-mpe@ellerman.id.au --- arch/powerpc/include/asm/cpu_has_feature.h | 1 + arch/powerpc/include/asm/mmu.h | 1 + 2 files changed, 2 insertions(+) (limited to 'arch/powerpc/include') diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h index 92e24e979954..bf8a228229fa 100644 --- a/arch/powerpc/include/asm/cpu_has_feature.h +++ b/arch/powerpc/include/asm/cpu_has_feature.h @@ -25,6 +25,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature) int i; BUILD_BUG_ON(!__builtin_constant_p(feature)); + BUILD_BUG_ON(__builtin_popcountl(feature) > 1); #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG if (!static_key_feature_checks_initialized) { diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 1d1395b769a8..4182d68d9cd1 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -240,6 +240,7 @@ static __always_inline bool mmu_has_feature(unsigned long feature) int i; BUILD_BUG_ON(!__builtin_constant_p(feature)); + BUILD_BUG_ON(__builtin_popcountl(feature) > 1); #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG if (!static_key_feature_checks_initialized) { -- cgit
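To illustrate the new constraint (the feature names are placeholders for any two bits), a combined mask is rejected at build time, so callers test each feature individually:

  /* OK: one feature bit per call. */
  if (cpu_has_feature(CPU_FTR_ARCH_300) && cpu_has_feature(CPU_FTR_HVMODE))
          do_something();

  /* Rejected at build time: two bits in one mask trips
   * BUILD_BUG_ON(__builtin_popcountl(feature) > 1). */
  if (cpu_has_feature(CPU_FTR_ARCH_300 | CPU_FTR_HVMODE))
          do_something();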