Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                            |   3
-rw-r--r--  arch/powerpc/include/asm/atomic.h               |   2
-rw-r--r--  arch/powerpc/include/asm/device.h               |   5
-rw-r--r--  arch/powerpc/include/asm/dtl.h                  |  52
-rw-r--r--  arch/powerpc/include/asm/icswx.h                |   2
-rw-r--r--  arch/powerpc/include/asm/lppaca.h               |  44
-rw-r--r--  arch/powerpc/include/asm/nohash/32/pgtable.h    |  18
-rw-r--r--  arch/powerpc/include/asm/paca.h                 |   2
-rw-r--r--  arch/powerpc/include/asm/sections.h             |   2
-rw-r--r--  arch/powerpc/include/uapi/asm/papr_pdsm.h       | 132
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c                 |  90
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S            |  20
-rw-r--r--  arch/powerpc/kernel/kgdb.c                      |   2
-rw-r--r--  arch/powerpc/kernel/kprobes.c                   |   2
-rw-r--r--  arch/powerpc/kernel/module_64.c                 |   6
-rw-r--r--  arch/powerpc/kernel/paca.c                      |   2
-rw-r--r--  arch/powerpc/kernel/process.c                   |  27
-rw-r--r--  arch/powerpc/kernel/syscalls/syscall.tbl        |   1
-rw-r--r--  arch/powerpc/kernel/time.c                      |   2
-rw-r--r--  arch/powerpc/kernel/trace/ftrace.c              |   4
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c          |   9
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c                    |   1
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c                    |   3
-rw-r--r--  arch/powerpc/kvm/powerpc.c                      |   2
-rw-r--r--  arch/powerpc/lib/inst.c                         |  12
-rw-r--r--  arch/powerpc/mm/book3s64/hash_utils.c           |  25
-rw-r--r--  arch/powerpc/mm/book3s64/pkeys.c                |  15
-rw-r--r--  arch/powerpc/mm/nohash/kaslr_booke.c            |   1
-rw-r--r--  arch/powerpc/oprofile/backtrace.c               |   6
-rw-r--r--  arch/powerpc/perf/callchain_32.c                |   2
-rw-r--r--  arch/powerpc/perf/callchain_64.c                |   2
-rw-r--r--  arch/powerpc/perf/core-book3s.c                 |  12
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.c       |   2
-rw-r--r--  arch/powerpc/platforms/cell/cpufreq_spudemand.c |  26
-rw-r--r--  arch/powerpc/platforms/cell/spu_callbacks.c     |   2
-rw-r--r--  arch/powerpc/platforms/powernv/vas-fault.c      |   2
-rw-r--r--  arch/powerpc/platforms/pseries/dtl.c            |   1
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c           |   1
-rw-r--r--  arch/powerpc/platforms/pseries/papr_scm.c       | 420
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c          |   1
-rw-r--r--  arch/powerpc/platforms/pseries/svm.c            |   1
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c                   |   6
42 files changed, 740 insertions(+), 230 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9fa23eb320ff..29a057908173 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -151,6 +151,8 @@ config PPC
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
+ select DMA_OPS if PPC64
+ select DMA_OPS_BYPASS if PPC64
select DYNAMIC_FTRACE if FUNCTION_TRACER
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
@@ -186,7 +188,6 @@ config PPC
select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
select HAVE_CONTEXT_TRACKING if PPC64
select HAVE_TIF_NOHZ if PPC64
- select HAVE_COPY_THREAD_TLS
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DYNAMIC_FTRACE
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 498785ffc25f..0311c3c42960 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -11,8 +11,6 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define ATOMIC_INIT(i) { (i) }
-
/*
* Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
* a "bne-" instruction at the end, so an isync is enough as a acquire barrier
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 266542769e4b..452402215e12 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -19,11 +19,6 @@ struct iommu_table;
*/
struct dev_archdata {
/*
- * Set to %true if the dma_iommu_ops are requested to use a direct
- * window instead of dynamically mapping memory.
- */
- bool iommu_bypass : 1;
- /*
* These two used to be a union. However, with the hybrid ops we need
* both so here we store both a DMA offset for direct mappings and
* an iommu_table for remapped DMA.
diff --git a/arch/powerpc/include/asm/dtl.h b/arch/powerpc/include/asm/dtl.h
new file mode 100644
index 000000000000..1625888f27ef
--- /dev/null
+++ b/arch/powerpc/include/asm/dtl.h
@@ -0,0 +1,52 @@
+#ifndef _ASM_POWERPC_DTL_H
+#define _ASM_POWERPC_DTL_H
+
+#include <asm/lppaca.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * Layout of entries in the hypervisor's dispatch trace log buffer.
+ */
+struct dtl_entry {
+ u8 dispatch_reason;
+ u8 preempt_reason;
+ __be16 processor_id;
+ __be32 enqueue_to_dispatch_time;
+ __be32 ready_to_enqueue_time;
+ __be32 waiting_to_ready_time;
+ __be64 timebase;
+ __be64 fault_addr;
+ __be64 srr0;
+ __be64 srr1;
+};
+
+#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
+#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
+
+/*
+ * Dispatch trace log event enable mask:
+ * 0x1: voluntary virtual processor waits
+ * 0x2: time-slice preempts
+ * 0x4: virtual partition memory page faults
+ */
+#define DTL_LOG_CEDE 0x1
+#define DTL_LOG_PREEMPT 0x2
+#define DTL_LOG_FAULT 0x4
+#define DTL_LOG_ALL (DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
+
+extern struct kmem_cache *dtl_cache;
+extern rwlock_t dtl_access_lock;
+
+/*
+ * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
+ * reading from the dispatch trace log. If other code wants to consume
+ * DTL entries, it can set this pointer to a function that will get
+ * called once for each DTL entry that gets processed.
+ */
+extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
+
+extern void register_dtl_buffer(int cpu);
+extern void alloc_dtl_buffers(unsigned long *time_limit);
+extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
+
+#endif /* _ASM_POWERPC_DTL_H */
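The dtl_consumer hook above is the intended extension point: other code can tap the dispatch trace log without duplicating the accounting logic. A minimal sketch of a hypothetical consumer follows; my_dtl_consume and the log format are illustrative, and only struct dtl_entry, dtl_consumer and dtl_access_lock come from this header:

	#include <asm/dtl.h>

	/* Hypothetical consumer: log each DTL entry as it is processed. */
	static void my_dtl_consume(struct dtl_entry *entry, u64 index)
	{
		pr_info("dtl[%llu]: cpu=%u enq_to_disp=%u\n",
			(unsigned long long)index,
			be16_to_cpu(entry->processor_id),
			be32_to_cpu(entry->enqueue_to_dispatch_time));
	}

	static int __init my_dtl_init(void)
	{
		/* Registration, serialized against other DTL users */
		write_lock(&dtl_access_lock);
		dtl_consumer = my_dtl_consume;
		write_unlock(&dtl_access_lock);
		return 0;
	}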
diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
index 965b1f39b2a5..b0c70a35fd0e 100644
--- a/arch/powerpc/include/asm/icswx.h
+++ b/arch/powerpc/include/asm/icswx.h
@@ -77,6 +77,8 @@ struct coprocessor_completion_block {
#define CSB_CC_CHAIN (37)
#define CSB_CC_SEQUENCE (38)
#define CSB_CC_HW (39)
+/* P9 DD2 NX Workbook 3.2 (Table 4-36): Address translation fault */
+#define CSB_CC_FAULT_ADDRESS (250)
#define CSB_SIZE (0x10)
#define CSB_ALIGN CSB_SIZE
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 3b4b305796ae..c390ec377bae 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -42,7 +42,6 @@
*/
#include <linux/cache.h>
#include <linux/threads.h>
-#include <linux/spinlock_types.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
@@ -146,49 +145,6 @@ struct slb_shadow {
} save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
-/*
- * Layout of entries in the hypervisor's dispatch trace log buffer.
- */
-struct dtl_entry {
- u8 dispatch_reason;
- u8 preempt_reason;
- __be16 processor_id;
- __be32 enqueue_to_dispatch_time;
- __be32 ready_to_enqueue_time;
- __be32 waiting_to_ready_time;
- __be64 timebase;
- __be64 fault_addr;
- __be64 srr0;
- __be64 srr1;
-};
-
-#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
-#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
-
-/*
- * Dispatch trace log event enable mask:
- * 0x1: voluntary virtual processor waits
- * 0x2: time-slice preempts
- * 0x4: virtual partition memory page faults
- */
-#define DTL_LOG_CEDE 0x1
-#define DTL_LOG_PREEMPT 0x2
-#define DTL_LOG_FAULT 0x4
-#define DTL_LOG_ALL (DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
-
-extern struct kmem_cache *dtl_cache;
-extern rwlock_t dtl_access_lock;
-
-/*
- * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
- * reading from the dispatch trace log. If other code wants to consume
- * DTL entries, it can set this pointer to a function that will get
- * called once for each DTL entry that gets processed.
- */
-extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
-
-extern void register_dtl_buffer(int cpu);
-extern void alloc_dtl_buffers(unsigned long *time_limit);
extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
#endif /* CONFIG_PPC_BOOK3S */
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index b56f14160ae5..b0afbdd07740 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -205,10 +205,6 @@ static inline void pmd_clear(pmd_t *pmdp)
*pmdp = __pmd(0);
}
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
/*
* PTE updates. This function is called whenever an existing
* valid PTE is updated. This does -not- include set_pte_at()
@@ -230,6 +226,8 @@ static inline void pmd_clear(pmd_t *pmdp)
* For other page sizes, we have a single entry in the table.
*/
#ifdef CONFIG_PPC_8xx
+static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
+
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
unsigned long clr, unsigned long set, int huge)
{
@@ -237,7 +235,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
pte_basic_t old = pte_val(*p);
pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
int num, i;
- pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
+ pmd_t *pmd = pmd_off(mm, addr);
if (!huge)
num = PAGE_SIZE / SZ_4K;
@@ -286,6 +284,16 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
}
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
+#define __HAVE_ARCH_PTEP_GET
+static inline pte_t ptep_get(pte_t *ptep)
+{
+ pte_t pte = {READ_ONCE(ptep->pte), 0, 0, 0};
+
+ return pte;
+}
+#endif
+
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 45a839a7c6cf..84b2564cf5a4 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -29,7 +29,6 @@
#include <asm/hmi.h>
#include <asm/cpuidle.h>
#include <asm/atomic.h>
-#include <asm/rtas-types.h>
#include <asm-generic/mmiowb_types.h>
@@ -53,6 +52,7 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
#define get_slb_shadow() (get_paca()->slb_shadow_ptr)
struct task_struct;
+struct rtas_args;
/*
* Defines the layout of the paca.
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index d19871763ed4..324d7b298ec3 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -85,7 +85,7 @@ static inline void *dereference_function_descriptor(void *ptr)
struct ppc64_opd_entry *desc = ptr;
void *p;
- if (!probe_kernel_address(&desc->funcaddr, p))
+ if (!get_kernel_nofault(p, (void *)&desc->funcaddr))
ptr = p;
return ptr;
}
diff --git a/arch/powerpc/include/uapi/asm/papr_pdsm.h b/arch/powerpc/include/uapi/asm/papr_pdsm.h
new file mode 100644
index 000000000000..9ccecc1d6840
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/papr_pdsm.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * PAPR nvDimm Specific Methods (PDSM) and structs for libndctl
+ *
+ * (C) Copyright IBM 2020
+ *
+ * Author: Vaibhav Jain <vaibhav at linux.ibm.com>
+ */
+
+#ifndef _UAPI_ASM_POWERPC_PAPR_PDSM_H_
+#define _UAPI_ASM_POWERPC_PAPR_PDSM_H_
+
+#include <linux/types.h>
+#include <linux/ndctl.h>
+
+/*
+ * PDSM Envelope:
+ *
+ * The ioctl ND_CMD_CALL exchanges data between user-space and kernel via an
+ * envelope which consists of two header sections and a payload section, as
+ * illustrated below:
+ * +-----------------+---------------+---------------------------+
+ * | 64-Bytes | 8-Bytes | Max 184-Bytes |
+ * +-----------------+---------------+---------------------------+
+ * | ND-HEADER | PDSM-HEADER | PDSM-PAYLOAD |
+ * +-----------------+---------------+---------------------------+
+ * | nd_family | | |
+ * | nd_size_out | cmd_status | |
+ * | nd_size_in | reserved | nd_pdsm_payload |
+ * | nd_command | payload --> | |
+ * | nd_fw_size | | |
+ * | nd_payload ---> | | |
+ * +-----------------+---------------+---------------------------+
+ *
+ * ND Header:
+ * This is the generic libnvdimm header described as 'struct nd_cmd_pkg'
+ * which is interpreted by libnvdimm before being passed on to papr_scm.
+ * Important member fields used are:
+ * 'nd_family' : (In) NVDIMM_FAMILY_PAPR
+ * 'nd_size_in' : (In) PDSM-HEADER + PDSM-IN-PAYLOAD (usually 0)
+ * 'nd_size_out' : (In) PDSM-HEADER + PDSM-RETURN-PAYLOAD
+ * 'nd_command' : (In) One of PAPR_PDSM_XXX
+ * 'nd_fw_size' : (Out) PDSM-HEADER + size of actual payload returned
+ *
+ * PDSM Header:
+ * This is a papr_scm-specific header that precedes the payload. This is
+ * defined as 'struct nd_pkg_pdsm'. The following fields are available in
+ * this header:
+ *
+ * 'cmd_status' : (Out) Errors if any encountered while servicing PDSM.
+ * 'reserved' : Not used, reserved for future and should be set to 0.
+ * 'payload' : A union of all the possible payload structs
+ *
+ * PDSM Payload:
+ *
+ * The layout of the PDSM Payload is defined by various structs shared between
+ * papr_scm and libndctl so that the contents of the payload can be
+ * interpreted. As such it's defined as a union of all possible payload
+ * structs, 'union nd_pdsm_payload'. Based on the value of
+ * 'nd_cmd_pkg.nd_command', the appropriate member of the union is accessed.
+ */
+
+/* Max payload size that we can handle */
+#define ND_PDSM_PAYLOAD_MAX_SIZE 184
+
+/* Size of the PDSM header that precedes the payload */
+#define ND_PDSM_HDR_SIZE \
+ (sizeof(struct nd_pkg_pdsm) - ND_PDSM_PAYLOAD_MAX_SIZE)
+
+/* Various nvdimm health indicators */
+#define PAPR_PDSM_DIMM_HEALTHY 0
+#define PAPR_PDSM_DIMM_UNHEALTHY 1
+#define PAPR_PDSM_DIMM_CRITICAL 2
+#define PAPR_PDSM_DIMM_FATAL 3
+
+/*
+ * Struct exchanged between kernel and ndctl for PAPR_PDSM_HEALTH.
+ * Various flags indicate the health status of the dimm.
+ *
+ * extension_flags : Any extension fields present in the struct.
+ * dimm_unarmed : Dimm not armed, so contents won't persist.
+ * dimm_bad_shutdown : Previous shutdown did not persist contents.
+ * dimm_bad_restore : Contents from previous shutdown weren't restored.
+ * dimm_scrubbed : Contents of the dimm have been scrubbed.
+ * dimm_locked : Contents of the dimm can't be modified until CEC reboot.
+ * dimm_encrypted : Contents of the dimm are encrypted.
+ * dimm_health : Dimm health indicator. One of PAPR_PDSM_DIMM_XXXX.
+ */
+struct nd_papr_pdsm_health {
+ union {
+ struct {
+ __u32 extension_flags;
+ __u8 dimm_unarmed;
+ __u8 dimm_bad_shutdown;
+ __u8 dimm_bad_restore;
+ __u8 dimm_scrubbed;
+ __u8 dimm_locked;
+ __u8 dimm_encrypted;
+ __u16 dimm_health;
+ };
+ __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
+ };
+};
+
+/*
+ * Methods to be embedded in the ND_CMD_CALL request. These are sent to the
+ * kernel via the 'nd_cmd_pkg.nd_command' member of the ioctl struct.
+ */
+enum papr_pdsm {
+ PAPR_PDSM_MIN = 0x0,
+ PAPR_PDSM_HEALTH,
+ PAPR_PDSM_MAX,
+};
+
+/* Maximal union that can hold all possible payload types */
+union nd_pdsm_payload {
+ struct nd_papr_pdsm_health health;
+ __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
+} __packed;
+
+/*
+ * PDSM-header + payload expected with the ND_CMD_CALL ioctl from libnvdimm.
+ * The valid member of union 'payload' is identified via
+ * 'nd_cmd_pkg.nd_command', which should always precede this struct when sent
+ * to papr_scm via the ND_CMD_CALL interface.
+ */
+struct nd_pkg_pdsm {
+ __s32 cmd_status; /* Out: Sub-cmd status returned */
+ __u16 reserved[2]; /* Ignored and must be set to 0 */
+ union nd_pdsm_payload payload;
+} __packed;
+
+#endif /* _UAPI_ASM_POWERPC_PAPR_PDSM_H_ */
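To make the envelope layout concrete, here is a minimal userspace sketch issuing PAPR_PDSM_HEALTH through the ND_CMD_CALL ioctl. The device path is illustrative and error handling is elided; ND_IOCTL_CALL, struct nd_cmd_pkg and NVDIMM_FAMILY_PAPR come from <linux/ndctl.h>:

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ndctl.h>
	#include <asm/papr_pdsm.h>

	int main(void)
	{
		/* Envelope: ND header followed by the PDSM header + payload */
		unsigned char buf[sizeof(struct nd_cmd_pkg) +
				  sizeof(struct nd_pkg_pdsm)] __attribute__((aligned(8)));
		struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *)buf;
		struct nd_pkg_pdsm *pdsm = (struct nd_pkg_pdsm *)pkg->nd_payload;
		int fd = open("/dev/nmem0", O_RDWR);	/* device path is illustrative */

		if (fd < 0)
			return 1;

		memset(buf, 0, sizeof(buf));
		pkg->nd_family = NVDIMM_FAMILY_PAPR;
		pkg->nd_command = PAPR_PDSM_HEALTH;
		pkg->nd_size_in = 0;
		pkg->nd_size_out = ND_PDSM_HDR_SIZE + sizeof(struct nd_papr_pdsm_health);

		if (ioctl(fd, ND_IOCTL_CALL, pkg) < 0 || pdsm->cmd_status != 0)
			return 1;

		printf("dimm_health=%u unarmed=%u\n",
		       pdsm->payload.health.dimm_health,
		       pdsm->payload.health.dimm_unarmed);
		close(fd);
		return 0;
	}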
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index e486d1d78de2..569fecd7b5b2 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -14,23 +14,6 @@
* Generic iommu implementation
*/
-/*
- * The coherent mask may be smaller than the real mask, check if we can
- * really use a direct window.
- */
-static inline bool dma_iommu_alloc_bypass(struct device *dev)
-{
- return dev->archdata.iommu_bypass && !iommu_fixed_is_weak &&
- dma_direct_supported(dev, dev->coherent_dma_mask);
-}
-
-static inline bool dma_iommu_map_bypass(struct device *dev,
- unsigned long attrs)
-{
- return dev->archdata.iommu_bypass &&
- (!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING));
-}
-
/* Allocates a contiguous real buffer and creates mappings over it.
* Returns the virtual address of the buffer and sets dma_handle
* to the dma address (mapping) of the first page.
@@ -39,8 +22,6 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
- if (dma_iommu_alloc_bypass(dev))
- return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
dma_handle, dev->coherent_dma_mask, flag,
dev_to_node(dev));
@@ -50,11 +31,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
- if (dma_iommu_alloc_bypass(dev))
- dma_direct_free(dev, size, vaddr, dma_handle, attrs);
- else
- iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
- dma_handle);
+ iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}
/* Creates TCEs for a user provided buffer. The user buffer must be
@@ -67,9 +44,6 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
enum dma_data_direction direction,
unsigned long attrs)
{
- if (dma_iommu_map_bypass(dev, attrs))
- return dma_direct_map_page(dev, page, offset, size, direction,
- attrs);
return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
size, dma_get_mask(dev), direction, attrs);
}
@@ -79,11 +53,8 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
- if (!dma_iommu_map_bypass(dev, attrs))
- iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
- direction, attrs);
- else
- dma_direct_unmap_page(dev, dma_handle, size, direction, attrs);
+ iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
+ attrs);
}
@@ -91,8 +62,6 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
- if (dma_iommu_map_bypass(dev, attrs))
- return dma_direct_map_sg(dev, sglist, nelems, direction, attrs);
return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
dma_get_mask(dev), direction, attrs);
}
@@ -101,11 +70,8 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
- if (!dma_iommu_map_bypass(dev, attrs))
- ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
+ ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
direction, attrs);
- else
- dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs);
}
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
@@ -113,8 +79,9 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_controller *phb = pci_bus_to_host(pdev->bus);
- return phb->controller_ops.iommu_bypass_supported &&
- phb->controller_ops.iommu_bypass_supported(pdev, mask);
+ if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
+ return false;
+ return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}
/* We support DMA to/from any memory page via the iommu */
@@ -123,7 +90,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
struct iommu_table *tbl = get_iommu_table_base(dev);
if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
- dev->archdata.iommu_bypass = true;
+ dev->dma_ops_bypass = true;
dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
return 1;
}
@@ -141,7 +108,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
}
dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
- dev->archdata.iommu_bypass = false;
+ dev->dma_ops_bypass = false;
return 1;
}
@@ -153,47 +120,12 @@ u64 dma_iommu_get_required_mask(struct device *dev)
if (!tbl)
return 0;
- if (dev_is_pci(dev)) {
- u64 bypass_mask = dma_direct_get_required_mask(dev);
-
- if (dma_iommu_bypass_supported(dev, bypass_mask))
- return bypass_mask;
- }
-
mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
mask += mask - 1;
return mask;
}
-static void dma_iommu_sync_for_cpu(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
- if (dma_iommu_alloc_bypass(dev))
- dma_direct_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_iommu_sync_for_device(struct device *dev, dma_addr_t addr,
- size_t sz, enum dma_data_direction dir)
-{
- if (dma_iommu_alloc_bypass(dev))
- dma_direct_sync_single_for_device(dev, addr, sz, dir);
-}
-
-extern void dma_iommu_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
- if (dma_iommu_alloc_bypass(dev))
- dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-
-extern void dma_iommu_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
- if (dma_iommu_alloc_bypass(dev))
- dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
-}
-
const struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
@@ -203,10 +135,6 @@ const struct dma_map_ops dma_iommu_ops = {
.map_page = dma_iommu_map_page,
.unmap_page = dma_iommu_unmap_page,
.get_required_mask = dma_iommu_get_required_mask,
- .sync_single_for_cpu = dma_iommu_sync_for_cpu,
- .sync_single_for_device = dma_iommu_sync_for_device,
- .sync_sg_for_cpu = dma_iommu_sync_sg_for_cpu,
- .sync_sg_for_device = dma_iommu_sync_sg_for_device,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
};
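With DMA_OPS_BYPASS now selected for PPC64 (see the Kconfig hunk above), the generic DMA layer consults dev->dma_ops_bypass and routes eligible devices straight to dma-direct before any of these ops are called. That is why the per-call dma_iommu_*_bypass branches and the four sync hooks could be deleted outright rather than converted.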
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e70ebb5c318c..446e54c3f71e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -270,7 +270,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.endif
- ld r10,PACA_EXGEN+EX_CTR(r13)
+ ld r10,IAREA+EX_CTR(r13)
mtctr r10
BEGIN_FTR_SECTION
ld r10,IAREA+EX_PPR(r13)
@@ -298,7 +298,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
.if IKVM_SKIP
89: mtocrf 0x80,r9
- ld r10,PACA_EXGEN+EX_CTR(r13)
+ ld r10,IAREA+EX_CTR(r13)
mtctr r10
ld r9,IAREA+EX_R9(r13)
ld r10,IAREA+EX_R10(r13)
@@ -2551,7 +2551,7 @@ EXC_VIRT_NONE(0x5400, 0x100)
INT_DEFINE_BEGIN(denorm_exception)
IVEC=0x1500
IHSRR=1
- IBRANCH_COMMON=0
+ IBRANCH_TO_COMMON=0
IKVM_REAL=1
INT_DEFINE_END(denorm_exception)
@@ -3072,10 +3072,18 @@ do_hash_page:
ori r0,r0,DSISR_BAD_FAULT_64S@l
and. r0,r5,r0 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
+
+ /*
+ * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
+ * don't call hash_page, just fail the fault. This is required to
+ * prevent re-entrancy problems in the hash code, namely perf
+ * interrupts hitting while something holds H_PAGE_BUSY, and taking a
+ * hash fault. See the comment in hash_preload().
+ */
ld r11, PACA_THREAD_INFO(r13)
- lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
- andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
- bne 77f /* then don't call hash_page now */
+ lwz r0,TI_PREEMPT(r11)
+ andis. r0,r0,NMI_MASK@h
+ bne 77f
/*
* r3 contains the trap number
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 652b2852bea3..409080208a6c 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -421,7 +421,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
unsigned int instr;
struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
- err = probe_kernel_address(addr, instr);
+ err = get_kernel_nofault(instr, (unsigned *) addr);
if (err)
return err;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 6f96f65ebfe8..9cc792a3a6a9 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -289,7 +289,7 @@ int kprobe_handler(struct pt_regs *regs)
if (!p) {
unsigned int instr;
- if (probe_kernel_address(addr, instr))
+ if (get_kernel_nofault(instr, addr))
goto no_kprobe;
if (instr != BREAKPOINT_INSTRUCTION) {
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index f4c2fa190192..ae2b188365b1 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -756,7 +756,8 @@ int module_trampoline_target(struct module *mod, unsigned long addr,
stub = (struct ppc64_stub_entry *)addr;
- if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) {
+ if (copy_from_kernel_nofault(&magic, &stub->magic,
+ sizeof(magic))) {
pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
return -EFAULT;
}
@@ -766,7 +767,8 @@ int module_trampoline_target(struct module *mod, unsigned long addr,
return -EFAULT;
}
- if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) {
+ if (copy_from_kernel_nofault(&funcdata, &stub->funcdata,
+ sizeof(funcdata))) {
pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
return -EFAULT;
}
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 2168372b792d..74da65aacbc9 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -87,7 +87,7 @@ static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align,
* This is very early in boot, so no harm done if the kernel crashes at
* this point.
*/
- BUG_ON(shared_lppaca_size >= shared_lppaca_total_size);
+ BUG_ON(shared_lppaca_size > shared_lppaca_total_size);
return ptr;
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7bb7faf84490..794b754deec2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1252,29 +1252,31 @@ struct task_struct *__switch_to(struct task_struct *prev,
static void show_instructions(struct pt_regs *regs)
{
int i;
+ unsigned long nip = regs->nip;
unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
printk("Instruction dump:");
+ /*
+ * If we were executing with the MMU off for instructions, adjust pc
+ * rather than printing XXXXXXXX.
+ */
+ if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
+ pc = (unsigned long)phys_to_virt(pc);
+ nip = (unsigned long)phys_to_virt(regs->nip);
+ }
+
for (i = 0; i < NR_INSN_TO_PRINT; i++) {
int instr;
if (!(i % 8))
pr_cont("\n");
-#if !defined(CONFIG_BOOKE)
- /* If executing with the IMMU off, adjust pc rather
- * than print XXXXXXXX.
- */
- if (!(regs->msr & MSR_IR))
- pc = (unsigned long)phys_to_virt(pc);
-#endif
-
if (!__kernel_text_address(pc) ||
- probe_kernel_address((const void *)pc, instr)) {
+ get_kernel_nofault(instr, (const void *)pc)) {
pr_cont("XXXXXXXX ");
} else {
- if (regs->nip == pc)
+ if (nip == pc)
pr_cont("<%08x> ", instr);
else
pr_cont("%08x ", instr);
@@ -1305,7 +1307,8 @@ void show_user_instructions(struct pt_regs *regs)
for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
int instr;
- if (probe_user_read(&instr, (void __user *)pc, sizeof(instr))) {
+ if (copy_from_user_nofault(&instr, (void __user *)pc,
+ sizeof(instr))) {
seq_buf_printf(&s, "XXXXXXXX ");
continue;
}
@@ -1590,7 +1593,7 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
/*
* Copy architecture-specific thread state
*/
-int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
+int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long kthread_arg, struct task_struct *p,
unsigned long tls)
{
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index f833a3190822..dd87a782d80e 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -525,6 +525,7 @@
435 32 clone3 ppc_clone3 sys_clone3
435 64 clone3 sys_clone3
435 spu clone3 sys_ni_syscall
+436 common close_range sys_close_range
437 common openat2 sys_openat2
438 common pidfd_getfd sys_pidfd_getfd
439 common faccessat2 sys_faccessat2
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6fcae436ae51..f85539ebb513 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -183,6 +183,8 @@ static inline unsigned long read_spurr(unsigned long tb)
#ifdef CONFIG_PPC_SPLPAR
+#include <asm/dtl.h>
+
/*
* Scan the dispatch trace log and count up the stolen time.
* Should be called with interrupts disabled.
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 5e399628f51a..c1fede6ec934 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -226,7 +226,7 @@ __ftrace_make_nop(struct module *mod,
unsigned long ip = rec->ip;
unsigned long tramp;
- if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
+ if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
/* Make sure that this is still a 24-bit jump */
@@ -249,7 +249,7 @@ __ftrace_make_nop(struct module *mod,
pr_devel("ip:%lx jumps to %lx", ip, tramp);
/* Find where the trampoline jumps to */
- if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
+ if (copy_from_kernel_nofault(jmp, (void *)tramp, sizeof(jmp))) {
pr_err("Failed to read %lx\n", tramp);
return -EFAULT;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 3cb0c9843d01..777aa5625d5f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -33,14 +33,15 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
gva_t eaddr, void *to, void *from,
unsigned long n)
{
- int uninitialized_var(old_pid), old_lpid;
+ int old_pid, old_lpid;
unsigned long quadrant, ret = n;
bool is_load = !!to;
/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
if (kvmhv_on_pseries())
return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
- __pa(to), __pa(from), n);
+ (to != NULL) ? __pa(to) : 0,
+ (from != NULL) ? __pa(from) : 0, n);
quadrant = 1;
if (!pid)
@@ -64,9 +65,9 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
isync();
if (is_load)
- ret = probe_user_read(to, (const void __user *)from, n);
+ ret = copy_from_user_nofault(to, (const void __user *)from, n);
else
- ret = probe_user_write((void __user *)to, from, n);
+ ret = copy_to_user_nofault((void __user *)to, from, n);
/* switch the pid first to avoid running host with unallocated pid */
if (quadrant == 1 && pid != old_pid)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 6bf66649ab92..ebb04f331ad3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -74,6 +74,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
+#include <asm/dtl.h>
#include "book3s.h"
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index ef54f917bdaf..ed12dfbf9bb5 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1828,9 +1828,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
int ret;
-#ifdef CONFIG_ALTIVEC
- unsigned long uninitialized_var(vrsave);
-#endif
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index dd7d141e33e8..aaa7b62f2f82 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1110,7 +1110,7 @@ static inline u32 dp_to_sp(u64 fprd)
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- u64 uninitialized_var(gpr);
+ u64 gpr;
if (run->mmio.len > sizeof(gpr)) {
printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
diff --git a/arch/powerpc/lib/inst.c b/arch/powerpc/lib/inst.c
index aedfd6e31e53..9cc17eb62462 100644
--- a/arch/powerpc/lib/inst.c
+++ b/arch/powerpc/lib/inst.c
@@ -15,11 +15,11 @@ int probe_user_read_inst(struct ppc_inst *inst,
unsigned int val, suffix;
int err;
- err = probe_user_read(&val, nip, sizeof(val));
+ err = copy_from_user_nofault(&val, nip, sizeof(val));
if (err)
return err;
if (get_op(val) == OP_PREFIX) {
- err = probe_user_read(&suffix, (void __user *)nip + 4, 4);
+ err = copy_from_user_nofault(&suffix, (void __user *)nip + 4, 4);
*inst = ppc_inst_prefix(val, suffix);
} else {
*inst = ppc_inst(val);
@@ -33,11 +33,11 @@ int probe_kernel_read_inst(struct ppc_inst *inst,
unsigned int val, suffix;
int err;
- err = probe_kernel_read(&val, src, sizeof(val));
+ err = copy_from_kernel_nofault(&val, src, sizeof(val));
if (err)
return err;
if (get_op(val) == OP_PREFIX) {
- err = probe_kernel_read(&suffix, (void *)src + 4, 4);
+ err = copy_from_kernel_nofault(&suffix, (void *)src + 4, 4);
*inst = ppc_inst_prefix(val, suffix);
} else {
*inst = ppc_inst(val);
@@ -51,7 +51,7 @@ int probe_user_read_inst(struct ppc_inst *inst,
unsigned int val;
int err;
- err = probe_user_read(&val, nip, sizeof(val));
+ err = copy_from_user_nofault(&val, nip, sizeof(val));
if (!err)
*inst = ppc_inst(val);
@@ -64,7 +64,7 @@ int probe_kernel_read_inst(struct ppc_inst *inst,
unsigned int val;
int err;
- err = probe_kernel_read(&val, src, sizeof(val));
+ err = copy_from_kernel_nofault(&val, src, sizeof(val));
if (!err)
*inst = ppc_inst(val);
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 468169e33c86..9b9f92ad0e7a 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
pgd_t *pgdir;
int rc, ssize, update_flags = 0;
unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
+ unsigned long flags;
BUG_ON(get_region_id(ea) != USER_REGION_ID);
@@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
return;
#endif /* CONFIG_PPC_64K_PAGES */
+ /*
+ * __hash_page_* must run with interrupts off, as it sets the
+ * H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any
+ * time and may take a hash fault reading the user stack, see
+ * read_user_stack_slow() in the powerpc/perf code.
+ *
+ * If that takes a hash fault on the same page as we lock here, it
+ * will bail out when seeing H_PAGE_BUSY set, and retry the access
+ * leading to an infinite loop.
+ *
+ * Disabling interrupts here does not prevent perf interrupts, but it
+ * will prevent them taking hash faults (see the NMI test in
+ * do_hash_page), then read_user_stack's copy_from_user_nofault will
+ * fail and perf will fall back to read_user_stack_slow(), which
+ * walks the Linux page tables.
+ *
+ * Interrupts must also be off for the duration of the
+ * mm_is_thread_local test and update, to prevent preempt running the
+ * mm on another CPU (XXX: this may be racy vs kthread_use_mm).
+ */
+ local_irq_save(flags);
+
/* Is that local to this CPU ? */
if (mm_is_thread_local(mm))
update_flags |= HPTE_LOCAL_UPDATE;
@@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
mm_ctx_user_psize(&mm->context),
mm_ctx_user_psize(&mm->context),
pte_val(*ptep));
+
+ local_irq_restore(flags);
}
/*
diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index 1199fc2bfaec..d174106bab67 100644
--- a/arch/powerpc/mm/book3s64/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -353,16 +353,15 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
int pkey_shift;
u64 amr;
- if (!is_pkey_enabled(pkey))
- return true;
-
pkey_shift = pkeyshift(pkey);
- if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift)))
- return true;
+ if (execute)
+ return !(read_iamr() & (IAMR_EX_BIT << pkey_shift));
+
+ amr = read_amr();
+ if (write)
+ return !(amr & (AMR_WR_BIT << pkey_shift));
- amr = read_amr(); /* Delay reading amr until absolutely needed */
- return ((!write && !(amr & (AMR_RD_BIT << pkey_shift))) ||
- (write && !(amr & (AMR_WR_BIT << pkey_shift))));
+ return !(amr & (AMR_RD_BIT << pkey_shift));
}
bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 4a75f2d9bf0e..bce0e5349978 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -14,6 +14,7 @@
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
+#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/kdump.h>
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
index 6f347fa29f41..9db7ada79d10 100644
--- a/arch/powerpc/oprofile/backtrace.c
+++ b/arch/powerpc/oprofile/backtrace.c
@@ -33,7 +33,8 @@ static unsigned int user_getsp32(unsigned int sp, int is_first)
* which means that we've done all that we can do from
* interrupt context.
*/
- if (probe_user_read(stack_frame, (void __user *)p, sizeof(stack_frame)))
+ if (copy_from_user_nofault(stack_frame, (void __user *)p,
+ sizeof(stack_frame)))
return 0;
if (!is_first)
@@ -51,7 +52,8 @@ static unsigned long user_getsp64(unsigned long sp, int is_first)
{
unsigned long stack_frame[3];
- if (probe_user_read(stack_frame, (void __user *)sp, sizeof(stack_frame)))
+ if (copy_from_user_nofault(stack_frame, (void __user *)sp,
+ sizeof(stack_frame)))
return 0;
if (!is_first)
diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c
index f7d888d39cd3..542e68b8eae0 100644
--- a/arch/powerpc/perf/callchain_32.c
+++ b/arch/powerpc/perf/callchain_32.c
@@ -44,7 +44,7 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
((unsigned long)ptr & 3))
return -EFAULT;
- rc = probe_user_read(ret, ptr, sizeof(*ret));
+ rc = copy_from_user_nofault(ret, ptr, sizeof(*ret));
if (IS_ENABLED(CONFIG_PPC64) && rc)
return read_user_stack_slow(ptr, ret, 4);
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
index 814d1c2c2b9c..fa2a1b83b9b0 100644
--- a/arch/powerpc/perf/callchain_64.c
+++ b/arch/powerpc/perf/callchain_64.c
@@ -50,7 +50,7 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
((unsigned long)ptr & 7))
return -EFAULT;
- if (!probe_user_read(ret, ptr, sizeof(*ret)))
+ if (!copy_from_user_nofault(ret, ptr, sizeof(*ret)))
return 0;
return read_user_stack_slow(ptr, ret, 8);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 13b9dd5e4a76..01d70280d287 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -418,14 +418,16 @@ static __u64 power_pmu_bhrb_to(u64 addr)
__u64 target;
if (is_kernel_addr(addr)) {
- if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
+ if (copy_from_kernel_nofault(&instr, (void *)addr,
+ sizeof(instr)))
return 0;
return branch_target((struct ppc_inst *)&instr);
}
/* Userspace: need copy instruction here then translate it */
- if (probe_user_read(&instr, (unsigned int __user *)addr, sizeof(instr)))
+ if (copy_from_user_nofault(&instr, (unsigned int __user *)addr,
+ sizeof(instr)))
return 0;
target = branch_target((struct ppc_inst *)&instr);
@@ -2177,6 +2179,12 @@ static void __perf_event_interrupt(struct pt_regs *regs)
perf_read_regs(regs);
+ /*
+ * If perf interrupts hit in a local_irq_disable (soft-masked) region,
+ * we consider them as NMIs. This is required to prevent hash faults on
+ * user addresses when reading callchains. See the NMI test in
+ * do_hash_page.
+ */
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index fc98912f42cf..76a8102bdb98 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -340,7 +340,7 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
{
int l1irq;
int l2irq;
- struct irq_chip *uninitialized_var(irqchip);
+ struct irq_chip *irqchip;
void *hndlr;
int type;
u32 reg;
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 55b31eadb3c8..ca7849e113d7 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -126,30 +126,8 @@ static struct cpufreq_governor spu_governor = {
.stop = spu_gov_stop,
.owner = THIS_MODULE,
};
-
-/*
- * module init and destoy
- */
-
-static int __init spu_gov_init(void)
-{
- int ret;
-
- ret = cpufreq_register_governor(&spu_governor);
- if (ret)
- printk(KERN_ERR "registration of governor failed\n");
- return ret;
-}
-
-static void __exit spu_gov_exit(void)
-{
- cpufreq_unregister_governor(&spu_governor);
-}
-
-
-module_init(spu_gov_init);
-module_exit(spu_gov_exit);
+cpufreq_governor_init(spu_governor);
+cpufreq_governor_exit(spu_governor);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
-
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index cbee3666da07..abdef9bcf432 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -35,7 +35,7 @@
*/
static void *spu_syscall_table[] = {
-#define __SYSCALL(nr, entry) entry,
+#define __SYSCALL(nr, entry) [nr] = entry,
#include <asm/syscall_table_spu.h>
#undef __SYSCALL
};
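The switch to a designated initializer makes the table robust against holes in the numbering: with positional initializers every entry after a skipped number would land in the wrong slot, while [nr] = entry pins each handler to its number and leaves unlisted slots NULL. A contrived sketch, with sys_a/sys_b as placeholder handlers:

	static void *positional[] = { sys_a, sys_b };	/* sys_b lands in slot 1 */
	static void *designated[] = {
		[0] = sys_a,
		[3] = sys_b,	/* slots 1 and 2 stay NULL */
	};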
diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index 266a6ca5e15e..3d21fce254b7 100644
--- a/arch/powerpc/platforms/powernv/vas-fault.c
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -79,7 +79,7 @@ static void update_csb(struct vas_window *window,
csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
memset(&csb, 0, sizeof(csb));
- csb.cc = CSB_CC_TRANSLATION;
+ csb.cc = CSB_CC_FAULT_ADDRESS;
csb.ce = CSB_CE_TERMINATION;
csb.cs = 0;
csb.count = 0;
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index eab8aa293743..982f069e4c31 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -12,6 +12,7 @@
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
+#include <asm/dtl.h>
#include <asm/lppaca.h>
#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index fd26f3d21d7b..f71ff2c94efe 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -40,6 +40,7 @@
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>
#include <asm/debugfs.h>
+#include <asm/dtl.h>
#include "pseries.h"
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index f35592423380..9c569078a09f 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -12,16 +12,57 @@
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/seq_buf.h>
#include <asm/plpar_wrappers.h>
+#include <asm/papr_pdsm.h>
#define BIND_ANY_ADDR (~0ul)
#define PAPR_SCM_DIMM_CMD_MASK \
((1ul << ND_CMD_GET_CONFIG_SIZE) | \
(1ul << ND_CMD_GET_CONFIG_DATA) | \
- (1ul << ND_CMD_SET_CONFIG_DATA))
-
+ (1ul << ND_CMD_SET_CONFIG_DATA) | \
+ (1ul << ND_CMD_CALL))
+
+/* DIMM health bitmap indicators */
+/* SCM device is unable to persist memory contents */
+#define PAPR_PMEM_UNARMED (1ULL << (63 - 0))
+/* SCM device failed to persist memory contents */
+#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1))
+/* SCM device contents are persisted from previous IPL */
+#define PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << (63 - 2))
+/* SCM device contents are not persisted from previous IPL */
+#define PAPR_PMEM_EMPTY (1ULL << (63 - 3))
+/* SCM device memory life remaining is critically low */
+#define PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4))
+/* SCM device will be garded off next IPL due to failure */
+#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5))
+/* SCM contents cannot persist due to current platform health status */
+#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6))
+/* SCM device is unable to persist memory contents in certain conditions */
+#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7))
+/* SCM device is encrypted */
+#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8))
+/* SCM device has been scrubbed and locked */
+#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9))
+
+/* Bits in the health bitmap indicating an unarmed dimm */
+#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \
+ PAPR_PMEM_HEALTH_UNHEALTHY)
+
+/* Bits in the health bitmap indicating an unflushed dimm */
+#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY)
+
+/* Bits in the health bitmap indicating an unrestored dimm */
+#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY)
+
+/* Bits in the health bitmap used for smart event notification */
+#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \
+ PAPR_PMEM_HEALTH_FATAL | \
+ PAPR_PMEM_HEALTH_UNHEALTHY)
+
+/* private struct associated with each region */
struct papr_scm_priv {
struct platform_device *pdev;
struct device_node *dn;
@@ -39,6 +80,15 @@ struct papr_scm_priv {
struct resource res;
struct nd_region *region;
struct nd_interleave_set nd_set;
+
+ /* Protect dimm health data from concurrent read/writes */
+ struct mutex health_mutex;
+
+ /* Last time the health information of the dimm was updated */
+ unsigned long lasthealth_jiffies;
+
+ /* Health information for the dimm */
+ u64 health_bitmap;
};
static int drc_pmem_bind(struct papr_scm_priv *p)
@@ -144,6 +194,61 @@ err_out:
return drc_pmem_bind(p);
}
+/*
+ * Issue hcall to retrieve dimm health info and populate papr_scm_priv with the
+ * health information.
+ */
+static int __drc_pmem_query_health(struct papr_scm_priv *p)
+{
+ unsigned long ret[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ /* issue the hcall */
+ rc = plpar_hcall(H_SCM_HEALTH, ret, p->drc_index);
+ if (rc != H_SUCCESS) {
+ dev_err(&p->pdev->dev,
+ "Failed to query health information, Err:%ld\n", rc);
+ return -ENXIO;
+ }
+
+ p->lasthealth_jiffies = jiffies;
+ p->health_bitmap = ret[0] & ret[1];
+
+ dev_dbg(&p->pdev->dev,
+ "Queried dimm health info. Bitmap:0x%016lx Mask:0x%016lx\n",
+ ret[0], ret[1]);
+
+ return 0;
+}
+
+/* Min interval in seconds for assuming stable dimm health */
+#define MIN_HEALTH_QUERY_INTERVAL 60
+
+/* Query cached health info and if needed call drc_pmem_query_health */
+static int drc_pmem_query_health(struct papr_scm_priv *p)
+{
+ unsigned long cache_timeout;
+ int rc;
+
+ /* Protect concurrent modifications to papr_scm_priv */
+ rc = mutex_lock_interruptible(&p->health_mutex);
+ if (rc)
+ return rc;
+
+ /* Jiffies offset for which the health data is assumed to be the same */
+ cache_timeout = p->lasthealth_jiffies +
+ msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000);
+
+ /* Fetch new health info if it's older than MIN_HEALTH_QUERY_INTERVAL */
+ if (time_after(jiffies, cache_timeout))
+ rc = __drc_pmem_query_health(p);
+ else
+ /* Assume cached health data is valid */
+ rc = 0;
+
+ mutex_unlock(&p->health_mutex);
+ return rc;
+}
static int papr_scm_meta_get(struct papr_scm_priv *p,
struct nd_cmd_get_config_data_hdr *hdr)
@@ -246,16 +351,250 @@ static int papr_scm_meta_set(struct papr_scm_priv *p,
return 0;
}
+/*
+ * Do sanity checks on the input args to the dimm-control function and return
+ * '0' if valid. Validation of PDSM payloads happens later in
+ * papr_scm_service_pdsm.
+ */
+static int is_cmd_valid(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len)
+{
+ unsigned long cmd_mask = PAPR_SCM_DIMM_CMD_MASK;
+ struct nd_cmd_pkg *nd_cmd;
+ struct papr_scm_priv *p;
+ enum papr_pdsm pdsm;
+
+ /* Only dimm-specific calls are supported atm */
+ if (!nvdimm)
+ return -EINVAL;
+
+ /* get the provider data from struct nvdimm */
+ p = nvdimm_provider_data(nvdimm);
+
+ if (!test_bit(cmd, &cmd_mask)) {
+ dev_dbg(&p->pdev->dev, "Unsupported cmd=%u\n", cmd);
+ return -EINVAL;
+ }
+
+ /* For CMD_CALL verify pdsm request */
+ if (cmd == ND_CMD_CALL) {
+ /* Verify the envelope and envelope size */
+ if (!buf ||
+ buf_len < (sizeof(struct nd_cmd_pkg) + ND_PDSM_HDR_SIZE)) {
+ dev_dbg(&p->pdev->dev, "Invalid pkg size=%u\n",
+ buf_len);
+ return -EINVAL;
+ }
+
+ /* Verify that the nd_cmd_pkg.nd_family is correct */
+ nd_cmd = (struct nd_cmd_pkg *)buf;
+
+ if (nd_cmd->nd_family != NVDIMM_FAMILY_PAPR) {
+ dev_dbg(&p->pdev->dev, "Invalid pkg family=0x%llx\n",
+ nd_cmd->nd_family);
+ return -EINVAL;
+ }
+
+ pdsm = (enum papr_pdsm)nd_cmd->nd_command;
+
+ /* Verify if the pdsm command is valid */
+ if (pdsm <= PAPR_PDSM_MIN || pdsm >= PAPR_PDSM_MAX) {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid PDSM\n",
+ pdsm);
+ return -EINVAL;
+ }
+
+ /* Ensure there is enough space to hold the returned 'nd_pkg_pdsm' header */
+ if (nd_cmd->nd_size_out < ND_PDSM_HDR_SIZE) {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid payload\n",
+ pdsm);
+ return -EINVAL;
+ }
+ }
+
+ /* Let the command be further processed */
+ return 0;
+}
+
+/* Fetch the DIMM health info and populate it in provided package. */
+static int papr_pdsm_health(struct papr_scm_priv *p,
+ union nd_pdsm_payload *payload)
+{
+ int rc;
+
+ /* Ensure the dimm health mutex is taken, preventing concurrent access */
+ rc = mutex_lock_interruptible(&p->health_mutex);
+ if (rc)
+ goto out;
+
+ /* Always fetch up-to-date dimm health data, ignoring cached values */
+ rc = __drc_pmem_query_health(p);
+ if (rc) {
+ mutex_unlock(&p->health_mutex);
+ goto out;
+ }
+
+ /* update health struct with various flags derived from health bitmap */
+ payload->health = (struct nd_papr_pdsm_health) {
+ .extension_flags = 0,
+ .dimm_unarmed = !!(p->health_bitmap & PAPR_PMEM_UNARMED_MASK),
+ .dimm_bad_shutdown = !!(p->health_bitmap & PAPR_PMEM_BAD_SHUTDOWN_MASK),
+ .dimm_bad_restore = !!(p->health_bitmap & PAPR_PMEM_BAD_RESTORE_MASK),
+ .dimm_scrubbed = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
+ .dimm_locked = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
+ .dimm_encrypted = !!(p->health_bitmap & PAPR_PMEM_ENCRYPTED),
+ .dimm_health = PAPR_PDSM_DIMM_HEALTHY,
+ };
+
+ /* Update field dimm_health based on health_bitmap flags */
+ if (p->health_bitmap & PAPR_PMEM_HEALTH_FATAL)
+ payload->health.dimm_health = PAPR_PDSM_DIMM_FATAL;
+ else if (p->health_bitmap & PAPR_PMEM_HEALTH_CRITICAL)
+ payload->health.dimm_health = PAPR_PDSM_DIMM_CRITICAL;
+ else if (p->health_bitmap & PAPR_PMEM_HEALTH_UNHEALTHY)
+ payload->health.dimm_health = PAPR_PDSM_DIMM_UNHEALTHY;
+
+ /* struct populated hence can release the mutex now */
+ mutex_unlock(&p->health_mutex);
+ rc = sizeof(struct nd_papr_pdsm_health);
+
+out:
+ return rc;
+}
+
+/*
+ * 'struct pdsm_cmd_desc'
+ * Identifies a supported PDSM's expected in/out payload sizes and its
+ * service function.
+ *
+ * size_in : Size of the input payload, if any, in the PDSM request.
+ * size_out : Size of the output payload, if any, in the PDSM request.
+ * service : Service function for the PDSM request. Return semantics:
+ * rc < 0 : Error servicing the PDSM; rc indicates the error.
+ * rc >= 0 : Serviced successfully; 'rc' indicates the number of
+ * bytes written to the payload.
+ */
+struct pdsm_cmd_desc {
+ u32 size_in;
+ u32 size_out;
+ int (*service)(struct papr_scm_priv *dimm,
+ union nd_pdsm_payload *payload);
+};
+
+/* Holds all supported PDSMs' command descriptors */
+static const struct pdsm_cmd_desc __pdsm_cmd_descriptors[] = {
+ [PAPR_PDSM_MIN] = {
+ .size_in = 0,
+ .size_out = 0,
+ .service = NULL,
+ },
+ /* New PDSM command descriptors to be added below */
+
+ [PAPR_PDSM_HEALTH] = {
+ .size_in = 0,
+ .size_out = sizeof(struct nd_papr_pdsm_health),
+ .service = papr_pdsm_health,
+ },
+ /* Empty */
+ [PAPR_PDSM_MAX] = {
+ .size_in = 0,
+ .size_out = 0,
+ .service = NULL,
+ },
+};
+
+/* Given a valid pdsm cmd, return its command descriptor, else return NULL */
+static inline const struct pdsm_cmd_desc *pdsm_cmd_desc(enum papr_pdsm cmd)
+{
+ if (cmd >= 0 && cmd < ARRAY_SIZE(__pdsm_cmd_descriptors))
+ return &__pdsm_cmd_descriptors[cmd];
+
+ return NULL;
+}
+
+/*
+ * For a given pdsm request, call the appropriate service function.
+ * Returns any errors encountered while handling the pdsm command package.
+ */
+static int papr_scm_service_pdsm(struct papr_scm_priv *p,
+ struct nd_cmd_pkg *pkg)
+{
+ /* Get the PDSM header and PDSM command */
+ struct nd_pkg_pdsm *pdsm_pkg = (struct nd_pkg_pdsm *)pkg->nd_payload;
+ enum papr_pdsm pdsm = (enum papr_pdsm)pkg->nd_command;
+ const struct pdsm_cmd_desc *pdsc;
+ int rc;
+
+ /* Fetch corresponding pdsm descriptor for validation and servicing */
+ pdsc = pdsm_cmd_desc(pdsm);
+
+ /* Validate pdsm descriptor */
+ /* Ensure that reserved fields are 0 */
+ if (pdsm_pkg->reserved[0] || pdsm_pkg->reserved[1]) {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid reserved field\n",
+ pdsm);
+ return -EINVAL;
+ }
+
+ /* If pdsm expects some input, then ensure that the size_in matches */
+ if (pdsc->size_in &&
+ pkg->nd_size_in != (pdsc->size_in + ND_PDSM_HDR_SIZE)) {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_in=%d\n",
+ pdsm, pkg->nd_size_in);
+ return -EINVAL;
+ }
+
+ /* If pdsm wants to return data, then ensure that size_out matches */
+ if (pdsc->size_out &&
+ pkg->nd_size_out != (pdsc->size_out + ND_PDSM_HDR_SIZE)) {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_out=%d\n",
+ pdsm, pkg->nd_size_out);
+ return -EINVAL;
+ }
+
+ /* Service the pdsm */
+ if (pdsc->service) {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Servicing..\n", pdsm);
+
+ rc = pdsc->service(p, &pdsm_pkg->payload);
+
+ if (rc < 0) {
+ /* error encountered while servicing pdsm */
+ pdsm_pkg->cmd_status = rc;
+ pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
+ } else {
+ /* pdsm serviced and 'rc' bytes written to payload */
+ pdsm_pkg->cmd_status = 0;
+ pkg->nd_fw_size = ND_PDSM_HDR_SIZE + rc;
+ }
+ } else {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Unsupported PDSM request\n",
+ pdsm);
+ pdsm_pkg->cmd_status = -ENOENT;
+ pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
+ }
+
+ return pdsm_pkg->cmd_status;
+}
+
static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc)
{
struct nd_cmd_get_config_size *get_size_hdr;
+ struct nd_cmd_pkg *call_pkg = NULL;
struct papr_scm_priv *p;
+ int rc;
- /* Only dimm-specific calls are supported atm */
- if (!nvdimm)
- return -EINVAL;
+ rc = is_cmd_valid(nvdimm, cmd, buf, buf_len);
+ if (rc) {
+ pr_debug("Invalid cmd=0x%x. Err=%d\n", cmd, rc);
+ return rc;
+ }
+
+ /* Use a local variable in case cmd_rc pointer is NULL */
+ if (!cmd_rc)
+ cmd_rc = &rc;
p = nvdimm_provider_data(nvdimm);
@@ -277,7 +616,13 @@ static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
*cmd_rc = papr_scm_meta_set(p, buf);
break;
+ case ND_CMD_CALL:
+ call_pkg = (struct nd_cmd_pkg *)buf;
+ *cmd_rc = papr_scm_service_pdsm(p, call_pkg);
+ break;
+
default:
+ dev_dbg(&p->pdev->dev, "Unknown command = %d\n", cmd);
return -EINVAL;
}
@@ -286,6 +631,64 @@ static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
return 0;
}
+static ssize_t flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *dimm = to_nvdimm(dev);
+ struct papr_scm_priv *p = nvdimm_provider_data(dimm);
+ struct seq_buf s;
+ u64 health;
+ int rc;
+
+ rc = drc_pmem_query_health(p);
+ if (rc)
+ return rc;
+
+ /* Copy health_bitmap locally, check masks & update out buffer */
+ health = READ_ONCE(p->health_bitmap);
+
+ seq_buf_init(&s, buf, PAGE_SIZE);
+ if (health & PAPR_PMEM_UNARMED_MASK)
+ seq_buf_printf(&s, "not_armed ");
+
+ if (health & PAPR_PMEM_BAD_SHUTDOWN_MASK)
+ seq_buf_printf(&s, "flush_fail ");
+
+ if (health & PAPR_PMEM_BAD_RESTORE_MASK)
+ seq_buf_printf(&s, "restore_fail ");
+
+ if (health & PAPR_PMEM_ENCRYPTED)
+ seq_buf_printf(&s, "encrypted ");
+
+ if (health & PAPR_PMEM_SMART_EVENT_MASK)
+ seq_buf_printf(&s, "smart_notify ");
+
+ if (health & PAPR_PMEM_SCRUBBED_AND_LOCKED)
+ seq_buf_printf(&s, "scrubbed locked ");
+
+ if (seq_buf_used(&s))
+ seq_buf_printf(&s, "\n");
+
+ return seq_buf_used(&s);
+}
+DEVICE_ATTR_RO(flags);
+
+/* papr_scm specific dimm attributes */
+static struct attribute *papr_nd_attributes[] = {
+ &dev_attr_flags.attr,
+ NULL,
+};
+
+static struct attribute_group papr_nd_attribute_group = {
+ .name = "papr",
+ .attrs = papr_nd_attributes,
+};
+
+static const struct attribute_group *papr_nd_attr_groups[] = {
+ &papr_nd_attribute_group,
+ NULL,
+};
+
static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
struct device *dev = &p->pdev->dev;
@@ -312,8 +715,8 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
dimm_flags = 0;
set_bit(NDD_LABELING, &dimm_flags);
- p->nvdimm = nvdimm_create(p->bus, p, NULL, dimm_flags,
- PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
+ p->nvdimm = nvdimm_create(p->bus, p, papr_nd_attr_groups,
+ dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
if (!p->nvdimm) {
dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
goto err;
@@ -399,6 +802,9 @@ static int papr_scm_probe(struct platform_device *pdev)
if (!p)
return -ENOMEM;
+ /* Initialize the dimm mutex */
+ mutex_init(&p->health_mutex);
+
/* optional DT properties */
of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);
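The flags attribute added above also surfaces through sysfs: assuming standard libnvdimm device naming, reading a file such as /sys/bus/nd/devices/nmem0/papr/flags (the exact path is illustrative) returns the space-separated names emitted by flags_show(), e.g. "not_armed flush_fail".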
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 2db8469e475f..27094c872fd6 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -70,6 +70,7 @@
#include <asm/idle.h>
#include <asm/swiotlb.h>
#include <asm/svm.h>
+#include <asm/dtl.h>
#include "pseries.h"
#include "../../../../drivers/pci/pci.h"
diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
index 40c0637203d5..e6d7a344d9f2 100644
--- a/arch/powerpc/platforms/pseries/svm.c
+++ b/arch/powerpc/platforms/pseries/svm.c
@@ -11,6 +11,7 @@
#include <asm/svm.h>
#include <asm/swiotlb.h>
#include <asm/ultravisor.h>
+#include <asm/dtl.h>
static int __init init_svm(void)
{
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 4a8874bc1057..040b9d01c079 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -1066,10 +1066,10 @@ int fsl_pci_mcheck_exception(struct pt_regs *regs)
if (is_in_pci_mem_space(addr)) {
if (user_mode(regs))
- ret = probe_user_read(&inst, (void __user *)regs->nip,
- sizeof(inst));
+ ret = copy_from_user_nofault(&inst,
+ (void __user *)regs->nip, sizeof(inst));
else
- ret = probe_kernel_address((void *)regs->nip, inst);
+ ret = get_kernel_nofault(inst, (void *)regs->nip);
if (!ret && mcheck_handle_load(regs, inst)) {
regs->nip += 4;