Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/boot/startup.c             |  2
-rw-r--r--  arch/s390/boot/vmem.c                |  6
-rw-r--r--  arch/s390/include/asm/ctlreg.h       | 30
-rw-r--r--  arch/s390/include/asm/mmu_context.h  |  6
-rw-r--r--  arch/s390/kernel/ctlreg.c            | 26
-rw-r--r--  arch/s390/kernel/early.c             | 12
-rw-r--r--  arch/s390/kernel/guarded_storage.c   |  6
-rw-r--r--  arch/s390/kernel/ipl.c               |  2
-rw-r--r--  arch/s390/kernel/irq.c               |  4
-rw-r--r--  arch/s390/kernel/kprobes.c           |  6
-rw-r--r--  arch/s390/kernel/machine_kexec.c     |  6
-rw-r--r--  arch/s390/kernel/nmi.c               | 18
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c      |  2
-rw-r--r--  arch/s390/kernel/perf_pai_crypto.c   |  4
-rw-r--r--  arch/s390/kernel/perf_pai_ext.c      |  4
-rw-r--r--  arch/s390/kernel/ptrace.c            | 14
-rw-r--r--  arch/s390/kernel/setup.c             | 14
-rw-r--r--  arch/s390/kernel/smp.c               | 12
-rw-r--r--  arch/s390/kernel/time.c              |  4
-rw-r--r--  arch/s390/kvm/kvm-s390.c             |  6
-rw-r--r--  arch/s390/kvm/priv.c                 |  2
-rw-r--r--  arch/s390/lib/uaccess.c              |  4
-rw-r--r--  arch/s390/mm/pgalloc.c               |  2
-rw-r--r--  arch/s390/mm/vmem.c                  |  2
-rw-r--r--  arch/s390/pci/pci.c                  |  2
25 files changed, 98 insertions(+), 98 deletions(-)
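
The hunks below rename the s390 control register helpers: the __ctl_* primitives that only touch the executing CPU become __local_ctl_load()/__local_ctl_store() and local_ctl_set_bit()/local_ctl_clear_bit(), while ctl_set_bit()/ctl_clear_bit(), which serialize against system_ctlreg_lock() and propagate the change to the lowcore save area and every online CPU, become system_ctl_set_bit()/system_ctl_clear_bit(). For orientation, a minimal usage sketch of the renamed interface, based only on the ctlreg.h and ctlreg.c hunks in this patch; the caller example_enable_features() is hypothetical and not part of the patch:

#include <asm/ctlreg.h>

/* Hypothetical caller, for illustration only. */
static void example_enable_features(void)
{
	/*
	 * Per-CPU update: modifies CR0 of the executing CPU only,
	 * via __local_ctl_store()/__local_ctl_load().
	 */
	local_ctl_set_bit(0, 23);	/* e.g. EDAT1, as in detect_facilities() */

	/*
	 * System-wide update: takes system_ctlreg_lock(), updates the
	 * control register save area in absolute lowcore, and applies
	 * the new value on every online CPU via on_each_cpu().
	 */
	system_ctl_set_bit(14, 24);	/* e.g. enable warning machine checks */
}
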
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index d3e48bd9c394..cf622479647f 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -49,7 +49,7 @@ static void detect_facilities(void)
{
if (test_facility(8)) {
machine.has_edat1 = 1;
- __ctl_set_bit(0, 23);
+ local_ctl_set_bit(0, 23);
}
if (test_facility(78))
machine.has_edat2 = 1;
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 9ac6d00537c1..666fc4e50c1e 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -446,9 +446,9 @@ void setup_vmem(unsigned long asce_limit)
S390_lowcore.kernel_asce = swapper_pg_dir | asce_bits;
S390_lowcore.user_asce = s390_invalid_asce;
- __ctl_load(S390_lowcore.kernel_asce, 1, 1);
- __ctl_load(S390_lowcore.user_asce, 7, 7);
- __ctl_load(S390_lowcore.kernel_asce, 13, 13);
+ __local_ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ __local_ctl_load(S390_lowcore.user_asce, 7, 7);
+ __local_ctl_load(S390_lowcore.kernel_asce, 13, 13);
init_mm.context.asce = S390_lowcore.kernel_asce;
}
diff --git a/arch/s390/include/asm/ctlreg.h b/arch/s390/include/asm/ctlreg.h
index 0b55a1bb3113..09d35ab3d1ce 100644
--- a/arch/s390/include/asm/ctlreg.h
+++ b/arch/s390/include/asm/ctlreg.h
@@ -35,7 +35,7 @@
#include <linux/bug.h>
-#define __ctl_load(array, low, high) do { \
+#define __local_ctl_load(array, low, high) do { \
struct addrtype { \
char _[sizeof(array)]; \
}; \
@@ -53,7 +53,7 @@
: "memory"); \
} while (0)
-#define __ctl_store(array, low, high) do { \
+#define __local_ctl_store(array, low, high) do { \
struct addrtype { \
char _[sizeof(array)]; \
}; \
@@ -69,36 +69,36 @@
: [_low] "i" (low), [_high] "i" (high)); \
} while (0)
-static __always_inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
+static __always_inline void local_ctl_set_bit(unsigned int cr, unsigned int bit)
{
unsigned long reg;
- __ctl_store(reg, cr, cr);
+ __local_ctl_store(reg, cr, cr);
reg |= 1UL << bit;
- __ctl_load(reg, cr, cr);
+ __local_ctl_load(reg, cr, cr);
}
-static __always_inline void __ctl_clear_bit(unsigned int cr, unsigned int bit)
+static __always_inline void local_ctl_clear_bit(unsigned int cr, unsigned int bit)
{
unsigned long reg;
- __ctl_store(reg, cr, cr);
+ __local_ctl_store(reg, cr, cr);
reg &= ~(1UL << bit);
- __ctl_load(reg, cr, cr);
+ __local_ctl_load(reg, cr, cr);
}
-void ctlreg_lock(void);
-void ctlreg_unlock(void);
-void ctl_set_clear_bit(int cr, int bit, bool set);
+void system_ctlreg_lock(void);
+void system_ctlreg_unlock(void);
+void system_ctl_set_clear_bit(unsigned int cr, unsigned int bit, bool set);
-static inline void ctl_set_bit(int cr, int bit)
+static inline void system_ctl_set_bit(unsigned int cr, unsigned int bit)
{
- ctl_set_clear_bit(cr, bit, true);
+ system_ctl_set_clear_bit(cr, bit, true);
}
-static inline void ctl_clear_bit(int cr, int bit)
+static inline void system_ctl_clear_bit(unsigned int cr, unsigned int bit)
{
- ctl_set_clear_bit(cr, bit, false);
+ system_ctl_set_clear_bit(cr, bit, false);
}
union ctlreg0 {
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 50f247eede60..a72865fce0ad 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -81,7 +81,7 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *
S390_lowcore.user_asce = next->context.asce;
cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
/* Clear previous user-ASCE from CR7 */
- __ctl_load(s390_invalid_asce, 7, 7);
+ __local_ctl_load(s390_invalid_asce, 7, 7);
if (prev != next)
cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
@@ -111,7 +111,7 @@ static inline void finish_arch_post_lock_switch(void)
__tlb_flush_mm_lazy(mm);
preempt_enable();
}
- __ctl_load(S390_lowcore.user_asce, 7, 7);
+ __local_ctl_load(S390_lowcore.user_asce, 7, 7);
}
#define activate_mm activate_mm
@@ -120,7 +120,7 @@ static inline void activate_mm(struct mm_struct *prev,
{
switch_mm(prev, next, current);
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
- __ctl_load(S390_lowcore.user_asce, 7, 7);
+ __local_ctl_load(S390_lowcore.user_asce, 7, 7);
}
#include <asm-generic/mmu_context.h>
diff --git a/arch/s390/kernel/ctlreg.c b/arch/s390/kernel/ctlreg.c
index a01b901d4866..795e7f4861fb 100644
--- a/arch/s390/kernel/ctlreg.c
+++ b/arch/s390/kernel/ctlreg.c
@@ -13,18 +13,18 @@
* are kept in the control register save area within absolute lowcore
* at physical address zero.
*/
-static DEFINE_SPINLOCK(ctl_lock);
+static DEFINE_SPINLOCK(system_ctl_lock);
-void ctlreg_lock(void)
- __acquires(&ctl_lock)
+void system_ctlreg_lock(void)
+ __acquires(&system_ctl_lock)
{
- spin_lock(&ctl_lock);
+ spin_lock(&system_ctl_lock);
}
-void ctlreg_unlock(void)
- __releases(&ctl_lock)
+void system_ctlreg_unlock(void)
+ __releases(&system_ctl_lock)
{
- spin_unlock(&ctl_lock);
+ spin_unlock(&system_ctl_lock);
}
struct ctl_bit_parms {
@@ -38,25 +38,25 @@ static void ctl_bit_callback(void *info)
struct ctl_bit_parms *pp = info;
unsigned long regs[16];
- __ctl_store(regs, 0, 15);
+ __local_ctl_store(regs, 0, 15);
regs[pp->cr] &= pp->andval;
regs[pp->cr] |= pp->orval;
- __ctl_load(regs, 0, 15);
+ __local_ctl_load(regs, 0, 15);
}
-void ctl_set_clear_bit(int cr, int bit, bool set)
+void system_ctl_set_clear_bit(unsigned int cr, unsigned int bit, bool set)
{
struct ctl_bit_parms pp = { .cr = cr, };
struct lowcore *abs_lc;
pp.orval = set ? 1UL << bit : 0;
pp.andval = set ? -1UL : ~(1UL << bit);
- ctlreg_lock();
+ system_ctlreg_lock();
abs_lc = get_abs_lowcore();
abs_lc->cregs_save_area[cr] &= pp.andval;
abs_lc->cregs_save_area[cr] |= pp.orval;
put_abs_lowcore(abs_lc);
on_each_cpu(ctl_bit_callback, &pp, 1);
- ctlreg_unlock();
+ system_ctlreg_unlock();
}
-EXPORT_SYMBOL(ctl_set_clear_bit);
+EXPORT_SYMBOL(system_ctl_set_clear_bit);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 3a54733e4fc6..1f359309cce6 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -216,7 +216,7 @@ static __init void detect_machine_facilities(void)
{
if (test_facility(8)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT1;
- __ctl_set_bit(0, 23);
+ local_ctl_set_bit(0, 23);
}
if (test_facility(78))
S390_lowcore.machine_flags |= MACHINE_FLAG_EDAT2;
@@ -224,13 +224,13 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
if (test_facility(50) && test_facility(73)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
- __ctl_set_bit(0, 55);
+ local_ctl_set_bit(0, 55);
}
if (test_facility(51))
S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
if (test_facility(129)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
- __ctl_set_bit(0, 17);
+ local_ctl_set_bit(0, 17);
}
if (test_facility(130))
S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
@@ -240,7 +240,7 @@ static __init void detect_machine_facilities(void)
/* Enabled signed clock comparator comparisons */
S390_lowcore.machine_flags |= MACHINE_FLAG_SCC;
clock_comparator_max = -1ULL >> 1;
- __ctl_set_bit(0, 53);
+ local_ctl_set_bit(0, 53);
}
if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
@@ -260,7 +260,7 @@ static inline void save_vector_registers(void)
static inline void setup_low_address_protection(void)
{
- __ctl_set_bit(0, 28);
+ local_ctl_set_bit(0, 28);
}
static inline void setup_access_registers(void)
@@ -273,7 +273,7 @@ static inline void setup_access_registers(void)
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
- __ctl_clear_bit(0, 17);
+ local_ctl_clear_bit(0, 17);
return 0;
}
early_param("novx", disable_vector_extension);
diff --git a/arch/s390/kernel/guarded_storage.c b/arch/s390/kernel/guarded_storage.c
index d14dd1c2e524..945acfc77ee4 100644
--- a/arch/s390/kernel/guarded_storage.c
+++ b/arch/s390/kernel/guarded_storage.c
@@ -28,7 +28,7 @@ static int gs_enable(void)
return -ENOMEM;
gs_cb->gsd = 25;
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, 4);
load_gs_cb(gs_cb);
current->thread.gs_cb = gs_cb;
preempt_enable();
@@ -42,7 +42,7 @@ static int gs_disable(void)
preempt_disable();
kfree(current->thread.gs_cb);
current->thread.gs_cb = NULL;
- __ctl_clear_bit(2, 4);
+ local_ctl_clear_bit(2, 4);
preempt_enable();
}
return 0;
@@ -84,7 +84,7 @@ void gs_load_bc_cb(struct pt_regs *regs)
if (gs_cb) {
kfree(current->thread.gs_cb);
current->thread.gs_bc_cb = NULL;
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, 4);
load_gs_cb(gs_cb);
current->thread.gs_cb = gs_cb;
}
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 05e51666db03..492dd8f371da 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2381,7 +2381,7 @@ void s390_reset_system(void)
set_prefix(0);
/* Disable lowcore protection */
- __ctl_clear_bit(0, 28);
+ local_ctl_clear_bit(0, 28);
diag_amode31_ops.diag308_reset();
}
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index b020ff17d206..6f71b0ce1068 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -385,7 +385,7 @@ void irq_subclass_register(enum irq_subclass subclass)
{
spin_lock(&irq_subclass_lock);
if (!irq_subclass_refcount[subclass])
- ctl_set_bit(0, subclass);
+ system_ctl_set_bit(0, subclass);
irq_subclass_refcount[subclass]++;
spin_unlock(&irq_subclass_lock);
}
@@ -396,7 +396,7 @@ void irq_subclass_unregister(enum irq_subclass subclass)
spin_lock(&irq_subclass_lock);
irq_subclass_refcount[subclass]--;
if (!irq_subclass_refcount[subclass])
- ctl_clear_bit(0, subclass);
+ system_ctl_clear_bit(0, subclass);
spin_unlock(&irq_subclass_lock);
}
EXPORT_SYMBOL(irq_subclass_unregister);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index d4b863ed0aa7..1b2e1903a54c 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -232,12 +232,12 @@ static void enable_singlestep(struct kprobe_ctlblk *kcb,
per_kprobe.end = ip;
/* Save control regs and psw mask */
- __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
+ __local_ctl_store(kcb->kprobe_saved_ctl, 9, 11);
kcb->kprobe_saved_imask = regs->psw.mask &
(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);
/* Set PER control regs, turns on single step for the given address */
- __ctl_load(per_kprobe, 9, 11);
+ __local_ctl_load(per_kprobe, 9, 11);
regs->psw.mask |= PSW_MASK_PER;
regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
regs->psw.addr = ip;
@@ -249,7 +249,7 @@ static void disable_singlestep(struct kprobe_ctlblk *kcb,
unsigned long ip)
{
/* Restore control regs and psw mask, set new psw address */
- __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
+ __local_ctl_load(kcb->kprobe_saved_ctl, 9, 11);
regs->psw.mask &= ~PSW_MASK_PER;
regs->psw.mask |= kcb->kprobe_saved_imask;
regs->psw.addr = ip;
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index ce65fc01671f..07152e74afb1 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -94,12 +94,12 @@ static noinline void __machine_kdump(void *image)
if (MACHINE_HAS_VX)
save_vx_regs((__vector128 *) mcesa->vector_save_area);
if (MACHINE_HAS_GS) {
- __ctl_store(cr2_old.val, 2, 2);
+ __local_ctl_store(cr2_old.val, 2, 2);
cr2_new = cr2_old;
cr2_new.gse = 1;
- __ctl_load(cr2_new.val, 2, 2);
+ __local_ctl_load(cr2_new.val, 2, 2);
save_gs_cb((struct gs_cb *) mcesa->guarded_storage_save_area);
- __ctl_load(cr2_old.val, 2, 2);
+ __local_ctl_load(cr2_old.val, 2, 2);
}
/*
* To create a good backchain for this CPU in the dump store_status
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index b813d9dfa53c..927bd83ac9c0 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -131,10 +131,10 @@ static notrace void s390_handle_damage(void)
* Disable low address protection and make machine check new PSW a
* disabled wait PSW. Any additional machine check cannot be handled.
*/
- __ctl_store(cr0.val, 0, 0);
+ __local_ctl_store(cr0.val, 0, 0);
cr0_new = cr0;
cr0_new.lap = 0;
- __ctl_load(cr0_new.val, 0, 0);
+ __local_ctl_load(cr0_new.val, 0, 0);
psw_save = S390_lowcore.mcck_new_psw;
psw_bits(S390_lowcore.mcck_new_psw).io = 0;
psw_bits(S390_lowcore.mcck_new_psw).ext = 0;
@@ -146,7 +146,7 @@ static notrace void s390_handle_damage(void)
* values. This makes possible system dump analysis easier.
*/
S390_lowcore.mcck_new_psw = psw_save;
- __ctl_load(cr0.val, 0, 0);
+ __local_ctl_load(cr0.val, 0, 0);
disabled_wait();
while (1);
}
@@ -185,7 +185,7 @@ void s390_handle_mcck(void)
static int mchchk_wng_posted = 0;
/* Use single cpu clear, as we cannot handle smp here. */
- __ctl_clear_bit(14, 24); /* Disable WARNING MCH */
+ local_ctl_clear_bit(14, 24); /* Disable WARNING MCH */
if (xchg(&mchchk_wng_posted, 1) == 0)
kill_cad_pid(SIGPWR, 1);
}
@@ -271,7 +271,7 @@ static int notrace s390_validate_registers(union mci mci)
kill_task = 1;
cr0.val = S390_lowcore.cregs_save_area[0];
cr0.afp = cr0.vx = 1;
- __ctl_load(cr0.val, 0, 0);
+ __local_ctl_load(cr0.val, 0, 0);
asm volatile(
" la 1,%0\n"
" VLM 0,15,0,1\n"
@@ -279,7 +279,7 @@ static int notrace s390_validate_registers(union mci mci)
:
: "Q" (*(struct vx_array *)mcesa->vector_save_area)
: "1");
- __ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
+ __local_ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
}
/* Validate access registers */
asm volatile(
@@ -505,9 +505,9 @@ NOKPROBE_SYMBOL(s390_do_machine_check);
static int __init machine_check_init(void)
{
- ctl_set_bit(14, 25); /* enable external damage MCH */
- ctl_set_bit(14, 27); /* enable system recovery MCH */
- ctl_set_bit(14, 24); /* enable warning MCH */
+ system_ctl_set_bit(14, 25); /* enable external damage MCH */
+ system_ctl_set_bit(14, 27); /* enable system recovery MCH */
+ system_ctl_set_bit(14, 24); /* enable warning MCH */
return 0;
}
early_initcall(machine_check_init);
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 850c11ea631a..4e05c0cb1648 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -1193,7 +1193,7 @@ static int __init cpumf_pmu_init(void)
* Clear bit 15 of cr0 to unauthorize problem-state to
* extract measurement counters
*/
- ctl_clear_bit(0, 48);
+ system_ctl_clear_bit(0, 48);
/* register handler for measurement-alert interruptions */
rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c
index 6280a0e588c5..6612fd13e95b 100644
--- a/arch/s390/kernel/perf_pai_crypto.c
+++ b/arch/s390/kernel/perf_pai_crypto.c
@@ -265,7 +265,7 @@ static int paicrypt_add(struct perf_event *event, int flags)
if (++cpump->active_events == 1) {
ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
WRITE_ONCE(S390_lowcore.ccd, ccd);
- __ctl_set_bit(0, 50);
+ local_ctl_set_bit(0, 50);
}
cpump->event = event;
if (flags & PERF_EF_START && !event->attr.sample_period) {
@@ -294,7 +294,7 @@ static void paicrypt_del(struct perf_event *event, int flags)
/* Only counting needs to read counter */
paicrypt_stop(event, PERF_EF_UPDATE);
if (--cpump->active_events == 0) {
- __ctl_clear_bit(0, 50);
+ local_ctl_clear_bit(0, 50);
WRITE_ONCE(S390_lowcore.ccd, 0);
}
}
diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c
index b8877493dbb6..9f724ae44fc7 100644
--- a/arch/s390/kernel/perf_pai_ext.c
+++ b/arch/s390/kernel/perf_pai_ext.c
@@ -339,7 +339,7 @@ static int paiext_add(struct perf_event *event, int flags)
S390_lowcore.aicd = virt_to_phys(cpump->paiext_cb);
pcb->acc = virt_to_phys(cpump->area) | 0x1;
/* Enable CPU instruction lookup for PAIE1 control block */
- __ctl_set_bit(0, 49);
+ local_ctl_set_bit(0, 49);
debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
__func__, S390_lowcore.aicd, pcb->acc);
}
@@ -375,7 +375,7 @@ static void paiext_del(struct perf_event *event, int flags)
}
if (--cpump->active_events == 0) {
/* Disable CPU instruction lookup for PAIE1 control block */
- __ctl_clear_bit(0, 49);
+ local_ctl_clear_bit(0, 49);
pcb->acc = 0;
S390_lowcore.aicd = 0;
debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n",
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index ea244a73efad..1824c1208852 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -46,8 +46,8 @@ void update_cr_regs(struct task_struct *task)
union ctlreg2 cr2_old, cr2_new;
int cr0_changed, cr2_changed;
- __ctl_store(cr0_old.val, 0, 0);
- __ctl_store(cr2_old.val, 2, 2);
+ __local_ctl_store(cr0_old.val, 0, 0);
+ __local_ctl_store(cr2_old.val, 2, 2);
cr0_new = cr0_old;
cr2_new = cr2_old;
/* Take care of the enable/disable of transactional execution. */
@@ -75,9 +75,9 @@ void update_cr_regs(struct task_struct *task)
cr0_changed = cr0_new.val != cr0_old.val;
cr2_changed = cr2_new.val != cr2_old.val;
if (cr0_changed)
- __ctl_load(cr0_new.val, 0, 0);
+ __local_ctl_load(cr0_new.val, 0, 0);
if (cr2_changed)
- __ctl_load(cr2_new.val, 2, 2);
+ __local_ctl_load(cr2_new.val, 2, 2);
/* Copy user specified PER registers */
new.control = thread->per_user.control;
new.start = thread->per_user.start;
@@ -104,9 +104,9 @@ void update_cr_regs(struct task_struct *task)
return;
}
regs->psw.mask |= PSW_MASK_PER;
- __ctl_store(old, 9, 11);
+ __local_ctl_store(old, 9, 11);
if (memcmp(&new, &old, sizeof(struct per_regs)) != 0)
- __ctl_load(new, 9, 11);
+ __local_ctl_load(new, 9, 11);
}
void user_enable_single_step(struct task_struct *task)
@@ -1107,7 +1107,7 @@ static int s390_gs_cb_set(struct task_struct *target,
target->thread.gs_cb = data;
*target->thread.gs_cb = gs_cb;
if (target == current) {
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, 4);
restore_gs_cb(target->thread.gs_cb);
}
preempt_enable();
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ef158bd0ea7b..c516e2e909e6 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -449,7 +449,7 @@ static void __init setup_lowcore(void)
lc->restart_fn = (unsigned long) do_restart;
lc->restart_data = 0;
lc->restart_source = -1U;
- __ctl_store(lc->cregs_save_area, 0, 15);
+ __local_ctl_store(lc->cregs_save_area, 0, 15);
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
@@ -791,15 +791,15 @@ static void __init setup_cr(void)
__ctl_duct[4] = (unsigned long)__ctl_duald;
/* Update control registers CR2, CR5 and CR15 */
- __ctl_store(cr2.val, 2, 2);
- __ctl_store(cr5.val, 5, 5);
- __ctl_store(cr15.val, 15, 15);
+ __local_ctl_store(cr2.val, 2, 2);
+ __local_ctl_store(cr5.val, 5, 5);
+ __local_ctl_store(cr15.val, 15, 15);
cr2.ducto = (unsigned long)__ctl_duct >> 6;
cr5.pasteo = (unsigned long)__ctl_duct >> 6;
cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
- __ctl_load(cr2.val, 2, 2);
- __ctl_load(cr5.val, 5, 5);
- __ctl_load(cr15.val, 15, 15);
+ __local_ctl_load(cr2.val, 2, 2);
+ __local_ctl_load(cr5.val, 5, 5);
+ __local_ctl_load(cr15.val, 15, 15);
}
/*
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 204c72428696..1c65bbd411c7 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -888,14 +888,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
* Make sure global control register contents do not change
* until new CPU has initialized control registers.
*/
- ctlreg_lock();
+ system_ctlreg_lock();
pcpu_prepare_secondary(pcpu, cpu);
pcpu_attach_task(pcpu, tidle);
pcpu_start_fn(pcpu, smp_start_secondary, NULL);
/* Wait until cpu puts itself in the online & active maps */
while (!cpu_online(cpu))
cpu_relax();
- ctlreg_unlock();
+ system_ctlreg_unlock();
return 0;
}
@@ -922,11 +922,11 @@ int __cpu_disable(void)
/* Disable pseudo page faults on this cpu. */
pfault_fini();
/* Disable interrupt sources via control register. */
- __ctl_store(cregs, 0, 15);
+ __local_ctl_store(cregs, 0, 15);
cregs[0] &= ~0x0000ee70UL; /* disable all external interrupts */
cregs[6] &= ~0xff000000UL; /* disable all I/O interrupts */
cregs[14] &= ~0x1f000000UL; /* disable most machine checks */
- __ctl_load(cregs, 0, 15);
+ __local_ctl_load(cregs, 0, 15);
clear_cpu_flag(CIF_NOHZ_DELAY);
return 0;
}
@@ -968,10 +968,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1201");
- ctl_set_bit(0, 14);
+ system_ctl_set_bit(0, 14);
if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
- ctl_set_bit(0, 13);
+ system_ctl_set_bit(0, 13);
}
void __init smp_prepare_boot_cpu(void)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d34d3548c046..34c4fbf4e555 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -173,10 +173,10 @@ void init_cpu_timer(void)
clockevents_register_device(cd);
/* Enable clock comparator timer interrupt. */
- __ctl_set_bit(0,11);
+ local_ctl_set_bit(0, 11);
/* Always allow the timing alert external interrupt. */
- __ctl_set_bit(0, 4);
+ local_ctl_set_bit(0, 4);
}
static void clock_comparator_interrupt(struct ext_code ext_code,
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index b3f17e014cab..92bd15fe769a 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -4918,7 +4918,7 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
}
if (MACHINE_HAS_GS) {
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, 4);
if (current->thread.gs_cb) {
vcpu->arch.host_gscb = current->thread.gs_cb;
save_gs_cb(vcpu->arch.host_gscb);
@@ -4995,13 +4995,13 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
if (MACHINE_HAS_GS) {
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, 4);
if (vcpu->arch.gs_enabled)
save_gs_cb(current->thread.gs_cb);
current->thread.gs_cb = vcpu->arch.host_gscb;
restore_gs_cb(vcpu->arch.host_gscb);
if (!vcpu->arch.host_gscb)
- __ctl_clear_bit(2, 4);
+ local_ctl_clear_bit(2, 4);
vcpu->arch.host_gscb = NULL;
preempt_enable();
}
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index dc4cfa8795c0..c199e4b5e5ce 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -57,7 +57,7 @@ static int handle_gs(struct kvm_vcpu *vcpu)
if (test_kvm_facility(vcpu->kvm, 133)) {
VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
preempt_disable();
- __ctl_set_bit(2, 4);
+ local_ctl_set_bit(2, 4);
current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
restore_gs_cb(current->thread.gs_cb);
preempt_enable();
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 478ab4c7d197..551de5394be6 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -19,8 +19,8 @@ void debug_user_asce(int exit)
{
unsigned long cr1, cr7;
- __ctl_store(cr1, 1, 1);
- __ctl_store(cr7, 7, 7);
+ __local_ctl_store(cr1, 1, 1);
+ __local_ctl_store(cr7, 7, 7);
if (cr1 == S390_lowcore.kernel_asce && cr7 == S390_lowcore.user_asce)
return;
panic("incorrect ASCE on kernel %s\n"
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 07fc660a24aa..43bef9cce9b2 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -63,7 +63,7 @@ static void __crst_table_upgrade(void *arg)
/* change all active ASCEs to avoid the creation of new TLBs */
if (current->active_mm == mm) {
S390_lowcore.user_asce = mm->context.asce;
- __ctl_load(S390_lowcore.user_asce, 7, 7);
+ __local_ctl_load(S390_lowcore.user_asce, 7, 7);
}
__tlb_flush_local();
}
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 91c1f42f6a76..95f04a502e62 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -660,7 +660,7 @@ void __init vmem_map_init(void)
__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
}
if (MACHINE_HAS_NX)
- ctl_set_bit(0, 20);
+ system_ctl_set_bit(0, 20);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index d34d5813d006..a0aecc94e284 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -1094,7 +1094,7 @@ static int __init pci_base_init(void)
if (MACHINE_HAS_PCI_MIO) {
static_branch_enable(&have_mio);
- ctl_set_bit(2, 5);
+ system_ctl_set_bit(2, 5);
}
rc = zpci_debug_init();