summaryrefslogtreecommitdiff
path: root/arch/loongarch/include/asm/fpu.h
blob: 192f8e35d9126bbb40b29bcd6fb18d20b5b22e40 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_FPU_H
#define _ASM_FPU_H

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>
#include <linux/bitops.h>

#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/current.h>
#include <asm/loongarch.h>
#include <asm/processor.h>
#include <asm/ptrace.h>

struct sigcontext;

extern void kernel_fpu_begin(void);
extern void kernel_fpu_end(void);

extern void _init_fpu(unsigned int);
extern void _save_fp(struct loongarch_fpu *);
extern void _restore_fp(struct loongarch_fpu *);

/*
 * Mask the FCSR Cause bits according to the Enable bits, observing
 * that Unimplemented is always enabled.
 */
/*
 * Mask the FCSR Cause bits according to the Enable bits, observing
 * that Unimplemented is always enabled.
 */
static inline unsigned long mask_fcsr_x(unsigned long fcsr)
{
	/* Bit distance between the Cause and Enable fields in FCSR. */
	const int shift = ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E);
	/* Each Enable bit, moved up to sit under its Cause bit. */
	unsigned long enabled = (fcsr & FPU_CSR_ALL_E) << shift;

	/* Keep only the Cause bits whose exception is enabled. */
	return fcsr & enabled;
}

/* Return 1 if the FP enable bit is currently set in CSR.EUEN, else 0. */
static inline int is_fp_enabled(void)
{
	unsigned int euen = csr_read32(LOONGARCH_CSR_EUEN);

	return (euen & CSR_EUEN_FPEN) != 0;
}

/* Set the FP enable bit in CSR.EUEN for this CPU. */
#define enable_fpu()		set_csr_euen(CSR_EUEN_FPEN)

/* Clear the FP enable bit in CSR.EUEN for this CPU. */
#define disable_fpu()		clear_csr_euen(CSR_EUEN_FPEN)

/* Mark current as no longer owning the live FPU state. */
#define clear_fpu_owner()	clear_thread_flag(TIF_USEDFPU)

/* Does current own the live FPU state (TIF_USEDFPU set)? */
static inline int is_fpu_owner(void)
{
	return test_thread_flag(TIF_USEDFPU);
}

/*
 * Take FPU ownership for current: enable the hardware, set the owner
 * flag, and record FP-enable in the task's saved EUEN image.
 */
static inline void __own_fpu(void)
{
	enable_fpu();
	set_thread_flag(TIF_USEDFPU);
	/* KSTK_EUEN is the per-task saved EUEN — keep it in sync. */
	KSTK_EUEN(current) |= CSR_EUEN_FPEN;
}

/*
 * Claim the FPU for current; caller must have preemption disabled.
 * @restore: non-zero to reload current's saved FP context into the
 * hardware registers after taking ownership.
 */
static inline void own_fpu_inatomic(int restore)
{
	/* Nothing to do without an FPU, or if we already own it. */
	if (!cpu_has_fpu || is_fpu_owner())
		return;

	__own_fpu();
	if (restore)
		_restore_fp(&current->thread.fpu);
}

/* Preemption-safe wrapper around own_fpu_inatomic(). */
static inline void own_fpu(int restore)
{
	preempt_disable();
	own_fpu_inatomic(restore);
	preempt_enable();
}

/*
 * Give up FPU ownership; caller must have preemption disabled.
 * @save: non-zero to spill the live registers into tsk's context
 * before the hardware is disabled.
 */
static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
	if (is_fpu_owner()) {
		/* Save must happen while the FPU is still enabled. */
		if (save)
			_save_fp(&tsk->thread.fpu);
		disable_fpu();
		clear_tsk_thread_flag(tsk, TIF_USEDFPU);
	}
	/* Unconditionally drop FP/LSX/LASX enables from the saved EUEN. */
	KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
}

/* Preemption-safe wrapper around lose_fpu_inatomic() for current. */
static inline void lose_fpu(int save)
{
	preempt_disable();
	lose_fpu_inatomic(save, current);
	preempt_enable();
}

/*
 * First-use FPU setup for current: take ownership, initialize the
 * hardware with the task's FCSR, and mark the task as having used FP.
 */
static inline void init_fpu(void)
{
	/* Snapshot the task's FCSR before touching the hardware. */
	unsigned int fcsr = current->thread.fpu.fcsr;

	__own_fpu();
	_init_fpu(fcsr);
	set_used_math();
}

/* Spill the live FP registers into tsk's context, if an FPU exists. */
static inline void save_fp(struct task_struct *tsk)
{
	if (!cpu_has_fpu)
		return;

	_save_fp(&tsk->thread.fpu);
}

/* Load tsk's saved FP context into the hardware, if an FPU exists. */
static inline void restore_fp(struct task_struct *tsk)
{
	if (!cpu_has_fpu)
		return;

	_restore_fp(&tsk->thread.fpu);
}

/*
 * Return a pointer to tsk's in-memory FP register array.  If tsk is
 * the running task and the live state is in the hardware registers,
 * flush it to memory first so the returned data is current.
 */
static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
{
	if (tsk == current) {
		/*
		 * Disable preemption so the ownership check and the save
		 * cannot be separated by a context switch.
		 */
		preempt_disable();
		if (is_fpu_owner())
			_save_fp(&current->thread.fpu);
		preempt_enable();
	}

	return tsk->thread.fpu.fpr;
}

#endif /* _ASM_FPU_H */