/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ASM_ASID_H
#define __ASM_ASM_ASID_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct asid_info {
	atomic64_t	generation;
	unsigned long	*map;
	atomic64_t __percpu	*active;
	u64 __percpu		*reserved;
	u32			bits;
	/* Lock protecting the structure */
	raw_spinlock_t		lock;
	/* Which CPUs require a context flush on the next call */
	cpumask_t		flush_pending;
	/* Number of ASIDs allocated per context (shift value) */
	unsigned int		ctxt_shift;
	/* Callback to locally flush the context. */
	void			(*flush_cpu_ctxt_cb)(void);
};

#define NUM_ASIDS(info)			(1UL << ((info)->bits))
#define NUM_CTXT_ASIDS(info)		(NUM_ASIDS(info) >> (info)->ctxt_shift)

#define active_asid(info, cpu)	*per_cpu_ptr((info)->active, cpu)
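
/*
 * Worked example (hypothetical numbers, not part of the interface): with
 * info->bits == 12 and info->ctxt_shift == 0, i.e. one hardware ASID per
 * context, NUM_ASIDS(info) == 4096 and NUM_CTXT_ASIDS(info) == 4096.
 * With ctxt_shift == 1, two hardware ASIDs are reserved per context and
 * NUM_CTXT_ASIDS(info) drops to 2048.
 */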

void asid_new_context(struct asid_info *info, atomic64_t *pasid,
		      unsigned int cpu, struct mm_struct *mm);

/*
 * Check that the ASID is still valid for the context. If not, generate
 * a new one.
 *
 * @info: Pointer to the ASID allocator information
 * @pasid: Pointer to the current ASID batch
 * @cpu: Current CPU ID. Must have been acquired through get_cpu()
 * @mm: The mm_struct of the context the ASID belongs to
 */
static inline void asid_check_context(struct asid_info *info,
				      atomic64_t *pasid, unsigned int cpu,
				      struct mm_struct *mm)
{
	u64 asid, old_active_asid;

	asid = atomic64_read(pasid);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asid is non-zero and the ASID matches the current
	 * generation, then we update the active_asid entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(&active_asid(info, cpu));
	if (old_active_asid &&
	    !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
	    atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
				     old_active_asid, asid))
		return;

	asid_new_context(info, pasid, cpu, mm);
}
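
/*
 * Example caller (an illustrative sketch, not part of this header): an
 * architecture would typically wrap asid_check_context() in its
 * context-switch hook, along these lines. The names
 * check_and_switch_context, asid_info and mm->context.asid are
 * assumptions and depend on the architecture's mm_context layout.
 *
 *	static struct asid_info asid_info;
 *
 *	void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 *	{
 *		asid_check_context(&asid_info, &mm->context.asid, cpu, mm);
 *	}
 *
 * On the fast path this only performs the relaxed cmpxchg above; the
 * slow path (asid_new_context) takes info->lock and may trigger a local
 * flush via flush_cpu_ctxt_cb.
 */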

int asid_allocator_init(struct asid_info *info,
			u32 bits, unsigned int asid_per_ctxt,
			void (*flush_cpu_ctxt_cb)(void));
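
/*
 * Example initialisation (a sketch; CONFIG_CPU_ASID_BITS and
 * local_tlb_invalid_all() are stand-ins for whatever the architecture
 * provides, and asid_allocator_init() is assumed to return 0 on success
 * and a negative error code on failure). The allocator is set up once at
 * boot with the hardware ASID width, the number of ASIDs consumed per
 * context, and a callback that flushes the local context:
 *
 *	static void asid_flush_cpu_ctxt(void)
 *	{
 *		local_tlb_invalid_all();
 *	}
 *
 *	static int __init asids_init(void)
 *	{
 *		if (asid_allocator_init(&asid_info, CONFIG_CPU_ASID_BITS, 1,
 *					asid_flush_cpu_ctxt))
 *			panic("Failed to initialize ASID allocator\n");
 *		return 0;
 *	}
 *	early_initcall(asids_init);
 */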

#endif /* __ASM_ASM_ASID_H */