path: root/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gt.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_ct.h"

static struct xe_gt *
guc_to_gt(struct xe_guc *guc)
{
	return container_of(guc, struct xe_gt, uc.guc);
}

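/**
 * xe_gt_tlb_invalidation_init - Initialize GT TLB invalidation state
 * @gt: graphics tile
 *
 * Initialize GT TLB invalidation state, setting the first seqno to 1.
 *
 * Return: 0 on success.
 */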
int xe_gt_tlb_invalidation_init(struct xe_gt *gt)
{
	gt->tlb_invalidation.seqno = 1;

	return 0;
}

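/*
 * Send a full TLB invalidation to the GuC over the CT channel. The CT lock
 * serializes assigning the next seqno (skipping 0 on wraparound) with the
 * send itself, so invalidations are issued in seqno order. Returns the
 * assigned seqno on success or a negative error code on failure.
 */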
static int send_tlb_invalidation(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 action[] = {
		XE_GUC_ACTION_TLB_INVALIDATION,
		0,
		XE_GUC_TLB_INVAL_FULL << XE_GUC_TLB_INVAL_TYPE_SHIFT |
		XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT |
		XE_GUC_TLB_INVAL_FLUSH_CACHE,
	};
	int seqno;
	int ret;

	/*
	 * XXX: The seqno algorithm relies on TLB invalidations being processed
	 * in the order they are sent, which they currently are. If that ever
	 * changes, this algorithm will need to be updated.
	 */
	mutex_lock(&guc->ct.lock);
	seqno = gt->tlb_invalidation.seqno;
	action[1] = seqno;
	gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
		TLB_INVALIDATION_SEQNO_MAX;
	if (!gt->tlb_invalidation.seqno)
		gt->tlb_invalidation.seqno = 1;
	ret = xe_guc_ct_send_locked(&guc->ct, action, ARRAY_SIZE(action),
				    G2H_LEN_DW_TLB_INVALIDATE, 1);
	if (!ret)
		ret = seqno;
	mutex_unlock(&guc->ct.lock);

	return ret;
}

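/**
 * xe_gt_tlb_invalidation - Issue a full GT TLB invalidation
 * @gt: graphics tile
 *
 * Issue a full TLB invalidation for the GT via the GuC.
 *
 * Return: Seqno which can be passed to xe_gt_tlb_invalidation_wait() on
 * success, negative error code on error.
 */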
int xe_gt_tlb_invalidation(struct xe_gt *gt)
{
	return send_tlb_invalidation(&gt->uc.guc);
}

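/*
 * A requested seqno is considered complete either when the received seqno has
 * caught up with it, or when the requested seqno is more than half the seqno
 * space ahead of the received one, which means the received seqno has already
 * wrapped past it.
 */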
static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
{
	if (gt->tlb_invalidation.seqno_recv >= seqno)
		return true;

	if (seqno - gt->tlb_invalidation.seqno_recv >
	    (TLB_INVALIDATION_SEQNO_MAX / 2))
		return true;

	return false;
}

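/**
 * xe_gt_tlb_invalidation_wait - Wait for a TLB invalidation to complete
 * @gt: graphics tile
 * @seqno: seqno returned by xe_gt_tlb_invalidation()
 *
 * Wait for the GuC to acknowledge the TLB invalidation identified by @seqno,
 * giving up after 1/5th of a second and logging an error on timeout.
 *
 * Return: 0 on success, -ETIME on TLB invalidation timeout
 */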
int xe_gt_tlb_invalidation_wait(struct xe_gt *gt, int seqno)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_guc *guc = &gt->uc.guc;
	int ret;

	/*
	 * XXX: See above, this algorithm only works if seqnos are always
	 * processed in order.
	 */
	ret = wait_event_timeout(guc->ct.wq,
				 tlb_invalidation_seqno_past(gt, seqno),
				 HZ / 5);
	if (!ret) {
		drm_err(&xe->drm, "TLB invalidation timed out, seqno=%d, recv=%d\n",
			seqno, gt->tlb_invalidation.seqno_recv);
		return -ETIME;
	}

	return 0;
}

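/**
 * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
 * @guc: guc
 * @msg: message indicating TLB invalidation done
 * @len: length of message
 *
 * Record the seqno from a TLB invalidation done G2H message as the last
 * received seqno and wake up any waiters.
 *
 * Return: 0 on success, -EPROTO for malformed messages.
 */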
int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int expected_seqno;

	if (unlikely(len != 1))
		return -EPROTO;

	/* Sanity check on seqno */
	expected_seqno = (gt->tlb_invalidation.seqno_recv + 1) %
		TLB_INVALIDATION_SEQNO_MAX;
	XE_WARN_ON(expected_seqno != msg[0]);

	gt->tlb_invalidation.seqno_recv = msg[0];
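	/* Make the seqno_recv update visible before waking any waiters */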
	smp_wmb();
	wake_up_all(&guc->ct.wq);

	return 0;
}
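
/*
 * Example usage (a minimal sketch; the surrounding caller context and error
 * handling below are assumptions, not part of this file):
 *
 *	int seqno = xe_gt_tlb_invalidation(gt);
 *	int err = 0;
 *
 *	if (seqno < 0)
 *		err = seqno;
 *	else
 *		err = xe_gt_tlb_invalidation_wait(gt, seqno);
 *
 * i.e. issue the invalidation, then block on the returned seqno until the
 * GuC acknowledges it (or the wait times out with -ETIME).
 */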