drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
// SPDX-License-Identifier: MIT
/*
 * Copyright(c) 2020, Intel Corporation. All rights reserved.
 */

#include "intel_pxp.h"
#include "intel_pxp_cmd.h"
#include "intel_pxp_session.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"

#include "i915_trace.h"

/* stall until prior PXP and MFX/HCP/HUC objects are completed */
#define MFX_WAIT_PXP (MFX_WAIT | \
		      MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
		      MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG)

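/*
 * Emit the dword sequence that selects a PXP session: stall on outstanding
 * PXP/MFX work, flush with protected mode disabled, program the session id
 * via MI_SET_APPID, then flush with protected mode re-enabled and stall
 * again. Must emit exactly SELECTION_LEN dwords.
 */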
static u32 *pxp_emit_session_selection(u32 *cs, u32 idx)
{
	*cs++ = MFX_WAIT_PXP;

	/* pxp off */
	*cs++ = MI_FLUSH_DW;
	*cs++ = 0;
	*cs++ = 0;

	/* select session */
	*cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx);

	*cs++ = MFX_WAIT_PXP;

	/* pxp on */
	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN |
		MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;

	*cs++ = MFX_WAIT_PXP;

	return cs;
}

static u32 *pxp_emit_inline_termination(u32 *cs)
{
	/* session inline termination */
	*cs++ = CRYPTO_KEY_EXCHANGE;
	*cs++ = 0;

	return cs;
}

static u32 *pxp_emit_session_termination(u32 *cs, u32 idx)
{
	cs = pxp_emit_session_selection(cs, idx);
	cs = pxp_emit_inline_termination(cs);

	return cs;
}

static u32 *pxp_emit_wait(u32 *cs)
{
	/* wait for cmds to go through */
	*cs++ = MFX_WAIT_PXP;
	*cs++ = 0;

	return cs;
}

/*
 * if we ever need to terminate more than one session, we can submit multiple
 * selections and terminations back-to-back with a single wait at the end
 */
#define SELECTION_LEN 10	/* dwords emitted by pxp_emit_session_selection() */
#define TERMINATION_LEN 2	/* dwords emitted by pxp_emit_inline_termination() */
#define SESSION_TERMINATION_LEN(x) ((SELECTION_LEN + TERMINATION_LEN) * (x))
#define WAIT_LEN 2		/* dwords emitted by pxp_emit_wait() */

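/*
 * Commit and queue the request at maximum priority so the PXP commands are
 * not held up behind other submissions, then release the timeline lock that
 * i915_request_create() acquired.
 */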
static void pxp_request_commit(struct i915_request *rq)
{
	struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
	struct intel_timeline * const tl = i915_request_timeline(rq);

	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);
	__i915_request_commit(rq);
	__i915_request_queue(rq, &attr);

	mutex_unlock(&tl->mutex);
}

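/*
 * intel_pxp_terminate_session - emit an inline termination for a PXP session
 * @pxp: the PXP state
 * @id: hardware id of the session to terminate
 *
 * Builds a request on the PXP context that selects the session and terminates
 * it inline, then waits up to 200ms (HZ / 5) for completion. Does nothing and
 * returns 0 if PXP is not enabled. Returns 0 on success, -ETIME on timeout or
 * a negative error code if the request could not be built.
 */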
int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 id)
{
	struct i915_request *rq;
	struct intel_context *ce = pxp->ce;
	u32 *cs;
	int err = 0;

	if (!intel_pxp_is_enabled(pxp))
		return 0;

	rq = i915_request_create(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (ce->engine->emit_init_breadcrumb) {
		err = ce->engine->emit_init_breadcrumb(rq);
		if (err)
			goto out_rq;
	}

	cs = intel_ring_begin(rq, SESSION_TERMINATION_LEN(1) + WAIT_LEN);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto out_rq;
	}

	cs = pxp_emit_session_termination(cs, id);
	cs = pxp_emit_wait(cs);

	intel_ring_advance(rq, cs);

out_rq:
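	/*
	 * The request is committed even on error so that it is still retired;
	 * the error is recorded on the request rather than dropped.
	 */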
	i915_request_get(rq);

	if (unlikely(err))
		i915_request_set_error_once(rq, err);

	pxp_request_commit(rq);

	if (!err && i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;

	i915_request_put(rq);

	return err;
}