author    Maxime Ripard <maxime@cerno.tech>    2020-02-17 10:34:34 +0100
committer Maxime Ripard <maxime@cerno.tech>    2020-02-17 10:34:34 +0100
commit    28f2aff1caa4997f58ca31179cad1b4a84a62827 (patch)
tree      69fb4b0a752f3660ce022a4313f8c7b276bbcceb /include/drm
parent    3e8a3844fefbaad911c596f02dd48c39188ffa81 (diff)
parent    11a48a5a18c63fd7621bb050228cebf13566e4d8 (diff)
Merge v5.6-rc2 into drm-misc-next
Lyude needs some patches in 5.6-rc2 and we didn't bring drm-misc-next forward yet, so it looks like a good occasion.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'include/drm')
-rw-r--r--  include/drm/drm_dp_helper.h      |   8
-rw-r--r--  include/drm/drm_dp_mst_helper.h  |  26
-rw-r--r--  include/drm/drm_fourcc.h         |   8
-rw-r--r--  include/drm/drm_of.h             |   5
-rw-r--r--  include/drm/gpu_scheduler.h      |  23
-rw-r--r--  include/drm/task_barrier.h       | 107
6 files changed, 158 insertions(+), 19 deletions(-)
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 957a3d00ee05..b8f6bac6341c 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1470,6 +1470,7 @@ int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4], struct drm_dp_aux *aux);
+void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
void drm_dp_aux_init(struct drm_dp_aux *aux);
int drm_dp_aux_register(struct drm_dp_aux *aux);
void drm_dp_aux_unregister(struct drm_dp_aux *aux);
@@ -1527,6 +1528,13 @@ enum drm_dp_quirk {
* The driver should ignore SINK_COUNT during detection.
*/
DP_DPCD_QUIRK_NO_SINK_COUNT,
+ /**
+ * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD:
+ *
+ * The device supports MST DSC despite not supporting Virtual DPCD.
+ * The DSC caps can be read from the physical aux instead.
+ */
+ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
};
/**
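
A minimal sketch of how a driver might consume the new quirk, assuming drm_dp_has_quirk() keeps its current two-argument form and that desc was read earlier with drm_dp_read_desc(); the fallback mirrors what the MST DSC helpers below do:

	/* DSC caps normally live behind Virtual DPCD; with this quirk,
	 * read them from the physical AUX of the topology instead.
	 */
	struct drm_dp_aux *aux = &port->aux;

	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD))
		aux = port->mgr->aux;
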
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 7b9453b8b5c4..5483f888712a 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -156,6 +156,8 @@ struct drm_dp_mst_port {
* audio-capable.
*/
bool has_audio;
+
+ bool fec_capable;
};
/**
@@ -383,6 +385,7 @@ struct drm_dp_port_number_req {
struct drm_dp_enum_path_resources_ack_reply {
u8 port_number;
+ bool fec_capable;
u16 full_payload_bw_number;
u16 avail_payload_bw_number;
};
@@ -499,6 +502,8 @@ struct drm_dp_payload {
struct drm_dp_vcpi_allocation {
struct drm_dp_mst_port *port;
int vcpi;
+ int pbn;
+ bool dsc_enabled;
struct list_head next;
};
@@ -613,6 +618,12 @@ struct drm_dp_mst_topology_mgr {
* &drm_dp_sideband_msg_tx.state once they are queued
*/
struct mutex qlock;
+
+ /**
+ * @is_waiting_for_dwn_reply: indicates whether we are waiting for a down reply
+ */
+ bool is_waiting_for_dwn_reply;
+
/**
* @tx_msg_downq: List of pending down replies.
*/
@@ -729,8 +740,7 @@ bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-int drm_dp_calc_pbn_mode(int clock, int bpp);
-
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, int pbn, int slots);
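
The new bool changes how bpp is interpreted: DSC compressed rates are fractional, so the helper expects bpp in 1/16-bpp units when dsc is true. A hedged usage sketch (mode and dsc_bpp_x16 are placeholder driver state):

	/* Uncompressed stream: bpp given in whole bits per pixel */
	pbn = drm_dp_calc_pbn_mode(mode->clock, 24, false);

	/* DSC stream: bpp given in 1/16 bpp units, e.g. 8 bpp -> 128 */
	pbn = drm_dp_calc_pbn_mode(mode->clock, dsc_bpp_x16, true);
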
@@ -779,7 +789,15 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
int __must_check
drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn);
+ struct drm_dp_mst_port *port, int pbn,
+ int pbn_div);
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+ struct drm_dp_mst_port *port,
+ int pbn, int pbn_div,
+ bool enable);
+int __must_check
+drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
@@ -791,6 +809,8 @@ int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
+struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
+
extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
/**
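
Taken together, the new atomic helpers suggest the following check-time flow for a DSC-enabled MST stream. This is a sketch under assumed driver state, not a definitive sequence; example_mst_dsc_check and its parameters are hypothetical, with pbn already computed for the compressed payload:

	static int example_mst_dsc_check(struct drm_atomic_state *state,
					 struct drm_dp_mst_topology_mgr *mgr,
					 struct drm_dp_mst_port *port,
					 int pbn, int pbn_div)
	{
		int slots, ret;

		/* Pull in CRTCs whose DSC state may change so they see a modeset */
		ret = drm_dp_mst_add_affected_dsc_crtcs(state, mgr);
		if (ret < 0)
			return ret;

		/* Mark this port's stream as DSC-compressed in the MST state */
		ret = drm_dp_mst_atomic_enable_dsc(state, port, pbn, pbn_div, true);
		if (ret < 0)
			return ret;

		/* Reserve VCPI slots for the compressed PBN */
		slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn,
						      pbn_div);
		if (slots < 0)
			return slots;

		/* Validate bandwidth across the whole topology */
		return drm_dp_mst_atomic_check(state);
	}
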
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 306d1efeb5e0..156b122c0ad5 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -78,7 +78,7 @@ struct drm_format_info {
* triplet @char_per_block, @block_w, @block_h for better
* describing the pixel format.
*/
- u8 cpp[3];
+ u8 cpp[4];
/**
* @char_per_block:
@@ -104,7 +104,7 @@ struct drm_format_info {
* information from their drm_mode_config.get_format_info hook
* if they want the core to be validating the pitch.
*/
- u8 char_per_block[3];
+ u8 char_per_block[4];
};
/**
@@ -113,7 +113,7 @@ struct drm_format_info {
* Block width in pixels; intended to be accessed through
* drm_format_info_block_width()
*/
- u8 block_w[3];
+ u8 block_w[4];
/**
* @block_h:
@@ -121,7 +121,7 @@ struct drm_format_info {
* Block height in pixels; intended to be accessed through
* drm_format_info_block_height()
*/
- u8 block_h[3];
+ u8 block_h[4];
/** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
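
All four per-plane arrays grow from three to four entries so that formats with a fourth plane can be described. The block-based fields combine as before; a sketch of the minimum-pitch arithmetic for plane p, using the accessor named in the comments above:

	/* Minimum pitch in bytes for plane p at the given width (sketch) */
	u64 pitch = DIV_ROUND_UP((u64)width * info->char_per_block[p],
				 drm_format_info_block_width(info, p));
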
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 8ec7ca6d2369..b9b093add92e 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -92,8 +92,9 @@ static inline int drm_of_find_panel_or_bridge(const struct device_node *np,
return -EINVAL;
}
-int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
- const struct device_node *port2)
+static inline int
+drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
+ const struct device_node *port2)
{
return -EINVAL;
}
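
Note: without static inline, every object file including drm_of.h with the OF helpers compiled out would emit its own global definition of this stub, failing the link with multiple-definition errors; the added keywords bring it in line with the other stubs in this header.
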
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 684692a8ed76..589be851f8a1 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -52,9 +52,10 @@ enum drm_sched_priority {
* @list: used to append this struct to the list of entities in the
* runqueue.
* @rq: runqueue on which this entity is currently scheduled.
- * @rq_list: a list of run queues on which jobs from this entity can
- * be scheduled
- * @num_rq_list: number of run queues in the rq_list
+ * @sched_list: A list of schedulers (drm_gpu_schedulers).
+ * Jobs from this entity can be scheduled on any scheduler
+ * on this list.
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
* @rq_lock: lock to modify the runqueue to which this entity belongs.
* @job_queue: the list of jobs of this entity.
* @fence_seq: a linearly increasing seqno incremented with each
@@ -81,8 +82,9 @@ enum drm_sched_priority {
struct drm_sched_entity {
struct list_head list;
struct drm_sched_rq *rq;
- struct drm_sched_rq **rq_list;
- unsigned int num_rq_list;
+ struct drm_gpu_scheduler **sched_list;
+ unsigned int num_sched_list;
+ enum drm_sched_priority priority;
spinlock_t rq_lock;
struct spsc_queue job_queue;
@@ -260,7 +262,7 @@ struct drm_sched_backend_ops {
* @job_list_lock: lock to protect the ring_mirror_list.
* @hang_limit: once the hangs by a job cross this limit, the job is marked
* guilty and is no longer considered for scheduling.
- * @num_jobs: the number of jobs in queue in the scheduler
+ * @score: score to help the load balancer pick an idle scheduler
* @ready: marks if the underlying HW is ready to work
* @free_guilty: A hint to the timeout handler to free the guilty job.
*
@@ -281,8 +283,8 @@ struct drm_gpu_scheduler {
struct list_head ring_mirror_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t num_jobs;
- bool ready;
+ atomic_t score;
+ bool ready;
bool free_guilty;
};
@@ -312,8 +314,9 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity);
int drm_sched_entity_init(struct drm_sched_entity *entity,
- struct drm_sched_rq **rq_list,
- unsigned int num_rq_list,
+ enum drm_sched_priority priority,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list,
atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
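
Call sites change shape: the entity is now bound to a list of schedulers with an explicit priority instead of pre-resolved run queues. A migration sketch, assuming a driver-side ring embedding a drm_gpu_scheduler (ring and entity are illustrative):

	int r;

	/* Before: priority implied by which run queue was picked */
	struct drm_sched_rq *rq =
		&ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&entity, &rq, 1, NULL);

	/* After: scheduler list and priority passed separately */
	struct drm_gpu_scheduler *sched = &ring->sched;
	r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
				  &sched, 1, NULL);
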
diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h
new file mode 100644
index 000000000000..087e3f649c52
--- /dev/null
+++ b/include/drm/task_barrier.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/semaphore.h>
+#include <linux/atomic.h>
+
+/*
+ * Reusable two-phase task barrier (rendezvous point) implementation for N tasks.
+ * Based on The Little Book of Semaphores - https://greenteapress.com/wp/semaphores/
+ */
+
+
+
+#ifndef DRM_TASK_BARRIER_H_
+#define DRM_TASK_BARRIER_H_
+
+/*
+ * Represents an instance of a task barrier.
+ */
+struct task_barrier {
+ unsigned int n;
+ atomic_t count;
+ struct semaphore enter_turnstile;
+ struct semaphore exit_turnstile;
+};
+
+static inline void task_barrier_signal_turnstile(struct semaphore *turnstile,
+ unsigned int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++)
+ up(turnstile);
+}
+
+static inline void task_barrier_init(struct task_barrier *tb)
+{
+ tb->n = 0;
+ atomic_set(&tb->count, 0);
+ sema_init(&tb->enter_turnstile, 0);
+ sema_init(&tb->exit_turnstile, 0);
+}
+
+static inline void task_barrier_add_task(struct task_barrier *tb)
+{
+ tb->n++;
+}
+
+static inline void task_barrier_rem_task(struct task_barrier *tb)
+{
+ tb->n--;
+}
+
+/*
+ * Lines up all the threads BEFORE the critical point.
+ *
+ * Once all threads have passed this point, the entry barrier is back in its locked state.
+ */
+static inline void task_barrier_enter(struct task_barrier *tb)
+{
+ if (atomic_inc_return(&tb->count) == tb->n)
+ task_barrier_signal_turnstile(&tb->enter_turnstile, tb->n);
+
+ down(&tb->enter_turnstile);
+}
+
+/*
+ * Lines up all the threads AFTER the critical point.
+ *
+ * This function is used to prevent any one thread from running ahead when the
+ * barrier is used repeatedly.
+ */
+static inline void task_barrier_exit(struct task_barrier *tb)
+{
+ if (atomic_dec_return(&tb->count) == 0)
+ task_barrier_signal_turnstile(&tb->exit_turnstile, tb->n);
+
+ down(&tb->exit_turnstile);
+}
+
+/* Convenience function for when nothing needs to be done between entry and exit */
+static inline void task_barrier_full(struct task_barrier *tb)
+{
+ task_barrier_enter(tb);
+ task_barrier_exit(tb);
+}
+
+#endif
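
A minimal usage sketch (illustrative, not part of the patch): setup registers the N participating tasks once, then each worker brackets its critical section with the barrier so no task enters the next round before all have left the previous one.

	static struct task_barrier tb;

	static void setup(unsigned int n_workers)
	{
		unsigned int i;

		task_barrier_init(&tb);
		for (i = 0; i < n_workers; i++)
			task_barrier_add_task(&tb);
	}

	static void worker(void)
	{
		task_barrier_enter(&tb);	/* wait until all N arrive */
		/* ... lockstep critical work ... */
		task_barrier_exit(&tb);		/* wait until all N finish */
	}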