author    Dave Airlie <airlied@redhat.com>  2023-09-29 08:27:00 +1000
committer Dave Airlie <airlied@redhat.com>  2023-09-29 08:27:15 +1000
commit    79fb229b8810071648b65c37382aea7819a5f935 (patch)
tree      bf201cd7732ad48ad98a963344cb535b95653804 /include
parent    f107ff76a8c242b298413ef52db9978dc3fe0153 (diff)
parent    78f54469b871db5ba8ea49abd4e5994e97bd525b (diff)
Merge tag 'drm-misc-next-2023-09-27' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for v6.7-rc1:

UAPI Changes:
- drm_file owner is now updated during use, in the case of a drm fd opened by
  the display server for a client, the correct owner is displayed.
- Qaic gains support for the QAIC_DETACH_SLICE_BO ioctl to allow bo recycling.

Cross-subsystem Changes:
- Disable boot logo for au1200fb, mmpfb and unexport logo helpers. Only fbcon
  should manage display of logo.
- Update freescale in MAINTAINERS.
- Add some bridge files to bridge in MAINTAINERS.
- Update gma500 driver repo in MAINTAINERS to point to drm-misc.

Core Changes:
- Move size computations to drm buddy allocator.
- Make drm_atomic_helper_shutdown(NULL) a nop.
- Assorted small fixes in drm_debugfs, DP-MST payload addition error handling.
- Fix DRM_BRIDGE_ATTACH_NO_CONNECTOR handling.
- Handle bad (h/v)sync_end in EDID by clipping to htotal.
- Build GPUVM as a module.

Driver Changes:
- Simple drivers don't need to cache prepared result.
- Call drm_atomic_helper_shutdown() in shutdown/unbind for a whole lot more
  drm drivers.
- Assorted small fixes in amdgpu, ssd130x, bridge/it6621, accel/qaic, nouveau,
  tc358768.
- Add NV12 for komeda writeback.
- Add arbitration lost event to synopsis/dw-hdmi-cec.
- Speed up s/r in nouveau by not restoring some big bo's.
- Assorted nouveau display rework in preparation for GSP-RM, especially
  related to how the modeset sequence works and the DP sequence in relation
  to link training.
- Update anx7816 panel.
- Support NVSYNC and NHSYNC in tegra.
- Allow multiple power domains in simple driver.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/f1fae5eb-25b8-192a-9a53-215e1184ce81@linux.intel.com
Diffstat (limited to 'include')
-rw-r--r--  include/drm/drm_buddy.h                                            |   6
-rw-r--r--  include/drm/drm_debugfs.h                                          |   9
-rw-r--r--  include/drm/drm_file.h                                             |  13
-rw-r--r--  include/drm/drm_gpuvm.h (renamed from include/drm/drm_gpuva_mgr.h) | 155
-rw-r--r--  include/linux/fb.h                                                 |   5
-rw-r--r--  include/uapi/drm/qaic_accel.h                                      |  24
6 files changed, 114 insertions, 98 deletions
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 572077ff8ae7..a5b39fc01003 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -22,8 +22,9 @@
start__ >= max__ || size__ > max__ - start__; \
})
-#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
-#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
+#define DRM_BUDDY_RANGE_ALLOCATION BIT(0)
+#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1)
+#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2)
struct drm_buddy_block {
#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
@@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
void drm_buddy_block_print(struct drm_buddy *mm,
struct drm_buddy_block *block,
struct drm_printer *p);
-
#endif
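
As a hedged illustration of the new flag, a driver could combine it with the existing range flag when allocating. Note: the drm_buddy_alloc_blocks() call and its parameter order are an assumption taken from the rest of drm_buddy.h, not from this hunk; mm, start, end, size and min_block_size are placeholders.

	/* Illustrative only: request physically contiguous blocks in [start, end). */
	LIST_HEAD(blocks);
	unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION |
			      DRM_BUDDY_CONTIGUOUS_ALLOCATION;
	int err;

	err = drm_buddy_alloc_blocks(mm, start, end, size, min_block_size,
				     &blocks, flags);	/* assumed signature */
	if (err)
		return err;
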
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index 7213ce15e4ff..cf06cee4343f 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -35,7 +35,7 @@
#include <linux/types.h>
#include <linux/seq_file.h>
-#include <drm/drm_gpuva_mgr.h>
+#include <drm/drm_gpuvm.h>
/**
* DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space
@@ -152,7 +152,7 @@ void drm_debugfs_add_files(struct drm_device *dev,
const struct drm_debugfs_info *files, int count);
int drm_debugfs_gpuva_info(struct seq_file *m,
- struct drm_gpuva_manager *mgr);
+ struct drm_gpuvm *gpuvm);
#else
static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
@@ -160,7 +160,8 @@ static inline void drm_debugfs_create_files(const struct drm_info_list *files,
{}
static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
- int count, struct drm_minor *minor)
+ int count, struct dentry *root,
+ struct drm_minor *minor)
{
return 0;
}
@@ -176,7 +177,7 @@ static inline void drm_debugfs_add_files(struct drm_device *dev,
{}
static inline int drm_debugfs_gpuva_info(struct seq_file *m,
- struct drm_gpuva_manager *mgr)
+ struct drm_gpuvm *gpuvm)
{
return 0;
}
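
For reference, a sketch of how a driver's debugfs callback might use the renamed helper. The wrapper function and the use of m->private to recover the &drm_gpuvm are illustrative assumptions; only drm_debugfs_gpuva_info() itself comes from this header.

/* Hypothetical debugfs show callback dumping one GPU VA space. */
static int example_gpuvas_show(struct seq_file *m, void *data)
{
	struct drm_gpuvm *gpuvm = m->private;	/* assumption: stashed by the driver */

	return drm_debugfs_gpuva_info(m, gpuvm);
}
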
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 489cc3758a82..e1b5b4282f75 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -254,8 +254,15 @@ struct drm_file {
/** @master_lookup_lock: Serializes @master. */
spinlock_t master_lookup_lock;
- /** @pid: Process that opened this file. */
- struct pid *pid;
+ /**
+ * @pid: Process that is using this file.
+ *
+ * Must only be dereferenced under a rcu_read_lock or equivalent.
+ *
+ * Updates are guarded with dev->filelist_mutex and reference must be
+ * dropped after a RCU grace period to accommodate lockless readers.
+ */
+ struct pid __rcu *pid;
/** @client_id: A unique id for fdinfo */
u64 client_id;
@@ -418,6 +425,8 @@ static inline bool drm_is_accel_client(const struct drm_file *file_priv)
return file_priv->minor->type == DRM_MINOR_ACCEL;
}
+void drm_file_update_pid(struct drm_file *);
+
int drm_open(struct inode *inode, struct file *filp);
int drm_open_helper(struct file *filp, struct drm_minor *minor);
ssize_t drm_read(struct file *filp, char __user *buffer,
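
Since @pid is now __rcu-annotated and may be updated while the fd is open, readers have to follow the locking rules from the kdoc above. A minimal reader sketch; the helper name is purely illustrative:

/* Hypothetical reader: only dereference file_priv->pid under rcu_read_lock(). */
static pid_t example_drm_file_owner(struct drm_file *file_priv)
{
	struct pid *pid;
	pid_t nr;

	rcu_read_lock();
	pid = rcu_dereference(file_priv->pid);
	nr = pid_nr(pid);	/* or pid_vnr() for the caller's pid namespace */
	rcu_read_unlock();

	return nr;
}
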
diff --git a/include/drm/drm_gpuva_mgr.h b/include/drm/drm_gpuvm.h
index ed8d50200cc3..c7ed6bf441d4 100644
--- a/include/drm/drm_gpuva_mgr.h
+++ b/include/drm/drm_gpuvm.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef __DRM_GPUVA_MGR_H__
-#define __DRM_GPUVA_MGR_H__
+#ifndef __DRM_GPUVM_H__
+#define __DRM_GPUVM_H__
/*
* Copyright (c) 2022 Red Hat.
@@ -31,8 +31,8 @@
#include <drm/drm_gem.h>
-struct drm_gpuva_manager;
-struct drm_gpuva_fn_ops;
+struct drm_gpuvm;
+struct drm_gpuvm_ops;
/**
* enum drm_gpuva_flags - flags for struct drm_gpuva
@@ -62,15 +62,15 @@ enum drm_gpuva_flags {
* struct drm_gpuva - structure to track a GPU VA mapping
*
* This structure represents a GPU VA mapping and is associated with a
- * &drm_gpuva_manager.
+ * &drm_gpuvm.
*
* Typically, this structure is embedded in bigger driver structures.
*/
struct drm_gpuva {
/**
- * @mgr: the &drm_gpuva_manager this object is associated with
+ * @vm: the &drm_gpuvm this object is associated with
*/
- struct drm_gpuva_manager *mgr;
+ struct drm_gpuvm *vm;
/**
* @flags: the &drm_gpuva_flags for this mapping
@@ -137,20 +137,18 @@ struct drm_gpuva {
} rb;
};
-int drm_gpuva_insert(struct drm_gpuva_manager *mgr, struct drm_gpuva *va);
+int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);
void drm_gpuva_link(struct drm_gpuva *va);
void drm_gpuva_unlink(struct drm_gpuva *va);
-struct drm_gpuva *drm_gpuva_find(struct drm_gpuva_manager *mgr,
+struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
-struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
+struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
-struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start);
-struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end);
-
-bool drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
+struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);
static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
struct drm_gem_object *obj, u64 offset)
@@ -186,7 +184,7 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
}
/**
- * struct drm_gpuva_manager - DRM GPU VA Manager
+ * struct drm_gpuvm - DRM GPU VA Manager
*
* The DRM GPU VA Manager keeps track of a GPU's virtual address space by using
* &maple_tree structures. Typically, this structure is embedded in bigger
@@ -197,7 +195,7 @@ static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
*
* There should be one manager instance per GPU virtual address space.
*/
-struct drm_gpuva_manager {
+struct drm_gpuvm {
/**
* @name: the name of the DRM GPU VA space
*/
@@ -237,100 +235,101 @@ struct drm_gpuva_manager {
struct drm_gpuva kernel_alloc_node;
/**
- * @ops: &drm_gpuva_fn_ops providing the split/merge steps to drivers
+ * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
*/
- const struct drm_gpuva_fn_ops *ops;
+ const struct drm_gpuvm_ops *ops;
};
-void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
- const char *name,
- u64 start_offset, u64 range,
- u64 reserve_offset, u64 reserve_range,
- const struct drm_gpuva_fn_ops *ops);
-void drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr);
+void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
+ u64 start_offset, u64 range,
+ u64 reserve_offset, u64 reserve_range,
+ const struct drm_gpuvm_ops *ops);
+void drm_gpuvm_destroy(struct drm_gpuvm *gpuvm);
+
+bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
- if (va && !list_is_last(&va->rb.entry, &va->mgr->rb.list))
+ if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
return list_next_entry(va, rb.entry);
return NULL;
}
/**
- * drm_gpuva_for_each_va_range() - iterate over a range of &drm_gpuvas
+ * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
* @va__: &drm_gpuva structure to assign to in each iteration step
- * @mgr__: &drm_gpuva_manager to walk over
+ * @gpuvm__: &drm_gpuvm to walk over
* @start__: starting offset, the first gpuva will overlap this
* @end__: ending offset, the last gpuva will start before this (but may
* overlap)
*
- * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
* between @start__ and @end__. It is implemented similarly to list_for_each(),
- * but is using the &drm_gpuva_manager's internal interval tree to accelerate
+ * but is using the &drm_gpuvm's internal interval tree to accelerate
* the search for the starting &drm_gpuva, and hence isn't safe against removal
* of elements. It assumes that @end__ is within (or is the upper limit of) the
- * &drm_gpuva_manager. This iterator does not skip over the &drm_gpuva_manager's
+ * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
* @kernel_alloc_node.
*/
-#define drm_gpuva_for_each_va_range(va__, mgr__, start__, end__) \
- for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)); \
+#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
va__ && (va__->va.addr < (end__)); \
va__ = __drm_gpuva_next(va__))
/**
- * drm_gpuva_for_each_va_range_safe() - safely iterate over a range of
+ * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
* &drm_gpuvas
* @va__: &drm_gpuva to assign to in each iteration step
* @next__: another &drm_gpuva to use as temporary storage
- * @mgr__: &drm_gpuva_manager to walk over
+ * @gpuvm__: &drm_gpuvm to walk over
* @start__: starting offset, the first gpuva will overlap this
* @end__: ending offset, the last gpuva will start before this (but may
* overlap)
*
- * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
* between @start__ and @end__. It is implemented similarly to
- * list_for_each_safe(), but is using the &drm_gpuva_manager's internal interval
+ * list_for_each_safe(), but is using the &drm_gpuvm's internal interval
* tree to accelerate the search for the starting &drm_gpuva, and hence is safe
* against removal of elements. It assumes that @end__ is within (or is the
- * upper limit of) the &drm_gpuva_manager. This iterator does not skip over the
- * &drm_gpuva_manager's @kernel_alloc_node.
+ * upper limit of) the &drm_gpuvm. This iterator does not skip over the
+ * &drm_gpuvm's @kernel_alloc_node.
*/
-#define drm_gpuva_for_each_va_range_safe(va__, next__, mgr__, start__, end__) \
- for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)), \
+#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
next__ = __drm_gpuva_next(va__); \
va__ && (va__->va.addr < (end__)); \
va__ = next__, next__ = __drm_gpuva_next(va__))
/**
- * drm_gpuva_for_each_va() - iterate over all &drm_gpuvas
+ * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
* @va__: &drm_gpuva to assign to in each iteration step
- * @mgr__: &drm_gpuva_manager to walk over
+ * @gpuvm__: &drm_gpuvm to walk over
*
* This iterator walks over all &drm_gpuva structures associated with the given
- * &drm_gpuva_manager.
+ * &drm_gpuvm.
*/
-#define drm_gpuva_for_each_va(va__, mgr__) \
- list_for_each_entry(va__, &(mgr__)->rb.list, rb.entry)
+#define drm_gpuvm_for_each_va(va__, gpuvm__) \
+ list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)
/**
- * drm_gpuva_for_each_va_safe() - safely iterate over all &drm_gpuvas
+ * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
* @va__: &drm_gpuva to assign to in each iteration step
* @next__: another &drm_gpuva to use as temporary storage
- * @mgr__: &drm_gpuva_manager to walk over
+ * @gpuvm__: &drm_gpuvm to walk over
*
* This iterator walks over all &drm_gpuva structures associated with the given
- * &drm_gpuva_manager. It is implemented with list_for_each_entry_safe(), and
+ * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
* hence safe against the removal of elements.
*/
-#define drm_gpuva_for_each_va_safe(va__, next__, mgr__) \
- list_for_each_entry_safe(va__, next__, &(mgr__)->rb.list, rb.entry)
+#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
+ list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
/**
* enum drm_gpuva_op_type - GPU VA operation type
*
- * Operations to alter the GPU VA mappings tracked by the &drm_gpuva_manager.
+ * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
*/
enum drm_gpuva_op_type {
/**
@@ -413,7 +412,7 @@ struct drm_gpuva_op_unmap {
*
* Optionally, if &keep is set, drivers may keep the actual page table
* mappings for this &drm_gpuva, adding the missing page table entries
- * only and update the &drm_gpuva_manager accordingly.
+ * only and update the &drm_gpuvm accordingly.
*/
bool keep;
};
@@ -584,22 +583,22 @@ struct drm_gpuva_ops {
#define drm_gpuva_next_op(op) list_next_entry(op, entry)
struct drm_gpuva_ops *
-drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range,
struct drm_gem_object *obj, u64 offset);
struct drm_gpuva_ops *
-drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
struct drm_gpuva_ops *
-drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range);
struct drm_gpuva_ops *
-drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj);
-void drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
+void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
struct drm_gpuva_ops *ops);
static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
@@ -610,15 +609,15 @@ static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
}
/**
- * struct drm_gpuva_fn_ops - callbacks for split/merge steps
+ * struct drm_gpuvm_ops - callbacks for split/merge steps
*
- * This structure defines the callbacks used by &drm_gpuva_sm_map and
- * &drm_gpuva_sm_unmap to provide the split/merge steps for map and unmap
+ * This structure defines the callbacks used by &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
* operations to drivers.
*/
-struct drm_gpuva_fn_ops {
+struct drm_gpuvm_ops {
/**
- * @op_alloc: called when the &drm_gpuva_manager allocates
+ * @op_alloc: called when the &drm_gpuvm allocates
* a struct drm_gpuva_op
*
* Some drivers may want to embed struct drm_gpuva_op into driver
@@ -630,7 +629,7 @@ struct drm_gpuva_fn_ops {
struct drm_gpuva_op *(*op_alloc)(void);
/**
- * @op_free: called when the &drm_gpuva_manager frees a
+ * @op_free: called when the &drm_gpuvm frees a
* struct drm_gpuva_op
*
* Some drivers may want to embed struct drm_gpuva_op into driver
@@ -642,19 +641,19 @@ struct drm_gpuva_fn_ops {
void (*op_free)(struct drm_gpuva_op *op);
/**
- * @sm_step_map: called from &drm_gpuva_sm_map to finally insert the
+ * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
* mapping once all previous steps were completed
*
* The &priv pointer matches the one the driver passed to
- * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
*
- * Can be NULL if &drm_gpuva_sm_map is used.
+ * Can be NULL if &drm_gpuvm_sm_map is used.
*/
int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);
/**
- * @sm_step_remap: called from &drm_gpuva_sm_map and
- * &drm_gpuva_sm_unmap to split up an existent mapping
+ * @sm_step_remap: called from &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to split up an existent mapping
*
* This callback is called when existent mapping needs to be split up.
* This is the case when either a newly requested mapping overlaps or
@@ -662,38 +661,38 @@ struct drm_gpuva_fn_ops {
* mapping is requested.
*
* The &priv pointer matches the one the driver passed to
- * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
*
- * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+ * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
* used.
*/
int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);
/**
- * @sm_step_unmap: called from &drm_gpuva_sm_map and
- * &drm_gpuva_sm_unmap to unmap an existent mapping
+ * @sm_step_unmap: called from &drm_gpuvm_sm_map and
+ * &drm_gpuvm_sm_unmap to unmap an existent mapping
*
* This callback is called when existent mapping needs to be unmapped.
* This is the case when either a newly requested mapping encloses an
* existent mapping or an unmap of an existent mapping is requested.
*
* The &priv pointer matches the one the driver passed to
- * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+ * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
*
- * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+ * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
* used.
*/
int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
};
-int drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
+int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range,
struct drm_gem_object *obj, u64 offset);
-int drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
+int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range);
-void drm_gpuva_map(struct drm_gpuva_manager *mgr,
+void drm_gpuva_map(struct drm_gpuvm *gpuvm,
struct drm_gpuva *va,
struct drm_gpuva_op_map *op);
@@ -703,4 +702,4 @@ void drm_gpuva_remap(struct drm_gpuva *prev,
void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
-#endif /* __DRM_GPUVA_MGR_H__ */
+#endif /* __DRM_GPUVM_H__ */
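
Taken together, a hedged driver-side sketch of the renamed interfaces (assumes #include <drm/drm_gpuvm.h>). Only the drm_gpuvm_*/drm_gpuva_* identifiers come from this header; every example_* name and the way gpuvm, obj, addr and range are obtained are assumptions.

/* Illustrative split/merge callbacks; a real driver programs page tables here. */
static int example_sm_step_map(struct drm_gpuva_op *op, void *priv)
{
	/* finally insert the new mapping once the previous steps completed */
	return 0;
}

static int example_sm_step_remap(struct drm_gpuva_op *op, void *priv)
{
	/* split up the existent mapping the new request partially overlaps */
	return 0;
}

static int example_sm_step_unmap(struct drm_gpuva_op *op, void *priv)
{
	/* unmap the existent mapping the new request fully encloses */
	return 0;
}

static const struct drm_gpuvm_ops example_gpuvm_ops = {
	.sm_step_map   = example_sm_step_map,
	.sm_step_remap = example_sm_step_remap,
	.sm_step_unmap = example_sm_step_unmap,
};

static int example_bind(struct drm_gpuvm *gpuvm, void *priv, u64 addr, u64 range,
			struct drm_gem_object *obj, u64 offset)
{
	struct drm_gpuva *va;

	/* walk the mappings that already overlap the requested range */
	drm_gpuvm_for_each_va_range(va, gpuvm, addr, addr + range) {
		/* va->va.addr / va->va.range describe an existing mapping */
	}

	/* let the manager drive the example_sm_step_* callbacks above */
	return drm_gpuvm_sm_map(gpuvm, priv, addr, range, obj, offset);
}

The &drm_gpuvm itself would have been set up once per GPU VA space with drm_gpuvm_init(gpuvm, name, start_offset, range, reserve_offset, reserve_range, &example_gpuvm_ops), matching the declaration above.
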
diff --git a/include/linux/fb.h b/include/linux/fb.h
index c14576458228..94e2c44c6569 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -591,8 +591,6 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
/* fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
-extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
-extern int fb_show_logo(struct fb_info *fb_info, int rotate);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx,
u32 height, u32 shift_high, u32 shift_low, u32 mod);
@@ -603,9 +601,6 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var,
extern int fb_get_options(const char *name, char **option);
extern int fb_new_modelist(struct fb_info *info);
-extern bool fb_center_logo;
-extern int fb_logo_count;
-
static inline void lock_fb_info(struct fb_info *info)
{
mutex_lock(&info->lock);
diff --git a/include/uapi/drm/qaic_accel.h b/include/uapi/drm/qaic_accel.h
index 2d348744a853..43ac5d864512 100644
--- a/include/uapi/drm/qaic_accel.h
+++ b/include/uapi/drm/qaic_accel.h
@@ -242,12 +242,12 @@ struct qaic_attach_slice_entry {
* @dbc_id: In. Associate the sliced BO with this DBC.
* @handle: In. GEM handle of the BO to slice.
* @dir: In. Direction of data flow. 1 = DMA_TO_DEVICE, 2 = DMA_FROM_DEVICE
- * @size: In. Total length of the BO.
- * If BO is imported (DMABUF/PRIME) then this size
- * should not exceed the size of DMABUF provided.
- * If BO is allocated using DRM_IOCTL_QAIC_CREATE_BO
- * then this size should be exactly same as the size
- * provided during DRM_IOCTL_QAIC_CREATE_BO.
+ * @size: In. Total length of the BO being used. This should not exceed the
+ * base size of the BO (struct drm_gem_object.base).
+ * For BOs allocated with DRM_IOCTL_QAIC_CREATE_BO, the requested size is
+ * PAGE_SIZE aligned before allocation, so the allocated BO may be larger
+ * than requested. This size should not exceed that PAGE_SIZE aligned
+ * BO size.
* @dev_addr: In. Device address this slice pushes to or pulls from.
* @db_addr: In. Address of the doorbell to ring.
* @db_data: In. Data to write to the doorbell.
@@ -372,6 +372,16 @@ struct qaic_perf_stats_entry {
__u32 pad;
};
+/**
+ * struct qaic_detach_slice - Detaches slicing configuration from BO.
+ * @handle: In. GEM handle of the BO to detach slicing configuration.
+ * @pad: Structure padding. Must be 0.
+ */
+struct qaic_detach_slice {
+ __u32 handle;
+ __u32 pad;
+};
+
#define DRM_QAIC_MANAGE 0x00
#define DRM_QAIC_CREATE_BO 0x01
#define DRM_QAIC_MMAP_BO 0x02
@@ -380,6 +390,7 @@ struct qaic_perf_stats_entry {
#define DRM_QAIC_PARTIAL_EXECUTE_BO 0x05
#define DRM_QAIC_WAIT_BO 0x06
#define DRM_QAIC_PERF_STATS_BO 0x07
+#define DRM_QAIC_DETACH_SLICE_BO 0x08
#define DRM_IOCTL_QAIC_MANAGE DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_MANAGE, struct qaic_manage_msg)
#define DRM_IOCTL_QAIC_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_CREATE_BO, struct qaic_create_bo)
@@ -389,6 +400,7 @@ struct qaic_perf_stats_entry {
#define DRM_IOCTL_QAIC_PARTIAL_EXECUTE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_PARTIAL_EXECUTE_BO, struct qaic_execute)
#define DRM_IOCTL_QAIC_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_WAIT_BO, struct qaic_wait)
#define DRM_IOCTL_QAIC_PERF_STATS_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_QAIC_PERF_STATS_BO, struct qaic_perf_stats)
+#define DRM_IOCTL_QAIC_DETACH_SLICE_BO DRM_IOW(DRM_COMMAND_BASE + DRM_QAIC_DETACH_SLICE_BO, struct qaic_detach_slice)
#if defined(__cplusplus)
}
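
Finally, a userspace sketch of the new DRM_IOCTL_QAIC_DETACH_SLICE_BO call; the open fd and the bo_handle are assumed to exist, and the wrapper name is hypothetical.

#include <sys/ioctl.h>
#include <drm/qaic_accel.h>

/* Hypothetical helper: drop the slicing configuration from a BO so it can be
 * recycled (e.g. re-sliced via DRM_IOCTL_QAIC_ATTACH_SLICE_BO). */
static int example_detach_slice(int fd, __u32 bo_handle)
{
	struct qaic_detach_slice detach = {
		.handle = bo_handle,
		.pad = 0,		/* must be 0 */
	};

	return ioctl(fd, DRM_IOCTL_QAIC_DETACH_SLICE_BO, &detach);
}
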