Diffstat (limited to 'include')
-rw-r--r--   include/drm/bridge/dw_hdmi.h     |  11
-rw-r--r--   include/drm/dp/drm_dp_helper.h   |   1
-rw-r--r--   include/drm/ttm/ttm_bo_api.h     |   2
-rw-r--r--   include/drm/ttm/ttm_bo_driver.h  |  11
-rw-r--r--   include/drm/ttm/ttm_resource.h   |   7
-rw-r--r--   include/linux/dma-buf.h          |  24
-rw-r--r--   include/linux/dma-resv.h         | 180
-rw-r--r--   include/linux/fb.h               |   1
-rw-r--r--   include/linux/seqlock.h          |   8
9 files changed, 147 insertions, 98 deletions
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 2a1f85f9a8a3..f668e75fbabe 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -143,6 +143,11 @@ struct dw_hdmi_plat_data {
 				   const struct drm_display_info *info,
 				   const struct drm_display_mode *mode);
 
+	/* Platform-specific audio enable/disable (optional) */
+	void (*enable_audio)(struct dw_hdmi *hdmi, int channel,
+			     int width, int rate, int non_pcm);
+	void (*disable_audio)(struct dw_hdmi *hdmi);
+
 	/* Vendor PHY support */
 	const struct dw_hdmi_phy_ops *phy_ops;
 	const char *phy_name;
@@ -173,6 +178,8 @@ void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
 
 int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
 			   struct device *codec_dev);
+void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm);
+void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width);
 void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
 void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
 void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status);
@@ -187,9 +194,11 @@ void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address);
 void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
 			   unsigned char addr);
 
+void dw_hdmi_phy_gen1_reset(struct dw_hdmi *hdmi);
+
 void dw_hdmi_phy_gen2_pddq(struct dw_hdmi *hdmi, u8 enable);
 void dw_hdmi_phy_gen2_txpwron(struct dw_hdmi *hdmi, u8 enable);
-void dw_hdmi_phy_reset(struct dw_hdmi *hdmi);
+void dw_hdmi_phy_gen2_reset(struct dw_hdmi *hdmi);
 
 enum drm_connector_status dw_hdmi_phy_read_hpd(struct dw_hdmi *hdmi,
 					       void *data);
diff --git a/include/drm/dp/drm_dp_helper.h b/include/drm/dp/drm_dp_helper.h
index 1eccd9741943..91af98e6617c 100644
--- a/include/drm/dp/drm_dp_helper.h
+++ b/include/drm/dp/drm_dp_helper.h
@@ -2053,6 +2053,7 @@ struct drm_dp_aux {
 	bool is_remote;
 };
 
+int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
 ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
 			 void *buffer, size_t size);
 ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index c76932b68a33..2d524f8b0802 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -94,7 +94,6 @@ struct ttm_tt;
  * @deleted: True if the object is only a zombie and already deleted.
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
- * @moving: Fence set when BO is moving
  * @offset: The current GPU offset, which can have different meanings
  * depending on the memory type. For SYSTEM type memory, it should be 0.
  * @cur_placement: Hint of current placement.
@@ -147,7 +146,6 @@ struct ttm_buffer_object {
 	 * Members protected by a bo reservation.
 	 */
 
-	struct dma_fence *moving;
 	unsigned priority;
 	unsigned pin_count;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 059a595e14e5..897b88f0bd59 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -245,7 +245,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      struct ttm_resource *new_mem);
 
 /**
- * ttm_bo_move_accel_cleanup.
+ * ttm_bo_move_sync_cleanup.
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @new_mem: struct ttm_resource indicating where to move.
@@ -253,13 +253,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
  * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
  * by the caller to be idle. Typically used after memcpy buffer moves.
  */
-static inline void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
-					    struct ttm_resource *new_mem)
-{
-	int ret = ttm_bo_move_accel_cleanup(bo, NULL, true, false, new_mem);
-
-	WARN_ON(ret);
-}
+void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
+			      struct ttm_resource *new_mem);
 
 /**
  * ttm_bo_pipeline_gutting.
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 314f73de4280..441653693970 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -215,8 +215,7 @@ struct ttm_lru_bulk_move_pos {
 /**
  * struct ttm_lru_bulk_move
  *
- * @tt: first/last lru entry for resources in the TT domain
- * @vram: first/last lru entry for resources in the VRAM domain
+ * @pos: first/last lru entry for resources in the each domain/priority
  *
 * Container for the current bulk move state. Should be used with
 * ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
@@ -382,4 +381,8 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io,
 void ttm_kmap_iter_linear_io_fini(struct ttm_kmap_iter_linear_io *iter_io,
 				  struct ttm_device *bdev,
 				  struct ttm_resource *mem);
+
+void ttm_resource_manager_create_debugfs(struct ttm_resource_manager *man,
+					 struct dentry * parent,
+					 const char *name);
 #endif
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 6fb91956ab8d..71731796c8c3 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -393,27 +393,30 @@ struct dma_buf {
	 * e.g. exposed in `Implicit Fence Poll Support`_ must follow the
	 * below rules.
	 *
-	 * - Drivers must add a shared fence through dma_resv_add_shared_fence()
-	 *   for anything the userspace API considers a read access. This highly
-	 *   depends upon the API and window system.
+	 * - Drivers must add a read fence through dma_resv_add_fence() with the
+	 *   DMA_RESV_USAGE_READ flag for anything the userspace API considers a
+	 *   read access. This highly depends upon the API and window system.
	 *
-	 * - Similarly drivers must set the exclusive fence through
-	 *   dma_resv_add_excl_fence() for anything the userspace API considers
-	 *   write access.
+	 * - Similarly drivers must add a write fence through
+	 *   dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
+	 *   anything the userspace API considers write access.
	 *
-	 * - Drivers may just always set the exclusive fence, since that only
+	 * - Drivers may just always add a write fence, since that only
	 *   causes unecessarily synchronization, but no correctness issues.
	 *
	 * - Some drivers only expose a synchronous userspace API with no
	 *   pipelining across drivers. These do not set any fences for their
	 *   access. An example here is v4l.
	 *
+	 * - Driver should use dma_resv_usage_rw() when retrieving fences as
+	 *   dependency for implicit synchronization.
+	 *
	 * DYNAMIC IMPORTER RULES:
	 *
	 * Dynamic importers, see dma_buf_attachment_is_dynamic(), have
	 * additional constraints on how they set up fences:
	 *
-	 * - Dynamic importers must obey the exclusive fence and wait for it to
+	 * - Dynamic importers must obey the write fences and wait for them to
	 *   signal before allowing access to the buffer's underlying storage
	 *   through the device.
	 *
@@ -423,8 +426,9 @@ struct dma_buf {
	 *
	 * IMPORTANT:
	 *
-	 * All drivers must obey the struct dma_resv rules, specifically the
-	 * rules for updating and obeying fences.
+	 * All drivers and memory management related functions must obey the
+	 * struct dma_resv rules, specifically the rules for updating and
+	 * obeying fences. See enum dma_resv_usage for further descriptions.
	 */
	struct dma_resv *resv;
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index 5fa04d0fccad..c8ccbc94d5d2 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -50,10 +50,86 @@ extern struct ww_class reservation_ww_class;
 
 struct dma_resv_list;
 
 /**
+ * enum dma_resv_usage - how the fences from a dma_resv obj are used
+ *
+ * This enum describes the different use cases for a dma_resv object and
+ * controls which fences are returned when queried.
+ *
+ * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and
+ * when the dma_resv object is asked for fences for one use case the fences
+ * for the lower use case are returned as well.
+ *
+ * For example when asking for WRITE fences then the KERNEL fences are returned
+ * as well. Similar when asked for READ fences then both WRITE and KERNEL
+ * fences are returned as well.
+ */
+enum dma_resv_usage {
+	/**
+	 * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
+	 *
+	 * This should only be used for things like copying or clearing memory
+	 * with a DMA hardware engine for the purpose of kernel memory
+	 * management.
+	 *
+	 * Drivers *always* must wait for those fences before accessing the
+	 * resource protected by the dma_resv object. The only exception for
+	 * that is when the resource is known to be locked down in place by
+	 * pinning it previously.
+	 */
+	DMA_RESV_USAGE_KERNEL,
+
+	/**
+	 * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
+	 *
+	 * This should only be used for userspace command submissions which add
+	 * an implicit write dependency.
+	 */
+	DMA_RESV_USAGE_WRITE,
+
+	/**
+	 * @DMA_RESV_USAGE_READ: Implicit read synchronization.
+	 *
+	 * This should only be used for userspace command submissions which add
+	 * an implicit read dependency.
+	 */
+	DMA_RESV_USAGE_READ,
+
+	/**
+	 * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+	 *
+	 * This should be used by submissions which don't want to participate in
+	 * implicit synchronization.
+	 *
+	 * The most common case are preemption fences as well as page table
+	 * updates and their TLB flushes.
+	 */
+	DMA_RESV_USAGE_BOOKKEEP
+};
+
+/**
+ * dma_resv_usage_rw - helper for implicit sync
+ * @write: true if we create a new implicit sync write
+ *
+ * This returns the implicit synchronization usage for write or read accesses,
+ * see enum dma_resv_usage and &dma_buf.resv.
+ */
+static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
+{
+	/* This looks confusing at first sight, but is indeed correct.
+	 *
+	 * The rational is that new write operations needs to wait for the
+	 * existing read and write operations to finish.
+	 * But a new read operation only needs to wait for the existing write
+	 * operations to finish.
+	 */
+	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
+}
+
+/**
  * struct dma_resv - a reservation object manages fences for a buffer
  *
- * There are multiple uses for this, with sometimes slightly different rules in
- * how the fence slots are used.
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
  *
  * One use is to synchronize cross-driver access to a struct dma_buf, either for
  * dynamic buffer management or just to handle implicit synchronization between
@@ -80,50 +156,16 @@ struct dma_resv {
 	struct ww_mutex lock;
 
 	/**
-	 * @seq:
-	 *
-	 * Sequence count for managing RCU read-side synchronization, allows
-	 * read-only access to @fence_excl and @fence while ensuring we take a
-	 * consistent snapshot.
-	 */
-	seqcount_ww_mutex_t seq;
-
-	/**
-	 * @fence_excl:
+	 * @fences:
	 *
-	 * The exclusive fence, if there is one currently.
+	 * Array of fences which where added to the dma_resv object
	 *
-	 * To guarantee that no fences are lost, this new fence must signal
-	 * only after the previous exclusive fence has signalled. If
-	 * semantically only a new access is added without actually treating the
-	 * previous one as a dependency the exclusive fences can be strung
-	 * together using struct dma_fence_chain.
-	 *
-	 * Note that actual semantics of what an exclusive or shared fence mean
-	 * is defined by the user, for reservation objects shared across drivers
-	 * see &dma_buf.resv.
-	 */
-	struct dma_fence __rcu *fence_excl;
-
-	/**
-	 * @fence:
-	 *
-	 * List of current shared fences.
-	 *
-	 * There are no ordering constraints of shared fences against the
-	 * exclusive fence slot. If a waiter needs to wait for all access, it
-	 * has to wait for both sets of fences to signal.
-	 *
-	 * A new fence is added by calling dma_resv_add_shared_fence(). Since
-	 * this often needs to be done past the point of no return in command
+	 * A new fence is added by calling dma_resv_add_fence(). Since this
+	 * often needs to be done past the point of no return in command
	 * submission it cannot fail, and therefore sufficient slots need to be
	 * reserved by calling dma_resv_reserve_fences().
-	 *
-	 * Note that actual semantics of what an exclusive or shared fence mean
-	 * is defined by the user, for reservation objects shared across drivers
-	 * see &dma_buf.resv.
	 */
-	struct dma_resv_list __rcu *fence;
+	struct dma_resv_list __rcu *fences;
 };
 
@@ -142,14 +184,14 @@ struct dma_resv_iter {
 	/** @obj: The dma_resv object we iterate over */
 	struct dma_resv *obj;
 
-	/** @all_fences: If all fences should be returned */
-	bool all_fences;
+	/** @usage: Return fences with this usage or lower. */
+	enum dma_resv_usage usage;
 
 	/** @fence: the currently handled fence */
 	struct dma_fence *fence;
 
-	/** @seq: sequence number to check for modifications */
-	unsigned int seq;
+	/** @fence_usage: the usage of the current fence */
+	enum dma_resv_usage fence_usage;
 
 	/** @index: index into the shared fences */
 	unsigned int index;
@@ -157,8 +199,8 @@ struct dma_resv_iter {
 	/** @fences: the shared fences; private, *MUST* not dereference */
 	struct dma_resv_list *fences;
 
-	/** @shared_count: number of shared fences */
-	unsigned int shared_count;
+	/** @num_fences: number of fences */
+	unsigned int num_fences;
 
 	/** @is_restarted: true if this is the first returned fence */
 	bool is_restarted;
@@ -173,14 +215,14 @@ struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
  * dma_resv_iter_begin - initialize a dma_resv_iter object
  * @cursor: The dma_resv_iter object to initialize
  * @obj: The dma_resv object which we want to iterate over
- * @all_fences: If all fences should be returned or just the exclusive one
+ * @usage: controls which fences to include, see enum dma_resv_usage.
  */
 static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
				       struct dma_resv *obj,
-				       bool all_fences)
+				       enum dma_resv_usage usage)
 {
 	cursor->obj = obj;
-	cursor->all_fences = all_fences;
+	cursor->usage = usage;
 	cursor->fence = NULL;
 }
 
@@ -197,14 +239,15 @@ static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
 }
 
 /**
- * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one
+ * dma_resv_iter_usage - Return the usage of the current fence
  * @cursor: the cursor of the current position
  *
- * Returns true if the currently returned fence is the exclusive one.
+ * Returns the usage of the currently processed fence.
  */
-static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
 {
-	return cursor->index == 0;
+	return cursor->fence_usage;
 }
 
 /**
@@ -241,7 +284,7 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
 * dma_resv_for_each_fence - fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @obj: a dma_resv object pointer
- * @all_fences: true if all fences should be returned
+ * @usage: controls which fences to return
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object while holding the
@@ -250,8 +293,8 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
 * valid as long as the lock is held and so no extra reference to the fence is
 * taken.
 */
-#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
-	for (dma_resv_iter_begin(cursor, obj, all_fences),	\
+#define dma_resv_for_each_fence(cursor, obj, usage, fence)	\
+	for (dma_resv_iter_begin(cursor, obj, usage),	\
	     fence = dma_resv_iter_first(cursor); fence;	\
	     fence = dma_resv_iter_next(cursor))
 
@@ -259,9 +302,9 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
 #ifdef CONFIG_DEBUG_MUTEXES
-void dma_resv_reset_shared_max(struct dma_resv *obj);
+void dma_resv_reset_max_fences(struct dma_resv *obj);
 #else
-static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
 #endif
 
 /**
@@ -407,25 +450,26 @@ static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
 */
 static inline void dma_resv_unlock(struct dma_resv *obj)
 {
-	dma_resv_reset_shared_max(obj);
+	dma_resv_reset_max_fences(obj);
 	ww_mutex_unlock(&obj->lock);
 }
 
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+			enum dma_resv_usage usage);
 void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
-			     struct dma_fence *fence);
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-int dma_resv_get_fences(struct dma_resv *obj, bool write,
+			     struct dma_fence *fence,
+			     enum dma_resv_usage usage);
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
			unsigned int *num_fences, struct dma_fence ***fences);
-int dma_resv_get_singleton(struct dma_resv *obj, bool write,
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
			   struct dma_fence **fence);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
-			   unsigned long timeout);
-bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+			   bool intr, unsigned long timeout);
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
 void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
 
 #endif /* _LINUX_RESERVATION_H */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 9a77ab615c36..f95da1af9ff6 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -450,7 +450,6 @@ struct fb_info {
 	struct fb_var_screeninfo var;	/* Current var */
 	struct fb_fix_screeninfo fix;	/* Current fix */
 	struct fb_monspecs monspecs;	/* Current Monitor specs */
-	struct work_struct queue;	/* Framebuffer event queue */
 	struct fb_pixmap pixmap;	/* Image hardware mapper */
 	struct fb_pixmap sprite;	/* Cursor hardware mapper */
 	struct fb_cmap cmap;		/* Current cmap */
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 37ded6b8fee6..3926e9027947 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -17,7 +17,6 @@
 #include <linux/kcsan-checks.h>
 #include <linux/lockdep.h>
 #include <linux/mutex.h>
-#include <linux/ww_mutex.h>
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
 
@@ -164,7 +163,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
 * static initializer or init function. This enables lockdep to validate
 * that the write side critical section is properly serialized.
 *
- * LOCKNAME: raw_spinlock, spinlock, rwlock, mutex, or ww_mutex.
+ * LOCKNAME: raw_spinlock, spinlock, rwlock or mutex
 */
 
 /*
@@ -184,7 +183,6 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
 #define seqcount_spinlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, spinlock)
 #define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock)
 #define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex)
-#define seqcount_ww_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, ww_mutex)
 
 /*
 * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
@@ -277,7 +275,6 @@ SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_s
 SEQCOUNT_LOCKNAME(spinlock,	spinlock_t,	 __SEQ_RT, s->lock,	   spin,     spin_lock(s->lock))
 SEQCOUNT_LOCKNAME(rwlock,	rwlock_t,	 __SEQ_RT, s->lock,	   read,     read_lock(s->lock))
 SEQCOUNT_LOCKNAME(mutex,	struct mutex,	 true,	   s->lock,	   mutex,    mutex_lock(s->lock))
-SEQCOUNT_LOCKNAME(ww_mutex,	struct ww_mutex, true,	   &s->lock->base, ww_mutex, ww_mutex_lock(s->lock, NULL))
 
 /*
 * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
@@ -304,8 +301,7 @@ SEQCOUNT_LOCKNAME(ww_mutex, struct ww_mutex, true, &s->lock->base, ww_mu
	__seqprop_case((s),	raw_spinlock,	prop),			\
	__seqprop_case((s),	spinlock,	prop),			\
	__seqprop_case((s),	rwlock,		prop),			\
-	__seqprop_case((s),	mutex,		prop),			\
-	__seqprop_case((s),	ww_mutex,	prop))
+	__seqprop_case((s),	mutex,		prop))
 
 #define seqprop_ptr(s)			__seqprop(s, ptr)
 #define seqprop_sequence(s)		__seqprop(s, sequence)
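Editorial note, not part of the commit: the dw_hdmi.h hunks add optional enable_audio/disable_audio hooks to struct dw_hdmi_plat_data and export dw_hdmi_set_sample_non_pcm() and dw_hdmi_set_sample_width(). The sketch below shows how a platform glue driver might wire these up; the example_* names are hypothetical, only the dw_hdmi_* helpers and the struct fields come from this diff.

#include <drm/bridge/dw_hdmi.h>

/* Hypothetical platform callback: forward the parameters handed to
 * dw_hdmi_plat_data->enable_audio() to the exported dw-hdmi helpers. */
static void example_hdmi_enable_audio(struct dw_hdmi *hdmi, int channel,
				      int width, int rate, int non_pcm)
{
	dw_hdmi_set_sample_non_pcm(hdmi, non_pcm);
	dw_hdmi_set_sample_width(hdmi, width);
	dw_hdmi_set_sample_rate(hdmi, rate);
	dw_hdmi_set_channel_count(hdmi, channel);
}

static void example_hdmi_disable_audio(struct dw_hdmi *hdmi)
{
	/* platform-specific audio mute / clock gating would go here */
}

static const struct dw_hdmi_plat_data example_hdmi_plat_data = {
	.enable_audio	= example_hdmi_enable_audio,
	.disable_audio	= example_hdmi_disable_audio,
	/* remaining platform data (phy_ops, mode_valid, ...) omitted */
};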
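Editorial note, not part of the commit: the dma-resv.h changes replace the exclusive/shared fence slots with a single fence array qualified by enum dma_resv_usage. Below is a minimal sketch, under stated assumptions, of how a driver submission path might combine the declarations added here (dma_resv_reserve_fences(), dma_resv_usage_rw(), dma_resv_for_each_fence() and dma_resv_add_fence()); the function name and the scheduler hand-off are hypothetical.

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

static int example_submit_fences(struct dma_resv *resv,
				 struct dma_fence *job_done, bool writes)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	/* The caller is expected to hold the reservation lock here. */
	dma_resv_assert_held(resv);

	/* Make room for the fence we are about to add; this can fail. */
	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		return ret;

	/*
	 * Collect implicit-sync dependencies: a writer waits for READ
	 * (which includes WRITE and KERNEL), a reader only for WRITE,
	 * as dma_resv_usage_rw() documents.
	 */
	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(writes), fence) {
		/* e.g. hand each fence to the driver's scheduler as a dependency */
	}

	/* Publish our own fence with the matching usage. */
	dma_resv_add_fence(resv, job_done,
			   writes ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
	return 0;
}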