author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2020-01-21 19:36:59 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2020-01-21 19:36:59 +0100
commit     dd7d99dc688d0fc448976f52f8517fbdeccdccda (patch)
tree       4ad59de5d584cf03e772ee153260c24ba0d717bd /include
parent     42bbdd99221bac206dde2b5e87098177fcd2a48e (diff)
parent     def9d2780727cec3313ed3522d0123158d87224d (diff)
Merge 5.5-rc7 into usb-next
We need the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/cacheflush.h                    33
-rw-r--r--  include/drm/drm_dp_mst_helper.h                      6
-rw-r--r--  include/dt-bindings/reset/amlogic,meson8b-reset.h    6
-rw-r--r--  include/linux/blkdev.h                               8
-rw-r--r--  include/linux/mm.h                                  18
-rw-r--r--  include/linux/mmzone.h                               5
-rw-r--r--  include/linux/namei.h                                1
-rw-r--r--  include/linux/sched.h                                4
-rw-r--r--  include/linux/skmsg.h                               13
-rw-r--r--  include/linux/tnum.h                                 2
-rw-r--r--  include/net/cfg80211.h                               5
-rw-r--r--  include/net/devlink.h                                2
-rw-r--r--  include/net/tcp.h                                    6
-rw-r--r--  include/trace/events/afs.h                          12
-rw-r--r--  include/trace/events/huge_memory.h                   3
15 files changed, 89 insertions, 35 deletions
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index a950a22c4890..cac7404b2bdd 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -11,71 +11,102 @@
* The cache doesn't need to be flushed when TLB entries change when
* the cache is mapped to physical memory, not virtual memory
*/
+#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
+#endif
+#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
+#endif
+#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
+#endif
+#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
}
+#endif
+#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr,
unsigned long pfn)
{
}
+#endif
+#ifndef flush_dcache_page
static inline void flush_dcache_page(struct page *page)
{
}
+#endif
+#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
+#endif
+#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
+#endif
+#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
+#endif
+#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
}
+#endif
+#ifndef flush_icache_user_range
static inline void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page,
unsigned long addr, int len)
{
}
+#endif
+#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
+#endif
+#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
+#endif
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
+#endif
+
+#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
+#endif
#endif /* __ASM_CACHEFLUSH_H */
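
Editor's note: this hunk turns every stub in asm-generic/cacheflush.h into an overridable default, so an architecture can provide only the hooks it actually needs and fall back to the empty stubs for the rest. A minimal sketch of how an arch header might use that, assuming a made-up architecture "foo" and helper foo_flush_dcache_page (neither appears in this diff):

/* arch/foo/include/asm/cacheflush.h -- hypothetical architecture "foo" */
#ifndef __ASM_FOO_CACHEFLUSH_H
#define __ASM_FOO_CACHEFLUSH_H

struct page;

/* Provide a real implementation for the one hook this arch cares about... */
extern void foo_flush_dcache_page(struct page *page);
#define flush_dcache_page foo_flush_dcache_page

/* ...and pick up the empty generic stubs for everything else, since the
 * generic header now wraps each stub in #ifndef <hook-name>. */
#include <asm-generic/cacheflush.h>

#endif /* __ASM_FOO_CACHEFLUSH_H */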
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index d5fc90b30487..c1bda7030e2d 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -605,6 +605,12 @@ struct drm_dp_mst_topology_mgr {
* &drm_dp_sideband_msg_tx.state once they are queued
*/
struct mutex qlock;
+
+ /**
+ * @is_waiting_for_dwn_reply: indicate whether is waiting for down reply
+ */
+ bool is_waiting_for_dwn_reply;
+
/**
* @tx_msg_downq: List of pending down replies.
*/
diff --git a/include/dt-bindings/reset/amlogic,meson8b-reset.h b/include/dt-bindings/reset/amlogic,meson8b-reset.h
index c614438bcbdb..fbc524a900da 100644
--- a/include/dt-bindings/reset/amlogic,meson8b-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson8b-reset.h
@@ -46,9 +46,9 @@
#define RESET_VD_RMEM 64
#define RESET_AUDIN 65
#define RESET_DBLK 66
-#define RESET_PIC_DC 66
-#define RESET_PSC 66
-#define RESET_NAND 66
+#define RESET_PIC_DC 67
+#define RESET_PSC 68
+#define RESET_NAND 69
#define RESET_GE2D 70
#define RESET_PARSER_REG 71
#define RESET_PARSER_FETCH 72
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 47eb22a3b7f9..4c636c42ad68 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -328,6 +328,7 @@ struct queue_limits {
unsigned int max_sectors;
unsigned int max_segment_size;
unsigned int physical_block_size;
+ unsigned int logical_block_size;
unsigned int alignment_offset;
unsigned int io_min;
unsigned int io_opt;
@@ -338,7 +339,6 @@ struct queue_limits {
unsigned int discard_granularity;
unsigned int discard_alignment;
- unsigned short logical_block_size;
unsigned short max_segments;
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
@@ -1077,7 +1077,7 @@ extern void blk_queue_max_write_same_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
@@ -1291,7 +1291,7 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
return q->limits.max_segment_size;
}
-static inline unsigned short queue_logical_block_size(const struct request_queue *q)
+static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
int retval = 512;
@@ -1301,7 +1301,7 @@ static inline unsigned short queue_logical_block_size(const struct request_queue
return retval;
}
-static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
return queue_logical_block_size(bdev_get_queue(bdev));
}
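
Editor's note: the blkdev.h hunks widen logical_block_size from unsigned short to unsigned int end to end (the queue_limits field, blk_queue_logical_block_size(), queue_logical_block_size() and bdev_logical_block_size()), so block sizes of 64 KiB and up no longer truncate. A hedged sketch of a driver setting such a size; foo_setup_queue is invented, only the two blk_queue_* calls come from this diff:

#include <linux/blkdev.h>
#include <linux/sizes.h>

/* Illustrative only: not from this diff. */
static void foo_setup_queue(struct request_queue *q)
{
        /*
         * 64 KiB does not fit in an unsigned short (max 65535); with the
         * old prototype this value would silently truncate.  After this
         * change the full unsigned int is stored and read back via
         * queue_logical_block_size()/bdev_logical_block_size().
         */
        blk_queue_logical_block_size(q, SZ_64K);
        blk_queue_physical_block_size(q, SZ_64K);
}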
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 80a9162b406c..cfaa8feecfe8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2658,14 +2658,26 @@ static inline bool want_init_on_free(void)
!page_poisoning_enabled();
}
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern void init_debug_pagealloc(void);
#else
-DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
+static inline void init_debug_pagealloc(void) {}
#endif
+extern bool _debug_pagealloc_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
static inline bool debug_pagealloc_enabled(void)
{
+ return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+ _debug_pagealloc_enabled_early;
+}
+
+/*
+ * For use in fast paths after init_debug_pagealloc() has run, or when a
+ * false negative result is not harmful when called too early.
+ */
+static inline bool debug_pagealloc_enabled_static(void)
+{
if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
return false;
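
Editor's note: the mm.h hunk splits the check in two: debug_pagealloc_enabled() reads the early boolean and is safe at any time, while the new debug_pagealloc_enabled_static() uses the static key and is meant for fast paths after init_debug_pagealloc() has run. A sketch of the intended split, assuming hypothetical callers foo_should_map_page/foo_init:

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/printk.h>

/* Hypothetical fast-path helper -- illustrates the split, not real code. */
static inline bool foo_should_map_page(void)
{
        /*
         * Fast path: relies on the static key, so only valid after
         * init_debug_pagealloc() (or where a false negative is harmless),
         * as the new comment in mm.h spells out.
         */
        return debug_pagealloc_enabled_static();
}

static int __init foo_init(void)
{
        /* Init/slow path: the early variant is always safe to call. */
        if (debug_pagealloc_enabled())
                pr_info("debug_pagealloc is enabled\n");
        return 0;
}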
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 89d8ff06c9ce..5334ad8fc7bd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -215,9 +215,8 @@ enum node_stat_item {
NR_INACTIVE_FILE, /* " " " " " */
NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */
- NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */
- NR_SLAB_UNRECLAIMABLE, /* and this one without looking at
- * memcg_flush_percpu_vmstats() first. */
+ NR_SLAB_RECLAIMABLE,
+ NR_SLAB_UNRECLAIMABLE,
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
WORKINGSET_NODES,
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 7fe7b87a3ded..07bfb0874033 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -34,7 +34,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
/* internal use only */
#define LOOKUP_PARENT 0x0010
-#define LOOKUP_NO_REVAL 0x0080
#define LOOKUP_JUMPED 0x1000
#define LOOKUP_ROOT 0x2000
#define LOOKUP_ROOT_GRABBED 0x0008
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 467d26046416..716ad1d8d95e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1929,11 +1929,11 @@ static inline void rseq_migrate(struct task_struct *t)
/*
* If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread.
+ * child inherits. Unregister rseq for a clone with CLONE_VM set.
*/
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
- if (clone_flags & CLONE_THREAD) {
+ if (clone_flags & CLONE_VM) {
t->rseq = NULL;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
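
Editor's note: rseq_fork() now clears the child's rseq state for any clone that shares the address space (CLONE_VM), not just CLONE_THREAD, since the inherited rseq pointer would otherwise alias the parent's registration. The userspace consequence is that a CLONE_VM child must register its own area; a hedged sketch of that, where thread_rseq and register_thread_rseq are made-up names and only the rseq(2) syscall ABI itself is assumed:

/* Userspace illustration; requires a kernel/libc exposing __NR_rseq. */
#define _GNU_SOURCE
#include <linux/rseq.h>      /* UAPI struct rseq (carries its own alignment) */
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>

static __thread struct rseq thread_rseq;

static int register_thread_rseq(uint32_t sig)
{
        /* sig is the caller-chosen abort signature, not a kernel constant. */
        return syscall(__NR_rseq, &thread_rseq, sizeof(thread_rseq), 0, sig);
}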
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index ef7031f8a304..14d61bba0b79 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -358,17 +358,22 @@ static inline void sk_psock_update_proto(struct sock *sk,
static inline void sk_psock_restore_proto(struct sock *sk,
struct sk_psock *psock)
{
- sk->sk_write_space = psock->saved_write_space;
+ sk->sk_prot->unhash = psock->saved_unhash;
if (psock->sk_proto) {
struct inet_connection_sock *icsk = inet_csk(sk);
bool has_ulp = !!icsk->icsk_ulp_data;
- if (has_ulp)
- tcp_update_ulp(sk, psock->sk_proto);
- else
+ if (has_ulp) {
+ tcp_update_ulp(sk, psock->sk_proto,
+ psock->saved_write_space);
+ } else {
sk->sk_prot = psock->sk_proto;
+ sk->sk_write_space = psock->saved_write_space;
+ }
psock->sk_proto = NULL;
+ } else {
+ sk->sk_write_space = psock->saved_write_space;
}
}
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index c17af77f3fae..ea627d1ab7e3 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -30,7 +30,7 @@ struct tnum tnum_lshift(struct tnum a, u8 shift);
/* Shift (rsh) a tnum right (by a fixed shift) */
struct tnum tnum_rshift(struct tnum a, u8 shift);
/* Shift (arsh) a tnum right (by a fixed min_shift) */
-struct tnum tnum_arshift(struct tnum a, u8 min_shift);
+struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness);
/* Add two tnums, return @a + @b */
struct tnum tnum_add(struct tnum a, struct tnum b);
/* Subtract two tnums, return @a - @b */
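
Editor's note: tnum_arshift() grows an insn_bitness parameter so the BPF verifier can distinguish 32-bit from 64-bit arithmetic right shifts when tracking known/unknown bits. A minimal sketch of the two call shapes the new prototype allows; foo_track_arsh and the shift value are arbitrary:

#include <linux/tnum.h>

/* Illustrative only: shows the new third argument, nothing more. */
static void foo_track_arsh(struct tnum reg_var)
{
        /* 64-bit BPF_ARSH: sign-extend from bit 63 */
        struct tnum r64 = tnum_arshift(reg_var, 3, 64);

        /* 32-bit (ALU32) BPF_ARSH: sign bit is bit 31, not bit 63 */
        struct tnum r32 = tnum_arshift(reg_var, 3, 32);

        (void)r64;
        (void)r32;
}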
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 059524b87c4c..f22bd6c838a3 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3548,6 +3548,9 @@ struct cfg80211_update_owe_info {
*
* @start_radar_detection: Start radar detection in the driver.
*
+ * @end_cac: End running CAC, probably because a related CAC
+ * was finished on another phy.
+ *
* @update_ft_ies: Provide updated Fast BSS Transition information to the
* driver. If the SME is in the driver/firmware, this information can be
* used in building Authentication and Reassociation Request frames.
@@ -3874,6 +3877,8 @@ struct cfg80211_ops {
struct net_device *dev,
struct cfg80211_chan_def *chandef,
u32 cac_time_ms);
+ void (*end_cac)(struct wiphy *wiphy,
+ struct net_device *dev);
int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_update_ft_ies_params *ftie);
int (*crit_proto_start)(struct wiphy *wiphy,
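
Editor's note: cfg80211 gains an optional end_cac callback so the core can ask a driver to stop a running Channel Availability Check, for instance because a related CAC finished on another phy. A sketch of how a driver might wire it up; the foo_* names are invented, only the callback signature comes from this diff:

#include <net/cfg80211.h>

/* Hypothetical driver callback -- name and body are illustrative. */
static void foo_end_cac(struct wiphy *wiphy, struct net_device *dev)
{
        /* Abort the in-progress or offloaded CAC for this interface. */
}

static const struct cfg80211_ops foo_cfg80211_ops = {
        /* ... existing callbacks such as .start_radar_detection ... */
        .end_cac = foo_end_cac,
};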
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 47f87b2fcf63..38b4acb93f74 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -938,7 +938,7 @@ struct devlink_region *devlink_region_create(struct devlink *devlink,
u32 region_max_snapshots,
u64 region_size);
void devlink_region_destroy(struct devlink_region *region);
-u32 devlink_region_shapshot_id_get(struct devlink *devlink);
+u32 devlink_region_snapshot_id_get(struct devlink *devlink);
int devlink_region_snapshot_create(struct devlink_region *region,
u8 *data, u32 snapshot_id,
devlink_snapshot_data_dest_t *data_destructor);
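
Editor's note: the devlink change is a spelling fix, devlink_region_shapshot_id_get() becomes devlink_region_snapshot_id_get(), so any caller of the old name has to be updated alongside it. Typical usage of the pair per the prototypes shown here; foo_capture_region and the buffer handling are illustrative:

#include <net/devlink.h>

/* Illustrative snapshot capture; data/destructor ownership is assumed. */
static int foo_capture_region(struct devlink *devlink,
                              struct devlink_region *region, u8 *data,
                              devlink_snapshot_data_dest_t *destructor)
{
        u32 id = devlink_region_snapshot_id_get(devlink);   /* new spelling */

        return devlink_region_snapshot_create(region, data, id, destructor);
}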
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e460ea7f767b..e6f48384dc71 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2147,7 +2147,8 @@ struct tcp_ulp_ops {
/* initialize ulp */
int (*init)(struct sock *sk);
/* update ulp */
- void (*update)(struct sock *sk, struct proto *p);
+ void (*update)(struct sock *sk, struct proto *p,
+ void (*write_space)(struct sock *sk));
/* cleanup ulp */
void (*release)(struct sock *sk);
/* diagnostic */
@@ -2162,7 +2163,8 @@ void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
-void tcp_update_ulp(struct sock *sk, struct proto *p);
+void tcp_update_ulp(struct sock *sk, struct proto *p,
+ void (*write_space)(struct sock *sk));
#define MODULE_ALIAS_TCP_ULP(name) \
__MODULE_INFO(alias, alias_userspace, name); \
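
Editor's note: struct tcp_ulp_ops->update() and tcp_update_ulp() now also carry the socket's saved write_space callback, matching the skmsg hunk above: when a ULP is layered on top, the ULP rather than sk_psock_restore_proto() decides where sk_write_space ends up. A sketch of a ULP honoring the new argument; the "foo" ULP and its internals are hypothetical, only the signature is from this diff:

#include <net/tcp.h>

/* Hypothetical ULP update handler. */
static void foo_ulp_update(struct sock *sk, struct proto *proto,
                           void (*write_space)(struct sock *sk))
{
        /* Switch the socket over to the restored proto ... */
        sk->sk_prot = proto;
        /* ... and put the original write_space callback back as well. */
        sk->sk_write_space = write_space;
}

static struct tcp_ulp_ops foo_ulp_ops = {
        .name   = "foo",
        .update = foo_ulp_update,
};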
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index d5ec4fac82ae..564ba1b5cf57 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -915,9 +915,9 @@ TRACE_EVENT(afs_call_state,
TRACE_EVENT(afs_lookup,
TP_PROTO(struct afs_vnode *dvnode, const struct qstr *name,
- struct afs_vnode *vnode),
+ struct afs_fid *fid),
- TP_ARGS(dvnode, name, vnode),
+ TP_ARGS(dvnode, name, fid),
TP_STRUCT__entry(
__field_struct(struct afs_fid, dfid )
@@ -928,13 +928,7 @@ TRACE_EVENT(afs_lookup,
TP_fast_assign(
int __len = min_t(int, name->len, 23);
__entry->dfid = dvnode->fid;
- if (vnode) {
- __entry->fid = vnode->fid;
- } else {
- __entry->fid.vid = 0;
- __entry->fid.vnode = 0;
- __entry->fid.unique = 0;
- }
+ __entry->fid = *fid;
memcpy(__entry->name, name->name, __len);
__entry->name[__len] = 0;
),
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index dd4db334bd63..d82a0f4e824d 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -31,7 +31,8 @@
EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \
EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \
EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \
- EMe(SCAN_TRUNCATED, "truncated") \
+ EM( SCAN_TRUNCATED, "truncated") \
+ EMe(SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
#undef EM
#undef EMe