Diffstat (limited to 'include/linux')
31 files changed, 312 insertions, 359 deletions
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 21ee41b92e8a..f1d5c5acc8dd 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -171,12 +171,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl); void bpf_register_map_type(struct bpf_map_type_list *tl); struct bpf_prog *bpf_prog_get(u32 ufd); +struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); void bpf_prog_put_rcu(struct bpf_prog *prog); struct bpf_map *bpf_map_get_with_uref(u32 ufd); struct bpf_map *__bpf_map_get(struct fd f); -void bpf_map_inc(struct bpf_map *map, bool uref); +struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); int bpf_map_precharge_memlock(u32 pages); diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index 260d78b587c4..1563265d2097 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h @@ -12,9 +12,12 @@ */ struct ceph_auth_client; -struct ceph_authorizer; struct ceph_msg; +struct ceph_authorizer { + void (*destroy)(struct ceph_authorizer *); +}; + struct ceph_auth_handshake { struct ceph_authorizer *authorizer; void *authorizer_buf; @@ -62,8 +65,6 @@ struct ceph_auth_client_ops { struct ceph_auth_handshake *auth); int (*verify_authorizer_reply)(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len); - void (*destroy_authorizer)(struct ceph_auth_client *ac, - struct ceph_authorizer *a); void (*invalidate_authorizer)(struct ceph_auth_client *ac, int peer_type); @@ -112,8 +113,7 @@ extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth); -extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, - struct ceph_authorizer *a); +void ceph_auth_destroy_authorizer(struct ceph_authorizer *a); extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *a); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 4343df806710..cbf460927c42 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -16,7 +16,6 @@ struct ceph_msg; struct ceph_snap_context; struct ceph_osd_request; struct ceph_osd_client; -struct ceph_authorizer; /* * completion callback for async writepages diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 3e39ae5bc799..5b17de62c962 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -444,6 +444,7 @@ struct cgroup_subsys { int (*can_attach)(struct cgroup_taskset *tset); void (*cancel_attach)(struct cgroup_taskset *tset); void (*attach)(struct cgroup_taskset *tset); + void (*post_attach)(void); int (*can_fork)(struct task_struct *task); void (*cancel_fork)(struct task_struct *task); void (*fork)(struct task_struct *task); diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h deleted file mode 100644 index 0414009e2c30..000000000000 --- a/include/linux/cpufreq-dt.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright (C) 2014 Marvell - * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#ifndef __CPUFREQ_DT_H__ -#define __CPUFREQ_DT_H__ - -struct cpufreq_dt_platform_data { - /* - * True when each CPU has its own clock to control its - * frequency, false when all CPUs are controlled by a single - * clock. - */ - bool independent_clocks; -}; - -#endif /* __CPUFREQ_DT_H__ */ diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 718e8725de8a..4e81e08db752 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -102,6 +102,17 @@ struct cpufreq_policy { */ struct rw_semaphore rwsem; + /* + * Fast switch flags: + * - fast_switch_possible should be set by the driver if it can + * guarantee that frequency can be changed on any CPU sharing the + * policy and that the change will affect all of the policy CPUs then. + * - fast_switch_enabled is to be set by governors that support fast + * freqnency switching with the help of cpufreq_enable_fast_switch(). + */ + bool fast_switch_possible; + bool fast_switch_enabled; + /* Synchronization for frequency transitions */ bool transition_ongoing; /* Tracks transition status */ spinlock_t transition_lock; @@ -156,6 +167,8 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); int cpufreq_update_policy(unsigned int cpu); bool have_governor_per_policy(void); struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); +void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); +void cpufreq_disable_fast_switch(struct cpufreq_policy *policy); #else static inline unsigned int cpufreq_get(unsigned int cpu) { @@ -236,6 +249,8 @@ struct cpufreq_driver { unsigned int relation); /* Deprecated */ int (*target_index)(struct cpufreq_policy *policy, unsigned int index); + unsigned int (*fast_switch)(struct cpufreq_policy *policy, + unsigned int target_freq); /* * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION * unset. @@ -426,6 +441,20 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, #define CPUFREQ_POLICY_POWERSAVE (1) #define CPUFREQ_POLICY_PERFORMANCE (2) +/* + * The polling frequency depends on the capability of the processor. Default + * polling frequency is 1000 times the transition latency of the processor. The + * ondemand governor will work on any processor with transition latency <= 10ms, + * using appropriate sampling rate. + * + * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL) + * the ondemand governor will not work. All times here are in us (microseconds). 
+ */ +#define MIN_SAMPLING_RATE_RATIO (2) +#define LATENCY_MULTIPLIER (1000) +#define MIN_LATENCY_MULTIPLIER (20) +#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000) + /* Governor Events */ #define CPUFREQ_GOV_START 1 #define CPUFREQ_GOV_STOP 2 @@ -450,6 +479,8 @@ struct cpufreq_governor { }; /* Pass a target to the cpufreq driver */ +unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, + unsigned int target_freq); int cpufreq_driver_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation); @@ -462,6 +493,29 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor); struct cpufreq_governor *cpufreq_default_governor(void); struct cpufreq_governor *cpufreq_fallback_governor(void); +/* Governor attribute set */ +struct gov_attr_set { + struct kobject kobj; + struct list_head policy_list; + struct mutex update_lock; + int usage_count; +}; + +/* sysfs ops for cpufreq governors */ +extern const struct sysfs_ops governor_sysfs_ops; + +void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node); +void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node); +unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node); + +/* Governor sysfs attribute */ +struct governor_attr { + struct attribute attr; + ssize_t (*show)(struct gov_attr_set *attr_set, char *buf); + ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf, + size_t count); +}; + /********************************************************************* * FREQUENCY TABLE HELPERS * *********************************************************************/ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index fea160ee5803..85a868ccb493 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask) task_unlock(current); } -extern void cpuset_post_attach_flush(void); - #else /* !CONFIG_CPUSETS */ static inline bool cpusets_enabled(void) { return false; } @@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq) return false; } -static inline void cpuset_post_attach_flush(void) -{ -} - #endif /* !CONFIG_CPUSETS */ #endif /* _LINUX_CPUSET_H */ diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 6fa02a20eb63..2de4e2eea180 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -19,6 +19,13 @@ #define DEVFREQ_NAME_LEN 16 +/* DEVFREQ notifier interface */ +#define DEVFREQ_TRANSITION_NOTIFIER (0) + +/* Transition notifiers of DEVFREQ_TRANSITION_NOTIFIER */ +#define DEVFREQ_PRECHANGE (0) +#define DEVFREQ_POSTCHANGE (1) + struct devfreq; /** @@ -143,6 +150,7 @@ struct devfreq_governor { * @trans_table: Statistics of devfreq transitions * @time_in_state: Statistics of devfreq states * @last_stat_updated: The last time stat updated + * @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier * * This structure stores the devfreq information for a give device. 
* @@ -177,6 +185,13 @@ struct devfreq { unsigned int *trans_table; unsigned long *time_in_state; unsigned long last_stat_updated; + + struct srcu_notifier_head transition_notifier_list; +}; + +struct devfreq_freqs { + unsigned long old; + unsigned long new; }; #if defined(CONFIG_PM_DEVFREQ) @@ -207,6 +222,22 @@ extern int devm_devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq); extern void devm_devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq); +extern int devfreq_register_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern int devfreq_unregister_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern int devm_devfreq_register_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern void devm_devfreq_unregister_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list); +extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, + int index); /** * devfreq_update_stats() - update the last_status pointer in struct devfreq @@ -241,6 +272,39 @@ struct devfreq_simple_ondemand_data { }; #endif +#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE) +/** + * struct devfreq_passive_data - void *data fed to struct devfreq + * and devfreq_add_device + * @parent: the devfreq instance of parent device. + * @get_target_freq: Optional callback, Returns desired operating frequency + * for the device using passive governor. That is called + * when passive governor should decide the next frequency + * by using the new frequency of parent devfreq device + * using governors except for passive governor. + * If the devfreq device has the specific method to decide + * the next frequency, should use this callback. + * @this: the devfreq instance of own device. + * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER list + * + * The devfreq_passive_data have to set the devfreq instance of parent + * device with governors except for the passive governor. But, don't need to + * initialize the 'this' and 'nb' field because the devfreq core will handle + * them. + */ +struct devfreq_passive_data { + /* Should set the devfreq instance of parent device */ + struct devfreq *parent; + + /* Optional callback to decide the next frequency of passvice device */ + int (*get_target_freq)(struct devfreq *this, unsigned long *freq); + + /* For passive governor's internal use. 
Don't need to set them */ + struct devfreq *this; + struct notifier_block nb; +}; +#endif + #else /* !CONFIG_PM_DEVFREQ */ static inline struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, @@ -307,6 +371,41 @@ static inline void devm_devfreq_unregister_opp_notifier(struct device *dev, { } +static inline int devfreq_register_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ + return 0; +} + +static inline int devfreq_unregister_notifier(struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ + return 0; +} + +static inline int devm_devfreq_register_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ + return 0; +} + +static inline void devm_devfreq_unregister_notifier(struct device *dev, + struct devfreq *devfreq, + struct notifier_block *nb, + unsigned int list) +{ +} + +static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, + int index) +{ + return ERR_PTR(-ENODEV); +} + static inline int devfreq_update_stats(struct devfreq *df) { return -EINVAL; diff --git a/include/linux/device.h b/include/linux/device.h index 002c59728dbe..b130304f9b1b 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -956,11 +956,6 @@ static inline bool device_async_suspend_enabled(struct device *dev) return !!dev->power.async_suspend; } -static inline void pm_suspend_ignore_children(struct device *dev, bool enable) -{ - dev->power.ignore_children = enable; -} - static inline void dev_pm_syscore_device(struct device *dev, bool val) { #ifdef CONFIG_PM_SLEEP diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h index 358a4db72a27..5871f292b596 100644 --- a/include/linux/devpts_fs.h +++ b/include/linux/devpts_fs.h @@ -27,11 +27,11 @@ int devpts_new_index(struct pts_fs_info *); void devpts_kill_index(struct pts_fs_info *, int); /* mknod in devpts */ -struct inode *devpts_pty_new(struct pts_fs_info *, dev_t, int, void *); +struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *); /* get private structure */ -void *devpts_get_priv(struct inode *pts_inode); +void *devpts_get_priv(struct dentry *); /* unlink */ -void devpts_pty_kill(struct inode *inode); +void devpts_pty_kill(struct dentry *); #endif diff --git a/include/linux/hash.h b/include/linux/hash.h index 1afde47e1528..79c52fa81cac 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h @@ -32,12 +32,28 @@ #error Wordsize not 32 or 64 #endif +/* + * The above primes are actively bad for hashing, since they are + * too sparse. The 32-bit one is mostly ok, the 64-bit one causes + * real problems. Besides, the "prime" part is pointless for the + * multiplicative hash. + * + * Although a random odd number will do, it turns out that the golden + * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice + * properties. + * + * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2. + * (See Knuth vol 3, section 6.4, exercise 9.) + */ +#define GOLDEN_RATIO_32 0x61C88647 +#define GOLDEN_RATIO_64 0x61C8864680B583EBull + static __always_inline u64 hash_64(u64 val, unsigned int bits) { u64 hash = val; -#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 - hash = hash * GOLDEN_RATIO_PRIME_64; +#if BITS_PER_LONG == 64 + hash = hash * GOLDEN_RATIO_64; #else /* Sigh, gcc can't optimise this alone like it does for 32 bits. 
*/ u64 n = hash; diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 7008623e24b1..d7b9e5346fba 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -152,6 +152,7 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) } struct page *get_huge_zero_page(void); +void put_huge_zero_page(void); #else /* CONFIG_TRANSPARENT_HUGEPAGE */ #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) @@ -208,6 +209,10 @@ static inline bool is_huge_zero_page(struct page *page) return false; } +static inline void put_huge_zero_page(void) +{ + BUILD_BUG(); +} static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, int flags) diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index d5569734f672..548fd535fd02 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) return (struct ethhdr *)skb_mac_header(skb); } +static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb) +{ + return (struct ethhdr *)skb_inner_mac_header(skb); +} + int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index d026b190c530..d10ef06971b5 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -196,9 +196,11 @@ struct lock_list { * We record lock dependency chains, so that we can cache them: */ struct lock_chain { - u8 irq_context; - u8 depth; - u16 base; + /* see BUILD_BUG_ON()s in lookup_chain_cache() */ + unsigned int irq_context : 2, + depth : 6, + base : 24; + /* 4 byte hole */ struct hlist_node entry; u64 chain_key; }; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8156e3c9239c..b3575f392492 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -392,6 +392,17 @@ enum { MLX5_CAP_OFF_CMDIF_CSUM = 46, }; +enum { + /* + * Max wqe size for rdma read is 512 bytes, so this + * limits our max_sge_rd as the wqe needs to fit: + * - ctrl segment (16 bytes) + * - rdma segment (16 bytes) + * - scatter elements (16 bytes each) + */ + MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16 +}; + struct mlx5_inbox_hdr { __be16 opcode; u8 rsvd[4]; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index dcd5ac8d3b14..369c837d40f5 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -519,8 +519,9 @@ enum mlx5_device_state { }; enum mlx5_interface_state { - MLX5_INTERFACE_STATE_DOWN, - MLX5_INTERFACE_STATE_UP, + MLX5_INTERFACE_STATE_DOWN = BIT(0), + MLX5_INTERFACE_STATE_UP = BIT(1), + MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2), }; enum mlx5_pci_status { @@ -544,7 +545,7 @@ struct mlx5_core_dev { enum mlx5_device_state state; /* sync interface state */ struct mutex intf_state_mutex; - enum mlx5_interface_state interface_state; + unsigned long intf_state; void (*event) (struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index a1d145abd4eb..b30250ab7604 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -54,9 +54,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status *status); -int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); -void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 
port); -void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, u8 port); int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index bd93e6323603..301da4a5e6bf 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h @@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr); int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev, u16 vport, u8 *addr); +int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu); +int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, u64 *system_image_guid); int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); diff --git a/include/linux/mm.h b/include/linux/mm.h index a55e5be0894f..864d7221de84 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1031,6 +1031,8 @@ static inline bool page_mapped(struct page *page) page = compound_head(page); if (atomic_read(compound_mapcount_ptr(page)) >= 0) return true; + if (PageHuge(page)) + return false; for (i = 0; i < hpage_nr_pages(page); i++) { if (atomic_read(&page[i]._mapcount) >= 0) return true; @@ -1138,6 +1140,8 @@ struct zap_details { struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte); +struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, + pmd_t pmd); int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size); diff --git a/include/linux/net.h b/include/linux/net.h index 49175e4ced11..f840d77c6c31 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -246,7 +246,15 @@ do { \ net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__) #define net_info_ratelimited(fmt, ...) \ net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__) -#if defined(DEBUG) +#if defined(CONFIG_DYNAMIC_DEBUG) +#define net_dbg_ratelimited(fmt, ...) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ + net_ratelimit()) \ + __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ +} while (0) +#elif defined(DEBUG) #define net_dbg_ratelimited(fmt, ...) 
\ net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__) #else diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8395308a2445..b3c46b019ac1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4004,7 +4004,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb); static inline bool net_gso_ok(netdev_features_t features, int gso_type) { - netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; + netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT; /* check flags correspondence */ BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); diff --git a/include/linux/of.h b/include/linux/of.h index 7fcb681baadf..31758036787c 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -133,7 +133,7 @@ void of_core_init(void); static inline bool is_of_node(struct fwnode_handle *fwnode) { - return fwnode && fwnode->type == FWNODE_OF; + return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF; } static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index f4ed4f1b0c77..6b052aa7b5b7 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -517,6 +517,27 @@ static inline int PageTransCompound(struct page *page) } /* + * PageTransCompoundMap is the same as PageTransCompound, but it also + * guarantees the primary MMU has the entire compound page mapped + * through pmd_trans_huge, which in turn guarantees the secondary MMUs + * can also map the entire compound page. This allows the secondary + * MMUs to call get_user_pages() only once for each compound page and + * to immediately map the entire compound page with a single secondary + * MMU fault. If there will be a pmd split later, the secondary MMUs + * will get an update through the MMU notifier invalidation through + * split_huge_pmd(). + * + * Unlike PageTransCompound, this is safe to be called only while + * split_huge_pmd() cannot run from under us, like if protected by the + * MMU notifier, otherwise it may result in page->_mapcount < 0 false + * positives. + */ +static inline int PageTransCompoundMap(struct page *page) +{ + return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0; +} + +/* * PageTransTail returns true for both transparent huge pages * and hugetlbfs pages, so it should only be called when it's known * that hugetlbfs pages aren't involved. 
@@ -559,6 +580,7 @@ static inline int TestClearPageDoubleMap(struct page *page) #else TESTPAGEFLAG_FALSE(TransHuge) TESTPAGEFLAG_FALSE(TransCompound) +TESTPAGEFLAG_FALSE(TransCompoundMap) TESTPAGEFLAG_FALSE(TransTail) TESTPAGEFLAG_FALSE(DoubleMap) TESTSETFLAG_FALSE(DoubleMap) diff --git a/include/linux/pm.h b/include/linux/pm.h index 6a5d654f4447..06eb353182ab 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h @@ -563,7 +563,6 @@ struct dev_pm_info { bool is_suspended:1; /* Ditto */ bool is_noirq_suspended:1; bool is_late_suspended:1; - bool ignore_children:1; bool early_init:1; /* Owned by the PM core */ bool direct_complete:1; /* Owned by the PM core */ spinlock_t lock; @@ -591,6 +590,7 @@ struct dev_pm_info { unsigned int deferred_resume:1; unsigned int run_wake:1; unsigned int runtime_auto:1; + bool ignore_children:1; unsigned int no_callbacks:1; unsigned int irq_safe:1; unsigned int use_autosuspend:1; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 49cd8890b873..39285c7bd3f5 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -28,14 +28,12 @@ enum gpd_status { struct dev_power_governor { bool (*power_down_ok)(struct dev_pm_domain *domain); - bool (*stop_ok)(struct device *dev); + bool (*suspend_ok)(struct device *dev); }; struct gpd_dev_ops { int (*start)(struct device *dev); int (*stop)(struct device *dev); - int (*save_state)(struct device *dev); - int (*restore_state)(struct device *dev); bool (*active_wakeup)(struct device *dev); }; @@ -94,7 +92,7 @@ struct gpd_timing_data { s64 resume_latency_ns; s64 effective_constraint_ns; bool constraint_changed; - bool cached_stop_ok; + bool cached_suspend_ok; }; struct pm_domain_data { diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index cccaf4a29e9f..bca26157f5b6 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -65,6 +65,10 @@ void dev_pm_opp_put_prop_name(struct device *dev); int dev_pm_opp_set_regulator(struct device *dev, const char *name); void dev_pm_opp_put_regulator(struct device *dev); int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq); +int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask); +int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); +void dev_pm_opp_remove_table(struct device *dev); +void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask); #else static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) { @@ -109,25 +113,25 @@ static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available) { - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq) { - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOTSUPP); } static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq) { - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENOTSUPP); } static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) @@ -147,73 +151,85 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq) static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( struct device *dev) { - return ERR_PTR(-EINVAL); + return 
ERR_PTR(-ENOTSUPP); } static inline int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, unsigned int count) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_put_prop_name(struct device *dev) {} static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_put_regulator(struct device *dev) {} static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) { + return -ENOTSUPP; +} + +static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask) +{ + return -ENOTSUPP; +} + +static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) +{ return -EINVAL; } +static inline void dev_pm_opp_remove_table(struct device *dev) +{ +} + +static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) +{ +} + #endif /* CONFIG_PM_OPP */ #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) int dev_pm_opp_of_add_table(struct device *dev); void dev_pm_opp_of_remove_table(struct device *dev); -int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask); -void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask); -int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); -int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); +int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask); +void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask); +int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask); #else static inline int dev_pm_opp_of_add_table(struct device *dev) { - return -EINVAL; + return -ENOTSUPP; } static inline void dev_pm_opp_of_remove_table(struct device *dev) { } -static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask) -{ - return -ENOSYS; -} - -static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask) +static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) { + return -ENOTSUPP; } -static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) +static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) { - return -ENOSYS; } -static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) +static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - return -ENOSYS; + return -ENOTSUPP; } #endif diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 7af093d6a4dd..2e14d2667b6c 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -56,6 +56,11 @@ extern void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); +static inline void pm_suspend_ignore_children(struct device *dev, bool enable) +{ + dev->power.ignore_children = enable; +} + static inline bool pm_children_suspended(struct device *dev) { return dev->power.ignore_children @@ -156,6 +161,7 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {} static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} +static inline 
void pm_suspend_ignore_children(struct device *dev, bool enable) {} static inline bool pm_children_suspended(struct device *dev) { return false; } static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {} diff --git a/include/linux/rio_mport_cdev.h b/include/linux/rio_mport_cdev.h deleted file mode 100644 index b65d19df76d2..000000000000 --- a/include/linux/rio_mport_cdev.h +++ /dev/null @@ -1,271 +0,0 @@ -/* - * Copyright (c) 2015-2016, Integrated Device Technology Inc. - * Copyright (c) 2015, Prodrive Technologies - * Copyright (c) 2015, Texas Instruments Incorporated - * Copyright (c) 2015, RapidIO Trade Association - * All rights reserved. - * - * This software is available to you under a choice of one of two licenses. - * You may choose to be licensed under the terms of the GNU General Public - * License(GPL) Version 2, or the BSD-3 Clause license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation - * and/or other materials provided with the distribution. - * - * 3. Neither the name of the copyright holder nor the names of its contributors - * may be used to endorse or promote products derived from this software without - * specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _RIO_MPORT_CDEV_H_ -#define _RIO_MPORT_CDEV_H_ - -#ifndef __user -#define __user -#endif - -struct rio_mport_maint_io { - uint32_t rioid; /* destID of remote device */ - uint32_t hopcount; /* hopcount to remote device */ - uint32_t offset; /* offset in register space */ - size_t length; /* length in bytes */ - void __user *buffer; /* data buffer */ -}; - -/* - * Definitions for RapidIO data transfers: - * - memory mapped (MAPPED) - * - packet generation from memory (TRANSFER) - */ -#define RIO_TRANSFER_MODE_MAPPED (1 << 0) -#define RIO_TRANSFER_MODE_TRANSFER (1 << 1) -#define RIO_CAP_DBL_SEND (1 << 2) -#define RIO_CAP_DBL_RECV (1 << 3) -#define RIO_CAP_PW_SEND (1 << 4) -#define RIO_CAP_PW_RECV (1 << 5) -#define RIO_CAP_MAP_OUTB (1 << 6) -#define RIO_CAP_MAP_INB (1 << 7) - -struct rio_mport_properties { - uint16_t hdid; - uint8_t id; /* Physical port ID */ - uint8_t index; - uint32_t flags; - uint32_t sys_size; /* Default addressing size */ - uint8_t port_ok; - uint8_t link_speed; - uint8_t link_width; - uint32_t dma_max_sge; - uint32_t dma_max_size; - uint32_t dma_align; - uint32_t transfer_mode; /* Default transfer mode */ - uint32_t cap_sys_size; /* Capable system sizes */ - uint32_t cap_addr_size; /* Capable addressing sizes */ - uint32_t cap_transfer_mode; /* Capable transfer modes */ - uint32_t cap_mport; /* Mport capabilities */ -}; - -/* - * Definitions for RapidIO events; - * - incoming port-writes - * - incoming doorbells - */ -#define RIO_DOORBELL (1 << 0) -#define RIO_PORTWRITE (1 << 1) - -struct rio_doorbell { - uint32_t rioid; - uint16_t payload; -}; - -struct rio_doorbell_filter { - uint32_t rioid; /* 0xffffffff to match all ids */ - uint16_t low; - uint16_t high; -}; - - -struct rio_portwrite { - uint32_t payload[16]; -}; - -struct rio_pw_filter { - uint32_t mask; - uint32_t low; - uint32_t high; -}; - -/* RapidIO base address for inbound requests set to value defined below - * indicates that no specific RIO-to-local address translation is requested - * and driver should use direct (one-to-one) address mapping. -*/ -#define RIO_MAP_ANY_ADDR (uint64_t)(~((uint64_t) 0)) - -struct rio_mmap { - uint32_t rioid; - uint64_t rio_addr; - uint64_t length; - uint64_t handle; - void *address; -}; - -struct rio_dma_mem { - uint64_t length; /* length of DMA memory */ - uint64_t dma_handle; /* handle associated with this memory */ - void *buffer; /* pointer to this memory */ -}; - - -struct rio_event { - unsigned int header; /* event type RIO_DOORBELL or RIO_PORTWRITE */ - union { - struct rio_doorbell doorbell; /* header for RIO_DOORBELL */ - struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */ - } u; -}; - -enum rio_transfer_sync { - RIO_TRANSFER_SYNC, /* synchronous transfer */ - RIO_TRANSFER_ASYNC, /* asynchronous transfer */ - RIO_TRANSFER_FAF, /* fire-and-forget transfer */ -}; - -enum rio_transfer_dir { - RIO_TRANSFER_DIR_READ, /* Read operation */ - RIO_TRANSFER_DIR_WRITE, /* Write operation */ -}; - -/* - * RapidIO data exchange transactions are lists of individual transfers. Each - * transfer exchanges data between two RapidIO devices by remote direct memory - * access and has its own completion code. - * - * The RapidIO specification defines four types of data exchange requests: - * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface allows - * to specify the required type of write operation or combination of them when - * only the last data packet requires response. 
- * - * NREAD: read up to 256 bytes from remote device memory into local memory - * NWRITE: write up to 256 bytes from local memory to remote device memory - * without confirmation - * SWRITE: as NWRITE, but all addresses and payloads must be 64-bit aligned - * NWRITE_R: as NWRITE, but expect acknowledgment from remote device. - * - * The default exchange is chosen from NREAD and any of the WRITE modes as the - * driver sees fit. For write requests the user can explicitly choose between - * any of the write modes for each transaction. - */ -enum rio_exchange { - RIO_EXCHANGE_DEFAULT, /* Default method */ - RIO_EXCHANGE_NWRITE, /* All packets using NWRITE */ - RIO_EXCHANGE_SWRITE, /* All packets using SWRITE */ - RIO_EXCHANGE_NWRITE_R, /* Last packet NWRITE_R, others NWRITE */ - RIO_EXCHANGE_SWRITE_R, /* Last packet NWRITE_R, others SWRITE */ - RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */ -}; - -struct rio_transfer_io { - uint32_t rioid; /* Target destID */ - uint64_t rio_addr; /* Address in target's RIO mem space */ - enum rio_exchange method; /* Data exchange method */ - void __user *loc_addr; - uint64_t handle; - uint64_t offset; /* Offset in buffer */ - uint64_t length; /* Length in bytes */ - uint32_t completion_code; /* Completion code for this transfer */ -}; - -struct rio_transaction { - uint32_t transfer_mode; /* Data transfer mode */ - enum rio_transfer_sync sync; /* Synchronization method */ - enum rio_transfer_dir dir; /* Transfer direction */ - size_t count; /* Number of transfers */ - struct rio_transfer_io __user *block; /* Array of <count> transfers */ -}; - -struct rio_async_tx_wait { - uint32_t token; /* DMA transaction ID token */ - uint32_t timeout; /* Wait timeout in msec, if 0 use default TO */ -}; - -#define RIO_MAX_DEVNAME_SZ 20 - -struct rio_rdev_info { - uint32_t destid; - uint8_t hopcount; - uint32_t comptag; - char name[RIO_MAX_DEVNAME_SZ + 1]; -}; - -/* Driver IOCTL codes */ -#define RIO_MPORT_DRV_MAGIC 'm' - -#define RIO_MPORT_MAINT_HDID_SET \ - _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t) -#define RIO_MPORT_MAINT_COMPTAG_SET \ - _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t) -#define RIO_MPORT_MAINT_PORT_IDX_GET \ - _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t) -#define RIO_MPORT_GET_PROPERTIES \ - _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties) -#define RIO_MPORT_MAINT_READ_LOCAL \ - _IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io) -#define RIO_MPORT_MAINT_WRITE_LOCAL \ - _IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io) -#define RIO_MPORT_MAINT_READ_REMOTE \ - _IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io) -#define RIO_MPORT_MAINT_WRITE_REMOTE \ - _IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io) -#define RIO_ENABLE_DOORBELL_RANGE \ - _IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter) -#define RIO_DISABLE_DOORBELL_RANGE \ - _IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter) -#define RIO_ENABLE_PORTWRITE_RANGE \ - _IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter) -#define RIO_DISABLE_PORTWRITE_RANGE \ - _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter) -#define RIO_SET_EVENT_MASK \ - _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int) -#define RIO_GET_EVENT_MASK \ - _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int) -#define RIO_MAP_OUTBOUND \ - _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap) -#define RIO_UNMAP_OUTBOUND \ - _IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap) -#define RIO_MAP_INBOUND \ - _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap) -#define RIO_UNMAP_INBOUND \ - _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t) -#define 
RIO_ALLOC_DMA \ - _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem) -#define RIO_FREE_DMA \ - _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t) -#define RIO_TRANSFER \ - _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction) -#define RIO_WAIT_FOR_ASYNC \ - _IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait) -#define RIO_DEV_ADD \ - _IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info) -#define RIO_DEV_DEL \ - _IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info) - -#endif /* _RIO_MPORT_CDEV_H_ */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 52c4847b05e2..8344e1947eec 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -3240,7 +3240,10 @@ struct update_util_data { u64 time, unsigned long util, unsigned long max); }; -void cpufreq_set_update_util_data(int cpu, struct update_util_data *data); +void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data, + void (*func)(struct update_util_data *data, u64 time, + unsigned long util, unsigned long max)); +void cpufreq_remove_update_util_hook(int cpu); #endif /* CONFIG_CPU_FREQ */ #endif diff --git a/include/linux/swap.h b/include/linux/swap.h index 2b83359c19ca..0a4cd4703f40 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void) #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) { + /* Cgroup2 doesn't have per-cgroup swappiness */ + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + return vm_swappiness; + /* root ? */ if (mem_cgroup_disabled() || !memcg->css.parent) return vm_swappiness; diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 161052477f77..b742b5e47cc2 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -7,7 +7,7 @@ * defined; unless noted otherwise, they are optional, and can be * filled in with a null pointer. * - * struct tty_struct * (*lookup)(struct tty_driver *self, int idx) + * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx) * * Return the tty device corresponding to idx, NULL if there is not * one currently in use and an ERR_PTR value on error. Called under @@ -250,7 +250,7 @@ struct serial_icounter_struct; struct tty_operations { struct tty_struct * (*lookup)(struct tty_driver *driver, - struct inode *inode, int idx); + struct file *filp, int idx); int (*install)(struct tty_driver *driver, struct tty_struct *tty); void (*remove)(struct tty_driver *driver, struct tty_struct *tty); int (*open)(struct tty_struct * tty, struct file * filp); |
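Usage sketches for some of the interface changes above follow; identifiers prefixed foo_ are hypothetical and the snippets are illustrative, not taken from the commit. First, the ceph change: struct ceph_authorizer now carries its own destroy callback, so ceph_auth_destroy_authorizer() no longer needs a ceph_auth_client. A minimal caller-side sketch, assuming the helper simply invokes a->destroy(a):

#include <linux/ceph/auth.h>

static void foo_put_authorizer(struct ceph_auth_handshake *auth)
{
	if (auth->authorizer)
		ceph_auth_destroy_authorizer(auth->authorizer);
	auth->authorizer = NULL;
}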
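For the cpufreq fast-switch additions, a sketch of a hypothetical driver: it sets policy->fast_switch_possible in ->init() and implements ->fast_switch(), which returns the frequency actually set; governors then reach it through cpufreq_driver_fast_switch() once cpufreq_enable_fast_switch() has been called. The register-write helper below is made up.

#include <linux/cpufreq.h>

/* Hypothetical hardware access; assumed to affect all CPUs of the policy. */
static void foo_write_freq_register(unsigned int cpu, unsigned int khz)
{
}

static unsigned int foo_fast_switch(struct cpufreq_policy *policy,
				    unsigned int target_freq)
{
	foo_write_freq_register(policy->cpu, target_freq);
	return target_freq;
}

static int foo_cpu_init(struct cpufreq_policy *policy)
{
	policy->fast_switch_possible = true;
	return 0;	/* frequency table setup etc. omitted in this sketch */
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name		= "foo",
	.init		= foo_cpu_init,
	.fast_switch	= foo_fast_switch,
	/* .verify, .target_index, ... omitted in this sketch */
};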
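The gov_attr_set/governor_attr pair gives governors a common sysfs backend: attributes show and store against the attribute set rather than a raw kobject, with governor_sysfs_ops doing the translation. A sketch of one tunable, with a hypothetical foo_tunables container:

#include <linux/cpufreq.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

struct foo_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

static inline struct foo_tunables *to_foo_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct foo_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	return sprintf(buf, "%u\n", to_foo_tunables(attr_set)->rate_limit_us);
}

static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set,
				   const char *buf, size_t count)
{
	struct foo_tunables *tunables = to_foo_tunables(attr_set);

	if (kstrtouint(buf, 10, &tunables->rate_limit_us))
		return -EINVAL;

	return count;
}

static struct governor_attr rate_limit_us =
	__ATTR(rate_limit_us, 0644, rate_limit_us_show, rate_limit_us_store);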
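For the new devfreq transition notifier, a consumer sketch; it assumes (not visible in this header diff) that the notifier data passed for DEVFREQ_PRECHANGE/DEVFREQ_POSTCHANGE is the struct devfreq_freqs added above:

#include <linux/devfreq.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int foo_devfreq_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct devfreq_freqs *freqs = data;

	if (event == DEVFREQ_POSTCHANGE)
		pr_debug("devfreq transition: %lu -> %lu\n",
			 freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block foo_devfreq_nb = {
	.notifier_call = foo_devfreq_event,
};

/* With a valid struct devfreq *df (e.g. from devfreq_get_devfreq_by_phandle()):
 *	devfreq_register_notifier(df, &foo_devfreq_nb, DEVFREQ_TRANSITION_NOTIFIER);
 */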
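struct devfreq_passive_data only needs .parent filled in by the driver; .this and .nb belong to the devfreq core. A registration sketch, assuming the passive governor is selected by the name "passive" (the governor name is not part of this header diff):

#include <linux/devfreq.h>
#include <linux/err.h>

static struct devfreq_dev_profile foo_profile;		/* filled in elsewhere */
static struct devfreq_passive_data foo_passive_data;	/* .parent set below */

static int foo_register_passive(struct device *dev, struct devfreq *parent)
{
	struct devfreq *devfreq;

	foo_passive_data.parent = parent;
	devfreq = devm_devfreq_add_device(dev, &foo_profile, "passive",
					  &foo_passive_data);
	return PTR_ERR_OR_ZERO(devfreq);
}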
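The hash.h change replaces the sparse "prime" multipliers with golden-ratio constants. A standalone userspace sketch of the resulting 64-bit multiplicative hash (multiply, then keep the top bits; bits must be in 1..63 here):

#include <stdint.h>
#include <stdio.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull

/* Multiplicative hashing: the high bits of the product are the best mixed. */
static inline uint64_t hash_64_sketch(uint64_t val, unsigned int bits)
{
	return (val * GOLDEN_RATIO_64) >> (64 - bits);
}

int main(void)
{
	printf("%llu\n",
	       (unsigned long long)hash_64_sketch(0x123456789abcdefULL, 10));
	return 0;
}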
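The net_gso_ok() change matters because gso_type is a 32-bit integer: shifting it by NETIF_F_GSO_SHIFT can lose high GSO bits before the value is widened to the 64-bit netdev_features_t. A standalone illustration with made-up bit positions (unsigned arithmetic keeps the truncation well defined):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int gso_type = 1u << 20;	/* some high GSO flag bit */
	unsigned int shift = 16;		/* stand-in for NETIF_F_GSO_SHIFT */

	uint64_t wrong = (uint64_t)(gso_type << shift);	/* truncated in 32 bits */
	uint64_t right = (uint64_t)gso_type << shift;	/* widen first, then shift */

	printf("wrong=%#llx right=%#llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}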
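Finally, the sched.h change splits hook registration into cpufreq_add_update_util_hook(), which now takes the callback explicitly, and cpufreq_remove_update_util_hook(). A per-CPU registration sketch with a hypothetical foo_update callback:

#include <linux/sched.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct update_util_data, foo_update_util);

static void foo_update(struct update_util_data *data, u64 time,
		       unsigned long util, unsigned long max)
{
	/* react to the new utilization sample here */
}

static void foo_start(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &per_cpu(foo_update_util, cpu),
				     foo_update);
}

static void foo_stop(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
}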