Diffstat (limited to 'mm')
-rw-r--r-- | mm/compaction.c     |  6 |
-rw-r--r-- | mm/hugetlb.c        |  6 |
-rw-r--r-- | mm/page-writeback.c | 10 |
-rw-r--r-- | mm/page_alloc.c     | 14 |
-rw-r--r-- | mm/util.c           |  6 |
-rw-r--r-- | mm/vmstat.c         |  4 |
6 files changed, 23 insertions, 23 deletions
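
All six files make the same mechanical change: each sysctl handler's struct ctl_table argument gains a const qualifier. As a rough sketch (not taken from this diff; the handler name is invented), a handler written against the new prototype could look like the following, assuming proc_dointvec_minmax() carries the matching const-qualified prototype from the same series:

/*
 * Illustrative sketch only -- not part of this diff.  The ctl_table
 * argument is const, so the handler may read fields such as table->data
 * and table->extra1/extra2 but must not modify the table itself; the
 * value behind table->data is still written by the generic helper.
 */
static int example_sysctl_handler(const struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* A write succeeded; react to the new value here. */
	return 0;
}
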
diff --git a/mm/compaction.c b/mm/compaction.c
index 6cb901b63482..eb95e9b435d0 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2962,7 +2962,7 @@ static int compact_nodes(void)
 	return 0;
 }
 
-static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write,
+static int compaction_proactiveness_sysctl_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
 {
 	int rc, nid;
@@ -2992,7 +2992,7 @@ static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int
  * This is the entry point for compacting all nodes via
  * /proc/sys/vm/compact_memory
  */
-static int sysctl_compaction_handler(struct ctl_table *table, int write,
+static int sysctl_compaction_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
 {
 	int ret;
@@ -3303,7 +3303,7 @@ static int kcompactd_cpu_online(unsigned int cpu)
 	return 0;
 }
 
-static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table,
+static int proc_dointvec_minmax_warn_RT_change(const struct ctl_table *table,
 		int write, void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret, old;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0858a1827207..aaf508be0a2b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4925,7 +4925,7 @@ out:
 	return ret;
 }
 
-static int hugetlb_sysctl_handler(struct ctl_table *table, int write,
+static int hugetlb_sysctl_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
 {
 
@@ -4934,7 +4934,7 @@ static int hugetlb_sysctl_handler(struct ctl_table *table, int write,
 }
 
 #ifdef CONFIG_NUMA
-static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
+static int hugetlb_mempolicy_sysctl_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
 {
 	return hugetlb_sysctl_handler_common(true, table, write,
@@ -4942,7 +4942,7 @@ static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
 }
 #endif /* CONFIG_NUMA */
 
-static int hugetlb_overcommit_handler(struct ctl_table *table, int write,
+static int hugetlb_overcommit_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
 {
 	struct hstate *h = &default_hstate;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index acff24e9fae4..4430ac68e4c4 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -506,7 +506,7 @@ bool node_dirty_ok(struct pglist_data *pgdat)
 }
 
 #ifdef CONFIG_SYSCTL
-static int dirty_background_ratio_handler(struct ctl_table *table, int write,
+static int dirty_background_ratio_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
@@ -517,7 +517,7 @@ static int dirty_background_ratio_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-static int dirty_background_bytes_handler(struct ctl_table *table, int write,
+static int dirty_background_bytes_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
@@ -535,7 +535,7 @@ static int dirty_background_bytes_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-static int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
+static int dirty_ratio_handler(const struct ctl_table *table, int write, void *buffer,
 		size_t *lenp, loff_t *ppos)
 {
 	int old_ratio = vm_dirty_ratio;
@@ -549,7 +549,7 @@ static int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
 	return ret;
 }
 
-static int dirty_bytes_handler(struct ctl_table *table, int write,
+static int dirty_bytes_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	unsigned long old_bytes = vm_dirty_bytes;
@@ -2203,7 +2203,7 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 /*
  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
  */
-static int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
+static int dirty_writeback_centisecs_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
 {
 	unsigned int old_interval = dirty_writeback_interval;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3398d914ed83..8337926b89d4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5127,7 +5127,7 @@ static char numa_zonelist_order[] = "Node";
 /*
  * sysctl handler for numa_zonelist_order
  */
-static int numa_zonelist_order_handler(struct ctl_table *table, int write,
+static int numa_zonelist_order_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
 {
 	if (write)
@@ -6091,7 +6091,7 @@ postcore_initcall(init_per_zone_wmark_min)
  * that we can call two helper functions whenever min_free_kbytes
  * changes.
  */
-static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
+static int min_free_kbytes_sysctl_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
 {
 	int rc;
@@ -6107,7 +6107,7 @@ static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
 	return 0;
 }
 
-static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
+static int watermark_scale_factor_sysctl_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
 {
 	int rc;
@@ -6137,7 +6137,7 @@ static void setup_min_unmapped_ratio(void)
 }
 
 
-static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
+static int sysctl_min_unmapped_ratio_sysctl_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
 {
 	int rc;
@@ -6164,7 +6164,7 @@ static void setup_min_slab_ratio(void)
 				sysctl_min_slab_ratio) / 100;
 }
 
-static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
+static int sysctl_min_slab_ratio_sysctl_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
 {
 	int rc;
@@ -6188,7 +6188,7 @@ static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int wri
  * minimum watermarks. The lowmem reserve ratio can only make sense
  * if in function of the boot time zone sizes.
  */
-static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table,
+static int lowmem_reserve_ratio_sysctl_handler(const struct ctl_table *table,
 		int write, void *buffer, size_t *length, loff_t *ppos)
 {
 	int i;
@@ -6209,7 +6209,7 @@ static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table,
  * cpu. It is the fraction of total pages in each zone that a hot per cpu
  * pagelist can have before it gets flushed back to buddy allocator.
  */
-static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
+static int percpu_pagelist_high_fraction_sysctl_handler(const struct ctl_table *table,
 		int write, void *buffer, size_t *length, loff_t *ppos)
 {
 	struct zone *zone;
diff --git a/mm/util.c b/mm/util.c
index bc488f0121a7..bd283e2132e0 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -868,7 +868,7 @@ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
 
-int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
+int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer,
 		size_t *lenp, loff_t *ppos)
 {
 	int ret;
@@ -884,7 +884,7 @@ static void sync_overcommit_as(struct work_struct *dummy)
 	percpu_counter_sync(&vm_committed_as);
 }
 
-int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
+int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer,
 		size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table t;
@@ -920,7 +920,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
 	return ret;
 }
 
-int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
+int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer,
 		size_t *lenp, loff_t *ppos)
 {
 	int ret;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 73d791d1caad..04a1cb6cc636 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -74,7 +74,7 @@ static void invalid_numa_statistics(void)
 
 static DEFINE_MUTEX(vm_numa_stat_lock);
 
-int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
+int sysctl_vm_numa_stat_handler(const struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
 {
 	int ret, oldval;
@@ -1888,7 +1888,7 @@ static void refresh_vm_stats(struct work_struct *work)
 	refresh_cpu_vm_stats(true);
 }
 
-int vmstat_refresh(struct ctl_table *table, int write,
+int vmstat_refresh(const struct ctl_table *table, int write,
 		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	long val;
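
For context on how such handlers are reached: a ctl_table entry names the knob under /proc/sys/vm/ and points .proc_handler at a function with this const-qualified signature. The sketch below is hypothetical and not part of this commit (example_knob and example_table are invented names); it uses the stock proc_dointvec_minmax() helper and register_sysctl(), assuming the const-qualified prototypes from the same series.

/* Hypothetical wiring sketch, for illustration only. */
#include <linux/init.h>
#include <linux/sysctl.h>

static int example_knob;	/* invented tunable, clamped to 0..100 */

static struct ctl_table example_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE_HUNDRED,
	},
};

static int __init example_sysctl_init(void)
{
	/* Registers /proc/sys/vm/example_knob. */
	if (!register_sysctl("vm", example_table))
		return -ENOMEM;
	return 0;
}
late_initcall(example_sysctl_init);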