-rw-r--r--  arch/sh/kernel/cpu/shmobile/cpuidle.c |   8
-rw-r--r--  drivers/acpi/ec.c                     |  36
-rw-r--r--  drivers/acpi/sleep.c                  |  26
-rw-r--r--  drivers/base/Kconfig                  |   4
-rw-r--r--  drivers/base/power/Makefile           |   1
-rw-r--r--  drivers/base/power/qos-test.c         | 117
-rw-r--r--  drivers/base/power/qos.c              |  73
-rw-r--r--  drivers/cpufreq/Kconfig.powerpc       |   8
-rw-r--r--  drivers/cpufreq/Kconfig.x86           |  16
-rw-r--r--  drivers/cpuidle/Kconfig               |  16
-rw-r--r--  drivers/cpuidle/Kconfig.arm           |  22
-rw-r--r--  drivers/cpuidle/cpuidle.c             |   2
-rw-r--r--  drivers/cpuidle/poll_state.c          |   1
-rw-r--r--  drivers/devfreq/devfreq.c             |   4
-rw-r--r--  drivers/idle/intel_idle.c             |   6
-rw-r--r--  drivers/power/avs/Kconfig             |  12
-rw-r--r--  include/linux/cpuidle.h               |   2
-rw-r--r--  include/linux/pm_qos.h                |  86
-rw-r--r--  kernel/power/qos.c                    |   4
19 files changed, 324 insertions(+), 120 deletions(-)
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index dbd2cdec2ddb..b0f9c8f8fd14 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -67,7 +67,7 @@ static struct cpuidle_driver cpuidle_driver = {
 			.enter = cpuidle_sleep_enter,
 			.name = "C2",
 			.desc = "SuperH Sleep Mode [SF]",
-			.disabled = true,
+			.flags = CPUIDLE_FLAG_UNUSABLE,
 		},
 		{
 			.exit_latency = 2300,
@@ -76,7 +76,7 @@ static struct cpuidle_driver cpuidle_driver = {
 			.enter = cpuidle_sleep_enter,
 			.name = "C3",
 			.desc = "SuperH Mobile Standby Mode [SF]",
-			.disabled = true,
+			.flags = CPUIDLE_FLAG_UNUSABLE,
 		},
 	},
 	.safe_state_index = 0,
@@ -86,10 +86,10 @@ static struct cpuidle_driver cpuidle_driver = {
 int __init sh_mobile_setup_cpuidle(void)
 {
 	if (sh_mobile_sleep_supported & SUSP_SH_SF)
-		cpuidle_driver.states[1].disabled = false;
+		cpuidle_driver.states[1].flags = CPUIDLE_FLAG_NONE;
 
 	if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
-		cpuidle_driver.states[2].disabled = false;
+		cpuidle_driver.states[2].flags = CPUIDLE_FLAG_NONE;
 
 	return cpuidle_register(&cpuidle_driver, NULL);
 }
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 4fd84fbdac29..d05be13c1022 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -533,26 +533,10 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
 }
 
 #ifdef CONFIG_PM_SLEEP
-static bool acpi_ec_query_flushed(struct acpi_ec *ec)
+static void __acpi_ec_flush_work(void)
 {
-	bool flushed;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ec->lock, flags);
-	flushed = !ec->nr_pending_queries;
-	spin_unlock_irqrestore(&ec->lock, flags);
-	return flushed;
-}
-
-static void __acpi_ec_flush_event(struct acpi_ec *ec)
-{
-	/*
-	 * When ec_freeze_events is true, we need to flush events in
-	 * the proper position before entering the noirq stage.
-	 */
-	wait_event(ec->wait, acpi_ec_query_flushed(ec));
-	if (ec_query_wq)
-		flush_workqueue(ec_query_wq);
+	flush_scheduled_work(); /* flush ec->work */
+	flush_workqueue(ec_query_wq); /* flush queries */
 }
 
 static void acpi_ec_disable_event(struct acpi_ec *ec)
@@ -562,15 +546,21 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
 	spin_lock_irqsave(&ec->lock, flags);
 	__acpi_ec_disable_event(ec);
 	spin_unlock_irqrestore(&ec->lock, flags);
-	__acpi_ec_flush_event(ec);
+
+	/*
+	 * When ec_freeze_events is true, we need to flush events in
+	 * the proper position before entering the noirq stage.
+	 */
+	__acpi_ec_flush_work();
 }
 
 void acpi_ec_flush_work(void)
 {
-	if (first_ec)
-		__acpi_ec_flush_event(first_ec);
+	/* Without ec_query_wq there is nothing to flush. */
+	if (!ec_query_wq)
+		return;
 
-	flush_scheduled_work();
+	__acpi_ec_flush_work();
 }
 #endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 2af937a8b1c5..6747a279621b 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -977,6 +977,16 @@ static int acpi_s2idle_prepare_late(void)
 	return 0;
 }
 
+static void acpi_s2idle_sync(void)
+{
+	/*
+	 * The EC driver uses the system workqueue and an additional special
+	 * one, so those need to be flushed too.
+	 */
+	acpi_ec_flush_work();
+	acpi_os_wait_events_complete(); /* synchronize Notify handling */
+}
+
 static void acpi_s2idle_wake(void)
 {
 	/*
@@ -1001,13 +1011,8 @@ static void acpi_s2idle_wake(void)
 		 * should be missed by canceling the wakeup here.
 		 */
 		pm_system_cancel_wakeup();
-		/*
-		 * The EC driver uses the system workqueue and an additional
-		 * special one, so those need to be flushed too.
-		 */
-		acpi_os_wait_events_complete(); /* synchronize EC GPE processing */
-		acpi_ec_flush_work();
-		acpi_os_wait_events_complete(); /* synchronize Notify handling */
+
+		acpi_s2idle_sync();
 
 		rearm_wake_irq(acpi_sci_irq);
 	}
@@ -1024,6 +1029,13 @@ static void acpi_s2idle_restore_early(void)
 
 static void acpi_s2idle_restore(void)
 {
+	/*
+	 * Drain pending events before restoring the working-state configuration
+	 * of GPEs.
+	 */
+	acpi_os_wait_events_complete(); /* synchronize GPE processing */
+	acpi_s2idle_sync();
+
 	s2idle_wakeup = false;
 
 	acpi_enable_all_runtime_gpes();
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 28b92e3cc570..c3b3b5c0b0da 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -148,6 +148,10 @@ config DEBUG_TEST_DRIVER_REMOVE
 	  unusable. You should say N here unless you are explicitly looking to
 	  test this functionality.
 
+config PM_QOS_KUNIT_TEST
+	bool "KUnit Test for PM QoS features"
+	depends on KUNIT
+
 config HMEM_REPORTING
 	bool
 	default n
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index ec5bb190b9d0..8fdd0073eeeb 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o wakeup_stats.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
 obj-$(CONFIG_PM_GENERIC_DOMAINS)	+=  domain.o domain_governor.o
 obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
+obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/qos-test.c b/drivers/base/power/qos-test.c
new file mode 100644
index 000000000000..3115db08d56b
--- /dev/null
+++ b/drivers/base/power/qos-test.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ */
+#include <kunit/test.h>
+#include <linux/pm_qos.h>
+
+/* Basic test for aggregating two "min" requests */
+static void freq_qos_test_min(struct kunit *test)
+{
+	struct freq_constraints	qos;
+	struct freq_qos_request	req1, req2;
+	int ret;
+
+	freq_constraints_init(&qos);
+	memset(&req1, 0, sizeof(req1));
+	memset(&req2, 0, sizeof(req2));
+
+	ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MIN, 1000);
+	KUNIT_EXPECT_EQ(test, ret, 1);
+	ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MIN, 2000);
+	KUNIT_EXPECT_EQ(test, ret, 1);
+
+	KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
+
+	ret = freq_qos_remove_request(&req2);
+	KUNIT_EXPECT_EQ(test, ret, 1);
+	KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
+
+	ret = freq_qos_remove_request(&req1);
+	KUNIT_EXPECT_EQ(test, ret, 1);
+	KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+			FREQ_QOS_MIN_DEFAULT_VALUE);
+}
+
+/* Test that requests for MAX_DEFAULT_VALUE have no effect */
+static void freq_qos_test_maxdef(struct kunit *test)
+{
+	struct freq_constraints	qos;
+	struct freq_qos_request	req1, req2;
+	int ret;
+
+	freq_constraints_init(&qos);
+	memset(&req1, 0, sizeof(req1));
+	memset(&req2, 0, sizeof(req2));
+	KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX),
+			FREQ_QOS_MAX_DEFAULT_VALUE);
+
+	ret = freq_qos_add_request(&qos, &req1, FREQ_QOS_MAX,
+			FREQ_QOS_MAX_DEFAULT_VALUE);
+	KUNIT_EXPECT_EQ(test, ret, 0);
+	ret = freq_qos_add_request(&qos, &req2, FREQ_QOS_MAX,
+			FREQ_QOS_MAX_DEFAULT_VALUE);
+	KUNIT_EXPECT_EQ(test, ret, 0);
+
+	/* Add max 1000 */
+	ret = freq_qos_update_request(&req1, 1000);
+	KUNIT_EXPECT_EQ(test, ret, 1);
+	KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
+
+	/* Add max 2000, no impact */
+	ret = freq_qos_update_request(&req2, 2000);
+	KUNIT_EXPECT_EQ(test, ret, 0);
+	KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 1000);
+
+	/* Remove max 1000, new max 2000 */
+	ret = freq_qos_remove_request(&req1);
+	KUNIT_EXPECT_EQ(test, ret, 1);
+	KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MAX), 2000);
+}
+
+/*
+ * Test that a freq_qos_request can be added again after removal
+ *
+ * This issue was solved by commit 05ff1ba412fd ("PM: QoS: Invalidate frequency
+ * QoS requests after removal")
+ */
+static void freq_qos_test_readd(struct kunit *test)
+{
+	struct freq_constraints	qos;
+	struct freq_qos_request	req;
+	int ret;
+
+	freq_constraints_init(&qos);
+	memset(&req, 0, sizeof(req));
+	KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+			FREQ_QOS_MIN_DEFAULT_VALUE);
+
+	/* Add */
+	ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 1000);
+	KUNIT_EXPECT_EQ(test, ret, 1);
+	KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 1000);
+
+	/* Remove */
+	ret = freq_qos_remove_request(&req);
+	KUNIT_EXPECT_EQ(test, ret, 1);
+	KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN),
+			FREQ_QOS_MIN_DEFAULT_VALUE);
+
+	/* Add again */
+	ret = freq_qos_add_request(&qos, &req, FREQ_QOS_MIN, 2000);
+	KUNIT_EXPECT_EQ(test, ret, 1);
+	KUNIT_EXPECT_EQ(test, freq_qos_read_value(&qos, FREQ_QOS_MIN), 2000);
+}
+
+static struct kunit_case pm_qos_test_cases[] = {
+	KUNIT_CASE(freq_qos_test_min),
+	KUNIT_CASE(freq_qos_test_maxdef),
+	KUNIT_CASE(freq_qos_test_readd),
+	{},
+};
+
+static struct kunit_suite pm_qos_test_module = {
+	.name = "qos-kunit-test",
+	.test_cases = pm_qos_test_cases,
+};
+kunit_test_suite(pm_qos_test_module);
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 350dcafd751f..8e93167f1783 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -115,10 +115,20 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
 
 	spin_lock_irqsave(&dev->power.lock, flags);
 
-	if (type == DEV_PM_QOS_RESUME_LATENCY) {
+	switch (type) {
+	case DEV_PM_QOS_RESUME_LATENCY:
 		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
 			: pm_qos_read_value(&qos->resume_latency);
-	} else {
+		break;
+	case DEV_PM_QOS_MIN_FREQUENCY:
+		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
+			: freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
+		break;
+	case DEV_PM_QOS_MAX_FREQUENCY:
+		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
+			: freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
+		break;
+	default:
 		WARN_ON(1);
 		ret = 0;
 	}
@@ -159,6 +169,10 @@ static int apply_constraint(struct dev_pm_qos_request *req,
 			req->dev->power.set_latency_tolerance(req->dev, value);
 		}
 		break;
+	case DEV_PM_QOS_MIN_FREQUENCY:
+	case DEV_PM_QOS_MAX_FREQUENCY:
+		ret = freq_qos_apply(&req->data.freq, action, value);
+		break;
 	case DEV_PM_QOS_FLAGS:
 		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
 					  action, value);
@@ -209,6 +223,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
 	c->type = PM_QOS_MIN;
 
+	freq_constraints_init(&qos->freq);
+
 	INIT_LIST_HEAD(&qos->flags.list);
 
 	spin_lock_irq(&dev->power.lock);
@@ -269,6 +285,20 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 		memset(req, 0, sizeof(*req));
 	}
 
+	c = &qos->freq.min_freq;
+	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
+		apply_constraint(req, PM_QOS_REMOVE_REQ,
+				 PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	}
+
+	c = &qos->freq.max_freq;
+	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
+		apply_constraint(req, PM_QOS_REMOVE_REQ,
+				 PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	}
+
 	f = &qos->flags;
 	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -314,11 +344,22 @@ static int __dev_pm_qos_add_request(struct device *dev,
 		ret = dev_pm_qos_constraints_allocate(dev);
 
 	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
-	if (!ret) {
-		req->dev = dev;
-		req->type = type;
+	if (ret)
+		return ret;
+
+	req->dev = dev;
+	req->type = type;
+	if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
+		ret = freq_qos_add_request(&dev->power.qos->freq,
+					   &req->data.freq,
+					   FREQ_QOS_MIN, value);
+	else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
+		ret = freq_qos_add_request(&dev->power.qos->freq,
+					   &req->data.freq,
+					   FREQ_QOS_MAX, value);
+	else
 		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
-	}
+
 	return ret;
 }
 
@@ -382,6 +423,10 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 	case DEV_PM_QOS_LATENCY_TOLERANCE:
 		curr_value = req->data.pnode.prio;
 		break;
+	case DEV_PM_QOS_MIN_FREQUENCY:
+	case DEV_PM_QOS_MAX_FREQUENCY:
+		curr_value = req->data.freq.pnode.prio;
+		break;
 	case DEV_PM_QOS_FLAGS:
 		curr_value = req->data.flr.flags;
 		break;
@@ -507,6 +552,14 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
 		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
 						       notifier);
 		break;
+	case DEV_PM_QOS_MIN_FREQUENCY:
+		ret = freq_qos_add_notifier(&dev->power.qos->freq,
+					    FREQ_QOS_MIN, notifier);
+		break;
+	case DEV_PM_QOS_MAX_FREQUENCY:
+		ret = freq_qos_add_notifier(&dev->power.qos->freq,
+					    FREQ_QOS_MAX, notifier);
+		break;
 	default:
 		WARN_ON(1);
 		ret = -EINVAL;
@@ -546,6 +599,14 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 		ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
 							 notifier);
 		break;
+	case DEV_PM_QOS_MIN_FREQUENCY:
+		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
+					       FREQ_QOS_MIN, notifier);
+		break;
+	case DEV_PM_QOS_MAX_FREQUENCY:
+		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
+					       FREQ_QOS_MAX, notifier);
+		break;
 	default:
 		WARN_ON(1);
 		ret = -EINVAL;
diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc
index 35b4f700f054..58151ca56695 100644
--- a/drivers/cpufreq/Kconfig.powerpc
+++ b/drivers/cpufreq/Kconfig.powerpc
@@ -48,9 +48,9 @@ config PPC_PASEMI_CPUFREQ
 	  PWRficient processors.
 
 config POWERNV_CPUFREQ
-       tristate "CPU frequency scaling for IBM POWERNV platform"
-       depends on PPC_POWERNV
-       default y
-       help
+	tristate "CPU frequency scaling for IBM POWERNV platform"
+	depends on PPC_POWERNV
+	default y
+	help
 	 This adds support for CPU frequency switching on IBM POWERNV
 	 platform
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index dfa6457deaf6..a6528388952e 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -4,17 +4,17 @@
 #
 
 config X86_INTEL_PSTATE
-       bool "Intel P state control"
-       depends on X86
-       select ACPI_PROCESSOR if ACPI
-       select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
-       help
-          This driver provides a P state for Intel core processors.
+	bool "Intel P state control"
+	depends on X86
+	select ACPI_PROCESSOR if ACPI
+	select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO
+	help
+	  This driver provides a P state for Intel core processors.
 	  The driver implements an internal governor and will become
-          the scaling driver and governor for Sandy bridge processors.
+	  the scaling driver and governor for Sandy bridge processors.
 
 	  When this driver is enabled it will become the preferred
-          scaling driver for Sandy bridge processors.
+	  scaling driver for Sandy bridge processors.
 
 	  If in doubt, say N.
 
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 88727b7c0d59..c0aeedd66f02 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -16,7 +16,7 @@ config CPU_IDLE
 if CPU_IDLE
 
 config CPU_IDLE_MULTIPLE_DRIVERS
-        bool
+	bool
 
 config CPU_IDLE_GOV_LADDER
 	bool "Ladder governor (for periodic timer tick)"
@@ -63,13 +63,13 @@ source "drivers/cpuidle/Kconfig.powerpc"
 endmenu
 
 config HALTPOLL_CPUIDLE
-       tristate "Halt poll cpuidle driver"
-       depends on X86 && KVM_GUEST
-       default y
-       help
-         This option enables halt poll cpuidle driver, which allows to poll
-         before halting in the guest (more efficient than polling in the
-         host via halt_poll_ns for some scenarios).
+	tristate "Halt poll cpuidle driver"
+	depends on X86 && KVM_GUEST
+	default y
+	help
+	 This option enables halt poll cpuidle driver, which allows to poll
+	 before halting in the guest (more efficient than polling in the
+	 host via halt_poll_ns for some scenarios).
 
 endif
 
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index d8530475493c..a224d33dda7f 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -3,15 +3,15 @@
 # ARM CPU Idle drivers
 #
 config ARM_CPUIDLE
-        bool "Generic ARM/ARM64 CPU idle Driver"
-        select DT_IDLE_STATES
+	bool "Generic ARM/ARM64 CPU idle Driver"
+	select DT_IDLE_STATES
 	select CPU_IDLE_MULTIPLE_DRIVERS
-        help
-          Select this to enable generic cpuidle driver for ARM.
-          It provides a generic idle driver whose idle states are configured
-          at run-time through DT nodes. The CPUidle suspend backend is
-          initialized by calling the CPU operations init idle hook
-          provided by architecture code.
+	help
+	  Select this to enable generic cpuidle driver for ARM.
+	  It provides a generic idle driver whose idle states are configured
+	  at run-time through DT nodes. The CPUidle suspend backend is
+	  initialized by calling the CPU operations init idle hook
+	  provided by architecture code.
 
 config ARM_PSCI_CPUIDLE
 	bool "PSCI CPU idle Driver"
@@ -65,21 +65,21 @@ config ARM_U8500_CPUIDLE
 	bool "Cpu Idle Driver for the ST-E u8500 processors"
 	depends on ARCH_U8500 && !ARM64
 	help
-	  Select this to enable cpuidle for ST-E u8500 processors
+	  Select this to enable cpuidle for ST-E u8500 processors.
 
 config ARM_AT91_CPUIDLE
 	bool "Cpu Idle Driver for the AT91 processors"
 	default y
 	depends on ARCH_AT91 && !ARM64
 	help
-	  Select this to enable cpuidle for AT91 processors
+	  Select this to enable cpuidle for AT91 processors.
 
 config ARM_EXYNOS_CPUIDLE
 	bool "Cpu Idle Driver for the Exynos processors"
 	depends on ARCH_EXYNOS && !ARM64
 	select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
 	help
-	  Select this to enable cpuidle for Exynos processors
+	  Select this to enable cpuidle for Exynos processors.
 
 config ARM_MVEBU_V7_CPUIDLE
 	bool "CPU Idle Driver for mvebu v7 family processors"
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 569dbac443bd..0005be5ea2b4 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -572,7 +572,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
 		return -EINVAL;
 
 	for (i = 0; i < drv->state_count; i++)
-		if (drv->states[i].disabled)
+		if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE)
 			dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;
 
 	per_cpu(cpuidle_devices, dev->cpu) = dev;
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index 9f1ace9c53da..f7e83613ae94 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -53,7 +53,6 @@ void cpuidle_poll_state_init(struct cpuidle_driver *drv)
 	state->target_residency_ns = 0;
 	state->power_usage = -1;
 	state->enter = poll_idle;
-	state->disabled = false;
 	state->flags = CPUIDLE_FLAG_POLLING;
 }
 EXPORT_SYMBOL_GPL(cpuidle_poll_state_init);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index f840e61e5a27..425149e8bab0 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -921,7 +921,9 @@ int devfreq_suspend_device(struct devfreq *devfreq)
 	}
 
 	if (devfreq->suspend_freq) {
+		mutex_lock(&devfreq->lock);
 		ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
+		mutex_unlock(&devfreq->lock);
 		if (ret)
 			return ret;
 	}
@@ -949,7 +951,9 @@ int devfreq_resume_device(struct devfreq *devfreq)
 		return 0;
 
 	if (devfreq->resume_freq) {
+		mutex_lock(&devfreq->lock);
 		ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
+		mutex_unlock(&devfreq->lock);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 347b08b56042..75fd2a7b0842 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -1291,8 +1291,8 @@ static void sklh_idle_state_table_update(void)
 			return;
 	}
 
-	skl_cstates[5].disabled = 1;	/* C8-SKL */
-	skl_cstates[6].disabled = 1;	/* C9-SKL */
+	skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE;	/* C8-SKL */
+	skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE;	/* C9-SKL */
 }
 /*
  * intel_idle_state_table_update()
@@ -1355,7 +1355,7 @@ static void __init intel_idle_cpuidle_driver_init(void)
 			continue;
 
 		/* if state marked as disabled, skip it */
-		if (cpuidle_state_table[cstate].disabled != 0) {
+		if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_UNUSABLE) {
 			pr_debug("state %s is disabled\n",
 				 cpuidle_state_table[cstate].name);
 			continue;
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index b5a217b828dc..089b6244b716 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -13,9 +13,9 @@ menuconfig POWER_AVS
 	  Say Y here to enable Adaptive Voltage Scaling class support.
 
 config ROCKCHIP_IODOMAIN
-        tristate "Rockchip IO domain support"
-        depends on POWER_AVS && ARCH_ROCKCHIP && OF
-        help
-          Say y here to enable support io domains on Rockchip SoCs. It is
-          necessary for the io domain setting of the SoC to match the
-          voltage supplied by the regulators.
+	tristate "Rockchip IO domain support"
+	depends on POWER_AVS && ARCH_ROCKCHIP && OF
+	help
+	  Say y here to enable support io domains on Rockchip SoCs. It is
+	  necessary for the io domain setting of the SoC to match the
+	  voltage supplied by the regulators.
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 2dbe46b7c213..1dabe36bd011 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -54,7 +54,6 @@ struct cpuidle_state {
 	unsigned int	exit_latency; /* in US */
 	int		power_usage; /* in mW */
 	unsigned int	target_residency; /* in US */
-	bool		disabled; /* disabled on all CPUs */
 
 	int (*enter)	(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
@@ -77,6 +76,7 @@ struct cpuidle_state {
 #define CPUIDLE_FLAG_POLLING	BIT(0) /* polling state */
 #define CPUIDLE_FLAG_COUPLED	BIT(1) /* state applies to multiple cpus */
 #define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
+#define CPUIDLE_FLAG_UNUSABLE	BIT(3) /* avoid using this state */
 
 struct cpuidle_device_kobj;
 struct cpuidle_state_kobj;
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index ebf5ef17cc2a..19eafca5680e 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -34,6 +34,8 @@ enum pm_qos_flags_status {
 #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT	PM_QOS_LATENCY_ANY
 #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS	PM_QOS_LATENCY_ANY_NS
 #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE	0
+#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE	0
+#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE	FREQ_QOS_MAX_DEFAULT_VALUE
 #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT	(-1)
 
 #define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)
@@ -49,21 +51,6 @@ struct pm_qos_flags_request {
 	s32 flags;	/* Do not change to 64 bit */
 };
 
-enum dev_pm_qos_req_type {
-	DEV_PM_QOS_RESUME_LATENCY = 1,
-	DEV_PM_QOS_LATENCY_TOLERANCE,
-	DEV_PM_QOS_FLAGS,
-};
-
-struct dev_pm_qos_request {
-	enum dev_pm_qos_req_type type;
-	union {
-		struct plist_node pnode;
-		struct pm_qos_flags_request flr;
-	} data;
-	struct device *dev;
-};
-
 enum pm_qos_type {
 	PM_QOS_UNITIALIZED,
 	PM_QOS_MAX,		/* return the largest value */
@@ -90,9 +77,51 @@ struct pm_qos_flags {
 	s32 effective_flags;	/* Do not change to 64 bit */
 };
 
+
+#define FREQ_QOS_MIN_DEFAULT_VALUE	0
+#define FREQ_QOS_MAX_DEFAULT_VALUE	S32_MAX
+
+enum freq_qos_req_type {
+	FREQ_QOS_MIN = 1,
+	FREQ_QOS_MAX,
+};
+
+struct freq_constraints {
+	struct pm_qos_constraints min_freq;
+	struct blocking_notifier_head min_freq_notifiers;
+	struct pm_qos_constraints max_freq;
+	struct blocking_notifier_head max_freq_notifiers;
+};
+
+struct freq_qos_request {
+	enum freq_qos_req_type type;
+	struct plist_node pnode;
+	struct freq_constraints *qos;
+};
+
+
+enum dev_pm_qos_req_type {
+	DEV_PM_QOS_RESUME_LATENCY = 1,
+	DEV_PM_QOS_LATENCY_TOLERANCE,
+	DEV_PM_QOS_MIN_FREQUENCY,
+	DEV_PM_QOS_MAX_FREQUENCY,
+	DEV_PM_QOS_FLAGS,
+};
+
+struct dev_pm_qos_request {
+	enum dev_pm_qos_req_type type;
+	union {
+		struct plist_node pnode;
+		struct pm_qos_flags_request flr;
+		struct freq_qos_request freq;
+	} data;
+	struct device *dev;
+};
+
 struct dev_pm_qos {
 	struct pm_qos_constraints resume_latency;
 	struct pm_qos_constraints latency_tolerance;
+	struct freq_constraints freq;
 	struct pm_qos_flags flags;
 	struct dev_pm_qos_request *resume_latency_req;
 	struct dev_pm_qos_request *latency_tolerance_req;
@@ -191,6 +220,10 @@ static inline s32 dev_pm_qos_read_value(struct device *dev,
 	switch (type) {
 	case DEV_PM_QOS_RESUME_LATENCY:
 		return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+	case DEV_PM_QOS_MIN_FREQUENCY:
+		return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
+	case DEV_PM_QOS_MAX_FREQUENCY:
+		return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
 	default:
 		WARN_ON(1);
 		return 0;
@@ -255,27 +288,6 @@ static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
 }
 #endif
 
-#define FREQ_QOS_MIN_DEFAULT_VALUE	0
-#define FREQ_QOS_MAX_DEFAULT_VALUE	(-1)
-
-enum freq_qos_req_type {
-	FREQ_QOS_MIN = 1,
-	FREQ_QOS_MAX,
-};
-
-struct freq_constraints {
-	struct pm_qos_constraints min_freq;
-	struct blocking_notifier_head min_freq_notifiers;
-	struct pm_qos_constraints max_freq;
-	struct blocking_notifier_head max_freq_notifiers;
-};
-
-struct freq_qos_request {
-	enum freq_qos_req_type type;
-	struct plist_node pnode;
-	struct freq_constraints *qos;
-};
-
 static inline int freq_qos_request_active(struct freq_qos_request *req)
 {
 	return !IS_ERR_OR_NULL(req->qos);
@@ -291,6 +303,8 @@ int freq_qos_add_request(struct freq_constraints *qos,
 			 enum freq_qos_req_type type, s32 value);
int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
 int freq_qos_remove_request(struct freq_qos_request *req);
+int freq_qos_apply(struct freq_qos_request *req,
+		   enum pm_qos_req_action action, s32 value);
 
 int freq_qos_add_notifier(struct freq_constraints *qos,
 			  enum freq_qos_req_type type,
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index a45cba7df0ae..83edf8698118 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -714,8 +714,10 @@ s32 freq_qos_read_value(struct freq_constraints *qos,
  * @req: Constraint request to apply.
  * @action: Action to perform (add/update/remove).
  * @value: Value to assign to the QoS request.
+ *
+ * This is only meant to be called from inside pm_qos, not drivers.
  */
-static int freq_qos_apply(struct freq_qos_request *req,
+int freq_qos_apply(struct freq_qos_request *req,
 			  enum pm_qos_req_action action, s32 value)
 {
 	int ret;
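
For context, the following is a minimal usage sketch (not part of the series above) of the per-device frequency QoS request types that this patchset wires into the device PM QoS framework. The device pointer, the frequency values, and the example_* function names are illustrative assumptions; only the dev_pm_qos_add_request()/dev_pm_qos_update_request()/dev_pm_qos_remove_request() calls and the DEV_PM_QOS_MIN_FREQUENCY type come from the code above.

/*
 * Hypothetical consumer of the new DEV_PM_QOS_MIN_FREQUENCY request type.
 * The device, values and function names are assumptions for illustration.
 */
#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request example_min_freq_req;

static int example_add_freq_constraint(struct device *dev)
{
	int ret;

	/* Request a minimum frequency floor for this device. */
	ret = dev_pm_qos_add_request(dev, &example_min_freq_req,
				     DEV_PM_QOS_MIN_FREQUENCY, 800000);
	if (ret < 0)
		return ret;

	/* Raise the floor later; a return of 1 means the aggregate changed. */
	ret = dev_pm_qos_update_request(&example_min_freq_req, 1200000);

	return ret < 0 ? ret : 0;
}

static void example_drop_freq_constraint(void)
{
	/* Drop the constraint when it is no longer needed. */
	dev_pm_qos_remove_request(&example_min_freq_req);
}

Under the hood, such requests end up in the freq_constraints aggregate added to struct dev_pm_qos, and are applied through the freq_qos_apply() helper that this series exports from kernel/power/qos.c.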
