 Documentation/admin-guide/pm/intel_pstate.rst | 133
 drivers/cpufreq/acpi-cpufreq.c                |   2
 drivers/cpufreq/amd-pstate.c                  |  35
 drivers/cpufreq/cppc_cpufreq.c                |  17
 drivers/cpufreq/cpufreq-dt-platdev.c          |   1
 drivers/cpufreq/cpufreq-nforce2.c             |   3
 drivers/cpufreq/cpufreq.c                     |  11
 drivers/cpufreq/intel_pstate.c                | 227
 drivers/cpufreq/qcom-cpufreq-nvmem.c          |  35
 drivers/cpufreq/s5pv210-cpufreq.c             |   6
 drivers/cpufreq/tegra186-cpufreq.c            | 150
 drivers/cpufreq/tegra194-cpufreq.c            |   3
 12 files changed, 389 insertions(+), 234 deletions(-)

diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index 26e702c7016e..fde967b0c2e0 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -48,8 +48,9 @@ only way to pass early-configuration-time parameters to it is via the kernel
 command line.  However, its configuration can be adjusted via ``sysfs`` to a
 great extent.  In some configurations it even is possible to unregister it via
 ``sysfs`` which allows another ``CPUFreq`` scaling driver to be loaded and
-registered (see `below <status_attr_>`_).
+registered (see :ref:`below <status_attr>`).
 
+.. _operation_modes:
 
 Operation Modes
 ===============
@@ -62,6 +63,8 @@ a certain performance scaling algorithm.  Which of them will be in effect
 depends on what kernel command line options are used and on the capabilities of
 the processor.
 
+.. _active_mode:
+
 Active Mode
 -----------
 
@@ -94,6 +97,8 @@ Which of the P-state selection algorithms is used by default depends on the
 :c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option.
 Namely, if that option is set, the ``performance`` algorithm will be used by
 default, and the other one will be used by default if it is not set.
 
+.. _active_mode_hwp:
+
 Active Mode With HWP
 ~~~~~~~~~~~~~~~~~~~~
 
@@ -123,7 +128,7 @@ Energy-Performance Bias (EPB) knob (otherwise), which means that the
 processor's internal P-state selection logic is expected to focus entirely on
 performance.
 
 This will override the EPP/EPB setting coming from the ``sysfs`` interface
-(see `Energy vs Performance Hints`_ below). Moreover, any attempts to change
+(see :ref:`energy_performance_hints` below). Moreover, any attempts to change
 the EPP/EPB to a value different from 0 ("performance") via ``sysfs`` in this
 configuration will be rejected.
 
@@ -192,6 +197,8 @@ This is the default P-state selection algorithm if the
 :c:macro:`CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE` kernel configuration option
 is not set.
 
+.. _passive_mode:
+
 Passive Mode
 ------------
 
@@ -289,12 +296,12 @@ Unlike ``_PSS`` objects in the ACPI tables, ``intel_pstate`` always exposes
 the entire range of available P-states, including the whole turbo range, to the
 ``CPUFreq`` core and (in the passive mode) to generic scaling governors.  This
 generally causes turbo P-states to be set more often when ``intel_pstate`` is
-used relative to ACPI-based CPU performance scaling (see `below <acpi-cpufreq_>`_
-for more information).
+used relative to ACPI-based CPU performance scaling (see
+:ref:`below <acpi-cpufreq>` for more information).
 
 Moreover, since ``intel_pstate`` always knows what the real turbo threshold is
 (even if the Configurable TDP feature is enabled in the processor), its
-``no_turbo`` attribute in ``sysfs`` (described `below <no_turbo_attr_>`_) should
+``no_turbo`` attribute in ``sysfs`` (described :ref:`below <no_turbo_attr>`) should
 work as expected in all cases (that is, if set to disable turbo P-states, it
 always should prevent ``intel_pstate`` from using them).
 
@@ -307,12 +314,12 @@ pieces of information on it to be known, including:
 
  * The minimum supported P-state.
 
- * The maximum supported `non-turbo P-state <turbo_>`_.
+ * The maximum supported :ref:`non-turbo P-state <turbo>`.
 
  * Whether or not turbo P-states are supported at all.
 
- * The maximum supported `one-core turbo P-state <turbo_>`_ (if turbo P-states
-   are supported).
+ * The maximum supported :ref:`one-core turbo P-state <turbo>` (if turbo
+   P-states are supported).
 
  * The scaling formula to translate the driver's internal representation of
    P-states into frequencies and the other way around.
@@ -400,10 +407,10 @@ Energy-Aware Scheduling Support
 
 If ``CONFIG_ENERGY_MODEL`` has been set during kernel configuration and
 ``intel_pstate`` runs on a hybrid processor without SMT, in addition to enabling
-`CAS <CAS_>`_ it registers an Energy Model for the processor.  This allows the
+:ref:`CAS` it registers an Energy Model for the processor.  This allows the
 Energy-Aware Scheduling (EAS) support to be enabled in the CPU scheduler if
 ``schedutil`` is used as the ``CPUFreq`` governor which requires ``intel_pstate``
-to operate in the `passive mode <Passive Mode_>`_.
+to operate in the :ref:`passive mode <passive_mode>`.
 
 The Energy Model registered by ``intel_pstate`` is artificial (that is, it is
 based on abstract cost values and it does not include any real power numbers)
@@ -432,6 +439,8 @@ the ``energy_model`` directory in ``debugfs`` (typlically mounted on
 
 User Space Interface in ``sysfs``
 =================================
 
+.. _global_attributes:
+
 Global Attributes
 -----------------
 
@@ -444,8 +453,8 @@ argument is passed to the kernel in the command line.
 
 ``max_perf_pct``
	Maximum P-state the driver is allowed to set in percent of the
-	maximum supported performance level (the highest supported `turbo
-	P-state <turbo_>`_).
+	maximum supported performance level (the highest supported :ref:`turbo
+	P-state <turbo>`).
 
	This attribute will not be exposed if the
	``intel_pstate=per_cpu_perf_limits`` argument is present in the kernel
@@ -453,8 +462,8 @@ argument is passed to the kernel in the command line.
 
 ``min_perf_pct``
	Minimum P-state the driver is allowed to set in percent of the
-	maximum supported performance level (the highest supported `turbo
-	P-state <turbo_>`_).
+	maximum supported performance level (the highest supported :ref:`turbo
+	P-state <turbo>`).
 
	This attribute will not be exposed if the
	``intel_pstate=per_cpu_perf_limits`` argument is present in the kernel
@@ -463,18 +472,18 @@ argument is passed to the kernel in the command line.
 ``num_pstates``
	Number of P-states supported by the processor (between 0 and 255
	inclusive) including both turbo and non-turbo P-states (see
-	`Turbo P-states Support`_).
+	:ref:`turbo`).
 
	This attribute is present only if the value exposed by it is the same
	for all of the CPUs in the system.
 
	The value of this attribute is not affected by the ``no_turbo``
-	setting described `below <no_turbo_attr_>`_.
+	setting described :ref:`below <no_turbo_attr>`.
 
	This attribute is read-only.
 
 ``turbo_pct``
-	Ratio of the `turbo range <turbo_>`_ size to the size of the entire
+	Ratio of the :ref:`turbo range <turbo>` size to the size of the entire
	range of supported P-states, in percent.
 
	This attribute is present only
@@ -486,7 +495,7 @@ argument is passed to the kernel in the command line.
 
 ``no_turbo``
	If set (equal to 1), the driver is not allowed to set any turbo P-states
-	(see `Turbo P-states Support`_).  If unset (equal to 0, which is the
+	(see :ref:`turbo`).  If unset (equal to 0, which is the
	default), turbo P-states can be set by the driver.
	[Note that ``intel_pstate`` does not support the general ``boost``
	attribute (supported by some other scaling drivers) which is replaced
@@ -495,11 +504,11 @@ argument is passed to the kernel in the command line.
	This attribute does not affect the maximum supported frequency value
	supplied to the ``CPUFreq`` core and exposed via the policy interface,
	but it affects the maximum possible value of per-policy P-state limits
-	(see `Interpretation of Policy Attributes`_ below for details).
+	(see :ref:`policy_attributes_interpretation` below for details).
 
 ``hwp_dynamic_boost``
	This attribute is only present if ``intel_pstate`` works in the
-	`active mode with the HWP feature enabled <Active Mode With HWP_>`_ in
+	:ref:`active mode with the HWP feature enabled <active_mode_hwp>` in
	the processor.  If set (equal to 1), it causes the minimum P-state limit
	to be increased dynamically for a short time whenever a task previously
	waiting on I/O is selected to run on a given logical CPU (the purpose
@@ -514,12 +523,12 @@ argument is passed to the kernel in the command line.
	Operation mode of the driver: "active", "passive" or "off".
 
	"active"
-		The driver is functional and in the `active mode
-		<Active Mode_>`_.
+		The driver is functional and in the :ref:`active mode
+		<active_mode>`.
 
	"passive"
-		The driver is functional and in the `passive mode
-		<Passive Mode_>`_.
+		The driver is functional and in the :ref:`passive mode
+		<passive_mode>`.
 
	"off"
		The driver is not functional (it is not registered as a scaling
@@ -547,13 +556,15 @@ argument is passed to the kernel in the command line.
	attribute to "1" enables the energy-efficiency optimizations and setting
	to "0" disables them.
 
+.. _policy_attributes_interpretation:
+
 Interpretation of Policy Attributes
 -----------------------------------
 
 The interpretation of some ``CPUFreq`` policy attributes described in
 Documentation/admin-guide/pm/cpufreq.rst is special with ``intel_pstate``
 as the current scaling driver and it generally depends on the driver's
-`operation mode <Operation Modes_>`_.
+:ref:`operation mode <operation_modes>`.
 
 First of all, the values of the ``cpuinfo_max_freq``, ``cpuinfo_min_freq`` and
 ``scaling_cur_freq`` attributes are produced by applying a processor-specific
@@ -562,9 +573,10 @@ Also, the values of the ``scaling_max_freq`` and ``scaling_min_freq``
 attributes are capped by the frequency corresponding to the maximum P-state
 that the driver is allowed to set.
 
-If the ``no_turbo`` `global attribute <no_turbo_attr_>`_ is set, the driver is
-not allowed to use turbo P-states, so the maximum value of ``scaling_max_freq``
-and ``scaling_min_freq`` is limited to the maximum non-turbo P-state frequency.
+If the ``no_turbo`` :ref:`global attribute <no_turbo_attr>` is set, the driver
+is not allowed to use turbo P-states, so the maximum value of
+``scaling_max_freq`` and ``scaling_min_freq`` is limited to the maximum
+non-turbo P-state frequency.
 Accordingly, setting ``no_turbo`` causes ``scaling_max_freq`` and
 ``scaling_min_freq`` to go down to that value if they were above it before.
 However, the old values of ``scaling_max_freq`` and ``scaling_min_freq`` will be
@@ -576,7 +588,7 @@ and ``scaling_min_freq`` corresponds to the maximum supported turbo P-state,
 which also is the value of ``cpuinfo_max_freq`` in either case.
 
 Next, the following policy attributes have special meaning if
-``intel_pstate`` works in the `active mode <Active Mode_>`_:
+``intel_pstate`` works in the :ref:`active mode <active_mode>`:
 
 ``scaling_available_governors``
	List of P-state selection algorithms provided by ``intel_pstate``.
@@ -597,20 +609,22 @@ processor:
	Shows the base frequency of the CPU.  Any frequency above this will be
	in the turbo frequency range.
 
-The meaning of these attributes in the `passive mode <Passive Mode_>`_ is the
+The meaning of these attributes in the :ref:`passive mode <passive_mode>` is the
 same as for other scaling drivers.
 
 Additionally, the value of the ``scaling_driver`` attribute for
 ``intel_pstate`` depends on the operation mode of the driver.  Namely, it is either
-"intel_pstate" (in the `active mode <Active Mode_>`_) or "intel_cpufreq" (in the
-`passive mode <Passive Mode_>`_).
+"intel_pstate" (in the :ref:`active mode <active_mode>`) or "intel_cpufreq"
+(in the :ref:`passive mode <passive_mode>`).
+
+.. _pstate_limits_coordination:
 
 Coordination of P-State Limits
 ------------------------------
 
 ``intel_pstate`` allows P-state limits to be set in two ways: with the help of
-the ``max_perf_pct`` and ``min_perf_pct`` `global attributes
-<Global Attributes_>`_ or via the ``scaling_max_freq`` and ``scaling_min_freq``
+the ``max_perf_pct`` and ``min_perf_pct`` :ref:`global attributes
+<global_attributes>` or via the ``scaling_max_freq`` and ``scaling_min_freq``
 ``CPUFreq`` policy attributes.  The coordination between those limits is based
 on the following rules, regardless of the current operation mode of the driver:
@@ -632,17 +646,18 @@ on the following rules, regardless of the current operation mode of the driver:
 
 3. The global and per-policy limits can be set independently.
 
-In the `active mode with the HWP feature enabled <Active Mode With HWP_>`_, the
+In the :ref:`active mode with the HWP feature enabled <active_mode_hwp>`, the
 resulting effective values are written into hardware registers whenever the
 limits change in order to request its internal P-state selection logic to always
 set P-states within these limits.  Otherwise, the limits are taken into account
-by scaling governors (in the `passive mode <Passive Mode_>`_) and by the driver
-every time before setting a new P-state for a CPU.
+by scaling governors (in the :ref:`passive mode <passive_mode>`) and by the
+driver every time before setting a new P-state for a CPU.
 
 Additionally, if the ``intel_pstate=per_cpu_perf_limits`` command line argument
 is passed to the kernel, ``max_perf_pct`` and ``min_perf_pct`` are not exposed
 at all and the only way to set the limits is by using the policy attributes.
 
+.. _energy_performance_hints:
 
 Energy vs Performance Hints
 ---------------------------
@@ -702,9 +717,9 @@ output.
 On those systems each ``_PSS`` object returns a list of P-states supported by
 the corresponding CPU which basically is a subset of the P-states range that can
 be used by ``intel_pstate`` on the same system, with one exception: the whole
-`turbo range <turbo_>`_ is represented by one item in it (the topmost one).  By
-convention, the frequency returned by ``_PSS`` for that item is greater by 1 MHz
-than the frequency of the highest non-turbo P-state listed by it, but the
+:ref:`turbo range <turbo>` is represented by one item in it (the topmost one).
+By convention, the frequency returned by ``_PSS`` for that item is greater by
+1 MHz than the frequency of the highest non-turbo P-state listed by it, but the
 corresponding P-state representation (following the hardware specification)
 returned for it matches the maximum supported turbo P-state (or is the special
 value 255 meaning essentially "go as high as you can get").
@@ -730,18 +745,18 @@ benefit from running at turbo frequencies will be given non-turbo P-states
 instead.
 
 One more issue related to that may appear on systems supporting the
-`Configurable TDP feature <turbo_>`_ allowing the platform firmware to set the
-turbo threshold.  Namely, if that is not coordinated with the lists of P-states
-returned by ``_PSS`` properly, there may be more than one item corresponding to
-a turbo P-state in those lists and there may be a problem with avoiding the
-turbo range (if desirable or necessary).  Usually, to avoid using turbo
-P-states overall, ``acpi-cpufreq`` simply avoids using the topmost state listed
-by ``_PSS``, but that is not sufficient when there are other turbo P-states in
-the list returned by it.
+:ref:`Configurable TDP feature <turbo>` allowing the platform firmware to set
+the turbo threshold.  Namely, if that is not coordinated with the lists of
+P-states returned by ``_PSS`` properly, there may be more than one item
+corresponding to a turbo P-state in those lists and there may be a problem with
+avoiding the turbo range (if desirable or necessary).  Usually, to avoid using
+turbo P-states overall, ``acpi-cpufreq`` simply avoids using the topmost state
+listed by ``_PSS``, but that is not sufficient when there are other turbo
+P-states in the list returned by it.
 
 Apart from the above, ``acpi-cpufreq`` works like ``intel_pstate`` in the
-`passive mode <Passive Mode_>`_, except that the number of P-states it can set
-is limited to the ones listed by the ACPI ``_PSS`` objects.
+:ref:`passive mode <passive_mode>`, except that the number of P-states it can
+set is limited to the ones listed by the ACPI ``_PSS`` objects.
 
 
 Kernel Command Line Options for ``intel_pstate``
@@ -756,11 +771,11 @@ of them have to be prepended with the ``intel_pstate=`` prefix.
	processor is supported by it.
 
 ``active``
-	Register ``intel_pstate`` in the `active mode <Active Mode_>`_ to start
-	with.
+	Register ``intel_pstate`` in the :ref:`active mode <active_mode>` to
+	start with.
 
 ``passive``
-	Register ``intel_pstate`` in the `passive mode <Passive Mode_>`_ to
+	Register ``intel_pstate`` in the :ref:`passive mode <passive_mode>` to
	start with.
 
 ``force``
@@ -793,12 +808,12 @@ of them have to be prepended with the ``intel_pstate=`` prefix.
	and this option has no effect.
 
 ``per_cpu_perf_limits``
-	Use per-logical-CPU P-State limits (see `Coordination of P-state
-	Limits`_ for details).
+	Use per-logical-CPU P-State limits (see
+	:ref:`pstate_limits_coordination` for details).
 
 ``no_cas``
-	Do not enable `capacity-aware scheduling <CAS_>`_ which is enabled by
-	default on hybrid systems without SMT.
+	Do not enable :ref:`capacity-aware scheduling <CAS>` which is enabled
+	by default on hybrid systems without SMT.
 
 Diagnostics and Tuning
 ======================
@@ -810,7 +825,7 @@ There are two static trace events that can be used for ``intel_pstate``
 diagnostics.  One of them is the ``cpu_frequency`` trace event generally used
 by ``CPUFreq``, and the other one is the ``pstate_sample`` trace event specific
 to ``intel_pstate``.  Both of them are triggered by ``intel_pstate`` only if
-it works in the `active mode <Active Mode_>`_.
+it works in the :ref:`active mode <active_mode>`.
 
 The following sequence of shell commands can be used to enable them and see
 their output (if the kernel is generally configured to support event tracing)::
@@ -822,7 +837,7 @@ their output (if the kernel is generally configured to support event tracing)::
    gnome-terminal--4510  [001] ..s.  1177.680733: pstate_sample: core_busy=107 scaled=94 from=26 to=26 mperf=1143818 aperf=1230607 tsc=29838618 freq=2474476
    cat-5235  [002] ..s.  1177.681723: cpu_frequency: state=2900000 cpu_id=2
 
-If ``intel_pstate`` works in the `passive mode <Passive Mode_>`_, the
+If ``intel_pstate`` works in the :ref:`passive mode <passive_mode>`, the
 ``cpu_frequency`` trace event will be triggered either by the ``schedutil``
 scaling governor (for the policies it is attached to), or by the ``CPUFreq``
 core (for the policies with other scaling governors).
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 083d8369a591..e73a66785d69 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -395,7 +395,7 @@ static unsigned int check_freqs(struct cpufreq_policy *policy,
		cur_freq = extract_freq(policy, get_cur_val(mask, data));
		if (cur_freq == freq)
			return 1;
-		udelay(10);
+		usleep_range(10, 15);
	}
	return 0;
 }
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index b44f0f7a5ba1..c45bc98721d2 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -65,13 +65,13 @@ static const char * const amd_pstate_mode_string[] = {
	[AMD_PSTATE_PASSIVE]   = "passive",
	[AMD_PSTATE_ACTIVE]    = "active",
	[AMD_PSTATE_GUIDED]    = "guided",
-	NULL,
 };
+static_assert(ARRAY_SIZE(amd_pstate_mode_string) == AMD_PSTATE_MAX);
 
 const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode)
 {
-	if (mode < 0 || mode >= AMD_PSTATE_MAX)
-		return NULL;
+	if (mode < AMD_PSTATE_UNDEFINED || mode >= AMD_PSTATE_MAX)
+		mode = AMD_PSTATE_UNDEFINED;
 
	return amd_pstate_mode_string[mode];
 }
 EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string);
@@ -110,6 +110,7 @@ enum energy_perf_value_index {
	EPP_INDEX_BALANCE_PERFORMANCE,
	EPP_INDEX_BALANCE_POWERSAVE,
	EPP_INDEX_POWERSAVE,
+	EPP_INDEX_MAX,
 };
 
 static const char * const energy_perf_strings[] = {
@@ -118,8 +119,8 @@ static const char * const energy_perf_strings[] = {
	[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
	[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
	[EPP_INDEX_POWERSAVE] = "power",
-	NULL
 };
+static_assert(ARRAY_SIZE(energy_perf_strings) == EPP_INDEX_MAX);
 
 static unsigned int epp_values[] = {
	[EPP_INDEX_DEFAULT] = 0,
@@ -127,7 +128,8 @@ static unsigned int epp_values[] = {
	[EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
	[EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
	[EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
- };
+};
+static_assert(ARRAY_SIZE(epp_values) == EPP_INDEX_MAX);
 
 typedef int (*cppc_mode_transition_fn)(int);
 
@@ -183,7 +185,7 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
 {
	int i;
 
-	for (i=0; i < AMD_PSTATE_MAX; i++) {
+	for (i = 0; i < AMD_PSTATE_MAX; i++) {
		if (!strncmp(str, amd_pstate_mode_string[i], size))
			return i;
	}
@@ -1137,16 +1139,15 @@ static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
 static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
 {
-	int i = 0;
-	int offset = 0;
+	int offset = 0, i;
	struct amd_cpudata *cpudata = policy->driver_data;
 
	if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sysfs_emit_at(buf, offset, "%s\n",
				energy_perf_strings[EPP_INDEX_PERFORMANCE]);
 
-	while (energy_perf_strings[i] != NULL)
-		offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
+	for (i = 0; i < ARRAY_SIZE(energy_perf_strings); i++)
+		offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i]);
 
	offset += sysfs_emit_at(buf, offset, "\n");
 
@@ -1157,15 +1158,10 @@ static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
 {
	struct amd_cpudata *cpudata = policy->driver_data;
-	char str_preference[21];
	ssize_t ret;
	u8 epp;
 
-	ret = sscanf(buf, "%20s", str_preference);
-	if (ret != 1)
-		return -EINVAL;
-
-	ret = match_string(energy_perf_strings, -1, str_preference);
+	ret = sysfs_match_string(energy_perf_strings, buf);
	if (ret < 0)
		return -EINVAL;
 
@@ -1282,7 +1278,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
	if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
		return 0;
 
-	for_each_present_cpu(cpu) {
+	for_each_online_cpu(cpu) {
		cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
	}
 
@@ -1353,9 +1349,8 @@ int amd_pstate_update_status(const char *buf, size_t size)
		return -EINVAL;
 
	mode_idx = get_mode_idx_from_str(buf, size);
-
-	if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
-		return -EINVAL;
+	if (mode_idx < 0)
+		return mode_idx;
 
	if (mode_state_machine[cppc_state][mode_idx]) {
		guard(mutex)(&amd_pstate_driver_lock);
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index e23d9abea135..9eac77c4f294 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -142,16 +142,15 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
		init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
 
		ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
-		if (ret) {
-			pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
-				__func__, cpu, ret);
-			/*
-			 * Don't abort if the CPU was offline while the driver
-			 * was getting registered.
-			 */
-			if (cpu_online(cpu))
-				return;
+		/*
+		 * Don't abort as the CPU was offline while the driver was
+		 * getting registered.
+		 */
+		if (ret && cpu_online(cpu)) {
+			pr_debug("%s: failed to read perf counters for cpu:%d: %d\n",
+				 __func__, cpu, ret);
+			return;
		}
	}
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index cd1816a12bb9..dc11b62399ad 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -87,6 +87,7 @@ static const struct of_device_id allowlist[] __initconst = {
	{ .compatible = "st-ericsson,u9540", },
 
	{ .compatible = "starfive,jh7110", },
+	{ .compatible = "starfive,jh7110s", },
 
	{ .compatible = "ti,omap2", },
	{ .compatible = "ti,omap4", },
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index fedad1081973..fbbbe501cf2d 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -145,6 +145,8 @@ static unsigned int nforce2_fsb_read(int bootfsb)
		pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
		fsb /= 1000000;
 
+	pci_dev_put(nforce2_sub5);
+
	/* Check if PLL register is already set */
	pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
 
@@ -426,6 +428,7 @@ static int __init nforce2_init(void)
 static void __exit nforce2_exit(void)
 {
	cpufreq_unregister_driver(&nforce2_driver);
+	pci_dev_put(nforce2_dev);
 }
 
 module_init(nforce2_init);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 852e024facc3..4472bb1ec83c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1421,9 +1421,12 @@ static int cpufreq_policy_online(struct cpufreq_policy *policy,
		 * If there is a problem with its frequency table, take it
		 * offline and drop it.
		 */
-		ret = cpufreq_table_validate_and_sort(policy);
-		if (ret)
-			goto out_offline_policy;
+		if (policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_ASCENDING &&
+		    policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_DESCENDING) {
+			ret = cpufreq_table_validate_and_sort(policy);
+			if (ret)
+				goto out_offline_policy;
+		}
 
		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
@@ -2550,7 +2553,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
-			strcpy(policy->last_governor, "\0");
+			policy->last_governor[0] = '\0';
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 492a10f1bdbf..ec4abe374573 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -575,13 +575,18 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
	int scaling = cpu->pstate.scaling;
	int freq;
 
-	pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
-	pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
-	pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
+	pr_debug("CPU%d: PERF_CTL max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+	pr_debug("CPU%d: PERF_CTL turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+	pr_debug("CPU%d: PERF_CTL scaling = %d\n", cpu->cpu, perf_ctl_scaling);
	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
	pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
 
+	if (scaling == perf_ctl_scaling)
+		return;
+
+	hwp_is_hybrid = true;
+
	cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
					   perf_ctl_scaling);
	cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
@@ -909,6 +914,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
	[HWP_CPUFREQ_ATTR_COUNT] = NULL,
 };
 
+static u8 hybrid_get_cpu_type(unsigned int cpu)
+{
+	return cpu_data(cpu).topo.intel_type;
+}
+
 static bool no_cas __ro_after_init;
 
 static struct cpudata *hybrid_max_perf_cpu __read_mostly;
@@ -925,11 +935,8 @@ static int hybrid_active_power(struct device *dev, unsigned long *power,
			       unsigned long *freq)
 {
	/*
-	 * Create "utilization bins" of 0-40%, 40%-60%, 60%-80%, and 80%-100%
-	 * of the maximum capacity such that two CPUs of the same type will be
-	 * regarded as equally attractive if the utilization of each of them
-	 * falls into the same bin, which should prevent tasks from being
-	 * migrated between them too often.
+	 * Create four "states" corresponding to 40%, 60%, 80%, and 100% of the
+	 * full capacity.
	 *
	 * For this purpose, return the "frequency" of 2 for the first
	 * performance level and otherwise leave the value set by the caller.
@@ -943,38 +950,40 @@ static int hybrid_active_power(struct device *dev, unsigned long *power,
	return 0;
 }
 
+static bool hybrid_has_l3(unsigned int cpu)
+{
+	struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(cpu);
+	unsigned int i;
+
+	if (!cacheinfo)
+		return false;
+
+	for (i = 0; i < cacheinfo->num_leaves; i++) {
+		if (cacheinfo->info_list[i].level == 3)
+			return true;
+	}
+
+	return false;
+}
+
 static int hybrid_get_cost(struct device *dev, unsigned long freq,
			   unsigned long *cost)
 {
-	struct pstate_data *pstate = &all_cpu_data[dev->id]->pstate;
-	struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(dev->id);
-
+	/* Facilitate load balancing between CPUs of the same type. */
+	*cost = freq;
	/*
-	 * The smaller the perf-to-frequency scaling factor, the larger the IPC
-	 * ratio between the given CPU and the least capable CPU in the system.
-	 * Regard that IPC ratio as the primary cost component and assume that
-	 * the scaling factors for different CPU types will differ by at least
-	 * 5% and they will not be above INTEL_PSTATE_CORE_SCALING.
+	 * Adjust the cost depending on CPU type.
	 *
-	 * Add the freq value to the cost, so that the cost of running on CPUs
-	 * of the same type in different "utilization bins" is different.
-	 */
-	*cost = div_u64(100ULL * INTEL_PSTATE_CORE_SCALING, pstate->scaling) + freq;
-	/*
-	 * Increase the cost slightly for CPUs able to access L3 to avoid
-	 * touching it in case some other CPUs of the same type can do the work
-	 * without it.
+	 * The idea is to start loading up LPE-cores before E-cores and start
+	 * to populate E-cores when LPE-cores are utilized above 60% of the
+	 * capacity.  Similarly, P-cores start to be populated when E-cores are
+	 * utilized above 60% of the capacity.
	 */
-	if (cacheinfo) {
-		unsigned int i;
-
-		/* Check if L3 cache is there. */
-		for (i = 0; i < cacheinfo->num_leaves; i++) {
-			if (cacheinfo->info_list[i].level == 3) {
-				*cost += 2;
-				break;
-			}
-		}
+	if (hybrid_get_cpu_type(dev->id) == INTEL_CPU_TYPE_ATOM) {
+		if (hybrid_has_l3(dev->id)) /* E-core */
+			*cost += 1;
+	} else { /* P-core */
+		*cost += 2;
	}
 
	return 0;
@@ -1037,9 +1046,9 @@ static void hybrid_set_cpu_capacity(struct cpudata *cpu)
 
	topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu));
 
-	pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
-		 cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf,
-		 cpu->pstate.max_pstate_physical);
+	pr_debug("CPU%d: capacity perf = %u, base perf = %u, sys max perf = %u\n",
+		 cpu->cpu, cpu->capacity_perf, cpu->pstate.max_pstate_physical,
+		 hybrid_max_perf_cpu->capacity_perf);
 }
 
 static void hybrid_clear_cpu_capacity(unsigned int cpunum)
@@ -1384,7 +1393,8 @@ static void set_power_ctl_ee_state(bool input)
 {
	u64 power_ctl;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
+
	rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
	if (input) {
		power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
@@ -1394,7 +1404,6 @@ static void set_power_ctl_ee_state(bool input)
		power_ctl_ee_state = POWER_CTL_EE_DISABLE;
	}
	wrmsrq(MSR_IA32_POWER_CTL, power_ctl);
-	mutex_unlock(&intel_pstate_driver_lock);
 }
 
 static void intel_pstate_hwp_enable(struct cpudata *cpudata);
@@ -1516,13 +1525,9 @@ static int intel_pstate_update_status(const char *buf, size_t size);
 static ssize_t show_status(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
 {
-	ssize_t ret;
-
-	mutex_lock(&intel_pstate_driver_lock);
-	ret = intel_pstate_show_status(buf);
-	mutex_unlock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	return ret;
+	return intel_pstate_show_status(buf);
 }
 
 static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
@@ -1531,11 +1536,13 @@ static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
	char *p = memchr(buf, '\n', count);
	int ret;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
+
	ret = intel_pstate_update_status(buf, p ? p - buf : count);
-	mutex_unlock(&intel_pstate_driver_lock);
+	if (ret < 0)
+		return ret;
 
-	return ret < 0 ? ret : count;
+	return count;
 }
 
 static ssize_t show_turbo_pct(struct kobject *kobj,
@@ -1545,12 +1552,10 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	if (!intel_pstate_driver) {
-		mutex_unlock(&intel_pstate_driver_lock);
+	if (!intel_pstate_driver)
		return -EAGAIN;
-	}
 
	cpu = all_cpu_data[0];
 
@@ -1559,8 +1564,6 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
 
-	mutex_unlock(&intel_pstate_driver_lock);
-
	return sprintf(buf, "%u\n", turbo_pct);
 }
 
@@ -1570,38 +1573,26 @@ static ssize_t show_num_pstates(struct kobject *kobj,
	struct cpudata *cpu;
	int total;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	if (!intel_pstate_driver) {
-		mutex_unlock(&intel_pstate_driver_lock);
+	if (!intel_pstate_driver)
		return -EAGAIN;
-	}
 
	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
 
-	mutex_unlock(&intel_pstate_driver_lock);
-
	return sprintf(buf, "%u\n", total);
 }
 
 static ssize_t show_no_turbo(struct kobject *kobj,
			     struct kobj_attribute *attr, char *buf)
 {
-	ssize_t ret;
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	mutex_lock(&intel_pstate_driver_lock);
-
-	if (!intel_pstate_driver) {
-		mutex_unlock(&intel_pstate_driver_lock);
+	if (!intel_pstate_driver)
		return -EAGAIN;
-	}
-
-	ret = sprintf(buf, "%u\n", global.no_turbo);
-
-	mutex_unlock(&intel_pstate_driver_lock);
 
-	return ret;
+	return sprintf(buf, "%u\n", global.no_turbo);
 }
 
 static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
@@ -1613,29 +1604,25 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
	if (sscanf(buf, "%u", &input) != 1)
		return -EINVAL;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	if (!intel_pstate_driver) {
-		count = -EAGAIN;
-		goto unlock_driver;
-	}
+	if (!intel_pstate_driver)
+		return -EAGAIN;
 
	no_turbo = !!clamp_t(int, input, 0, 1);
 
	WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
	if (global.turbo_disabled && !no_turbo) {
		pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
-		count = -EPERM;
		if (global.no_turbo)
-			goto unlock_driver;
-		else
-			no_turbo = 1;
-	}
+			return -EPERM;
 
-	if (no_turbo == global.no_turbo) {
-		goto unlock_driver;
+		no_turbo = 1;
	}
 
+	if (no_turbo == global.no_turbo)
+		return count;
+
	WRITE_ONCE(global.no_turbo, no_turbo);
 
	mutex_lock(&intel_pstate_limits_lock);
@@ -1654,9 +1641,6 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
	intel_pstate_update_limits_for_all();
	arch_set_max_freq_ratio(no_turbo);
 
-unlock_driver:
-	mutex_unlock(&intel_pstate_driver_lock);
-
	return count;
 }
 
@@ -1706,12 +1690,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
	if (ret != 1)
		return -EINVAL;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	if (!intel_pstate_driver) {
-		mutex_unlock(&intel_pstate_driver_lock);
+	if (!intel_pstate_driver)
		return -EAGAIN;
-	}
 
	mutex_lock(&intel_pstate_limits_lock);
 
@@ -1724,8 +1706,6 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
	else
		update_qos_requests(FREQ_QOS_MAX);
 
-	mutex_unlock(&intel_pstate_driver_lock);
-
	return count;
 }
 
@@ -1739,12 +1719,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
	if (ret != 1)
		return -EINVAL;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
 
-	if (!intel_pstate_driver) {
-		mutex_unlock(&intel_pstate_driver_lock);
+	if (!intel_pstate_driver)
		return -EAGAIN;
-	}
 
	mutex_lock(&intel_pstate_limits_lock);
 
@@ -1758,8 +1736,6 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
	else
		update_qos_requests(FREQ_QOS_MIN);
 
-	mutex_unlock(&intel_pstate_driver_lock);
-
	return count;
 }
 
@@ -1780,10 +1756,10 @@ static ssize_t store_hwp_dynamic_boost(struct kobject *a,
	if (ret)
		return ret;
 
-	mutex_lock(&intel_pstate_driver_lock);
+	guard(mutex)(&intel_pstate_driver_lock);
+
	hwp_boost = !!input;
	intel_pstate_update_policies();
-	mutex_unlock(&intel_pstate_driver_lock);
 
	return count;
 }
@@ -2072,6 +2048,18 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
	intel_pstate_update_epp_defaults(cpudata);
 }
 
+static u64 get_perf_ctl_val(int pstate)
+{
+	u64 val;
+
+	val = (u64)pstate << 8;
+	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+	    cpu_feature_enabled(X86_FEATURE_IDA))
+		val |= (u64)1 << 32;
+
+	return val;
+}
+
 static int atom_get_min_pstate(int not_used)
 {
	u64 value;
@@ -2098,15 +2086,10 @@ static int atom_get_turbo_pstate(int not_used)
 
 static u64 atom_get_val(struct cpudata *cpudata, int pstate)
 {
-	u64 val;
+	u64 val = get_perf_ctl_val(pstate);
	int32_t vid_fp;
	u32 vid;
 
-	val = (u64)pstate << 8;
-	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
-	    cpu_feature_enabled(X86_FEATURE_IDA))
-		val |= (u64)1 << 32;
-
	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);
@@ -2266,14 +2249,7 @@ static int core_get_turbo_pstate(int cpu)
 
 static u64 core_get_val(struct cpudata *cpudata, int pstate)
 {
-	u64 val;
-
-	val = (u64)pstate << 8;
-	if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
-	    cpu_feature_enabled(X86_FEATURE_IDA))
-		val |= (u64)1 << 32;
-
-	return val;
+	return get_perf_ctl_val(pstate);
 }
 
 static int knl_get_aperf_mperf_shift(void)
@@ -2297,18 +2273,14 @@ static int knl_get_turbo_pstate(int cpu)
 static int hwp_get_cpu_scaling(int cpu)
 {
	if (hybrid_scaling_factor) {
-		struct cpuinfo_x86 *c = &cpu_data(cpu);
-		u8 cpu_type = c->topo.intel_type;
-
		/*
		 * Return the hybrid scaling factor for P-cores and use the
		 * default core scaling for E-cores.
		 */
-		if (cpu_type == INTEL_CPU_TYPE_CORE)
+		if (hybrid_get_cpu_type(cpu) == INTEL_CPU_TYPE_CORE)
			return hybrid_scaling_factor;
 
-		if (cpu_type == INTEL_CPU_TYPE_ATOM)
-			return core_get_scaling();
+		return core_get_scaling();
	}
 
	/* Use core scaling on non-hybrid systems. */
@@ -2343,11 +2315,10 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
 
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
-	int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
	int perf_ctl_scaling = pstate_funcs.get_scaling();
 
+	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(cpu->cpu);
	cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
-	cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
	cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
 
	if (hwp_active && !hwp_mode_bdw) {
@@ -2355,10 +2326,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 
		if (pstate_funcs.get_cpu_scaling) {
			cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
-			if (cpu->pstate.scaling != perf_ctl_scaling) {
-				intel_pstate_hybrid_hwp_adjust(cpu);
-				hwp_is_hybrid = true;
-			}
+			intel_pstate_hybrid_hwp_adjust(cpu);
		} else {
			cpu->pstate.scaling = perf_ctl_scaling;
		}
@@ -2760,6 +2728,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	X86_MATCH(INTEL_ATOM_CRESTMONT,		core_funcs),
	X86_MATCH(INTEL_ATOM_CRESTMONT_X,	core_funcs),
	X86_MATCH(INTEL_ATOM_DARKMONT_X,	core_funcs),
+	X86_MATCH(INTEL_DIAMONDRAPIDS_X,	core_funcs),
	{}
 };
 #endif
@@ -3912,9 +3881,9 @@ hwp_cpu_matched:
	}
 
-	mutex_lock(&intel_pstate_driver_lock);
-	rc = intel_pstate_register_driver(default_driver);
-	mutex_unlock(&intel_pstate_driver_lock);
+	scoped_guard(mutex, &intel_pstate_driver_lock) {
+		rc = intel_pstate_register_driver(default_driver);
+	}
	if (rc) {
		intel_pstate_sysfs_remove();
		return rc;
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 765a5bb81829..81e16b5a0245 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -256,13 +256,22 @@ len_error:
	return ret;
 }
 
+static const struct of_device_id qcom_cpufreq_ipq806x_match_list[] __maybe_unused = {
+	{ .compatible = "qcom,ipq8062", .data = (const void *)QCOM_ID_IPQ8062 },
+	{ .compatible = "qcom,ipq8064", .data = (const void *)QCOM_ID_IPQ8064 },
+	{ .compatible = "qcom,ipq8065", .data = (const void *)QCOM_ID_IPQ8065 },
+	{ .compatible = "qcom,ipq8066", .data = (const void *)QCOM_ID_IPQ8066 },
+	{ .compatible = "qcom,ipq8068", .data = (const void *)QCOM_ID_IPQ8068 },
+	{ .compatible = "qcom,ipq8069", .data = (const void *)QCOM_ID_IPQ8069 },
+};
+
 static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,
					     struct nvmem_cell *speedbin_nvmem,
					     char **pvs_name,
					     struct qcom_cpufreq_drv *drv)
 {
+	int msm_id = -1, ret = 0;
	int speed = 0, pvs = 0;
-	int msm_id, ret = 0;
	u8 *speedbin;
	size_t len;
 
@@ -279,8 +288,30 @@ static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,
	get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin);
 
	ret = qcom_smem_get_soc_id(&msm_id);
-	if (ret)
+	if (ret == -ENODEV) {
+		const struct of_device_id *match;
+		struct device_node *root;
+
+		root = of_find_node_by_path("/");
+		if (!root) {
+			ret = -ENODEV;
+			goto exit;
+		}
+
+		/* Fallback to compatible match with no SMEM initialized */
+		match = of_match_node(qcom_cpufreq_ipq806x_match_list, root);
+		of_node_put(root);
+		if (!match) {
+			ret = -ENODEV;
+			goto exit;
+		}
+
+		/* We found a matching device, get the msm_id from the data entry */
+		msm_id = (int)(uintptr_t)match->data;
+		ret = 0;
+	} else if (ret) {
		goto exit;
+	}
 
	switch (msm_id) {
	case QCOM_ID_IPQ8062:
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 4215621deb3f..ba8a1c96427a 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -518,7 +518,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
 
	if (policy->cpu != 0) {
		ret = -EINVAL;
-		goto out_dmc1;
+		goto out;
	}
 
	/*
@@ -530,7 +530,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
	if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
		pr_err("CPUFreq doesn't support this memory type\n");
		ret = -EINVAL;
-		goto out_dmc1;
+		goto out;
	}
 
	/* Find current refresh counter and frequency each DMC */
@@ -544,6 +544,8 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
	cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
	return 0;
 
+out:
+	clk_put(dmc1_clk);
 out_dmc1:
	clk_put(dmc0_clk);
 out_dmc0:
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 136ab102f636..34ed943c5f34 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/units.h>
 
 #include <soc/tegra/bpmp.h>
 #include <soc/tegra/bpmp-abi.h>
@@ -58,7 +59,7 @@ static const struct tegra186_cpufreq_cpu tegra186_cpus[] = {
 };
 
 struct tegra186_cpufreq_cluster {
-	struct cpufreq_frequency_table *table;
+	struct cpufreq_frequency_table *bpmp_lut;
	u32 ref_clk_khz;
	u32 div;
 };
@@ -66,16 +67,119 @@ struct tegra186_cpufreq_cluster {
 struct tegra186_cpufreq_data {
	void __iomem *regs;
	const struct tegra186_cpufreq_cpu *cpus;
+	bool icc_dram_bw_scaling;
	struct tegra186_cpufreq_cluster clusters[];
 };
 
+static int tegra_cpufreq_set_bw(struct cpufreq_policy *policy, unsigned long freq_khz)
+{
+	struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+	struct device *dev;
+	int ret;
+
+	dev = get_cpu_device(policy->cpu);
+	if (!dev)
+		return -ENODEV;
+
+	struct dev_pm_opp *opp __free(put_opp) =
+		dev_pm_opp_find_freq_exact(dev, freq_khz * HZ_PER_KHZ, true);
+	if (IS_ERR(opp))
+		return PTR_ERR(opp);
+
+	ret = dev_pm_opp_set_opp(dev, opp);
+	if (ret)
+		data->icc_dram_bw_scaling = false;
+
+	return ret;
+}
+
+static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
+					    struct cpufreq_frequency_table *bpmp_lut,
+					    struct cpufreq_frequency_table **opp_table)
+{
+	struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+	struct cpufreq_frequency_table *freq_table = NULL;
+	struct cpufreq_frequency_table *pos;
+	struct device *cpu_dev;
+	unsigned long rate;
+	int ret, max_opps;
+	int j = 0;
+
+	cpu_dev = get_cpu_device(policy->cpu);
+	if (!cpu_dev) {
+		pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu);
+		return -ENODEV;
+	}
+
+	/* Initialize OPP table mentioned in operating-points-v2 property in DT */
+	ret = dev_pm_opp_of_add_table_indexed(cpu_dev, 0);
+	if (ret) {
+		dev_err(cpu_dev, "Invalid or empty opp table in device tree\n");
+		data->icc_dram_bw_scaling = false;
+		return ret;
+	}
+
+	max_opps = dev_pm_opp_get_opp_count(cpu_dev);
+	if (max_opps <= 0) {
+		dev_err(cpu_dev, "Failed to add OPPs\n");
+		return max_opps;
+	}
+
+	/* Disable all opps and cross-validate against LUT later */
+	for (rate = 0; ; rate++) {
+		struct dev_pm_opp *opp __free(put_opp) =
+			dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
+		if (IS_ERR(opp))
+			break;
+
+		dev_pm_opp_disable(cpu_dev, rate);
+	}
+
+	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
+	if (!freq_table)
+		return -ENOMEM;
+
+	/*
+	 * Cross check the frequencies from BPMP-FW LUT against the OPP's present in DT.
+	 * Enable only those DT OPP's which are present in LUT also.
+	 */
+	cpufreq_for_each_valid_entry(pos, bpmp_lut) {
+		struct dev_pm_opp *opp __free(put_opp) =
+			dev_pm_opp_find_freq_exact(cpu_dev, pos->frequency * HZ_PER_KHZ, false);
+		if (IS_ERR(opp))
+			continue;
+
+		ret = dev_pm_opp_enable(cpu_dev, pos->frequency * HZ_PER_KHZ);
+		if (ret < 0)
+			return ret;
+
+		freq_table[j].driver_data = pos->driver_data;
+		freq_table[j].frequency = pos->frequency;
+		j++;
+	}
+
+	freq_table[j].driver_data = pos->driver_data;
+	freq_table[j].frequency = CPUFREQ_TABLE_END;
+
+	*opp_table = &freq_table[0];
+
+	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+
+	/* Prime interconnect data */
+	tegra_cpufreq_set_bw(policy, freq_table[j - 1].frequency);
+
+	return ret;
+}
+
 static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
 {
	struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
	unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
+	struct cpufreq_frequency_table *freq_table;
+	struct cpufreq_frequency_table *bpmp_lut;
	u32 cpu;
+	int ret;
 
-	policy->freq_table = data->clusters[cluster].table;
	policy->cpuinfo.transition_latency = 300 * 1000;
	policy->driver_data = NULL;
 
@@ -85,6 +189,20 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
			cpumask_set_cpu(cpu, policy->cpus);
	}
 
+	bpmp_lut = data->clusters[cluster].bpmp_lut;
+
+	if (data->icc_dram_bw_scaling) {
+		ret = tegra_cpufreq_init_cpufreq_table(policy, bpmp_lut, &freq_table);
+		if (!ret) {
+			policy->freq_table = freq_table;
+			return 0;
+		}
+	}
+
+	data->icc_dram_bw_scaling = false;
+	policy->freq_table = bpmp_lut;
+	pr_info("OPP tables missing from DT, EMC frequency scaling disabled\n");
+
	return 0;
 }
 
@@ -102,6 +220,10 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
		writel(edvd_val, data->regs + edvd_offset);
	}
 
+	if (data->icc_dram_bw_scaling)
+		tegra_cpufreq_set_bw(policy, tbl->frequency);
+
	return 0;
 }
 
@@ -134,7 +256,7 @@ static struct cpufreq_driver tegra186_cpufreq_driver = {
	.init = tegra186_cpufreq_init,
 };
 
-static struct cpufreq_frequency_table *init_vhint_table(
+static struct cpufreq_frequency_table *tegra_cpufreq_bpmp_read_lut(
	struct platform_device *pdev, struct tegra_bpmp *bpmp,
	struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id,
	int *num_rates)
@@ -229,6 +351,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
 {
	struct tegra186_cpufreq_data *data;
	struct tegra_bpmp *bpmp;
+	struct device *cpu_dev;
	unsigned int i = 0, err, edvd_offset;
	int num_rates = 0;
	u32 edvd_val, cpu;
@@ -254,9 +377,9 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
	for (i = 0; i < TEGRA186_NUM_CLUSTERS; i++) {
		struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
 
-		cluster->table = init_vhint_table(pdev, bpmp, cluster, i, &num_rates);
-		if (IS_ERR(cluster->table)) {
-			err = PTR_ERR(cluster->table);
+		cluster->bpmp_lut = tegra_cpufreq_bpmp_read_lut(pdev, bpmp, cluster, i, &num_rates);
+		if (IS_ERR(cluster->bpmp_lut)) {
+			err = PTR_ERR(cluster->bpmp_lut);
			goto put_bpmp;
		} else if (!num_rates) {
			err = -EINVAL;
@@ -265,7 +388,7 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
 
		for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
			if (data->cpus[cpu].bpmp_cluster_id == i) {
-				edvd_val = cluster->table[num_rates - 1].driver_data;
+				edvd_val = cluster->bpmp_lut[num_rates - 1].driver_data;
				edvd_offset = data->cpus[cpu].edvd_offset;
				writel(edvd_val, data->regs + edvd_offset);
			}
@@ -274,6 +397,19 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
 
	tegra186_cpufreq_driver.driver_data = data;
 
+	/* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
+	cpu_dev = get_cpu_device(0);
+	if (!cpu_dev) {
+		err = -EPROBE_DEFER;
+		goto put_bpmp;
+	}
+
+	if (dev_pm_opp_of_get_opp_desc_node(cpu_dev)) {
+		err = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
+		if (!err)
+			data->icc_dram_bw_scaling = true;
+	}
+
	err = cpufreq_register_driver(&tegra186_cpufreq_driver);
 
 put_bpmp:
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 9b4f516f313e..695599e1001f 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -750,7 +750,8 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
	if (IS_ERR(bpmp))
		return PTR_ERR(bpmp);
 
-	read_counters_wq = alloc_workqueue("read_counters_wq", __WQ_LEGACY, 1);
+	read_counters_wq = alloc_workqueue("read_counters_wq",
+					   __WQ_LEGACY | WQ_PERCPU, 1);
	if (!read_counters_wq) {
		dev_err(&pdev->dev, "fail to create_workqueue\n");
		err = -EINVAL;