[haiku-commits] haiku: hrev54575 - src/add-ons/kernel/power/cpufreq/intel_pstates

  • From: Jérôme Duval <jerome.duval@xxxxxxxxx>
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Thu, 17 Sep 2020 11:47:02 -0400 (EDT)

hrev54575 adds 1 changeset to branch 'master'
old head: 80ad523bc2f5d7879b0d759c24ed6aa4643e178e
new head: dfed08e0de59ac34009ffeae91b6211f3a944afb
overview: 
https://git.haiku-os.org/haiku/log/?qt=range&q=dfed08e0de59+%5E80ad523bc2f5

----------------------------------------------------------------------------

dfed08e0de59: intel_pstates: enable HWP when available
  
  * don't reset the APERF and MPERF MSRs; other consumers may rely on their values.
  * use EPP or EPB to select power-save or performance mode.
  * keep HWP_REQUEST_PACKAGE disabled (as other OSes seem to do).
  
  Change-Id: I26eb0b05c402b131a4ab174ad6f9ddf8e9367709
  Reviewed-on: https://review.haiku-os.org/c/haiku/+/3222
  Reviewed-by: Jérôme Duval <jerome.duval@xxxxxxxxx>

                                   [ Jérôme Duval <jerome.duval@xxxxxxxxx> ]
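
For context, the EPP-based selection mentioned in the commit message amounts to rewriting a few bit fields of IA32_HWP_REQUEST. The stand-alone C++ sketch below mirrors the logic of set_normal_pstate() in the diff that follows; the read_hwp_request()/write_hwp_request() stubs and the example capability values are invented for illustration, whereas the driver itself uses x86_read_msr()/x86_write_msr() and the IA32_* masks from the kernel headers:

// Stand-alone sketch of the EPP mode selection done in set_normal_pstate().
// Bit layout of IA32_HWP_REQUEST per the Intel SDM: minimum performance in
// bits 7:0, maximum performance in bits 15:8, energy/performance preference
// (EPP) in bits 31:24, where 0x00 favors performance and 0xff energy saving.
#include <cstdint>
#include <cstdio>

static uint64_t sFakeHwpRequest;	// stand-in for the real MSR contents

static uint64_t read_hwp_request() { return sFakeHwpRequest; }
static void write_hwp_request(uint64_t value) { sFakeHwpRequest = value; }

static void
select_mode(bool avoidBoost, uint8_t lowest, uint8_t highest)
{
	uint64_t request = read_hwp_request();

	request &= ~0xffULL;				// minimum performance
	request |= lowest;
	request &= ~(0xffULL << 8);			// maximum performance
	request |= uint64_t(highest) << 8;
	request &= ~(0xffULL << 24);			// EPP
	request |= (avoidBoost ? 0x80ULL : 0x0ULL) << 24;

	write_hwp_request(request);
}

int
main()
{
	// Hypothetical capability values (normally taken from
	// IA32_MSR_HWP_CAPABILITIES): lowest = 4, highest = 40.
	select_mode(true, 4, 40);
	printf("HWP_REQUEST = %#llx\n", (unsigned long long)sFakeHwpRequest);
	return 0;
}

When EPP is not available, the commit falls back to the legacy IA32_ENERGY_PERF_BIAS MSR instead, writing 0xf (power saving) or 0x0 (performance) into its low four bits.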

----------------------------------------------------------------------------

Revision:    hrev54575
Commit:      dfed08e0de59ac34009ffeae91b6211f3a944afb
URL:         https://git.haiku-os.org/haiku/commit/?id=dfed08e0de59
Author:      Jérôme Duval <jerome.duval@xxxxxxxxx>
Date:        Thu Sep 10 20:35:12 2020 UTC

----------------------------------------------------------------------------

1 file changed, 77 insertions(+), 10 deletions(-)
.../cpufreq/intel_pstates/intel_pstates.cpp      | 87 +++++++++++++++++---

----------------------------------------------------------------------------

diff --git a/src/add-ons/kernel/power/cpufreq/intel_pstates/intel_pstates.cpp b/src/add-ons/kernel/power/cpufreq/intel_pstates/intel_pstates.cpp
index 9d589bbec5..49e33121c5 100644
--- a/src/add-ons/kernel/power/cpufreq/intel_pstates/intel_pstates.cpp
+++ b/src/add-ons/kernel/power/cpufreq/intel_pstates/intel_pstates.cpp
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2020, Jérôme Duval, jerome.duval@xxxxxxxxx.
  * Copyright 2013, Haiku, Inc. All Rights Reserved.
  * Distributed under the terms of the MIT License.
  *
@@ -24,6 +25,13 @@ const int kMinimalInterval = 50000;
 static uint16 sMinPState;
 static uint16 sMaxPState;
 static uint16 sBoostPState;
+static bool sHWPActive;
+static bool sHWPEPP;
+static uint8 sHWPLowest;
+static uint8 sHWPGuaranteed;
+static uint8 sHWPEfficient;
+static uint8 sHWPHighest;
+static bool sHWPPackage;
 
 static bool sAvoidBoost;
 
@@ -34,6 +42,9 @@ struct CPUEntry {
        uint16          fCurrentPState;
 
        bigtime_t       fLastUpdate;
+
+       uint64          fPrevAperf;
+       uint64          fPrevMperf;
 } CACHE_LINE_ALIGN;
 static CPUEntry* sCPUEntries;
 
@@ -46,10 +57,15 @@ CPUEntry::CPUEntry()
 }
 
 
+static void set_normal_pstate(void* /* dummy */, int cpu);
+
+
 static void
 pstates_set_scheduler_mode(scheduler_mode mode)
 {
        sAvoidBoost = mode == SCHEDULER_MODE_POWER_SAVING;
+       if (sHWPActive)
+               call_all_cpus(set_normal_pstate, NULL);
 }
 
 
@@ -61,16 +77,16 @@ measure_pstate(CPUEntry* entry)
        uint64 mperf = x86_read_msr(IA32_MSR_MPERF);
        uint64 aperf = x86_read_msr(IA32_MSR_APERF);
 
-       x86_write_msr(IA32_MSR_MPERF, 0);
-       x86_write_msr(IA32_MSR_APERF, 0);
-
        locker.Unlock();
 
-       if (mperf == 0)
+       if (mperf == 0 || mperf == entry->fPrevMperf)
                return sMinPState;
 
-       int oldPState = sMaxPState * aperf / mperf;
+       int oldPState = sMaxPState * (aperf - entry->fPrevAperf)
+               / (mperf - entry->fPrevMperf);
        oldPState = min_c(max_c(oldPState, sMinPState), sBoostPState);
+       entry->fPrevAperf = aperf;
+       entry->fPrevMperf = mperf;
 
        return oldPState;
 }
@@ -96,6 +112,9 @@ pstates_increase_performance(int delta)
 {
        CPUEntry* entry = &sCPUEntries[smp_get_current_cpu()];
 
+       if (sHWPActive)
+               return B_NOT_SUPPORTED;
+
        if (system_time() - entry->fLastUpdate < kMinimalInterval)
                return B_OK;
 
@@ -115,6 +134,9 @@ pstates_decrease_performance(int delta)
 {
        CPUEntry* entry = &sCPUEntries[smp_get_current_cpu()];
 
+       if (sHWPActive)
+               return B_NOT_SUPPORTED;
+
        if (system_time() - entry->fLastUpdate < kMinimalInterval)
                return B_OK;
 
@@ -161,8 +183,46 @@ is_cpu_model_supported(cpu_ent* cpu)
 static void
 set_normal_pstate(void* /* dummy */, int cpu)
 {
-       measure_pstate(&sCPUEntries[cpu]);
-       set_pstate(sMaxPState);
+       if (sHWPActive) {
+               if (x86_check_feature(IA32_FEATURE_HWP_NOTIFY, FEATURE_6_EAX))
+                       x86_write_msr(IA32_MSR_HWP_INTERRUPT, 0);
+               x86_write_msr(IA32_MSR_PM_ENABLE, 1);
+               uint64 hwpRequest = x86_read_msr(IA32_MSR_HWP_REQUEST);
+               uint64 caps = x86_read_msr(IA32_MSR_HWP_CAPABILITIES);
+               sHWPLowest = IA32_HWP_CAPS_LOWEST_PERFORMANCE(caps);
+               sHWPEfficient = IA32_HWP_CAPS_EFFICIENT_PERFORMANCE(caps);
+               sHWPGuaranteed = IA32_HWP_CAPS_GUARANTEED_PERFORMANCE(caps);
+               sHWPHighest = IA32_HWP_CAPS_HIGHEST_PERFORMANCE(caps);
+
+               hwpRequest &= ~IA32_HWP_REQUEST_DESIRED_PERFORMANCE;
+               hwpRequest &= ~IA32_HWP_REQUEST_ACTIVITY_WINDOW;
+
+               hwpRequest &= ~IA32_HWP_REQUEST_MINIMUM_PERFORMANCE;
+               hwpRequest |= sHWPLowest;
+
+               hwpRequest &= ~IA32_HWP_REQUEST_MAXIMUM_PERFORMANCE;
+               hwpRequest |= sHWPHighest << 8;
+
+               if (x86_check_feature(IA32_FEATURE_HWP_EPP, FEATURE_6_EAX)) {
+                       hwpRequest &= ~IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE;
+                       hwpRequest |= (sAvoidBoost ? 0x80ULL : 0x0ULL) << 24;
+               } else if (x86_check_feature(IA32_FEATURE_EPB, FEATURE_6_ECX)) {
+                       uint64 perfBias = x86_read_msr(IA32_MSR_ENERGY_PERF_BIAS);
+                       perfBias &= ~(0xfULL << 0);
+                       perfBias |= (sAvoidBoost ? 0xfULL : 0x0ULL) << 0;
+                       x86_write_msr(IA32_MSR_ENERGY_PERF_BIAS, perfBias);
+               }
+
+               if (sHWPPackage) {
+                       x86_write_msr(IA32_MSR_HWP_REQUEST, hwpRequest
+                               | IA32_HWP_REQUEST_PACKAGE_CONTROL);
+                       x86_write_msr(IA32_MSR_HWP_REQUEST_PKG, hwpRequest);
+               } else
+                       x86_write_msr(IA32_MSR_HWP_REQUEST, hwpRequest);
+       } else {
+               measure_pstate(&sCPUEntries[cpu]);
+               set_pstate(sMaxPState);
+       }
 }
 
 
@@ -181,13 +241,20 @@ init_pstates()
                        return B_ERROR;
        }
 
-       sMinPState = (x86_read_msr(IA32_MSR_PLATFORM_INFO) >> 40) & 0xff;
-       sMaxPState = (x86_read_msr(IA32_MSR_PLATFORM_INFO) >> 8) & 0xff;
+       uint64 platformInfo = x86_read_msr(IA32_MSR_PLATFORM_INFO);
+       sHWPEPP = x86_check_feature(IA32_FEATURE_HWP_EPP, FEATURE_6_EAX);
+       sHWPActive = (x86_check_feature(IA32_FEATURE_HWP, FEATURE_6_EAX)
+               && sHWPEPP);
+       sMinPState = (platformInfo >> 40) & 0xff;
+       sMaxPState = (platformInfo >> 8) & 0xff;
        sBoostPState
                = max_c(x86_read_msr(IA32_MSR_TURBO_RATIO_LIMIT) & 0xff, sMaxPState);
+       /* x86_check_feature(IA32_FEATURE_HWP_PLR, FEATURE_6_EAX)) */
+       sHWPPackage = false;
 
        dprintf("using Intel P-States: min %" B_PRIu16 ", max %" B_PRIu16
-               ", boost %" B_PRIu16 "\n", sMinPState, sMaxPState, sBoostPState);
+               ", boost %" B_PRIu16 "%s\n", sMinPState, sMaxPState, sBoostPState,
+               sHWPActive ? ", HWP active" : "");
 
        if (sMaxPState <= sMinPState || sMaxPState == 0) {
                dprintf("unexpected or invalid Intel P-States limits, 
aborting\n");

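A note on the APERF/MPERF change above: instead of zeroing the counters after every sample (which would disturb any other consumer of those MSRs), measure_pstate() now remembers the previous raw values per CPU and derives the P-state from the deltas. A minimal stand-alone sketch of that calculation, with invented counter values and limits:

// Delta-based P-state estimate, mirroring measure_pstate() above. All MSR
// values and limits here are made up for illustration.
#include <cstdint>
#include <cstdio>
#include <algorithm>

struct CounterState {
	uint64_t prevAperf = 0;
	uint64_t prevMperf = 0;
};

static int
estimate_pstate(CounterState& state, uint64_t aperf, uint64_t mperf,
	int minPState, int maxPState, int boostPState)
{
	// Without a valid MPERF delta there is nothing to scale against.
	if (mperf == 0 || mperf == state.prevMperf)
		return minPState;

	// aperf/mperf == 1 corresponds to the guaranteed ratio (maxPState);
	// larger ratios indicate turbo, capped at boostPState.
	int pstate = int(maxPState * (aperf - state.prevAperf)
		/ (mperf - state.prevMperf));
	pstate = std::min(std::max(pstate, minPState), boostPState);

	state.prevAperf = aperf;
	state.prevMperf = mperf;
	return pstate;
}

int
main()
{
	CounterState state;
	estimate_pstate(state, 1000, 1000, 8, 24, 36);	// prime the counters
	int pstate = estimate_pstate(state, 4000, 3000, 8, 24, 36);
	printf("estimated P-state: %d\n", pstate);	// 24 * 3000 / 2000 = 36
	return 0;
}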
