[haiku-commits] haiku: hrev51782 - src/system/kernel/arch/x86 headers/private/kernel/arch/x86 src/system/kernel/arch/x86/64 src/system/kernel/locks src/system/kernel

  • From: jerome.duval@xxxxxxxxx
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Tue, 30 Jan 2018 15:05:44 -0500 (EST)

hrev51782 adds 1 changeset to branch 'master'
old head: 43e75989a6cad0492d850e922a14e4e584ded13f
new head: 9dd4d2dd055e70a44961e68a19bb8b3b188e23ac
overview: 
http://cgit.haiku-os.org/haiku/log/?qt=range&q=9dd4d2dd055e+%5E43e75989a6ca

----------------------------------------------------------------------------

9dd4d2dd055e: kernel: support for Intel SMAP and SMEP on x86_64.
  
  SMAP will generate page faults when the kernel tries to access user pages 
unless overridden.
  If SMAP is enabled, the override instructions are written where needed in 
memory with
  binary "altcodepatches".
  Support is enabled by default, might be disabled per safemode setting.
  
  Change-Id: Ife26cd765056aeaf65b2ffa3cadd0dcf4e273a96

                                   [ Jérôme Duval <jerome.duval@xxxxxxxxx> ]

----------------------------------------------------------------------------

Revision:    hrev51782
Commit:      9dd4d2dd055e70a44961e68a19bb8b3b188e23ac
URL:         http://cgit.haiku-os.org/haiku/commit/?id=9dd4d2dd055e
Author:      Jérôme Duval <jerome.duval@xxxxxxxxx>
Date:        Wed Jan  3 18:38:49 2018 UTC

----------------------------------------------------------------------------

19 files changed, 214 insertions(+), 15 deletions(-)
.../private/kernel/arch/generic/user_memory.h    |  3 ++
.../private/kernel/arch/x86/arch_altcodepatch.h  | 21 ++++++++++
headers/private/kernel/arch/x86/arch_cpu.h       | 24 ++++++++---
headers/private/system/safemode_defs.h           |  1 +
src/system/boot/platform/bios_ia32/smp.cpp       | 11 +++++
src/system/boot/platform/efi/smp.cpp             | 10 +++++
src/system/kernel/arch/x86/64/arch.S             | 11 +++++
src/system/kernel/arch/x86/64/interrupts.S       |  7 ++++
src/system/kernel/arch/x86/64/syscalls.cpp       |  4 +-
src/system/kernel/arch/x86/64/thread.cpp         |  5 +++
src/system/kernel/arch/x86/arch_cpu.cpp          | 43 ++++++++++++++++++++
src/system/kernel/arch/x86/arch_int.cpp          | 24 +++++++++--
.../x86/paging/64bit/X86PagingMethod64Bit.cpp    |  2 +-
.../arch/x86/paging/pae/X86PagingMethodPAE.cpp   |  2 +-
src/system/kernel/debug/frame_buffer_console.cpp | 12 ++++++
src/system/kernel/elf.cpp                        |  6 +++
src/system/kernel/locks/user_mutex.cpp           | 36 ++++++++++++++--
src/system/kernel/signal.cpp                     |  4 ++
src/system/kernel/thread.cpp                     |  3 ++

----------------------------------------------------------------------------

diff --git a/headers/private/kernel/arch/generic/user_memory.h 
b/headers/private/kernel/arch/generic/user_memory.h
index 6d18fed1c3..e356408889 100644
--- a/headers/private/kernel/arch/generic/user_memory.h
+++ b/headers/private/kernel/arch/generic/user_memory.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2018, Jérôme Duval, jerome.duval@xxxxxxxxx.
  * Copyright 2014, Paweł Dziepak, pdziepak@xxxxxxxxxxx.
  * Distributed under the terms of the MIT License.
  */
@@ -50,7 +51,9 @@ bool user_access(Function function)
        // destructor.
        auto fail = setjmp(thread_get_current_thread()->fault_handler_state);
        if (fail == 0) {
+               set_ac();
                function();
+               clear_ac();
                return true;
        }
        return false;
diff --git a/headers/private/kernel/arch/x86/arch_altcodepatch.h 
b/headers/private/kernel/arch/x86/arch_altcodepatch.h
index f8e8961501..99f01e2784 100644
--- a/headers/private/kernel/arch/x86/arch_altcodepatch.h
+++ b/headers/private/kernel/arch/x86/arch_altcodepatch.h
@@ -20,6 +20,10 @@
 #define ASM_NOP9       .byte 0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 
0x00
 
 
+#define ALTCODEPATCH_TAG_STAC          1
+#define ALTCODEPATCH_TAG_CLAC          2
+
+
 #ifdef _ASSEMBLER
 
 #define CODEPATCH_START        990:
@@ -30,6 +34,14 @@
        .short                  tag                             ;       \
        .popsection
 
+#define ASM_STAC       CODEPATCH_START \
+                                       ASM_NOP3        ;        \
+                                       CODEPATCH_END(ALTCODEPATCH_TAG_STAC)
+
+#define ASM_CLAC       CODEPATCH_START \
+                                       ASM_NOP3        ;       \
+                                       CODEPATCH_END(ALTCODEPATCH_TAG_CLAC)
+
 #else
 
 #define _STRINGIFY(x...)       #x
@@ -43,6 +55,15 @@
        ".short                 " STRINGIFY(tag)        "       \n" \
        ".popsection"
 
+#define ASM_STAC       CODEPATCH_START \
+                                       STRINGIFY(ASM_NOP3)     "\n" \
+                                       CODEPATCH_END(ALTCODEPATCH_TAG_STAC)
+
+#define ASM_CLAC       CODEPATCH_START \
+                                       STRINGIFY(ASM_NOP3)     "\n"\
+                                       CODEPATCH_END(ALTCODEPATCH_TAG_CLAC)
+
+
 void arch_altcodepatch_replace(uint16 tag, void* newcodepatch, size_t length);
 
 
diff --git a/headers/private/kernel/arch/x86/arch_cpu.h 
b/headers/private/kernel/arch/x86/arch_cpu.h
index 9a72adfac0..a7672fdb76 100644
--- a/headers/private/kernel/arch/x86/arch_cpu.h
+++ b/headers/private/kernel/arch/x86/arch_cpu.h
@@ -17,6 +17,7 @@
 
 #include <arch_thread_types.h>
 
+#include <arch/x86/arch_altcodepatch.h>
 #include <arch/x86/descriptors.h>
 
 #ifdef __x86_64__
@@ -272,10 +273,6 @@
 #define IA32_FEATURE_AMD_EXT_IBPB      (1 << 12)       /* IBPB Support only 
(no IBRS) */
 
 
-// cr4 flags
-#define IA32_CR4_PAE                                   (1UL << 5)
-#define IA32_CR4_GLOBAL_PAGES                  (1UL << 7)
-
 // Memory type ranges
 #define IA32_MTR_UNCACHED                              0
 #define IA32_MTR_WRITE_COMBINING               1
@@ -299,7 +296,7 @@
 #define X86_EFLAGS_NESTED_TASK                                 0x00004000
 #define X86_EFLAGS_RESUME                                              
0x00010000
 #define X86_EFLAGS_V86_MODE                                            
0x00020000
-#define X86_EFLAGS_ALIGNMENT_CHECK                             0x00040000
+#define X86_EFLAGS_ALIGNMENT_CHECK                             0x00040000      
// also SMAP status
 #define X86_EFLAGS_VIRTUAL_INTERRUPT                   0x00080000
 #define X86_EFLAGS_VIRTUAL_INTERRUPT_PENDING   0x00100000
 #define X86_EFLAGS_ID                                                  
0x00200000
@@ -313,9 +310,20 @@
 #define CR0_FPU_EMULATION              (1UL << 2)
 #define CR0_MONITOR_FPU                        (1UL << 1)
 
+// cr4 flags
+#define IA32_CR4_PAE                   (1UL << 5)
+#define IA32_CR4_GLOBAL_PAGES  (1UL << 7)
 #define CR4_OS_FXSR                            (1UL << 9)
 #define CR4_OS_XMM_EXCEPTION   (1UL << 10)
+#define IA32_CR4_SMEP                  (1UL << 20)
+#define IA32_CR4_SMAP                  (1UL << 21)
 
+// page fault error codes (http://wiki.osdev.org/Page_Fault)
+#define PGFAULT_P                                              0x01    // 
Protection violation
+#define PGFAULT_W                                              0x02    // Write
+#define PGFAULT_U                                              0x04    // 
Usermode
+#define PGFAULT_RSVD                                   0x08    // Reserved bits
+#define PGFAULT_I                                              0x10    // 
Instruction fetch
 
 // iframe types
 #define IFRAME_TYPE_SYSCALL                            0x1
@@ -461,6 +469,12 @@ typedef struct arch_cpu_info {
 #define wbinvd() \
        __asm__("wbinvd")
 
+#define set_ac() \
+       __asm__ volatile (ASM_STAC : : : "memory")
+
+#define clear_ac() \
+       __asm__ volatile (ASM_CLAC : : : "memory")
+
 #define out8(value,port) \
        __asm__ ("outb %%al,%%dx" : : "a" (value), "d" (port))
 
diff --git a/headers/private/system/safemode_defs.h 
b/headers/private/system/safemode_defs.h
index ecbeeb0dd3..77afb1cf65 100644
--- a/headers/private/system/safemode_defs.h
+++ b/headers/private/system/safemode_defs.h
@@ -12,6 +12,7 @@
 #define B_SAFEMODE_DISABLE_ACPI                                "disable_acpi"
 #define B_SAFEMODE_DISABLE_APIC                                "disable_apic"
 #define B_SAFEMODE_ENABLE_X2APIC                       "enable_x2apic"
+#define B_SAFEMODE_DISABLE_SMEP_SMAP           "disable_smep_smap"
 #define B_SAFEMODE_DISABLE_APM                         "disable_apm"
 #define B_SAFEMODE_DISABLE_SMP                         "disable_smp"
 #define B_SAFEMODE_DISABLE_HYPER_THREADING     "disable_hyperthreading"
diff --git a/src/system/boot/platform/bios_ia32/smp.cpp 
b/src/system/boot/platform/bios_ia32/smp.cpp
index 9a2e77ea01..f60cbbec27 100644
--- a/src/system/boot/platform/bios_ia32/smp.cpp
+++ b/src/system/boot/platform/bios_ia32/smp.cpp
@@ -595,6 +595,17 @@ smp_add_safemode_menus(Menu *menu)
                        item->SetHelpText("Enables using X2APIC.");
 #endif
                }
+
+               if (get_current_cpuid(&info, 7, 0) == B_OK
+                               && (info.regs.ebx & (IA32_FEATURE_SMEP
+                                       | IA32_FEATURE_SMAP)) != 0) {
+                       menu->AddItem(item = new(nothrow) MenuItem(
+                               "Disable SMEP and SMAP"));
+                       item->SetType(MENU_ITEM_MARKABLE);
+                       item->SetData(B_SAFEMODE_DISABLE_SMEP_SMAP);
+                       item->SetHelpText("Disables using SMEP and SMAP.");
+               }
+
        }
 
        if (gKernelArgs.num_cpus < 2)
diff --git a/src/system/boot/platform/efi/smp.cpp 
b/src/system/boot/platform/efi/smp.cpp
index d597ea5fde..7b2e5462d9 100644
--- a/src/system/boot/platform/efi/smp.cpp
+++ b/src/system/boot/platform/efi/smp.cpp
@@ -409,6 +409,16 @@ smp_add_safemode_menus(Menu *menu)
                        item->SetHelpText("Enables using X2APIC.");
 #endif
                }
+
+               if (get_current_cpuid(&info, 7, 0) == B_OK
+                               && (info.regs.ebx & (IA32_FEATURE_SMEP
+                                       | IA32_FEATURE_SMAP)) != 0) {
+                       menu->AddItem(item = new(nothrow) MenuItem(
+                               "Disable SMEP and SMAP"));
+                       item->SetType(MENU_ITEM_MARKABLE);
+                       item->SetData(B_SAFEMODE_DISABLE_SMEP_SMAP);
+                       item->SetHelpText("Disables using SMEP and SMAP.");
+               }
        }
 
        if (gKernelArgs.num_cpus < 2)
diff --git a/src/system/kernel/arch/x86/64/arch.S 
b/src/system/kernel/arch/x86/64/arch.S
index 6fbc3d8603..d88b05bfed 100644
--- a/src/system/kernel/arch/x86/64/arch.S
+++ b/src/system/kernel/arch/x86/64/arch.S
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2018, Jérôme Duval, jerome.duval@xxxxxxxxx.
  * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxx.
  * Copyright 2003-2007, Axel Dörfler, axeld@xxxxxxxxxxxxxxxx.
  * Copyright 2012, Rene Gollent, rene@xxxxxxxxxxx.
@@ -108,3 +109,13 @@ FUNCTION(arch_debug_call_with_fault_handler):
        movq    $1, %rsi
        call    longjmp
 FUNCTION_END(arch_debug_call_with_fault_handler)
+
+
+       .section .rodata
+FUNCTION(_stac):
+       stac
+FUNCTION_END(_stac)
+
+FUNCTION(_clac):
+       clac
+FUNCTION_END(_clac)
diff --git a/src/system/kernel/arch/x86/64/interrupts.S 
b/src/system/kernel/arch/x86/64/interrupts.S
index 5d7d9ee90c..882d3a5ff9 100644
--- a/src/system/kernel/arch/x86/64/interrupts.S
+++ b/src/system/kernel/arch/x86/64/interrupts.S
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2018, Jérôme Duval, jerome.duval@xxxxxxxxx.
  * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxx.
  * Distributed under the terms of the MIT License.
  */
@@ -9,6 +10,7 @@
 #include <thread_types.h>
 
 #include <arch/x86/descriptors.h>
+#include <arch/x86/arch_altcodepatch.h>
 #include <arch/x86/arch_cpu.h>
 #include <arch/x86/arch_kernel.h>
 
@@ -143,6 +145,7 @@
 // Interrupt with no error code, pushes a 0 error code.
 #define DEFINE_ISR(nr)                                 \
        .align 16;                                                      \
+       ASM_CLAC                                                        \
        push    $0;                                                     \
        push    $nr;                                            \
        jmp             int_bottom;
@@ -150,6 +153,7 @@
 // Interrupt with an error code.
 #define DEFINE_ISR_E(nr)                               \
        .align 16;                                                      \
+       ASM_CLAC                                                        \
        push    $nr;                                            \
        jmp             int_bottom;
 
@@ -546,10 +550,13 @@ FUNCTION(x86_64_syscall_entry):
        // Set a fault handler.
        movq    $.Lbad_syscall_args, THREAD_fault_handler(%r12)
 
+       ASM_STAC
+
        // Copy them by quadwords.
        shrq    $3, %rcx
        rep
        movsq
+       ASM_CLAC
        movq    $0, THREAD_fault_handler(%r12)
 
        // Perform the call.
diff --git a/src/system/kernel/arch/x86/64/syscalls.cpp 
b/src/system/kernel/arch/x86/64/syscalls.cpp
index 174755f5a0..7bcfeda5ae 100644
--- a/src/system/kernel/arch/x86/64/syscalls.cpp
+++ b/src/system/kernel/arch/x86/64/syscalls.cpp
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2018, Jérôme Duval, jerome.duval@xxxxxxxxx.
  * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxx.
  * Distributed under the terms of the MIT License.
  */
@@ -25,7 +26,8 @@ init_syscall_registers(void* dummy, int cpuNum)
 
        // Flags to clear upon entry. Want interrupts disabled and the direction
        // flag cleared.
-       x86_write_msr(IA32_MSR_FMASK, X86_EFLAGS_INTERRUPT | 
X86_EFLAGS_DIRECTION);
+       x86_write_msr(IA32_MSR_FMASK, X86_EFLAGS_INTERRUPT | 
X86_EFLAGS_DIRECTION
+               | X86_EFLAGS_ALIGNMENT_CHECK);
 
        // Entry point address.
        x86_write_msr(IA32_MSR_LSTAR, (addr_t)x86_64_syscall_entry);
diff --git a/src/system/kernel/arch/x86/64/thread.cpp 
b/src/system/kernel/arch/x86/64/thread.cpp
index 1aab1a43a1..a29697a03f 100644
--- a/src/system/kernel/arch/x86/64/thread.cpp
+++ b/src/system/kernel/arch/x86/64/thread.cpp
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2018, Jérôme Duval, jerome.duval@xxxxxxxxx.
  * Copyright 2012, Alex Smith, alex@xxxxxxxxxxxxxxxx.
  * Copyright 2002-2008, Axel Dörfler, axeld@xxxxxxxxxxxxxxxx.
  * Distributed under the terms of the MIT License.
@@ -223,8 +224,10 @@ arch_thread_enter_userspace(Thread* thread, addr_t entry, 
void* args1,
        // entry function returns to the top of the stack to act as the return
        // address. The stub is inside commpage.
        addr_t commPageAddress = (addr_t)thread->team->commpage_address;
+       set_ac();
        codeAddr = ((addr_t*)commPageAddress)[COMMPAGE_ENTRY_X86_THREAD_EXIT]
                + commPageAddress;
+       clear_ac();
        if (user_memcpy((void*)stackTop, (const void*)&codeAddr, 
sizeof(codeAddr))
                        != B_OK)
                return B_BAD_ADDRESS;
@@ -343,8 +346,10 @@ arch_setup_signal_frame(Thread* thread, struct sigaction* 
action,
        // stack. First argument points to the frame data.
        addr_t* commPageAddress = (addr_t*)thread->team->commpage_address;
        frame->user_sp = (addr_t)userStack;
+       set_ac();
        frame->ip = commPageAddress[COMMPAGE_ENTRY_X86_SIGNAL_HANDLER]
                + (addr_t)commPageAddress;
+       clear_ac();
        frame->di = (addr_t)userSignalFrameData;
 
        return B_OK;
diff --git a/src/system/kernel/arch/x86/arch_cpu.cpp 
b/src/system/kernel/arch/x86/arch_cpu.cpp
index 9afae214bc..85d2108f64 100644
--- a/src/system/kernel/arch/x86/arch_cpu.cpp
+++ b/src/system/kernel/arch/x86/arch_cpu.cpp
@@ -24,6 +24,7 @@
 #include <commpage.h>
 #include <debug.h>
 #include <elf.h>
+#include <safemode.h>
 #include <smp.h>
 #include <util/BitUtils.h>
 #include <vm/vm.h>
@@ -79,6 +80,11 @@ struct set_mtrrs_parameter {
 };
 
 
+#ifdef __x86_64__
+extern addr_t _stac;
+extern addr_t _clac;
+#endif
+
 extern "C" void x86_reboot(void);
        // from arch.S
 
@@ -1159,6 +1165,20 @@ arch_cpu_init(kernel_args* args)
 }
 
 
+static void
+enable_smap(void* dummy, int cpu)
+{
+       x86_write_cr4(x86_read_cr4() | IA32_CR4_SMAP);
+}
+
+
+static void
+enable_smep(void* dummy, int cpu)
+{
+       x86_write_cr4(x86_read_cr4() | IA32_CR4_SMEP);
+}
+
+
 status_t
 arch_cpu_init_post_vm(kernel_args* args)
 {
@@ -1188,6 +1208,29 @@ arch_cpu_init_post_vm(kernel_args* args)
                x86_init_fpu();
        // else fpu gets set up in smp code
 
+#ifdef __x86_64__
+       // if available enable SMEP (Supervisor Memory Execution Protection)
+       if (x86_check_feature(IA32_FEATURE_SMEP, FEATURE_7_EBX)) {
+               if (!get_safemode_boolean(B_SAFEMODE_DISABLE_SMEP_SMAP, false)) 
{
+                       dprintf("enable SMEP\n");
+                       call_all_cpus_sync(&enable_smep, NULL);
+               } else
+                       dprintf("SMEP disabled per safemode setting\n");
+       }
+
+       // if available enable SMAP (Supervisor Memory Access Protection)
+       if (x86_check_feature(IA32_FEATURE_SMAP, FEATURE_7_EBX)) {
+               if (!get_safemode_boolean(B_SAFEMODE_DISABLE_SMEP_SMAP, false)) 
{
+                       dprintf("enable SMAP\n");
+                       call_all_cpus_sync(&enable_smap, NULL);
+
+                       arch_altcodepatch_replace(ALTCODEPATCH_TAG_STAC, 
&_stac, 3);
+                       arch_altcodepatch_replace(ALTCODEPATCH_TAG_CLAC, 
&_clac, 3);
+               } else
+                       dprintf("SMAP disabled per safemode setting\n");
+       }
+#endif
+
        return B_OK;
 }
 
diff --git a/src/system/kernel/arch/x86/arch_int.cpp 
b/src/system/kernel/arch/x86/arch_int.cpp
index 090c3eb861..005a7e8cff 100644
--- a/src/system/kernel/arch/x86/arch_int.cpp
+++ b/src/system/kernel/arch/x86/arch_int.cpp
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2018, Jérôme Duval, jerome.duval@xxxxxxxxx.
  * Copyright 2008-2011, Michael Lotz, mmlr@xxxxxxxx.
  * Copyright 2010, Clemens Zeidler, haiku@xxxxxxxxxxxxxxxxxx.
  * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@xxxxxx.
@@ -295,7 +296,7 @@ x86_page_fault_exception(struct iframe* frame)
                panic("page fault in debugger without fault handler! Touching "
                        "address %p from ip %p\n", (void*)cr2, 
(void*)frame->ip);
                return;
-       } else if ((frame->flags & 0x200) == 0) {
+       } else if ((frame->flags & X86_EFLAGS_INTERRUPT) == 0) {
                // interrupts disabled
 
                // If a page fault handler is installed, we're allowed to be 
here.
@@ -326,14 +327,29 @@ x86_page_fault_exception(struct iframe* frame)
                panic("page fault not allowed at this place. Touching address "
                        "%p from ip %p\n", (void*)cr2, (void*)frame->ip);
                return;
+       } else if ((frame->error_code & PGFAULT_U) == 0
+               && (frame->error_code & PGFAULT_I) == PGFAULT_I
+               && (x86_read_cr4() & IA32_CR4_SMEP) != 0) {
+               // check that:  1. the fault did not come from userland,
+               // 2. it is an instruction fetch, 3. SMEP is enabled
+               panic("SMEP violation user-mapped address %p touched from 
kernel %p\n",
+                        (void*)cr2, (void*)frame->ip);
+       } else if ((frame->flags & X86_EFLAGS_ALIGNMENT_CHECK) == 0
+               && (frame->error_code & PGFAULT_U) == 0
+               && (frame->error_code & PGFAULT_P) == PGFAULT_P
+               && (x86_read_cr4() & IA32_CR4_SMAP) != 0) {
+               // check that:  1. the AC flag is not set, 2. the fault did not 
come from userland,
+               // 3. it is a page-protection violation, 4. SMAP is enabled
+               panic("SMAP violation user-mapped address %p touched from 
kernel %p\n",
+                        (void*)cr2, (void*)frame->ip);
        }
 
        enable_interrupts();
 
        vm_page_fault(cr2, frame->ip,
-               (frame->error_code & 0x2)!= 0,          // write access
-               (frame->error_code & 0x10) != 0,        // instruction fetch
-               (frame->error_code & 0x4) != 0,         // userland
+               (frame->error_code & PGFAULT_W)!= 0,            // write access
+               (frame->error_code & PGFAULT_I) != 0,           // instruction 
fetch
+               (frame->error_code & PGFAULT_U) != 0,           // userland
                &newip);
        if (newip != 0) {
                // the page fault handler wants us to modify the iframe to set 
the
diff --git a/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp 
b/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp
index 2a6ef7986d..cf43091abd 100644
--- a/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp
+++ b/src/system/kernel/arch/x86/paging/64bit/X86PagingMethod64Bit.cpp
@@ -59,7 +59,7 @@ X86PagingMethod64Bit::Init(kernel_args* args,
        fKernelPhysicalPML4 = args->arch_args.phys_pgdir;
        fKernelVirtualPML4 = (uint64*)(addr_t)args->arch_args.vir_pgdir;
 
-       // if availalbe enable NX-bit (No eXecute)
+       // if available enable NX-bit (No eXecute)
        if (x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD))
                call_all_cpus_sync(&_EnableExecutionDisable, NULL);
 
diff --git a/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp 
b/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp
index 73fafd55ad..50c97eb63e 100644
--- a/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp
+++ b/src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp
@@ -155,7 +155,7 @@ struct X86PagingMethodPAE::ToPAESwitcher {
                // enable PAE on all CPUs
                call_all_cpus_sync(&_EnablePAE, (void*)(addr_t)physicalPDPT);
 
-               // if availalbe enable NX-bit (No eXecute)
+               // if available enable NX-bit (No eXecute)
                if (x86_check_feature(IA32_FEATURE_AMD_EXT_NX, FEATURE_EXT_AMD))
                        call_all_cpus_sync(&_EnableExecutionDisable, NULL);
 
diff --git a/src/system/kernel/debug/frame_buffer_console.cpp 
b/src/system/kernel/debug/frame_buffer_console.cpp
index f801e14d1c..0b73ad3c8d 100644
--- a/src/system/kernel/debug/frame_buffer_console.cpp
+++ b/src/system/kernel/debug/frame_buffer_console.cpp
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2018, Jérôme Duval, jerome.duval@xxxxxxxxx.
  * Copyright 2005-2009, Axel Dörfler, axeld@xxxxxxxxxxxxxxxx.
  * Distributed under the terms of the MIT License.
  */
@@ -133,6 +134,7 @@ render_glyph(int32 x, int32 y, uint8 glyph, uint8 attr)
                uint8* color = get_palette_entry(foreground_color(attr));
                uint8* backgroundColor = 
get_palette_entry(background_color(attr));
 
+               set_ac();
                for (y = 0; y < CHAR_HEIGHT; y++) {
                        uint8 bits = FONT[CHAR_HEIGHT * glyph + y];
                        for (x = 0; x < CHAR_WIDTH; x++) {
@@ -149,6 +151,8 @@ render_glyph(int32 x, int32 y, uint8 glyph, uint8 attr)
 
                        base += sConsole.bytes_per_row;
                }
+               clear_ac();
+
        } else {
                // VGA mode will be treated as monochrome
                // (ie. only the first plane will be used)
@@ -157,6 +161,7 @@ render_glyph(int32 x, int32 y, uint8 glyph, uint8 attr)
                        + sConsole.bytes_per_row * y * CHAR_HEIGHT + x * 
CHAR_WIDTH / 8);
                uint8 baseOffset =  (x * CHAR_WIDTH) & 0x7;
 
+               set_ac();
                for (y = 0; y < CHAR_HEIGHT; y++) {
                        uint8 bits = FONT[CHAR_HEIGHT * glyph + y];
                        uint8 offset = baseOffset;
@@ -179,6 +184,7 @@ render_glyph(int32 x, int32 y, uint8 glyph, uint8 attr)
 
                        base += sConsole.bytes_per_row;
                }
+               clear_ac();
        }
 }
 
@@ -200,12 +206,14 @@ draw_cursor(int32 x, int32 y)
                endY /= 8;
        }
 
+       set_ac();
        for (; y < endY; y++) {
                for (int32 x2 = x; x2 < endX; x2++)
                        base[x2] = ~base[x2];
 
                base += sConsole.bytes_per_row;
        }
+       clear_ac();
 }
 
 
@@ -290,12 +298,14 @@ console_blit(int32 srcx, int32 srcy, int32 width, int32 
height, int32 destx,
                destx = destx * CHAR_WIDTH / 8;
        }
 
+       set_ac();
        for (int32 y = 0; y < height; y++) {
                memmove((void*)(sConsole.frame_buffer + (desty + y)
                                * sConsole.bytes_per_row + destx),
                        (void*)(sConsole.frame_buffer + (srcy + y) * 
sConsole.bytes_per_row
                                + srcx), width);
        }
+       clear_ac();
 }
 
 
@@ -305,6 +315,7 @@ console_clear(uint8 attr)
        if (!frame_buffer_console_available())
                return;
 
+       set_ac();
        switch (sConsole.bytes_per_pixel) {
                case 1:
                        if (sConsole.depth >= 8) {
@@ -334,6 +345,7 @@ console_clear(uint8 attr)
                }
        }
 
+       clear_ac();
        sConsole.cursor_x = -1;
        sConsole.cursor_y = -1;
 }
diff --git a/src/system/kernel/elf.cpp b/src/system/kernel/elf.cpp
index 77300068fe..8e5820a53a 100644
--- a/src/system/kernel/elf.cpp
+++ b/src/system/kernel/elf.cpp
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2018, Jérôme Duval, jerome.duval@xxxxxxxxx.
  * Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@xxxxxx.
  * Copyright 2002-2009, Axel Dörfler, axeld@xxxxxxxxxxxxxxxx.
  * Distributed under the terms of the MIT License.
@@ -1963,7 +1964,9 @@ elf_load_user_image(const char *path, Team *team, int 
flags, addr_t *entry)
                        size_t amount = fileUpperBound
                                - (programHeaders[i].p_vaddr % B_PAGE_SIZE)
                                - (programHeaders[i].p_filesz);
+                       set_ac();
                        memset((void *)start, 0, amount);
+                       clear_ac();
 
                        // Check if we need extra storage for the bss - we have 
to do this if
                        // the above region doesn't already comprise the memory 
size, too.
@@ -2025,6 +2028,7 @@ elf_load_user_image(const char *path, Team *team, int 
flags, addr_t *entry)
        // modify the dynamic ptr by the delta of the regions
        image->dynamic_section += image->text_region.delta;
 
+       set_ac();
        status = elf_parse_dynamic_section(image);
        if (status != B_OK)
                goto error2;
@@ -2033,6 +2037,8 @@ elf_load_user_image(const char *path, Team *team, int 
flags, addr_t *entry)
        if (status != B_OK)
                goto error2;
 
+       clear_ac();
+
        // set correct area protection
        for (i = 0; i < elfHeader.e_phnum; i++) {
                if (mappedAreas[i] == -1)
diff --git a/src/system/kernel/locks/user_mutex.cpp 
b/src/system/kernel/locks/user_mutex.cpp
index 89414ff379..74489a2051 100644
--- a/src/system/kernel/locks/user_mutex.cpp
+++ b/src/system/kernel/locks/user_mutex.cpp
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2018, Jérôme Duval, jerome.duval@xxxxxxxxx.
  * Copyright 2015, Hamish Morrison, hamishm53@xxxxxxxxx.
  * Copyright 2010, Ingo Weinhold, ingo_weinhold@xxxxxx.
  * Distributed under the terms of the MIT License.
@@ -139,13 +140,17 @@ user_mutex_lock_locked(int32* mutex, addr_t 
physicalAddress,
        const char* name, uint32 flags, bigtime_t timeout, MutexLocker& locker)
 {
        // mark the mutex locked + waiting
+       set_ac();
        int32 oldValue = atomic_or(mutex,
                B_USER_MUTEX_LOCKED | B_USER_MUTEX_WAITING);
+       clear_ac();
 
        if ((oldValue & (B_USER_MUTEX_LOCKED | B_USER_MUTEX_WAITING)) == 0
                        || (oldValue & B_USER_MUTEX_DISABLED) != 0) {
                // clear the waiting flag and be done
+               set_ac();
                atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING);
+               clear_ac();
                return B_OK;
        }
 
@@ -153,8 +158,11 @@ user_mutex_lock_locked(int32* mutex, addr_t 
physicalAddress,
        status_t error = user_mutex_wait_locked(mutex, physicalAddress, name,
                flags, timeout, locker, lastWaiter);
 
-       if (lastWaiter)
+       if (lastWaiter) {
+               set_ac();
                atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING);
+               clear_ac();
+       }
 
        return error;
 }
@@ -166,14 +174,18 @@ user_mutex_unlock_locked(int32* mutex, addr_t 
physicalAddress, uint32 flags)
        UserMutexEntry* entry = sUserMutexTable.Lookup(physicalAddress);
        if (entry == NULL) {
                // no one is waiting -- clear locked flag
+               set_ac();
                atomic_and(mutex, ~(int32)B_USER_MUTEX_LOCKED);
+               clear_ac();
                return;
        }
 
        // Someone is waiting -- set the locked flag. It might still be set,
        // but when using userland atomic operations, the caller will usually
        // have cleared it already.
+       set_ac();
        int32 oldValue = atomic_or(mutex, B_USER_MUTEX_LOCKED);
+       clear_ac();
 
        // unblock the first thread
        entry->locked = true;
@@ -189,11 +201,16 @@ user_mutex_unlock_locked(int32* mutex, addr_t 
physicalAddress, uint32 flags)
 
                // dequeue the first thread and mark the mutex uncontended
                sUserMutexTable.Remove(entry);
+               set_ac();
                atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING);
+               clear_ac();
        } else {
                bool otherWaiters = remove_user_mutex_entry(entry);
-               if (!otherWaiters)
+               if (!otherWaiters) {
+                       set_ac();
                        atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING);
+                       clear_ac();
+               }
        }
 }
 
@@ -204,9 +221,13 @@ user_mutex_sem_acquire_locked(int32* sem, addr_t 
physicalAddress,
 {
        // The semaphore may have been released in the meantime, and we also
        // need to mark it as contended if it isn't already.
+       set_ac();
        int32 oldValue = atomic_get(sem);
+       clear_ac();
        while (oldValue > -1) {
+               set_ac();
                int32 value = atomic_test_and_set(sem, oldValue - 1, oldValue);
+               clear_ac();
                if (value == oldValue && value > 0)
                        return B_OK;
                oldValue = value;
@@ -216,8 +237,11 @@ user_mutex_sem_acquire_locked(int32* sem, addr_t 
physicalAddress,
        status_t error = user_mutex_wait_locked(sem, physicalAddress, name, 
flags,
                timeout, locker, lastWaiter);
 
-       if (lastWaiter)
+       if (lastWaiter) {
+               set_ac();
                atomic_test_and_set(sem, 0, -1);
+               clear_ac();
+       }
 
        return error;
 }
@@ -229,10 +253,14 @@ user_mutex_sem_release_locked(int32* sem, addr_t 
physicalAddress)
        UserMutexEntry* entry = sUserMutexTable.Lookup(physicalAddress);
        if (!entry) {
                // no waiters - mark as uncontended and release
+               set_ac();
                int32 oldValue = atomic_get(sem);
+               clear_ac();
                while (true) {
                        int32 inc = oldValue < 0 ? 2 : 1;
+                       set_ac();
                        int32 value = atomic_test_and_set(sem, oldValue + inc, 
oldValue);
+                       clear_ac();
                        if (value == oldValue)
                                return;
                        oldValue = value;
@@ -246,7 +274,9 @@ user_mutex_sem_release_locked(int32* sem, addr_t 
physicalAddress)
 
        if (!otherWaiters) {
                // mark the semaphore uncontended
+               set_ac();
                atomic_test_and_set(sem, 0, -1);
+               clear_ac();
        }
 }
 
diff --git a/src/system/kernel/signal.cpp b/src/system/kernel/signal.cpp
index 17321813df..cea1c3168e 100644
--- a/src/system/kernel/signal.cpp
+++ b/src/system/kernel/signal.cpp
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2018, Jérôme Duval, jerome.duval@xxxxxxxxx.
  * Copyright 2014, Paweł Dziepak, pdziepak@xxxxxxxxxxx.
  * Copyright 2011-2016, Ingo Weinhold, ingo_weinhold@xxxxxx.
  * Copyright 2002-2009, Axel Dörfler, axeld@xxxxxxxxxxxxxxxx.
@@ -928,14 +929,17 @@ handle_signals(Thread* thread)
        sigset_t nonBlockedMask = ~thread->sig_block_mask;
        sigset_t signalMask = thread->AllPendingSignals() & nonBlockedMask;
 
+       set_ac();
        if (thread->user_thread->defer_signals > 0
                && (signalMask & NON_DEFERRABLE_SIGNALS) == 0
                && thread->sigsuspend_original_unblocked_mask == 0) {
                thread->user_thread->pending_signals = signalMask;
+               clear_ac();
                return;
        }
 
        thread->user_thread->pending_signals = 0;
+       clear_ac();
 
        // determine syscall restart behavior
        uint32 restartFlags = atomic_and(&thread->flags,
diff --git a/src/system/kernel/thread.cpp b/src/system/kernel/thread.cpp
index 12203f06d1..4e6641de0d 100644
--- a/src/system/kernel/thread.cpp
+++ b/src/system/kernel/thread.cpp
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2018, Jérôme Duval, jerome.duval@xxxxxxxxx.
  * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@xxxxxx.
  * Copyright 2002-2009, Axel Dörfler, axeld@xxxxxxxxxxxxxxxx.
  * Distributed under the terms of the MIT License.
@@ -648,12 +649,14 @@ enter_userspace(Thread* thread, UserThreadEntryArguments* 
args)
 
        // init the thread's user_thread
        user_thread* userThread = thread->user_thread;
+       set_ac();
        userThread->pthread = args->pthread;
        userThread->flags = 0;
        userThread->wait_status = B_OK;
        userThread->defer_signals
                = (args->flags & THREAD_CREATION_FLAG_DEFER_SIGNALS) != 0 ? 1 : 
0;
        userThread->pending_signals = 0;
+       clear_ac();
 
        if (args->forkArgs != NULL) {
                // This is a fork()ed thread. Copy the fork args onto the stack 
and


Other related posts:

  • » [haiku-commits] haiku: hrev51782 - src/system/kernel/arch/x86 headers/private/kernel/arch/x86 src/system/kernel/arch/x86/64 src/system/kernel/locks src/system/kernel - jerome . duval