From Alex von Gluck IV <kallisti5@xxxxxxxxxxx>:
Alex von Gluck IV has uploaded this change for review. (
https://review.haiku-os.org/c/haiku/+/2256 ;)
Change subject: EFI: Make our haiku_loader architecture agnostic
......................................................................
EFI: Make our haiku_loader architecture agnostic
* This is the bulk of the work. Anything else should be
minor cleanups and tweaking.
* riscv64 isn't a viable EFI platform yet... just acting
as a stand-in to test a non-x86 EFI haiku_loader
Change-Id: Ib03de81e2b562e693987b86d7b4318209fb1c792
---
M build/jam/ArchitectureRules
M headers/private/kernel/arch/riscv64/arch_kernel_args.h
D headers/private/kernel/boot/platform/efi/arch_mmu.h
A headers/private/kernel/boot/platform/efi/arch_start.h
M src/add-ons/kernel/drivers/timer/Jamfile
M src/kits/debug/arch/riscv64/arch_debug_support.cpp
M src/system/boot/platform/efi/Jamfile
A src/system/boot/platform/efi/arch/riscv64/Jamfile
A src/system/boot/platform/efi/arch/riscv64/arch_timer.cpp
A src/system/boot/platform/efi/arch/riscv64/arch_timer.h
A src/system/boot/platform/efi/arch/riscv64/crt0-efi-riscv64.S
A src/system/boot/platform/efi/arch/riscv64/relocation_func.cpp
M src/system/boot/platform/efi/arch/x86_64/Jamfile
M src/system/boot/platform/efi/arch/x86_64/arch_mmu.cpp
A src/system/boot/platform/efi/arch/x86_64/arch_start.cpp
M src/system/boot/platform/efi/arch/x86_64/entry.S
R src/system/boot/platform/efi/arch/x86_64/smp_trampoline.S
R src/system/boot/platform/efi/arch/x86_64/support.S
M src/system/boot/platform/efi/mmu.cpp
M src/system/boot/platform/efi/mmu.h
M src/system/boot/platform/efi/serial.cpp
M src/system/boot/platform/efi/start.cpp
A src/system/ldscripts/riscv64/boot_loader_efi.ld
23 files changed, 796 insertions(+), 304 deletions(-)
git pull ssh://git.haiku-os.org:22/haiku refs/changes/56/2256/1
diff --git a/build/jam/ArchitectureRules b/build/jam/ArchitectureRules
index e54d9b1..3208f2e 100644
--- a/build/jam/ArchitectureRules
+++ b/build/jam/ArchitectureRules
@@ -302,8 +302,8 @@
}
case riscv64 :
- HAIKU_KERNEL_PLATFORM ?= u-boot ;
- HAIKU_BOOT_TARGETS += u-boot ;
+ HAIKU_KERNEL_PLATFORM ?= efi ;
+ HAIKU_BOOT_TARGETS += efi ;
HAIKU_BOOT_SDIMAGE_SIZE ?= 128 ;
# SOC's like allwinner need an offset to skip the
hardcoded initial loader
diff --git a/headers/private/kernel/arch/riscv64/arch_kernel_args.h
b/headers/private/kernel/arch/riscv64/arch_kernel_args.h
index 0286024..9af2c6d 100644
--- a/headers/private/kernel/arch/riscv64/arch_kernel_args.h
+++ b/headers/private/kernel/arch/riscv64/arch_kernel_args.h
@@ -9,6 +9,10 @@
# error This file is included from <boot/kernel_args.h> only
#endif
+
+#include <util/FixedWidthPointer.h>
+
+
#define _PACKED __attribute__((packed))
#define MAX_VIRTUAL_RANGES_TO_KEEP 32
@@ -21,9 +25,14 @@
uint64 vir_pgdir;
uint64 next_pagetable;
+ uint64 virtual_end;
+
// The virtual ranges we want to keep in the kernel.
uint32 num_virtual_ranges_to_keep;
addr_range virtual_ranges_to_keep[MAX_VIRTUAL_RANGES_TO_KEEP];
-} arch_kernel_args;
+
+ // needed for UEFI, otherwise kernel acpi support can't find ACPI root
+ FixedWidthPointer<void> acpi_root;
+} _PACKED arch_kernel_args;
#endif /* KERNEL_ARCH_RISCV64_KERNEL_ARGS_H */
diff --git a/headers/private/kernel/boot/platform/efi/arch_mmu.h
b/headers/private/kernel/boot/platform/efi/arch_mmu.h
deleted file mode 100644
index b525d08..0000000
--- a/headers/private/kernel/boot/platform/efi/arch_mmu.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * Copyright 2013-2019 Haiku, Inc. All rights reserved.
- * Distributed under the terms of the MIT License.
- */
-#ifndef KERNEL_BOOT_PLATFORM_EFI_ARCH_MMU_H
-#define KERNEL_BOOT_PLATFORM_EFI_ARCH_MMU_H
-
-
-void arch_mmu_init();
-
-
-#endif /* KERNEL_BOOT_PLATFORM_EFI_ARCH_MMU_H */
diff --git a/headers/private/kernel/boot/platform/efi/arch_start.h
b/headers/private/kernel/boot/platform/efi/arch_start.h
new file mode 100644
index 0000000..1c45176
--- /dev/null
+++ b/headers/private/kernel/boot/platform/efi/arch_start.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright 2019-2020, Haiku, Inc. All rights reserved.
+ * Released under the terms of the MIT License.
+ */
+#ifndef __ARCH_START_H
+#define __ARCH_START_H
+
+
+void arch_start_kernel(addr_t kernelEntry);
+
+
+#endif /* __ARCH_START_H */
diff --git a/src/add-ons/kernel/drivers/timer/Jamfile
b/src/add-ons/kernel/drivers/timer/Jamfile
index e7d6e70..3ebed37 100644
--- a/src/add-ons/kernel/drivers/timer/Jamfile
+++ b/src/add-ons/kernel/drivers/timer/Jamfile
@@ -1,6 +1,7 @@
SubDir HAIKU_TOP src add-ons kernel drivers timer ;
UsePrivateHeaders drivers ;
+SubDirHdrs $(HAIKU_TOP) headers private kernel platform
$(TARGET_KERNEL_PLATFORM) ;
KernelAddon <driver>hpet :
hpet.cpp ;
diff --git a/src/kits/debug/arch/riscv64/arch_debug_support.cpp
b/src/kits/debug/arch/riscv64/arch_debug_support.cpp
index 83f6b35..136b0e4 100644
--- a/src/kits/debug/arch/riscv64/arch_debug_support.cpp
+++ b/src/kits/debug/arch/riscv64/arch_debug_support.cpp
@@ -20,6 +20,8 @@
arch_debug_get_instruction_pointer(debug_context *context, thread_id thread,
void **ip, void **stackFrameAddress)
{
+ #warning TODO RISCV64 get instruction pointer
+ #if 0
// get the CPU state
debug_cpu_state cpuState;
status_t error = debug_get_cpu_state(context, thread, NULL, &cpuState);
@@ -28,6 +30,7 @@
*ip = (void*)cpuState.rip;
*stackFrameAddress = (void*)cpuState.rbp;
+ #endif
return B_OK;
}
@@ -37,6 +40,8 @@
arch_debug_get_stack_frame(debug_context *context, void *stackFrameAddress,
debug_stack_frame_info *stackFrameInfo)
{
+ #warning TODO RISCV64 get stack frame
+ #if 0
stack_frame stackFrame;
ssize_t bytesRead = debug_read_memory(context, stackFrameAddress,
&stackFrame, sizeof(stackFrame));
@@ -48,5 +53,7 @@
stackFrameInfo->frame = stackFrameAddress;
stackFrameInfo->parent_frame = stackFrame.previous;
stackFrameInfo->return_address = stackFrame.return_address;
+ #endif
+
return B_OK;
}
diff --git a/src/system/boot/platform/efi/Jamfile
b/src/system/boot/platform/efi/Jamfile
index 18a8cea..860943b 100644
--- a/src/system/boot/platform/efi/Jamfile
+++ b/src/system/boot/platform/efi/Jamfile
@@ -29,8 +29,6 @@
quirks.cpp
smp.cpp
serial.cpp
- smp_trampoline.S
- support.S
;
local platform ;
diff --git a/src/system/boot/platform/efi/arch/riscv64/Jamfile
b/src/system/boot/platform/efi/arch/riscv64/Jamfile
new file mode 100644
index 0000000..9e5a2ed
--- /dev/null
+++ b/src/system/boot/platform/efi/arch/riscv64/Jamfile
@@ -0,0 +1,19 @@
+SubDir HAIKU_TOP src system boot platform efi arch riscv64 ;
+
+SubDirHdrs $(HAIKU_TOP) src system boot platform efi ;
+
+UsePrivateHeaders [ FDirName kernel platform ] ;
+UsePrivateHeaders [ FDirName kernel boot platform efi ] ;
+
+local arch_src =
+ crt0-efi-$(TARGET_ARCH).S
+ #entry.S
+ relocation_func.cpp
+ #arch_smp.cpp
+ #arch_mmu.cpp
+ #arch_timer.cpp
+ ;
+
+BootMergeObject boot_platform_efi_riscv64.o :
+ $(arch_src)
+ ;
diff --git a/src/system/boot/platform/efi/arch/riscv64/arch_timer.cpp
b/src/system/boot/platform/efi/arch/riscv64/arch_timer.cpp
new file mode 100644
index 0000000..0f9f76a
--- /dev/null
+++ b/src/system/boot/platform/efi/arch/riscv64/arch_timer.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2019, Haiku, Inc. All rights reserved.
+ * Distributed under the terms of the MIT License.
+ *
+ * Authors:
+ * Alexander von Gluck IV <kallisti5@xxxxxxxxxxx>
+*/
+
+
+#include "arch_timer.h"
+
+#include <KernelExport.h>
+
+#include <kernel.h>
+#include <safemode.h>
+#include <boot/stage2.h>
+#include <boot/menu.h>
+
+#include <string.h>
+
+//#define TRACE_TIMER
+#ifdef TRACE_TIMER
+# define TRACE(x) dprintf x
+#else
+# define TRACE(x) ;
+#endif
+
+
+void
+arch_timer_init(void)
+{
+ // Stub
+}
diff --git a/src/system/boot/platform/efi/arch/riscv64/arch_timer.h
b/src/system/boot/platform/efi/arch/riscv64/arch_timer.h
new file mode 100644
index 0000000..bfe3e9b
--- /dev/null
+++ b/src/system/boot/platform/efi/arch/riscv64/arch_timer.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2008, Dustin Howett, dustin.howett@xxxxxxxxx. All rights reserved.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef HPET_H
+#define HPET_H
+
+#include <SupportDefs.h>
+#include <arch/x86/arch_hpet.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void arch_timer_init(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* HPET_H */
diff --git a/src/system/boot/platform/efi/arch/riscv64/crt0-efi-riscv64.S
b/src/system/boot/platform/efi/arch/riscv64/crt0-efi-riscv64.S
new file mode 100644
index 0000000..29b8bc3
--- /dev/null
+++ b/src/system/boot/platform/efi/arch/riscv64/crt0-efi-riscv64.S
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * crt0-efi-riscv.S - PE/COFF header for RISC-V EFI applications
+ *
+ * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@xxxxxxxxxx>
+ * Copyright (C) 2018 Alexander Graf <agraf@xxxxxxx>
+ *
+ * This file is inspired by arch/arm/lib/crt0_aarch64_efi.S
+ */
+
+
+#if __riscv_xlen == 64
+#define SIZE_LONG 8
+#define SAVE_LONG(reg, idx) sd reg, (idx*SIZE_LONG)(sp)
+#define LOAD_LONG(reg, idx) ld reg, (idx*SIZE_LONG)(sp)
+#define PE_MACHINE 0x5064
+#else
+#define SIZE_LONG 4
+#define SAVE_LONG(reg, idx) sw reg, (idx*SIZE_LONG)(sp)
+#define LOAD_LONG(reg, idx) lw reg, (idx*SIZE_LONG)(sp)
+#define PE_MACHINE 0x5032
+#endif
+
+
+ .section .text.head
+
+ /*
+ * Magic "MZ" signature for PE/COFF
+ */
+ .globl ImageBase
+ImageBase:
+ .ascii "MZ"
+ .skip 58 /* 'MZ' + pad + offset == 64 */
+ .long pe_header - ImageBase /* Offset to the PE header */
+pe_header:
+ .ascii "PE" /* 'PE' */
+coff_header:
+ .short PE_MACHINE /* RISC-V 64/32-bit */
+ .short 2 /* nr_sections */
+ .long 0 /* TimeDateStamp */
+ .long 0 /* PointerToSymbolTable */
+ .long 0 /* NumberOfSymbols */
+ .short section_table - optional_header /* SizeOfOptionalHeader */
+ /* Characteristics */
+ .short 0x20E
+optional_header:
+ .short 0x20b /* PE32+ format */
+ .byte 0x02 /* MajorLinkerVersion */
+ .byte 0x14 /* MinorLinkerVersion */
+ .long _edata - _start /* SizeOfCode */
+ .long 0 /* SizeOfInitializedData */
+ .long 0 /* SizeOfUninitializedData */
+ .long _start - ImageBase /* AddressOfEntryPoint */
+ .long _start - ImageBase /* BaseOfCode */
+
+extra_header_fields:
+ .quad 0 /* ImageBase */
+ .long 0x20 /* SectionAlignment */
+ .long 0x8 /* FileAlignment */
+ .short 0 /* MajorOperatingSystemVersion
*/
+ .short 0 /* MinorOperatingSystemVersion
*/
+ .short 0 /* MajorImageVersion */
+ .short 0 /* MinorImageVersion */
+ .short 0 /* MajorSubsystemVersion */
+ .short 0 /* MinorSubsystemVersion */
+ .long 0 /* Win32VersionValue */
+
+ .long _edata - ImageBase /* SizeOfImage */
+
+ /*
+ * Everything before the kernel image is considered part of the header
+ */
+ .long _start - ImageBase /* SizeOfHeaders */
+ .long 0 /* CheckSum */
+ .short 10 /* Subsystem (EFI) */
+ .short 0 /* DllCharacteristics */
+ .quad 0 /* SizeOfStackReserve */
+ .quad 0 /* SizeOfStackCommit */
+ .quad 0 /* SizeOfHeapReserve */
+ .quad 0 /* SizeOfHeapCommit */
+ .long 0 /* LoaderFlags */
+ .long 0x6 /* NumberOfRvaAndSizes */
+
+ .quad 0 /* ExportTable */
+ .quad 0 /* ImportTable */
+ .quad 0 /* ResourceTable */
+ .quad 0 /* ExceptionTable */
+ .quad 0 /* CertificationTable */
+ .quad 0 /* BaseRelocationTable */
+
+ /* Section table */
+section_table:
+
+ /*
+ * The EFI application loader requires a relocation section
+ * because EFI applications must be relocatable. This is a
+ * dummy section as far as we are concerned.
+ */
+ .ascii ".reloc"
+ .byte 0
+ .byte 0 /* end of 0 padding of section name */
+ .long 0
+ .long 0
+ .long 0 /* SizeOfRawData */
+ .long 0 /* PointerToRawData */
+ .long 0 /* PointerToRelocations */
+ .long 0 /* PointerToLineNumbers */
+ .short 0 /* NumberOfRelocations */
+ .short 0 /* NumberOfLineNumbers */
+ .long 0x42100040 /* Characteristics (section flags) */
+
+
+ .ascii ".text"
+ .byte 0
+ .byte 0
+ .byte 0 /* end of 0 padding of section name */
+ .long _edata - _start /* VirtualSize */
+ .long _start - ImageBase /* VirtualAddress */
+ .long _edata - _start /* SizeOfRawData */
+ .long _start - ImageBase /* PointerToRawData */
+
+ .long 0 /* PointerToRelocations (0 for executables) */
+ .long 0 /* PointerToLineNumbers (0 for executables) */
+ .short 0 /* NumberOfRelocations (0 for executables) */
+ .short 0 /* NumberOfLineNumbers (0 for executables) */
+ .long 0xe0500020 /* Characteristics (section flags) */
+
+_start:
+ addi sp, sp, -(SIZE_LONG * 3)
+ SAVE_LONG(a0, 0)
+ SAVE_LONG(a1, 1)
+ SAVE_LONG(ra, 2)
+
+ lla a0, ImageBase
+ lla a1, _DYNAMIC
+ call _relocate
+ bne a0, zero, 0f
+
+ LOAD_LONG(a1, 1)
+ LOAD_LONG(a0, 0)
+ call efi_main
+
+ LOAD_LONG(ra, 2)
+
+0: addi sp, sp, (SIZE_LONG * 3)
+ ret
diff --git a/src/system/boot/platform/efi/arch/riscv64/relocation_func.cpp
b/src/system/boot/platform/efi/arch/riscv64/relocation_func.cpp
new file mode 100644
index 0000000..ca56132
--- /dev/null
+++ b/src/system/boot/platform/efi/arch/riscv64/relocation_func.cpp
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* reloc_riscv.c - position independent ELF shared object relocator
+ Copyright (C) 2018 Alexander Graf <agraf@xxxxxxx>
+ Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@xxxxxxxxxx>
+ Copyright (C) 1999 Hewlett-Packard Co.
+ Contributed by David Mosberger <davidm@xxxxxxxxxx>.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials
+ provided with the distribution.
+ * Neither the name of Hewlett-Packard Co. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+ CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+ OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGE.
+*/
+
+#include <efi/types.h>
+#include <efi/system-table.h>
+
+#include <elf.h>
+
+#if __riscv_xlen == 64
+#define Elf_Dyn Elf64_Dyn
+#define Elf_Rela Elf64_Rela
+#define ELF_R_TYPE ELF64_R_TYPE
+#else
+#define Elf_Dyn Elf32_Dyn
+#define Elf_Rela Elf32_Rela
+#define ELF_R_TYPE ELF32_R_TYPE
+#endif
+
+
+efi_status _relocate(long ldbase, Elf_Dyn *dyn,
+ efi_handle image __attribute__((__unused__)),
+ efi_system_table *systab __attribute__((__unused__)))
+{
+ long relsz = 0, relent = 0;
+ Elf_Rela *rel = 0;
+ unsigned long *addr;
+ int i;
+
+ for (i = 0; dyn[i].d_tag != DT_NULL; ++i) {
+ switch (dyn[i].d_tag) {
+ case DT_RELA:
+ rel = (Elf_Rela *)((ulong)dyn[i].d_un.d_ptr + ldbase);
+ break;
+ case DT_RELASZ:
+ relsz = dyn[i].d_un.d_val;
+ break;
+ case DT_RELAENT:
+ relent = dyn[i].d_un.d_val;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!rel && relent == 0)
+ return EFI_SUCCESS;
+
+ if (!rel || relent == 0)
+ return EFI_LOAD_ERROR;
+
+ while (relsz > 0) {
+ /* apply the relocs */
+ switch (ELF_R_TYPE(rel->r_info)) {
+ case R_RISCV_RELATIVE:
+ addr = (ulong *)(ldbase + rel->r_offset);
+ *addr = ldbase + rel->r_addend;
+ break;
+ default:
+ /* Panic */
+ while (1) ;
+ }
+ rel = (Elf_Rela *)((char *)rel + relent);
+ relsz -= relent;
+ }
+ return EFI_SUCCESS;
+}
diff --git a/src/system/boot/platform/efi/arch/x86_64/Jamfile
b/src/system/boot/platform/efi/arch/x86_64/Jamfile
index 13c02e7..cb465a9 100644
--- a/src/system/boot/platform/efi/arch/x86_64/Jamfile
+++ b/src/system/boot/platform/efi/arch/x86_64/Jamfile
@@ -5,10 +5,20 @@
UsePrivateHeaders [ FDirName kernel platform ] ;
UsePrivateHeaders [ FDirName kernel boot platform efi ] ;
+{
+ local defines = _BOOT_MODE GNU_EFI_USE_MS_ABI _BOOT_PLATFORM_EFI ;
+ defines = [ FDefines $(defines) ] ;
+ SubDirCcFlags $(defines) ;
+ SubDirC++Flags $(defines) -fno-rtti ;
+}
+
local arch_src =
crt0-efi-$(TARGET_ARCH).S
entry.S
+ smp_trampoline.S
+ support.S
relocation_func.cpp
+ arch_start.cpp
arch_smp.cpp
arch_mmu.cpp
arch_timer.cpp
diff --git a/src/system/boot/platform/efi/arch/x86_64/arch_mmu.cpp
b/src/system/boot/platform/efi/arch/x86_64/arch_mmu.cpp
index c771434..4645b47 100644
--- a/src/system/boot/platform/efi/arch/x86_64/arch_mmu.cpp
+++ b/src/system/boot/platform/efi/arch/x86_64/arch_mmu.cpp
@@ -7,9 +7,24 @@
*/
+#include <algorithm>
+
+#include <kernel.h>
+#include <arch_kernel.h>
#include <boot/platform.h>
+#include <boot/stage2.h>
#include <arch/x86/descriptors.h>
+#include <efi/types.h>
+#include <efi/boot-services.h>
+
+#include "mmu.h"
+#include "efi_platform.h"
+
+
+#undef BOOT_GDT_SEGMENT_COUNT
+#define BOOT_GDT_SEGMENT_COUNT (USER_DATA_SEGMENT + 1)
+
extern uint64 gLongGDT;
extern uint64 gLongGDTR;
@@ -37,6 +52,176 @@
}
+// Called after EFI boot services exit.
+// Currently assumes that the memory map is sane... Sorted and no overlapping
+// regions.
+void
+arch_mmu_post_efi_setup(size_t memory_map_size, efi_memory_descriptor
*memory_map, size_t descriptor_size, uint32_t descriptor_version)
+{
+ // Add physical memory to the kernel args and update virtual addresses
for EFI regions..
+ addr_t addr = (addr_t)memory_map;
+ gKernelArgs.num_physical_memory_ranges = 0;
+ for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
+ efi_memory_descriptor *entry = (efi_memory_descriptor *)(addr +
i * descriptor_size);
+ switch (entry->Type) {
+ case EfiLoaderCode:
+ case EfiLoaderData:
+ case EfiBootServicesCode:
+ case EfiBootServicesData:
+ case EfiConventionalMemory: {
+ // Usable memory.
+ // Ignore memory below 1MB and above 512GB.
+ uint64_t base = entry->PhysicalStart;
+ uint64_t end = entry->PhysicalStart +
entry->NumberOfPages * 4096;
+ if (base < 0x100000)
+ base = 0x100000;
+ if (end > (512ull * 1024 * 1024 * 1024))
+ end = 512ull * 1024 * 1024 * 1024;
+ if (base >= end)
+ break;
+ uint64_t size = end - base;
+
+ insert_physical_memory_range(base, size);
+ // LoaderData memory is bootloader allocated memory,
possibly
+ // containing the kernel or loaded drivers.
+ if (entry->Type == EfiLoaderData)
+ insert_physical_allocated_range(base, size);
+ break;
+ }
+ case EfiACPIReclaimMemory:
+ // ACPI reclaim -- physical memory we could actually
use later
+ gKernelArgs.ignored_physical_memory +=
entry->NumberOfPages * 4096;
+ break;
+ case EfiRuntimeServicesCode:
+ case EfiRuntimeServicesData:
+ entry->VirtualStart = entry->PhysicalStart;
+ break;
+ }
+ }
+
+ // Sort the address ranges.
+ sort_address_ranges(gKernelArgs.physical_memory_range,
+ gKernelArgs.num_physical_memory_ranges);
+ sort_address_ranges(gKernelArgs.physical_allocated_range,
+ gKernelArgs.num_physical_allocated_ranges);
+ sort_address_ranges(gKernelArgs.virtual_allocated_range,
+ gKernelArgs.num_virtual_allocated_ranges);
+
+ // Switch EFI to virtual mode, using the kernel pmap.
+ // Something involving ConvertPointer might need to be done after this?
+ //
http://wiki.phoenix.com/wiki/index.php/EFI_RUNTIME_SERVICES#SetVirtualAddressMap.28.29
+ kRuntimeServices->SetVirtualAddressMap(memory_map_size,
descriptor_size, descriptor_version, memory_map);
+
+ // Important. Make sure supervisor threads can fault on read only
pages...
+ #if defined(__x86_64__) || defined(__x86__)
+ asm("mov %%rax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
+ #else
+ #error Ensure supervisor threads can fault on read-only pages on this
architecture!
+ #endif
+}
+
+
+
+uint64_t
+arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
+ efi_memory_descriptor *memory_map, size_t descriptor_size,
+ uint32_t descriptor_version)
+{
+ // Generate page tables, matching bios_ia32/long.cpp.
+ uint64_t *pml4;
+ uint64_t *pdpt;
+ uint64_t *pageDir;
+ uint64_t *pageTable;
+
+ // Allocate the top level PML4.
+ pml4 = NULL;
+ if (platform_allocate_region((void**)&pml4, B_PAGE_SIZE, 0, false) !=
B_OK)
+ panic("Failed to allocate PML4.");
+ gKernelArgs.arch_args.phys_pgdir = (uint32_t)(addr_t)pml4;
+ memset(pml4, 0, B_PAGE_SIZE);
+ platform_bootloader_address_to_kernel_address(pml4,
&gKernelArgs.arch_args.vir_pgdir);
+
+ // Store the virtual memory usage information.
+ gKernelArgs.virtual_allocated_range[0].start = KERNEL_LOAD_BASE_64_BIT;
+ gKernelArgs.virtual_allocated_range[0].size =
get_current_virtual_address() - KERNEL_LOAD_BASE_64_BIT;
+ gKernelArgs.num_virtual_allocated_ranges = 1;
+ gKernelArgs.arch_args.virtual_end = ROUNDUP(KERNEL_LOAD_BASE_64_BIT
+ + gKernelArgs.virtual_allocated_range[0].size, 0x200000);
+
+ // Find the highest physical memory address. We map all physical memory
+ // into the kernel address space, so we want to make sure we map
everything
+ // we have available.
+ uint64 maxAddress = 0;
+ for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
+ efi_memory_descriptor *entry = (efi_memory_descriptor
*)((addr_t)memory_map + i * descriptor_size);
+ maxAddress = std::max(maxAddress,
+ entry->PhysicalStart +
entry->NumberOfPages * 4096);
+ }
+
+ // Want to map at least 4GB, there may be stuff other than usable RAM
that
+ // could be in the first 4GB of physical address space.
+ maxAddress = std::max(maxAddress, (uint64)0x100000000ll);
+ maxAddress = ROUNDUP(maxAddress, 0x40000000);
+
+ // Currently only use 1 PDPT (512GB). This will need to change if
someone
+ // wants to use Haiku on a box with more than 512GB of RAM but that's
+ // probably not going to happen any time soon.
+ if (maxAddress / 0x40000000 > 512)
+ panic("Can't currently support more than 512GB of RAM!");
+
+ // Create page tables for the physical map area. Also map this PDPT
+ // temporarily at the bottom of the address space so that we are
identity
+ // mapped.
+
+ pdpt = (uint64*)mmu_allocate_page();
+ memset(pdpt, 0, B_PAGE_SIZE);
+ pml4[510] = (addr_t)pdpt | kTableMappingFlags;
+ pml4[0] = (addr_t)pdpt | kTableMappingFlags;
+
+ for (uint64 i = 0; i < maxAddress; i += 0x40000000) {
+ pageDir = (uint64*)mmu_allocate_page();
+ memset(pageDir, 0, B_PAGE_SIZE);
+ pdpt[i / 0x40000000] = (addr_t)pageDir | kTableMappingFlags;
+
+ for (uint64 j = 0; j < 0x40000000; j += 0x200000) {
+ pageDir[j / 0x200000] = (i + j) |
kLargePageMappingFlags;
+ }
+ }
+
+ // Allocate tables for the kernel mappings.
+
+ pdpt = (uint64*)mmu_allocate_page();
+ memset(pdpt, 0, B_PAGE_SIZE);
+ pml4[511] = (addr_t)pdpt | kTableMappingFlags;
+
+ pageDir = (uint64*)mmu_allocate_page();
+ memset(pageDir, 0, B_PAGE_SIZE);
+ pdpt[510] = (addr_t)pageDir | kTableMappingFlags;
+
+ // We can now allocate page tables and duplicate the mappings across
from
+ // the 32-bit address space to them.
+ pageTable = NULL; // shush, compiler.
+ for (uint32 i = 0; i < gKernelArgs.virtual_allocated_range[0].size
+ / B_PAGE_SIZE; i++) {
+ if ((i % 512) == 0) {
+ pageTable = (uint64*)mmu_allocate_page();
+ memset(pageTable, 0, B_PAGE_SIZE);
+ pageDir[i / 512] = (addr_t)pageTable |
kTableMappingFlags;
+ }
+
+ // Get the physical address to map.
+ void *phys;
+ if
(platform_kernel_address_to_bootloader_address(KERNEL_LOAD_BASE_64_BIT + (i *
B_PAGE_SIZE),
+ &phys) !=
B_OK)
+ continue;
+
+ pageTable[i % 512] = (addr_t)phys | kPageMappingFlags;
+ }
+
+ return (uint64)pml4;
+}
+
+
void
arch_mmu_init()
{
diff --git a/src/system/boot/platform/efi/arch/x86_64/arch_start.cpp
b/src/system/boot/platform/efi/arch/x86_64/arch_start.cpp
new file mode 100644
index 0000000..50ac440
--- /dev/null
+++ b/src/system/boot/platform/efi/arch/x86_64/arch_start.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2014-2020 Haiku, Inc. All rights reserved.
+ * Copyright 2013-2014, Fredrik Holmqvist, fredrik.holmqvist@xxxxxxxxx.
+ * Copyright 2014, Henry Harrington, henry.harrington@xxxxxxxxx.
+ * All rights reserved.
+ * Distributed under the terms of the MIT License.
+ *
+ * Authors:
+ * Alexander von Gluck IV <kallisti5@xxxxxxxxxxx>
+ */
+
+
+#include <boot/platform.h>
+#include <boot/stage2.h>
+#include <boot/stdio.h>
+
+#include "mmu.h"
+#include "serial.h"
+#include "smp.h"
+#include "efi_platform.h"
+
+
+// From entry.S
+extern "C" void arch_enter_kernel(uint64 pml4, uint64 entry_point,
+ uint64 stackTop);
+
+
+void
+arch_start_kernel(addr_t kernelEntry)
+{
+ // Prepare to exit EFI boot services.
+ // Read the memory map.
+ // First call is to determine the buffer size.
+ size_t memory_map_size = 0;
+ efi_memory_descriptor dummy;
+ efi_memory_descriptor *memory_map;
+ size_t map_key;
+ size_t descriptor_size;
+ uint32_t descriptor_version;
+ if (kBootServices->GetMemoryMap(&memory_map_size, &dummy, &map_key,
+ &descriptor_size, &descriptor_version) != EFI_BUFFER_TOO_SMALL)
{
+ panic("Unable to determine size of system memory map");
+ }
+
+ // Allocate a buffer twice as large as needed just in case it gets
bigger between
+ // calls to ExitBootServices.
+ size_t actual_memory_map_size = memory_map_size * 2;
+ memory_map
+ = (efi_memory_descriptor
*)kernel_args_malloc(actual_memory_map_size);
+
+ if (memory_map == NULL)
+ panic("Unable to allocate memory map.");
+
+ // Read (and print) the memory map.
+ memory_map_size = actual_memory_map_size;
+ if (kBootServices->GetMemoryMap(&memory_map_size, memory_map, &map_key,
+ &descriptor_size, &descriptor_version) != EFI_SUCCESS) {
+ panic("Unable to fetch system memory map.");
+ }
+
+ addr_t addr = (addr_t)memory_map;
+ dprintf("System provided memory map:\n");
+ for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
+ efi_memory_descriptor *entry
+ = (efi_memory_descriptor *)(addr + i * descriptor_size);
+ dprintf(" %#lx-%#lx %#lx %#x %#lx\n", entry->PhysicalStart,
+ entry->PhysicalStart + entry->NumberOfPages * 4096,
+ entry->VirtualStart, entry->Type, entry->Attribute);
+ }
+
+ // Generate page tables for use after ExitBootServices.
+ uint64_t final_pml4 =
arch_mmu_generate_post_efi_page_tables(memory_map_size,
+ memory_map, descriptor_size, descriptor_version);
+ dprintf("Final PML4 at %#lx\n", final_pml4);
+
+ // Attempt to fetch the memory map and exit boot services.
+ // This needs to be done in a loop, as ExitBootServices can change the
+ // memory map.
+ // Even better: Only GetMemoryMap and ExitBootServices can be called
after
+ // the first call to ExitBootServices, as the firmware is permitted to
+ // partially exit. This is why twice as much space was allocated for the
+ // memory map, as it's impossible to allocate more now.
+ // A changing memory map shouldn't affect the generated page tables, as
+ // they only needed to know about the maximum address, not any specific
+ // entry.
+ dprintf("Calling ExitBootServices. So long, EFI!\n");
+ while (true) {
+ if (kBootServices->ExitBootServices(kImage, map_key) ==
EFI_SUCCESS) {
+ // The console was provided by boot
services, disable it.
+ stdout = NULL;
+ stderr = NULL;
+ // Also switch to legacy serial output
(may not work on all systems)
+ serial_switch_to_legacy();
+ dprintf("Switched to legacy serial
output\n");
+ break;
+ }
+
+ memory_map_size = actual_memory_map_size;
+ if (kBootServices->GetMemoryMap(&memory_map_size,
memory_map, &map_key,
+ &descriptor_size, &descriptor_version)
!= EFI_SUCCESS) {
+ panic("Unable to fetch system memory
map.");
+ }
+ }
+
+ // Update EFI, generate final kernel physical memory map, etc.
+ arch_mmu_post_efi_setup(memory_map_size, memory_map,
+ descriptor_size, descriptor_version);
+
+ smp_boot_other_cpus(final_pml4, kernelEntry);
+
+ // Enter the kernel!
+ arch_enter_kernel(final_pml4, kernelEntry,
+ gKernelArgs.cpu_kstack[0].start +
gKernelArgs.cpu_kstack[0].size);
+}
diff --git a/src/system/boot/platform/efi/arch/x86_64/entry.S
b/src/system/boot/platform/efi/arch/x86_64/entry.S
index e518f9f..19b5935 100644
--- a/src/system/boot/platform/efi/arch/x86_64/entry.S
+++ b/src/system/boot/platform/efi/arch/x86_64/entry.S
@@ -16,8 +16,8 @@
.code64
-/*! void efi_enter_kernel(uint64 pml4, uint64 entry_point, uint64
stackTop); */
-FUNCTION(efi_enter_kernel):
+/*! void arch_enter_kernel(uint64 pml4, uint64 entry_point, uint64
stackTop); */
+FUNCTION(arch_enter_kernel):
// Point CR3 to the kernel's PML4.
movq %rdi, %cr3
diff --git a/src/system/boot/platform/efi/smp_trampoline.S
b/src/system/boot/platform/efi/arch/x86_64/smp_trampoline.S
similarity index 100%
rename from src/system/boot/platform/efi/smp_trampoline.S
rename to src/system/boot/platform/efi/arch/x86_64/smp_trampoline.S
diff --git a/src/system/boot/platform/efi/support.S
b/src/system/boot/platform/efi/arch/x86_64/support.S
similarity index 100%
rename from src/system/boot/platform/efi/support.S
rename to src/system/boot/platform/efi/arch/x86_64/support.S
diff --git a/src/system/boot/platform/efi/mmu.cpp
b/src/system/boot/platform/efi/mmu.cpp
index a1e913c..037a73d 100644
--- a/src/system/boot/platform/efi/mmu.cpp
+++ b/src/system/boot/platform/efi/mmu.cpp
@@ -27,11 +27,12 @@
};
-static uint64_t next_virtual_address = KERNEL_LOAD_BASE_64_BIT + 32 * 1024 *
1024;
+static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE_64_BIT + 32 * 1024 * 1024;
static allocated_memory_region *allocated_memory_regions = NULL;
-static uint64_t mmu_allocate_page()
+extern "C" uint64_t
+mmu_allocate_page()
{
efi_physical_addr addr;
efi_status s = kBootServices->AllocatePages(AllocateAnyPages,
EfiLoaderData, 1, &addr);
@@ -42,172 +43,19 @@
}
-uint64_t
-mmu_generate_post_efi_page_tables(size_t memory_map_size,
- efi_memory_descriptor *memory_map, size_t descriptor_size,
- uint32_t descriptor_version)
+extern "C" addr_t
+get_next_virtual_address(size_t size)
{
- // Generate page tables, matching bios_ia32/long.cpp.
- uint64_t *pml4;
- uint64_t *pdpt;
- uint64_t *pageDir;
- uint64_t *pageTable;
-
- // Allocate the top level PML4.
- pml4 = NULL;
- if (platform_allocate_region((void**)&pml4, B_PAGE_SIZE, 0, false) !=
B_OK)
- panic("Failed to allocate PML4.");
- gKernelArgs.arch_args.phys_pgdir = (uint32_t)(addr_t)pml4;
- memset(pml4, 0, B_PAGE_SIZE);
- platform_bootloader_address_to_kernel_address(pml4,
&gKernelArgs.arch_args.vir_pgdir);
-
- // Store the virtual memory usage information.
- gKernelArgs.virtual_allocated_range[0].start = KERNEL_LOAD_BASE_64_BIT;
- gKernelArgs.virtual_allocated_range[0].size = next_virtual_address -
KERNEL_LOAD_BASE_64_BIT;
- gKernelArgs.num_virtual_allocated_ranges = 1;
- gKernelArgs.arch_args.virtual_end = ROUNDUP(KERNEL_LOAD_BASE_64_BIT
- + gKernelArgs.virtual_allocated_range[0].size, 0x200000);
-
- // Find the highest physical memory address. We map all physical memory
- // into the kernel address space, so we want to make sure we map
everything
- // we have available.
- uint64 maxAddress = 0;
- for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
- efi_memory_descriptor *entry = (efi_memory_descriptor
*)((addr_t)memory_map + i * descriptor_size);
- maxAddress = std::max(maxAddress,
- entry->PhysicalStart +
entry->NumberOfPages * 4096);
- }
-
- // Want to map at least 4GB, there may be stuff other than usable RAM
that
- // could be in the first 4GB of physical address space.
- maxAddress = std::max(maxAddress, (uint64)0x100000000ll);
- maxAddress = ROUNDUP(maxAddress, 0x40000000);
-
- // Currently only use 1 PDPT (512GB). This will need to change if
someone
- // wants to use Haiku on a box with more than 512GB of RAM but that's
- // probably not going to happen any time soon.
- if (maxAddress / 0x40000000 > 512)
- panic("Can't currently support more than 512GB of RAM!");
-
- // Create page tables for the physical map area. Also map this PDPT
- // temporarily at the bottom of the address space so that we are
identity
- // mapped.
-
- pdpt = (uint64*)mmu_allocate_page();
- memset(pdpt, 0, B_PAGE_SIZE);
- pml4[510] = (addr_t)pdpt | kTableMappingFlags;
- pml4[0] = (addr_t)pdpt | kTableMappingFlags;
-
- for (uint64 i = 0; i < maxAddress; i += 0x40000000) {
- pageDir = (uint64*)mmu_allocate_page();
- memset(pageDir, 0, B_PAGE_SIZE);
- pdpt[i / 0x40000000] = (addr_t)pageDir | kTableMappingFlags;
-
- for (uint64 j = 0; j < 0x40000000; j += 0x200000) {
- pageDir[j / 0x200000] = (i + j) |
kLargePageMappingFlags;
- }
- }
-
- // Allocate tables for the kernel mappings.
-
- pdpt = (uint64*)mmu_allocate_page();
- memset(pdpt, 0, B_PAGE_SIZE);
- pml4[511] = (addr_t)pdpt | kTableMappingFlags;
-
- pageDir = (uint64*)mmu_allocate_page();
- memset(pageDir, 0, B_PAGE_SIZE);
- pdpt[510] = (addr_t)pageDir | kTableMappingFlags;
-
- // We can now allocate page tables and duplicate the mappings across
from
- // the 32-bit address space to them.
- pageTable = NULL; // shush, compiler.
- for (uint32 i = 0; i < gKernelArgs.virtual_allocated_range[0].size
- / B_PAGE_SIZE; i++) {
- if ((i % 512) == 0) {
- pageTable = (uint64*)mmu_allocate_page();
- memset(pageTable, 0, B_PAGE_SIZE);
- pageDir[i / 512] = (addr_t)pageTable |
kTableMappingFlags;
- }
-
- // Get the physical address to map.
- void *phys;
- if
(platform_kernel_address_to_bootloader_address(KERNEL_LOAD_BASE_64_BIT + (i *
B_PAGE_SIZE),
- &phys) !=
B_OK)
- continue;
-
- pageTable[i % 512] = (addr_t)phys | kPageMappingFlags;
- }
-
- return (uint64)pml4;
+ addr_t address = sNextVirtualAddress;
+ sNextVirtualAddress += ROUNDUP(size, B_PAGE_SIZE);
+ return address;
}
-// Called after EFI boot services exit.
-// Currently assumes that the memory map is sane... Sorted and no overlapping
-// regions.
-void
-mmu_post_efi_setup(size_t memory_map_size, efi_memory_descriptor *memory_map,
size_t descriptor_size, uint32_t descriptor_version)
+extern "C" addr_t
+get_current_virtual_address()
{
- // Add physical memory to the kernel args and update virtual addresses
for EFI regions..
- addr_t addr = (addr_t)memory_map;
- gKernelArgs.num_physical_memory_ranges = 0;
- for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
- efi_memory_descriptor *entry = (efi_memory_descriptor *)(addr +
i * descriptor_size);
- switch (entry->Type) {
- case EfiLoaderCode:
- case EfiLoaderData:
- case EfiBootServicesCode:
- case EfiBootServicesData:
- case EfiConventionalMemory: {
- // Usable memory.
- // Ignore memory below 1MB and above 512GB.
- uint64_t base = entry->PhysicalStart;
- uint64_t end = entry->PhysicalStart +
entry->NumberOfPages * 4096;
- if (base < 0x100000)
- base = 0x100000;
- if (end > (512ull * 1024 * 1024 * 1024))
- end = 512ull * 1024 * 1024 * 1024;
- if (base >= end)
- break;
- uint64_t size = end - base;
-
- insert_physical_memory_range(base, size);
- // LoaderData memory is bootloader allocated memory,
possibly
- // containing the kernel or loaded drivers.
- if (entry->Type == EfiLoaderData)
- insert_physical_allocated_range(base, size);
- break;
- }
- case EfiACPIReclaimMemory:
- // ACPI reclaim -- physical memory we could actually
use later
- gKernelArgs.ignored_physical_memory +=
entry->NumberOfPages * 4096;
- break;
- case EfiRuntimeServicesCode:
- case EfiRuntimeServicesData:
- entry->VirtualStart = entry->PhysicalStart;
- break;
- }
- }
-
- // Sort the address ranges.
- sort_address_ranges(gKernelArgs.physical_memory_range,
- gKernelArgs.num_physical_memory_ranges);
- sort_address_ranges(gKernelArgs.physical_allocated_range,
- gKernelArgs.num_physical_allocated_ranges);
- sort_address_ranges(gKernelArgs.virtual_allocated_range,
- gKernelArgs.num_virtual_allocated_ranges);
-
- // Switch EFI to virtual mode, using the kernel pmap.
- // Something involving ConvertPointer might need to be done after this?
- //
http://wiki.phoenix.com/wiki/index.php/EFI_RUNTIME_SERVICES#SetVirtualAddressMap.28.29
- kRuntimeServices->SetVirtualAddressMap(memory_map_size,
descriptor_size, descriptor_version, memory_map);
-
- // Important. Make sure supervisor threads can fault on read only
pages...
- #if defined(__x86_64__) || defined(__x86__)
- asm("mov %%rax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
- #else
- #error Ensure supervisor threads can fault on read-only pages on this
architecture!
- #endif
+ return sNextVirtualAddress;
}
@@ -364,8 +212,7 @@
if (region->paddr <= addr && addr < region->paddr +
region->size) {
// Lazily allocate virtual memory.
if (region->vaddr == 0) {
- region->vaddr = next_virtual_address;
- next_virtual_address += ROUNDUP(region->size,
B_PAGE_SIZE);
+ region->vaddr =
get_next_virtual_address(region->size);
}
*_result = region->vaddr + (addr - region->paddr);
//dprintf("Converted bootloader address %p in region
%#lx-%#lx to %#lx\n",
diff --git a/src/system/boot/platform/efi/mmu.h
b/src/system/boot/platform/efi/mmu.h
index a553582..a222955 100644
--- a/src/system/boot/platform/efi/mmu.h
+++ b/src/system/boot/platform/efi/mmu.h
@@ -1,57 +1,64 @@
/*
* Copyright 2014, Henry Harrington, henry.harrington@xxxxxxxxx.
+ * Copyright 2019-2020, Haiku, Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*/
-
#ifndef MMU_H
#define MMU_H
-#include <arch/x86/descriptors.h>
-
-#undef BOOT_GDT_SEGMENT_COUNT
-#define BOOT_GDT_SEGMENT_COUNT (USER_DATA_SEGMENT + 1)
#ifndef _ASSEMBLER
+
#include "efi_platform.h"
#include <util/FixedWidthPointer.h>
-extern segment_descriptor gBootGDT[BOOT_GDT_SEGMENT_COUNT];
-
-static const uint32 kDefaultPageFlags = 0x3;
- // present, R/W
-static const uint64 kTableMappingFlags = 0x7;
- // present, R/W, user
-static const uint64 kLargePageMappingFlags = 0x183;
- // present, R/W, user, global, large
-static const uint64 kPageMappingFlags = 0x103;
- // present, R/W, user, global
-
-
#ifdef __cplusplus
extern "C" {
#endif
-extern addr_t mmu_map_physical_memory(addr_t physicalAddress,
- size_t size, uint32 flags);
+static const uint32 kDefaultPageFlags = 0x3;
+ // present, R/W
+static const uint64 kTableMappingFlags = 0x7;
+ // present, R/W, user
+static const uint64 kLargePageMappingFlags = 0x183;
+ // present, R/W, user, global, large
+static const uint64 kPageMappingFlags = 0x103;
+ // present, R/W, user, global
+
+
+extern addr_t get_next_virtual_address(size_t size);
+extern addr_t get_current_virtual_address();
+
+extern void mmu_init();
+
+extern uint64_t mmu_allocate_page();
+
+extern addr_t mmu_map_physical_memory(addr_t physicalAddress, size_t size,
+ uint32 flags);
+
extern void mmu_free(void *virtualAddress, size_t size);
-extern void mmu_post_efi_setup(size_t memory_map_size,
- efi_memory_descriptor *memory_map, size_t descriptor_size,
- uint32_t descriptor_version);
-
-extern uint64_t mmu_generate_post_efi_page_tables(size_t memory_map_size,
- efi_memory_descriptor *memory_map, size_t descriptor_size,
- uint32_t descriptor_version);
-
extern status_t platform_kernel_address_to_bootloader_address(uint64_t address,
void **_result);
extern status_t platform_bootloader_address_to_kernel_address(void *address,
uint64_t *_result);
+// Architecture dependent
+
+extern void arch_mmu_post_efi_setup(size_t memory_map_size,
+ efi_memory_descriptor *memory_map, size_t descriptor_size,
+ uint32_t descriptor_version);
+
+extern uint64_t arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
+ efi_memory_descriptor *memory_map, size_t descriptor_size,
+ uint32_t descriptor_version);
+
+void arch_mmu_init();
+
#ifdef __cplusplus
}
#endif
diff --git a/src/system/boot/platform/efi/serial.cpp
b/src/system/boot/platform/efi/serial.cpp
index b8ad4a7..54f40ac 100644
--- a/src/system/boot/platform/efi/serial.cpp
+++ b/src/system/boot/platform/efi/serial.cpp
@@ -50,12 +50,15 @@
if (sSerialUsesEFI) {
size_t bufSize = 1;
sSerial->Write(sSerial, &bufSize, &ch);
- } else {
- while ((in8(sSerialBasePort + SERIAL_LINE_STATUS) & 0x20) == 0)
- asm volatile ("pause;");
-
- out8(ch, sSerialBasePort + SERIAL_TRANSMIT_BUFFER);
+ return;
}
+
+ #if defined(__x86__) || defined(__x86_64__)
+ while ((in8(sSerialBasePort + SERIAL_LINE_STATUS) & 0x20) == 0)
+ asm volatile ("pause;");
+
+ out8(ch, sSerialBasePort + SERIAL_TRANSMIT_BUFFER);
+ #endif
}
@@ -115,6 +118,7 @@
}
+#if defined(__x86__) || defined(__x86_64__)
extern "C" void
serial_switch_to_legacy(void)
{
@@ -135,3 +139,4 @@
out8(3, sSerialBasePort + SERIAL_LINE_CONTROL);
// 8N1
}
+#endif
diff --git a/src/system/boot/platform/efi/start.cpp
b/src/system/boot/platform/efi/start.cpp
index 9c3bf43..586fee8 100644
--- a/src/system/boot/platform/efi/start.cpp
+++ b/src/system/boot/platform/efi/start.cpp
@@ -20,7 +20,7 @@
#include <boot/stage2.h>
#include <boot/stdio.h>
-#include "arch_mmu.h"
+#include "arch_start.h"
#include "acpi.h"
#include "console.h"
#include "efi_platform.h"
@@ -42,7 +42,6 @@
static uint32 sBootOptions;
-static uint64 gLongKernelEntry;
extern "C" int main(stage2_args *args);
@@ -136,8 +135,8 @@
convert_kernel_args();
// Save the kernel entry point address.
- gLongKernelEntry = image->elf_header.e_entry;
- dprintf("kernel entry at %#lx\n", gLongKernelEntry);
+ addr_t kernelEntry = image->elf_header.e_entry;
+ dprintf("kernel entry at %#lx\n", kernelEntry);
// map in a kernel stack
void *stack_address = NULL;
@@ -154,91 +153,10 @@
// Apply any weird EFI quirks
quirks_init();
- // Prepare to exit EFI boot services.
- // Read the memory map.
- // First call is to determine the buffer size.
- size_t memory_map_size = 0;
- efi_memory_descriptor dummy;
- efi_memory_descriptor *memory_map;
- size_t map_key;
- size_t descriptor_size;
- uint32_t descriptor_version;
- if (kBootServices->GetMemoryMap(&memory_map_size, &dummy, &map_key,
- &descriptor_size, &descriptor_version) != EFI_BUFFER_TOO_SMALL)
{
- panic("Unable to determine size of system memory map");
- }
+ // Begin architecture-centric kernel entry.
+ arch_start_kernel(kernelEntry);
- // Allocate a buffer twice as large as needed just in case it gets
bigger between
- // calls to ExitBootServices.
- size_t actual_memory_map_size = memory_map_size * 2;
- memory_map
- = (efi_memory_descriptor
*)kernel_args_malloc(actual_memory_map_size);
-
- if (memory_map == NULL)
- panic("Unable to allocate memory map.");
-
- // Read (and print) the memory map.
- memory_map_size = actual_memory_map_size;
- if (kBootServices->GetMemoryMap(&memory_map_size, memory_map, &map_key,
- &descriptor_size, &descriptor_version) != EFI_SUCCESS) {
- panic("Unable to fetch system memory map.");
- }
-
- addr_t addr = (addr_t)memory_map;
- dprintf("System provided memory map:\n");
- for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
- efi_memory_descriptor *entry
- = (efi_memory_descriptor *)(addr + i * descriptor_size);
- dprintf(" %#lx-%#lx %#lx %#x %#lx\n", entry->PhysicalStart,
- entry->PhysicalStart + entry->NumberOfPages * 4096,
- entry->VirtualStart, entry->Type, entry->Attribute);
- }
-
- // Generate page tables for use after ExitBootServices.
- uint64_t final_pml4 = mmu_generate_post_efi_page_tables(memory_map_size,
- memory_map, descriptor_size, descriptor_version);
- dprintf("Final PML4 at %#lx\n", final_pml4);
-
- // Attempt to fetch the memory map and exit boot services.
- // This needs to be done in a loop, as ExitBootServices can change the
- // memory map.
- // Even better: Only GetMemoryMap and ExitBootServices can be called
after
- // the first call to ExitBootServices, as the firmware is permitted to
- // partially exit. This is why twice as much space was allocated for the
- // memory map, as it's impossible to allocate more now.
- // A changing memory map shouldn't affect the generated page tables, as
- // they only needed to know about the maximum address, not any specific
- // entry.
- dprintf("Calling ExitBootServices. So long, EFI!\n");
- while (true) {
- if (kBootServices->ExitBootServices(kImage, map_key) ==
EFI_SUCCESS) {
- // The console was provided by boot services, disable
it.
- stdout = NULL;
- stderr = NULL;
- // Also switch to legacy serial output (may not work on
all systems)
- serial_switch_to_legacy();
- dprintf("Switched to legacy serial output\n");
- break;
- }
-
- memory_map_size = actual_memory_map_size;
- if (kBootServices->GetMemoryMap(&memory_map_size, memory_map,
&map_key,
- &descriptor_size, &descriptor_version) != EFI_SUCCESS) {
- panic("Unable to fetch system memory map.");
- }
- }
-
- // Update EFI, generate final kernel physical memory map, etc.
- mmu_post_efi_setup(memory_map_size, memory_map,
- descriptor_size, descriptor_version);
-
- smp_boot_other_cpus(final_pml4, gLongKernelEntry);
-
- // Enter the kernel!
- efi_enter_kernel(final_pml4, gLongKernelEntry,
- gKernelArgs.cpu_kstack[0].start +
gKernelArgs.cpu_kstack[0].size);
-
- panic("Shouldn't get here");
+ panic("Shouldn't get here!");
}
diff --git a/src/system/ldscripts/riscv64/boot_loader_efi.ld
b/src/system/ldscripts/riscv64/boot_loader_efi.ld
new file mode 100644
index 0000000..aece030
--- /dev/null
+++ b/src/system/ldscripts/riscv64/boot_loader_efi.ld
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * U-Boot riscv64 EFI linker script
+ *
+ * NOTE(review): this header originally carried a second, conflicting tag
+ * ("SPDX-License-Identifier: BSD-2-Clause"). A file cannot be under both;
+ * confirm against the U-Boot source whether GPL-2.0+ or BSD-2-Clause applies
+ * and keep exactly one SPDX identifier.
+ *
+ * Modified from arch/arm/lib/elf_aarch64_efi.lds
+ */
+
+OUTPUT_FORMAT("elf64-littleriscv", "elf64-littleriscv", "elf64-littleriscv")
+OUTPUT_ARCH(riscv)
+ENTRY(_start)
+SECTIONS
+{
+ .text 0x0 : {
+ _text = .;
+ *(.text.head)
+ *(.text)
+ *(.text.*)
+ *(.gnu.linkonce.t.*)
+ *(.srodata)
+ *(.rodata*)
+ . = ALIGN(16);
+ }
+ _etext = .;
+ _text_size = . - _text;
+ .dynamic : { *(.dynamic) }
+ .data : {
+ _data = .;
+ *(.sdata)
+ *(.data)
+ *(.data1)
+ *(.data.*)
+ *(.got.plt)
+ *(.got)
+
+ /*
+ * The EFI loader doesn't seem to like a .bss section, so we
+ * stick it all into .data:
+ */
+ . = ALIGN(16);
+ _bss = .;
+ *(.sbss)
+ *(.scommon)
+ *(.dynbss)
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+ . = ALIGN(16);
+ _bss_end = .;
+ _edata = .;
+ }
+ .rela.dyn : { *(.rela.dyn) }
+ .rela.plt : { *(.rela.plt) }
+ .rela.got : { *(.rela.got) }
+ .rela.data : { *(.rela.data) *(.rela.data*) }
+ _data_size = . - _etext;
+
+ . = ALIGN(4096);
+ .dynsym : { *(.dynsym) }
+ . = ALIGN(4096);
+ .dynstr : { *(.dynstr) }
+ . = ALIGN(4096);
+ .note.gnu.build-id : { *(.note.gnu.build-id) }
+ /DISCARD/ : {
+ *(.rel.reloc)
+ *(.eh_frame)
+ *(.note.GNU-stack)
+ }
+ .comment 0 : { *(.comment) }
+}
--
To view, visit https://review.haiku-os.org/c/haiku/+/2256
To unsubscribe, or for help writing mail filters, visit
https://review.haiku-os.org/settings
Gerrit-Project: haiku
Gerrit-Branch: master
Gerrit-Change-Id: Ib03de81e2b562e693987b86d7b4318209fb1c792
Gerrit-Change-Number: 2256
Gerrit-PatchSet: 1
Gerrit-Owner: Alex von Gluck IV <kallisti5@xxxxxxxxxxx>
Gerrit-MessageType: newchange