[haiku-commits] BRANCH mmu_man-github.sam460ex - src/apps/resourceedit src/servers/keystore data/artwork/icons src/system/boot/platform/u-boot/arch/ppc src/kits/app

  • From: mmu_man-github.sam460ex <community@xxxxxxxxxxxx>
  • To: haiku-commits@xxxxxxxxxxxxx
  • Date: Sat, 9 Mar 2013 04:00:35 +0100 (CET)

added 2 changesets to branch 'refs/remotes/mmu_man-github/sam460ex'
old head: 0acc7a44b63a0a1351ac2ed837bfc14a98705422
new head: f106fe34d2a165be37dad87c27e0ccf3193da2bf
overview: https://github.com/mmuman/haiku/compare/0acc7a4...f106fe3

----------------------------------------------------------------------------

af63ede: U-Boot: PPC: Implement Book-E MMU support in bootloader
  
  Unlike Classic PPC, Book-E CPUs have no hardware page-table walk, only
  a limited set of software-managed TLB entries. Also, translation is
  never disabled, even when page-faulting.
  
  The Linux Book-E port pins part of the RAM in one or more TLB entries
  and allocates kernel memory from it for the kernel itself, the
  exception vectors, and the page tables.
  cf. http://kernel.org/doc/ols/2003/ols2003-pages-340-350.pdf
  
  We take a similar approach, but instead of using a tree-like page
  directory we reserve a large part of this mapped range for a hashed
  page table similar to the Classic PPC one, so that code can later be
  factored out and shared. The kernel and boot modules will also be
  allocated there, and the rest should be used as SLAB areas.
  
  Note that doing so does not allow implementing proper permissions for
  the areas allocated there, since they are all covered by a single
  pinned TLB entry.
  
  Also note that Book-E does not standardize the MMU implementation
  itself. For now only the AMCC440-type MMU is supported; this will need
  to be cleaned up later on.

f106fe3: Merge branch 'master' into sam460ex

                                          [ François Revol <revol@xxxxxxx> ]

----------------------------------------------------------------------------

150 files changed, 8766 insertions(+), 1826 deletions(-)
build/jam/BuildSetup                             |    6 +-
build/jam/HaikuImage                             |    4 +-
data/artwork/icons/App_ResourceEdit              |  Bin 0 -> 23980 bytes
.../add-ons/disk_systems/intel/fi.catkeys        |    4 +-
.../add-ons/disk_systems/ntfs/fi.catkeys         |    2 +
.../media/media-add-ons/multi_audio/fi.catkeys   |   34 +
data/catalogs/apps/drivesetup/de.catkeys         |    3 +-
data/catalogs/apps/drivesetup/fi.catkeys         |   21 +-
data/catalogs/apps/drivesetup/fr.catkeys         |    5 +-
data/catalogs/apps/firstbootprompt/fi.catkeys    |    3 +-
data/catalogs/apps/fontdemo/fi.catkeys           |    3 +-
data/catalogs/apps/launchbox/de.catkeys          |    3 +-
data/catalogs/apps/launchbox/hu.catkeys          |    3 +-
data/catalogs/apps/launchbox/ja.catkeys          |    3 +-
data/catalogs/apps/launchbox/sv.catkeys          |    3 +-
data/catalogs/apps/terminal/fi.catkeys           |    4 +-
data/catalogs/apps/terminal/pl.catkeys           |    3 +-
data/catalogs/apps/terminal/sv.catkeys           |    3 +-
data/catalogs/apps/webpositive/de.catkeys        |    7 +-
data/catalogs/apps/webpositive/hu.catkeys        |    7 +-
data/catalogs/apps/webpositive/ja.catkeys        |   37 +-
data/catalogs/apps/webpositive/sv.catkeys        |    7 +-
data/catalogs/kits/tracker/fi.catkeys            |    3 +-
data/catalogs/preferences/network/de.catkeys     |    7 +-
data/catalogs/preferences/network/hu.catkeys     |    7 +-
data/catalogs/preferences/network/ja.catkeys     |    7 +-
data/catalogs/preferences/network/sv.catkeys     |    7 +-
.../preferences/notifications/fi.catkeys         |    3 +-
data/catalogs/preferences/shortcuts/fi.catkeys   |    3 +-
data/catalogs/preferences/time/fi.catkeys        |    6 +-
docs/user/storage/NodeMonitor.dox                |   46 +-
docs/user/storage/VolumeRoster.dox               |  131 ++
headers/os/app/Key.h                             |  115 ++
headers/os/app/KeyStore.h                        |  103 ++
headers/os/interface/Menu.h                      |    1 +
headers/os/storage/VolumeRoster.h                |    8 +
headers/private/app/KeyStoreDefs.h               |   48 +
headers/private/interface/ColumnListView.h       |    4 +-
src/add-ons/accelerants/radeon_hd/display.cpp    |   82 +-
src/add-ons/accelerants/radeon_hd/encoder.cpp    |    4 -
src/add-ons/decorators/BeDecorator/Jamfile       |    5 +
src/add-ons/decorators/MacDecorator/Jamfile      |    4 +
src/add-ons/decorators/WinDecorator/Jamfile      |    5 +
.../network/wlan/iprowifi4965/dev/iwn/if_iwn.c   |    1 +
src/apps/Jamfile                                 |    2 +-
src/apps/activitymonitor/ActivityWindow.cpp      |   26 +
src/apps/activitymonitor/ActivityWindow.h        |    7 +-
src/apps/deskbar/ExpandoMenuBar.cpp              |  134 +-
src/apps/deskbar/ExpandoMenuBar.h                |    2 +-
src/apps/deskbar/StatusView.cpp                  |    7 +-
src/apps/deskbar/TimeView.cpp                    |    4 +-
src/apps/deskbar/TimeView.h                      |    8 +-
src/apps/resourceedit/Constants.h                |   53 +
src/apps/resourceedit/DefaultTypes.cpp           |   47 +
src/apps/resourceedit/DefaultTypes.h             |   69 +
src/apps/resourceedit/EditWindow.cpp             |  163 +++
src/apps/resourceedit/EditWindow.h               |   53 +
src/apps/resourceedit/Jamfile                    |   54 +
src/apps/resourceedit/MainWindow.cpp             | 1212 ++++++++++++++++++
src/apps/resourceedit/MainWindow.h               |  186 +++
src/apps/resourceedit/ResourceEdit.cpp           |  152 +++
src/apps/resourceedit/ResourceEdit.h             |   47 +
src/apps/resourceedit/ResourceEdit.rdef          |  136 ++
src/apps/resourceedit/ResourceListView.cpp       |   60 +
src/apps/resourceedit/ResourceListView.h         |   27 +
src/apps/resourceedit/ResourceRow.cpp            |  153 +++
src/apps/resourceedit/ResourceRow.h              |   46 +
src/apps/resourceedit/SettingsFile.cpp           |   56 +
src/apps/resourceedit/SettingsFile.h             |   33 +
src/apps/resourceedit/SettingsWindow.cpp         |  120 ++
src/apps/resourceedit/SettingsWindow.h           |   48 +
src/apps/resourceedit/edits/AppFlagsEdit.cpp     |    4 +
src/apps/resourceedit/edits/AppFlagsEdit.h       |    4 +
src/apps/resourceedit/edits/BooleanEdit.cpp      |   71 +
src/apps/resourceedit/edits/BooleanEdit.h        |   30 +
src/apps/resourceedit/edits/EditView.cpp         |   52 +
src/apps/resourceedit/edits/EditView.h           |   31 +
src/apps/resourceedit/edits/NormalEdit.cpp       |   64 +
src/apps/resourceedit/edits/NormalEdit.h         |   31 +
src/apps/resourceedit/interface/ImageButton.cpp  |   67 +
src/apps/resourceedit/interface/ImageButton.h    |   32 +
src/apps/resourceedit/main.cpp                   |   17 +
.../settings/GenericSettingsView.cpp             |   60 +
.../resourceedit/settings/GenericSettingsView.h  |   37 +
src/apps/resourceedit/support/UndoContext.cpp    |  175 +++
src/apps/resourceedit/support/UndoContext.h      |   55 +
src/apps/showimage/Jamfile                       |    1 -
src/apps/showimage/ShowImageUndo.cpp             |   93 --
src/apps/showimage/ShowImageUndo.h               |   63 -
src/apps/showimage/ShowImageView.cpp             |   52 +-
src/apps/showimage/ShowImageView.h               |    3 -
src/apps/showimage/ShowImageWindow.cpp           |   14 -
src/bin/Jamfile                                  |    1 +
src/bin/keystore/Jamfile                         |    6 +
src/bin/keystore/keystore.cpp                    |  459 +++++++
src/bin/keystore/keystore.rdef                   |    2 +
src/kits/app/Jamfile                             |    4 +
src/kits/app/Key.cpp                             |  342 +++++
src/kits/app/KeyStore.cpp                        |  434 +++++++
[ *** stats truncated: 51 lines dropped *** ]

############################################################################

Commit:      af63ede7a1fd24b4713800cdc7aabb7ae3e09a55
Author:      François Revol <revol@xxxxxxx>
Date:        Sat Mar  9 02:33:12 2013 UTC

U-Boot: PPC: Implement Book-E MMU support in bootloader

Unlike Classic PPC, Book-E CPUs have no hardware page-table walk, only
a limited set of software-managed TLB entries. Also, translation is
never disabled, even when page-faulting.

The Linux Book-E port pins part of the RAM in one or more TLB entries
and allocates kernel memory from it for the kernel itself, the
exception vectors, and the page tables.
cf. http://kernel.org/doc/ols/2003/ols2003-pages-340-350.pdf

We take a similar approach, but instead of using a tree-like page
directory we reserve a large part of this mapped range for a hashed
page table similar to the Classic PPC one, so that code can later be
factored out and shared. The kernel and boot modules will also be
allocated there, and the rest should be used as SLAB areas.

Note that doing so does not allow implementing proper permissions for
the areas allocated there, since they are all covered by a single
pinned TLB entry.

Also note that Book-E does not standardize the MMU implementation
itself. For now only the AMCC440-type MMU is supported; this will need
to be cleaned up later on.
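
For readers unfamiliar with the hashed page table mentioned above: on
Classic PPC the hardware looks translations up in a hash table (the HTAB)
rather than walking a tree, and that is the structure this loader reserves
space for. The sketch below is a simplified illustration only; it is not
code from this commit, the helper name and masking are made up, and the
authoritative bit layout is in the 32-bit PowerPC Programming Environments
manual.

// Simplified sketch of the Classic PPC (HTAB) primary hash that the
// reserved page table is meant to hold. Illustrative only; names, types
// and masks are assumptions, not Haiku code.
#include <stdint.h>

static inline uint32_t
primary_pteg_index(uint32_t vsid, uint32_t effectiveAddress, uint32_t htabMask)
{
	// page index: the 4 kB page number within the 256 MB segment
	// (bits 4..19 of the effective address)
	uint32_t pageIndex = (effectiveAddress >> 12) & 0xffff;

	// primary hash: low 19 bits of the VSID XORed with the page index
	uint32_t hash = (vsid & 0x7ffff) ^ pageIndex;

	// htabMask limits how many of the upper hash bits are used, which is
	// what ties the table size to the amount of RAM (compare
	// suggested_page_table_size() added by this commit); the resulting
	// index selects a group of 8 page table entries
	return hash & htabMask;
}

Roughly speaking, a translation that finds no free slot in its primary
group (nor in the secondary group derived from the complemented hash) is
what the message means by running out of page table entries when there
are too many collisions.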

----------------------------------------------------------------------------

diff --git a/src/system/boot/platform/u-boot/arch/ppc/Jamfile b/src/system/boot/platform/u-boot/arch/ppc/Jamfile
index ba93a44..639e56f 100644
--- a/src/system/boot/platform/u-boot/arch/ppc/Jamfile
+++ b/src/system/boot/platform/u-boot/arch/ppc/Jamfile
@@ -10,6 +10,11 @@ UseLibraryHeaders [ FDirName libfdt ] ;
 
 SubDirC++Flags -fno-rtti ;
 
+BootMergeObject boot_platform_u-boot_ppc_amcc440.o :
+       arch_mmu_amcc440.cpp
+       : -fno-pic -mcpu=440
+;
+
 BootMergeObject boot_platform_u-boot_ppc.o :
        # must come first to have _start_* at correct locations
        shell.S
@@ -20,6 +25,7 @@ BootMergeObject boot_platform_u-boot_ppc.o :
        arch_cpu.cpp
        #mmu.cpp
        : -fno-pic
+       : boot_platform_u-boot_ppc_amcc440.o
 ;
 
 SEARCH on [ FGristFiles arch_cpu_asm.S ]
diff --git a/src/system/boot/platform/u-boot/arch/ppc/arch_cpu.cpp b/src/system/boot/platform/u-boot/arch/ppc/arch_cpu.cpp
index 04ecebb..b292d6b 100644
--- a/src/system/boot/platform/u-boot/arch/ppc/arch_cpu.cpp
+++ b/src/system/boot/platform/u-boot/arch/ppc/arch_cpu.cpp
@@ -42,6 +42,10 @@ extern void *gFDT;
 #endif
 
 
+// FIXME: this is ugly; introduce a cpu type in kernel args
+bool gIs440 = false;
+
+
 static status_t
 enumerate_cpus(void)
 {
@@ -204,6 +208,8 @@ check_cpu_features()
        if (is_460)
                is_440 = true;
 
+       gIs440 = is_440;
+
        // some cpu-dependent tweaking
 
        if (is_440) {
diff --git a/src/system/boot/platform/u-boot/arch/ppc/arch_mmu.cpp b/src/system/boot/platform/u-boot/arch/ppc/arch_mmu.cpp
index 640cf89..b58cb7f 100644
--- a/src/system/boot/platform/u-boot/arch/ppc/arch_mmu.cpp
+++ b/src/system/boot/platform/u-boot/arch/ppc/arch_mmu.cpp
@@ -26,131 +26,90 @@
 
 #include <string.h>
 
-int32 of_address_cells(int package);
-int32 of_size_cells(int package);
-
-//#define TRACE_MMU
-#ifdef TRACE_MMU
-#      define TRACE(x) dprintf x
-#else
-#      define TRACE(x) ;
-#endif
-
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-#define TRACE_MEMORY_MAP
-       // Define this to print the memory map to serial debug,
-       // You also need to define ENABLE_SERIAL in serial.cpp
-       // for output to work.
-
-#ifdef __ARM__
-
-
-/*
-TODO:
-       -recycle bit!
+/*!    This implements boot loader mmu support for Book-E PowerPC,
+       which only support a limited number of TLB and no hardware page table walk,
+       and does not standardize at how to use the mmu, requiring vendor-specific
+       code.
+
+       Like Linux, we pin one of the TLB entries to a fixed translation,
+       however we use it differently.
+       cf. http://kernel.org/doc/ols/2003/ols2003-pages-340-350.pdf
+
+       This translation uses a single large page (16 or 256MB are possible) which
+       directly maps the begining of the RAM.
+       We use it as a linear space to allocate from at boot time,
+       loading the kernel and modules into it, and other required data.
+       Near the end we reserve a page table (it doesn't need to be aligned),
+       but unlike Linux we use the same globally hashed page table that is
+       implemented by Classic PPC, to allow reusing code if possible, and also
+       to limit fragmentation which would occur by using a tree-based page table.
+       However this means we might actually run out of page table entries in case
+       of too many collisions.
+
+       The kernel will then create areas to cover this already-mapped space.
+       This also means proper permission bits (RWX) will not be applicable to
+       separate areas which are enclosed by this mapping.
+
+       We put the kernel stack at the end of the mapping so that the guard page is
+       outsite and thus unmapped. (we don't support SMP)
 */
 
 /*!    The (physical) memory layout of the boot loader is currently as follows:
-        0x00000000                     u-boot (run from NOR flash)
-        0xa0000000                     u-boot stuff like kernel arguments afaik
-        0xa0100000 - 0xa0ffffff        boot.tgz (up to 15MB probably never needed so big...)
-        0xa1000000 - 0xa1ffffff        pagetables
-        0xa2000000 - ?                 code (up to 1MB)
-        0xa2100000                     boot loader heap / free physical memory
+        0x00000000                     kernel
+        0x00400000                     ...modules
+
+        (at least on the Sam460ex U-Boot; we'll need to accomodate other setups)
+        0x01000000                     boot loader
+        0x01800000                     Flattened Device Tree
+        0x01900000                     boot.tgz (= ramdisk)
+        0x02000000                     boot loader uimage
+
+         
+                                               boot loader heap (should be discarded later on)
+        ... 256M-Kstack        page hash table
+        ... 256M                       kernel stack
+                                               kernel stack guard page
 
        The kernel is mapped at KERNEL_BASE, all other stuff mapped by the
        loader (kernel args, modules, driver settings, ...) comes after
-       0x80020000 which means that there is currently only 2 MB reserved for
-       the kernel itself (see kMaxKernelSize).
+       0x80040000 which means that there is currently only 4 MB reserved for
+       the kernel itself (see kMaxKernelSize). FIXME: downsize kernel_ppc
 */
 
 
-/*
-*defines a block in memory
-*/
-struct memblock {
-       const char name[16];
-               // the name will be used for debugging etc later perhaps...
-       addr_t  start;
-               // start of the block
-       addr_t  end;
-               // end of the block
-       uint32  flags;
-               // which flags should be applied (device/normal etc..)
-};
-
-
-static struct memblock LOADER_MEMORYMAP[] = {
-       {
-               "devices",
-               DEVICE_BASE,
-               DEVICE_BASE + DEVICE_SIZE - 1,
-               MMU_L2_FLAG_B,
-       },
-       {
-               "RAM_loader", // 1MB loader
-               SDRAM_BASE + 0,
-               SDRAM_BASE + 0x0fffff,
-               MMU_L2_FLAG_C,
-       },
-       {
-               "RAM_pt", // Page Table 1MB
-               SDRAM_BASE + 0x100000,
-               SDRAM_BASE + 0x1FFFFF,
-               MMU_L2_FLAG_C,
-       },
-       {
-               "RAM_free", // 16MB free RAM (more but we don't map it 
automaticaly)
-               SDRAM_BASE + 0x0200000,
-               SDRAM_BASE + 0x11FFFFF,
-               MMU_L2_FLAG_C,
-       },
-       {
-               "RAM_stack", // stack
-               SDRAM_BASE + 0x1200000,
-               SDRAM_BASE + 0x2000000,
-               MMU_L2_FLAG_C,
-       },
-       {
-               "RAM_initrd", // stack
-               SDRAM_BASE + 0x2000000,
-               SDRAM_BASE + 0x2500000,
-               MMU_L2_FLAG_C,
-       },
+int32 of_address_cells(int package);
+int32 of_size_cells(int package);
 
-#ifdef FB_BASE
-       {
-               "framebuffer", // 2MB framebuffer ram
-               FB_BASE,
-               FB_BASE + FB_SIZE - 1,
-               MMU_L2_FLAG_AP_RW|MMU_L2_FLAG_C,
-       },
+extern bool gIs440;
+// XXX:use a base class for Book-E support?
+extern status_t arch_mmu_setup_pinned_tlb_amcc440(phys_addr_t totalRam,
+       size_t &tableSize, size_t &tlbSize);
+
+#define TRACE_MMU
+#ifdef TRACE_MMU
+#      define TRACE(x) dprintf x
+#else
+#      define TRACE(x) ;
 #endif
-};
 
+#define TRACE_MEMORY_MAP
+       // Define this to print the memory map to serial debug.
 
-//static const uint32 kDefaultPageTableFlags = MMU_FLAG_READWRITE;
-       // not cached not buffered, R/W
-static const size_t kMaxKernelSize = 0x200000;         // 2 MB for the kernel
+static const size_t kMaxKernelSize = 0x400000;         // 4 MB for the kernel
 
-static addr_t sNextPhysicalAddress = 0; //will be set by mmu_init
+static addr_t sNextPhysicalAddress = kMaxKernelSize; //will be set by mmu_init
 static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
-static addr_t sMaxVirtualAddress = KERNEL_BASE + kMaxKernelSize;
-
-static addr_t sNextPageTableAddress = 0;
-//the page directory is in front of the pagetable
-static uint32 kPageTableRegionEnd = 0;
+//static addr_t sMaxVirtualAddress = KERNEL_BASE + kMaxKernelSize;
 
 // working page directory and page table
-static uint32 *sPageDirectory = 0 ;
-//page directory has to be on a multiple of 16MB for
-//some arm processors
+static void *sPageTable = 0 ;
 
 
 static addr_t
 get_next_virtual_address(size_t size)
 {
        addr_t address = sNextVirtualAddress;
+       sNextPhysicalAddress += size;
        sNextVirtualAddress += size;
 
        return address;
@@ -158,395 +117,27 @@ get_next_virtual_address(size_t size)
 
 
 static addr_t
-get_next_virtual_address_alligned (size_t size, uint32 mask)
-{
-       addr_t address = (sNextVirtualAddress) & mask;
-       sNextVirtualAddress = address + size;
-
-       return address;
-}
-
-
-static addr_t
 get_next_physical_address(size_t size)
 {
        addr_t address = sNextPhysicalAddress;
        sNextPhysicalAddress += size;
+       sNextVirtualAddress += size;
 
        return address;
 }
 
 
-static addr_t
-get_next_physical_address_alligned(size_t size, uint32 mask)
-{
-       addr_t address = sNextPhysicalAddress & mask;
-       sNextPhysicalAddress = address + size;
-
-       return address;
-}
-
-
-static addr_t
-get_next_virtual_page(size_t pagesize)
-{
-       return get_next_virtual_address_alligned(pagesize, 0xffffffc0);
-}
-
-
-static addr_t
-get_next_physical_page(size_t pagesize)
-{
-       return get_next_physical_address_alligned(pagesize, 0xffffffc0);
-}
-
-
-/*
- * Set translation table base
- */
-void
-mmu_set_TTBR(uint32 ttb)
-{
-       ttb &= 0xffffc000;
-       asm volatile("MCR p15, 0, %[adr], c2, c0, 0"::[adr] "r" (ttb));
-}
-
-
-/*
- * Flush the TLB
- */
-void
-mmu_flush_TLB()
-{
-       uint32 value = 0;
-       asm volatile("MCR p15, 0, %[c8format], c8, c7, 0"::[c8format] "r" 
(value));
-}
-
-
-/*
- * Read MMU Control Register
- */
-uint32
-mmu_read_C1()
-{
-       uint32 controlReg = 0;
-       asm volatile("MRC p15, 0, %[c1out], c1, c0, 0":[c1out] "=r" 
(controlReg));
-       return controlReg;
-}
-
-
-/*
- * Write MMU Control Register
- */
-void
-mmu_write_C1(uint32 value)
-{
-       asm volatile("MCR p15, 0, %[c1in], c1, c0, 0"::[c1in] "r" (value));
-}
-
-
-void
-mmu_write_DACR(uint32 value)
-{
-       asm volatile("MCR p15, 0, %[c1in], c3, c0, 0"::[c1in] "r" (value));
-}
-
-
-static uint32 *
-get_next_page_table(uint32 type)
-{
-       TRACE(("get_next_page_table, sNextPageTableAddress %p, 
kPageTableRegionEnd "
-               "%p, type 0x" B_PRIX32 "\n", sNextPageTableAddress,
-               kPageTableRegionEnd, type));
-
-       size_t size = 0;
-       switch(type) {
-               case MMU_L1_TYPE_COARSE:
-               default:
-                       size = 1024;
-                       break;
-               case MMU_L1_TYPE_FINE:
-                       size = 4096;
-                       break;
-               case MMU_L1_TYPE_SECTION:
-                       size = 16384;
-                       break;
-       }
-
-       addr_t address = sNextPageTableAddress;
-       if (address >= kPageTableRegionEnd) {
-               TRACE(("outside of pagetableregion!\n"));
-               return (uint32 *)get_next_physical_address_alligned(size, 
0xffffffc0);
-       }
-
-       sNextPageTableAddress += size;
-       return (uint32 *)address;
-}
-
-
-void
-init_page_directory()
-{
-       TRACE(("init_page_directory\n"));
-       uint32 smalltype;
-
-       // see if subpages disabled
-       if (mmu_read_C1() & (1<<23))
-               smalltype = MMU_L2_TYPE_SMALLNEW;
-       else
-               smalltype = MMU_L2_TYPE_SMALLEXT;
-
-       gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;
-
-       // clear out the pgdir
-       for (uint32 i = 0; i < 4096; i++)
-       sPageDirectory[i] = 0;
-
-       uint32 *pageTable = NULL;
-       for (uint32 i = 0; i < ARRAY_SIZE(LOADER_MEMORYMAP);i++) {
-
-               pageTable = get_next_page_table(MMU_L1_TYPE_COARSE);
-               TRACE(("BLOCK: %s START: %lx END %lx\n", 
LOADER_MEMORYMAP[i].name,
-                       LOADER_MEMORYMAP[i].start, LOADER_MEMORYMAP[i].end));
-               addr_t pos = LOADER_MEMORYMAP[i].start;
-
-               int c = 0;
-               while (pos < LOADER_MEMORYMAP[i].end) {
-                       pageTable[c] = pos |  LOADER_MEMORYMAP[i].flags | 
smalltype;
-
-                       c++;
-                       if (c > 255) { // we filled a pagetable => we need a 
new one
-                               // there is 1MB per pagetable so:
-                               sPageDirectory[VADDR_TO_PDENT(pos)]
-                                       = (uint32)pageTable | 
MMU_L1_TYPE_COARSE;
-                               pageTable = 
get_next_page_table(MMU_L1_TYPE_COARSE);
-                               c = 0;
-                       }
-
-                       pos += B_PAGE_SIZE;
-               }
-
-               if (c > 0) {
-                       sPageDirectory[VADDR_TO_PDENT(pos)]
-                               = (uint32)pageTable | MMU_L1_TYPE_COARSE;
-               }
-       }
-
-       mmu_flush_TLB();
-
-       /* set up the translation table base */
-       mmu_set_TTBR((uint32)sPageDirectory);
-
-       mmu_flush_TLB();
-
-       /* set up the domain access register */
-       mmu_write_DACR(0xFFFFFFFF);
-
-       /* turn on the mmu */
-       mmu_write_C1(mmu_read_C1() | 0x1);
-}
-
-
-/*!     Adds a new page table for the specified base address */
-static void
-add_page_table(addr_t base)
-{
-       TRACE(("add_page_table(base = %p)\n", (void *)base));
-
-       // Get new page table and clear it out
-       uint32 *pageTable = get_next_page_table(MMU_L1_TYPE_COARSE);
-/*
-       if (pageTable > (uint32 *)(8 * 1024 * 1024)) {
-               panic("tried to add page table beyond the indentity mapped 8 MB 
"
-                       "region\n");
-       }
-*/
-       for (int32 i = 0; i < 256; i++)
-               pageTable[i] = 0;
-
-       // put the new page table into the page directory
-       sPageDirectory[VADDR_TO_PDENT(base)]
-               = (uint32)pageTable | MMU_L1_TYPE_COARSE;
-}
-
-
-/*!    Creates an entry to map the specified virtualAddress to the given
-       physicalAddress.
-       If the mapping goes beyond the current page table, it will allocate
-       a new one. If it cannot map the requested page, it panics.
-*/
-static void
-map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
-{
-       TRACE(("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress,
-               physicalAddress));
-
-       if (virtualAddress < KERNEL_BASE) {
-               panic("map_page: asked to map invalid page %p!\n",
-                       (void *)virtualAddress);
-       }
-
-       if (virtualAddress >= sMaxVirtualAddress) {
-               // we need to add a new page table
-               add_page_table(sMaxVirtualAddress);
-               sMaxVirtualAddress += B_PAGE_SIZE * 256;
-
-               if (virtualAddress >= sMaxVirtualAddress) {
-                       panic("map_page: asked to map a page to %p\n",
-                               (void *)virtualAddress);
-               }
-       }
-
-       physicalAddress &= ~(B_PAGE_SIZE - 1);
-
-       // map the page to the correct page table
-       uint32 *pageTable
-               = (uint32 *)(sPageDirectory[VADDR_TO_PDENT(virtualAddress)]
-                       & ARM_PDE_ADDRESS_MASK);
-
-       TRACE(("map_page: pageTable 0x%lx\n",
-               sPageDirectory[VADDR_TO_PDENT(virtualAddress)] & 
ARM_PDE_ADDRESS_MASK));
-
-       if (pageTable == NULL) {
-               add_page_table(virtualAddress);
-               pageTable = (uint32 
*)(sPageDirectory[VADDR_TO_PDENT(virtualAddress)]
-                       & ARM_PDE_ADDRESS_MASK);
-       }
-
-       uint32 tableEntry = VADDR_TO_PTENT(virtualAddress);
-
-       TRACE(("map_page: inserting pageTable %p, tableEntry %ld, 
physicalAddress "
-               "%p\n", pageTable, tableEntry, physicalAddress));
-
-       pageTable[tableEntry] = physicalAddress | flags;
-
-       mmu_flush_TLB();
-
-       TRACE(("map_page: done\n"));
-}
-
-
 //     #pragma mark -
 
 
 extern "C" addr_t
 mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
 {
-       addr_t address = sNextVirtualAddress;
-       addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);
-
-       physicalAddress -= pageOffset;
-
-       for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
-               map_page(get_next_virtual_page(B_PAGE_SIZE), physicalAddress + 
offset,
-                       flags);
-       }
-
-       return address + pageOffset;
-}
-
-
-static void
-unmap_page(addr_t virtualAddress)
-{
-       TRACE(("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));
-
-       if (virtualAddress < KERNEL_BASE) {
-               panic("unmap_page: asked to unmap invalid page %p!\n",
-                       (void *)virtualAddress);
-       }
-
-       // unmap the page from the correct page table
-       uint32 *pageTable
-               = (uint32 *)(sPageDirectory[VADDR_TO_PDENT(virtualAddress)]
-                       & ARM_PDE_ADDRESS_MASK);
-
-       pageTable[VADDR_TO_PTENT(virtualAddress)] = 0;
-
-       mmu_flush_TLB();
-}
-
-
-extern "C" void *
-mmu_allocate(void *virtualAddress, size_t size)
-{
-       TRACE(("mmu_allocate: requested vaddr: %p, next free vaddr: 0x%lx, 
size: "
-               "%ld\n", virtualAddress, sNextVirtualAddress, size));
-
-       size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
-               // get number of pages to map
-
-       if (virtualAddress != NULL) {
-               // This special path is almost only useful for loading the
-               // kernel into memory; it will only allow you to map the
-               // 'kMaxKernelSize' bytes following the kernel base address.
-               // Also, it won't check for already mapped addresses, so
-               // you better know why you are here :)
-               addr_t address = (addr_t)virtualAddress;
-
-               // is the address within the valid range?
-               if (address < KERNEL_BASE
-                       || address + size >= KERNEL_BASE + kMaxKernelSize) {
-                       TRACE(("mmu_allocate in illegal range\n address: %lx"
-                               "  KERNELBASE: %lx KERNEL_BASE + 
kMaxKernelSize: %lx"
-                               "  address + size : %lx \n", (uint32)address, 
KERNEL_BASE,
-                               KERNEL_BASE + kMaxKernelSize, (uint32)(address 
+ size)));
-                       return NULL;
-               }
-               for (uint32 i = 0; i < size; i++) {
-                       map_page(address, get_next_physical_page(B_PAGE_SIZE),
-                               kDefaultPageFlags);
-                       address += B_PAGE_SIZE;
-               }
-
-               return virtualAddress;
-       }
-
-       void *address = (void *)sNextVirtualAddress;
-
-       for (uint32 i = 0; i < size; i++) {
-               map_page(get_next_virtual_page(B_PAGE_SIZE),
-                       get_next_physical_page(B_PAGE_SIZE), kDefaultPageFlags);
-       }
-
-       return address;
+       panic("WRITEME");
+       return 0;
 }
 
 
-/*!    This will unmap the allocated chunk of memory from the virtual
-       address space. It might not actually free memory (as its implementation
-       is very simple), but it might.
-*/
-extern "C" void
-mmu_free(void *virtualAddress, size_t size)
-{
-       TRACE(("mmu_free(virtualAddress = %p, size: %ld)\n", virtualAddress, 
size));
-
-       addr_t address = (addr_t)virtualAddress;
-       size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
-               // get number of pages to map
-
-       // is the address within the valid range?
-       if (address < KERNEL_BASE
-               || address + size >= KERNEL_BASE + kMaxKernelSize) {
-               panic("mmu_free: asked to unmap out of range region (%p, size 
%lx)\n",
-                       (void *)address, size);
-       }
-
-       // unmap all pages within the range
-       for (uint32 i = 0; i < size; i++) {
-               unmap_page(address);
-               address += B_PAGE_SIZE;
-       }
-
-       if (address == sNextVirtualAddress) {
-               // we can actually reuse the virtual address space
-               sNextVirtualAddress -= size;
-       }
-}
-#endif
-
-
 /*!    Sets up the final and kernel accessible GDT and IDT tables.
        BIOS calls won't work any longer after this function has
        been called.
@@ -556,17 +147,7 @@ mmu_init_for_kernel(void)
 {
        TRACE(("mmu_init_for_kernel\n"));
 
-#ifdef __ARM__
-       // save the memory we've physically allocated
-       gKernelArgs.physical_allocated_range[0].size
-               = sNextPhysicalAddress - 
gKernelArgs.physical_allocated_range[0].start;
-
-       // Save the memory we've virtually allocated (for the kernel and other
-       // stuff)
-       gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
-       gKernelArgs.virtual_allocated_range[0].size
-               = sNextVirtualAddress - KERNEL_BASE;
-       gKernelArgs.num_virtual_allocated_ranges = 1;
+       // TODO: remove all U-Boot TLB
 
 #ifdef TRACE_MEMORY_MAP
        {
@@ -574,33 +155,35 @@ mmu_init_for_kernel(void)
 
                dprintf("phys memory ranges:\n");
                for (i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
-                       dprintf("    base 0x%08lx, length 0x%08lx\n",
+                       dprintf("    base 0x%"B_PRIxPHYSADDR
+                               ", length 0x%"B_PRIxPHYSADDR"\n",
                                gKernelArgs.physical_memory_range[i].start,
                                gKernelArgs.physical_memory_range[i].size);
                }
 
                dprintf("allocated phys memory ranges:\n");
                for (i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
-                       dprintf("    base 0x%08lx, length 0x%08lx\n",
+                       dprintf("    base 0x%"B_PRIxPHYSADDR
+                               ", length 0x%"B_PRIxPHYSADDR"\n",
                                gKernelArgs.physical_allocated_range[i].start,
                                gKernelArgs.physical_allocated_range[i].size);
                }
 
                dprintf("allocated virt memory ranges:\n");
                for (i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
-                       dprintf("    base 0x%08lx, length 0x%08lx\n",
+                       dprintf("    base 0x%"B_PRIxPHYSADDR
+                               ", length 0x%"B_PRIxPHYSADDR"\n",
                                gKernelArgs.virtual_allocated_range[i].start,
                                gKernelArgs.virtual_allocated_range[i].size);
                }
        }
 #endif
-#endif
 }
 
 
-//XXX:move this
+//TODO:move this to generic/ ?
 static status_t
-find_physical_memory_ranges(size_t &total)
+find_physical_memory_ranges(phys_addr_t &total)
 {
        int memory = -1;
        int package;
@@ -700,64 +283,62 @@ find_physical_memory_ranges(size_t &total)
 extern "C" void
 mmu_init(void)
 {
+       size_t tableSize, tlbSize;
+       status_t err;
        TRACE(("mmu_init\n"));
 
        // get map of physical memory (fill in kernel_args structure)
 
-       size_t total;
+       phys_addr_t total;
        if (find_physical_memory_ranges(total) != B_OK) {
                dprintf("Error: could not find physical memory ranges!\n");
                return /*B_ERROR*/;
        }
-       dprintf("total physical memory = %" B_PRId32 "MB\n", total / (1024 * 1024));
-
-#ifdef __ARM__
-       mmu_write_C1(mmu_read_C1() & ~((1<<29)|(1<<28)|(1<<0)));
-               // access flag disabled, TEX remap disabled, mmu disabled
+       dprintf("total physical memory = %" B_PRId64 "MB\n", total / (1024 * 1024));
+
+       // XXX: ugly, and wrong, there are several 440 mmu types... FIXME
+       if (gIs440) {
+               err = arch_mmu_setup_pinned_tlb_amcc440(total, tableSize, tlbSize);
+               dprintf("setup_pinned_tlb: 0x%08lx table %zdMB tlb %zdMB\n",
+                       err, tableSize / (1024 * 1024), tlbSize / (1024 * 1024));
+       } else {
+               panic("Unknown MMU type!");
+               return;
+       }
 
-       uint32 highestRAMAddress = SDRAM_BASE;
+       // remember the start of the allocated physical pages
+       gKernelArgs.physical_allocated_range[0].start
+               = gKernelArgs.physical_memory_range[0].start;
+       gKernelArgs.physical_allocated_range[0].size = tlbSize;
+       gKernelArgs.num_physical_allocated_ranges = 1;
 
-       // calculate lowest RAM adress from MEMORYMAP
-       for (uint32 i = 0; i < ARRAY_SIZE(LOADER_MEMORYMAP); i++) {
-               if (strcmp("RAM_free", LOADER_MEMORYMAP[i].name) == 0)
-                       sNextPhysicalAddress = LOADER_MEMORYMAP[i].start;
+       // Save the memory we've virtually allocated (for the kernel and other
+       // stuff)
+       gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
+       gKernelArgs.virtual_allocated_range[0].size
+               = tlbSize + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
+       gKernelArgs.num_virtual_allocated_ranges = 1;
 
-               if (strcmp("RAM_pt", LOADER_MEMORYMAP[i].name) == 0) {
-                       sNextPageTableAddress = LOADER_MEMORYMAP[i].start
-                               + MMU_L1_TABLE_SIZE;
-                       kPageTableRegionEnd = LOADER_MEMORYMAP[i].end;
-                       sPageDirectory = (uint32 *) LOADER_MEMORYMAP[i].start;
-               }
 
-               if (strncmp("RAM_", LOADER_MEMORYMAP[i].name, 4) == 0) {
-                       if (LOADER_MEMORYMAP[i].end > highestRAMAddress)
-                               highestRAMAddress = LOADER_MEMORYMAP[i].end;
-               }
-       }
+       sPageTable = (void *)(tlbSize - tableSize - KERNEL_STACK_SIZE);
+               // we put the page table near the end of the pinned TLB
+       TRACE(("page table at 0x%p to 0x%p\n", sPageTable,
+               (uint8 *)sPageTable + tableSize));
 
-       gKernelArgs.physical_memory_range[0].start = SDRAM_BASE;
-       gKernelArgs.physical_memory_range[0].size = highestRAMAddress - 
SDRAM_BASE;
-       gKernelArgs.num_physical_memory_ranges = 1;
+       // map in a kernel stack
+       gKernelArgs.cpu_kstack[0].start = (addr_t)(tlbSize - KERNEL_STACK_SIZE);
+       gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
+               + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
 
-       gKernelArgs.physical_allocated_range[0].start = SDRAM_BASE;
-       gKernelArgs.physical_allocated_range[0].size = 0;
-       gKernelArgs.num_physical_allocated_ranges = 1;
-               // remember the start of the allocated physical pages
+       TRACE(("kernel stack at 0x%Lx to 0x%Lx\n", gKernelArgs.cpu_kstack[0].start,
+               gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));
 
+#ifdef __ARM__
        init_page_directory();
 
        // map the page directory on the next vpage
        gKernelArgs.arch_args.vir_pgdir = mmu_map_physical_memory(
                (addr_t)sPageDirectory, MMU_L1_TABLE_SIZE, kDefaultPageFlags);
-
-       // map in a kernel stack
-       gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL,
-               KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
-       gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
-               + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
-
-       TRACE(("kernel stack at 0x%lx to 0x%lx\n", gKernelArgs.cpu_kstack[0].start,
-               gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size));
 #endif
 }
 
@@ -769,22 +350,50 @@ extern "C" status_t
 platform_allocate_region(void **_address, size_t size, uint8 protection,
        bool /*exactAddress*/)
 {
-#ifdef __ARM__
-       void *address = mmu_allocate(*_address, size);
+       TRACE(("platform_allocate_region(&%p, %zd)\n", *_address, size));
+
+       //get_next_virtual_address
+       size = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;
+               // roundup to page size for clarity
+
+       if (*_address != NULL) {
+               // This special path is almost only useful for loading the
+               // kernel into memory; it will only allow you to map the
+               // 'kMaxKernelSize' bytes following the kernel base address.
+               // Also, it won't check for already mapped addresses, so
+               // you better know why you are here :)
+               addr_t address = (addr_t)*_address;
+
+               // is the address within the valid range?
+               if (address < KERNEL_BASE
+                       || address + size >= KERNEL_BASE + kMaxKernelSize) {
+                       TRACE(("mmu_allocate in illegal range\n address: %lx"
+                               "  KERNELBASE: %lx KERNEL_BASE + kMaxKernelSize: %lx"
+                               "  address + size : %lx \n", (uint32)address, KERNEL_BASE,
+                               KERNEL_BASE + kMaxKernelSize, (uint32)(address + size)));
+                       return B_ERROR;
+               }
+               TRACE(("platform_allocate_region: allocated %zd bytes at %08lx\n", size,
+                       address));
+
+               return B_OK;
+       }
+
+       void *address = (void *)get_next_virtual_address(size);
        if (address == NULL)
                return B_NO_MEMORY;
 
+       TRACE(("platform_allocate_region: allocated %zd bytes at %p\n", size,
+               address));
        *_address = address;
        return B_OK;
-#else
-       return B_ERROR;
-#endif
 }
 
 
 extern "C" status_t
 platform_free_region(void *address, size_t size)
 {
+       TRACE(("platform_free_region(%p, %zd)\n", address, size));
 #ifdef __ARM__
        mmu_free(address, size);
 #endif
@@ -795,6 +404,7 @@ platform_free_region(void *address, size_t size)
 void
 platform_release_heap(struct stage2_args *args, void *base)
 {
+       //XXX
        // It will be freed automatically, since it is in the
        // identity mapped region, and not stored in the kernel's
        // page tables.
@@ -804,15 +414,12 @@ platform_release_heap(struct stage2_args *args, void *base)
 status_t
 platform_init_heap(struct stage2_args *args, void **_base, void **_top)
 {
-#ifdef __ARM__
-       void *heap = (void *)get_next_physical_address(args->heap_size);
-       if (heap == NULL)
-               return B_NO_MEMORY;
+       // the heap is put right before the pagetable
+       void *heap = (uint8 *)sPageTable - args->heap_size;
+       //FIXME: use phys addresses to allow passing args to U-Boot?
 
        *_base = heap;
        *_top = (void *)((int8 *)heap + args->heap_size);
+       TRACE(("boot heap at 0x%p to 0x%p\n", *_base, *_top));
        return B_OK;
-#else
-       return B_ERROR;
-#endif
 }
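
The bump allocation left in arch_mmu.cpp above deserves a note: because the
pinned TLB entry maps KERNEL_BASE linearly onto the start of RAM,
get_next_virtual_address() and get_next_physical_address() simply advance
both cursors in lockstep instead of mapping pages one by one. A minimal
standalone sketch of that idea follows; the constants are illustrative
placeholders, not Haiku's real values.

// Minimal sketch of the lockstep bump allocation used above. Assumes one
// pinned TLB entry maps kKernelBase linearly onto physical address 0; the
// constants below are illustrative placeholders.
#include <stddef.h>
#include <stdint.h>

static const uint32_t kKernelBase = 0x80000000;	// assumed kernel base
static const size_t kMaxKernelSize = 0x400000;	// 4 MB, as in the diff
static const size_t kPageSize = 4096;

static uint32_t sNextPhysical = kMaxKernelSize;
static uint32_t sNextVirtual = kKernelBase + kMaxKernelSize;

static uint32_t
allocate(size_t size)
{
	// round up to whole pages for clarity
	size = (size + kPageSize - 1) & ~(kPageSize - 1);

	uint32_t address = sNextVirtual;
	sNextVirtual += size;
	sNextPhysical += size;
		// virtual - kKernelBase == physical stays true, so no per-page
		// mapping is needed inside the pinned range

	return address;
}
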
diff --git a/src/system/boot/platform/u-boot/arch/ppc/arch_mmu_amcc440.cpp b/src/system/boot/platform/u-boot/arch/ppc/arch_mmu_amcc440.cpp
new file mode 100644
index 0000000..5077e63
--- /dev/null
+++ b/src/system/boot/platform/u-boot/arch/ppc/arch_mmu_amcc440.cpp
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2004-2008, Axel Dörfler, axeld@xxxxxxxxxxxxxxxx.
+ * Based on code written by Travis Geiselbrecht for NewOS.
+ *
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include "mmu.h"
+
+#include <boot/platform.h>
+#include <boot/stdio.h>
+#include <boot/kernel_args.h>
+#include <boot/stage2.h>
+#include <arch/cpu.h>
+#include <arch/ppc/arch_mmu_amcc440.h>
+#include <arch_kernel.h>
+#include <platform/openfirmware/openfirmware.h>
+#ifdef __ARM__
+#include <arm_mmu.h>
+#endif
+#include <kernel.h>
+
+#include <board_config.h>
+
+#include <OS.h>
+
+#include <string.h>
+
+int32 of_address_cells(int package);
+int32 of_size_cells(int package);
+
+#define TRACE_MMU
+#ifdef TRACE_MMU
+#      define TRACE(x) dprintf x
+#else
+#      define TRACE(x) ;
+#endif
+
+
+/*!    Computes the recommended minimal page table size as
+       described in table 7-22 of the PowerPC "Programming
+       Environment for 32-Bit Microprocessors".
+       The page table size ranges from 64 kB (for 8 MB RAM)
+       to 32 MB (for 4 GB RAM).
+       FIXME: account for larger TLB descriptors for Book-E
+*/
+static size_t
+suggested_page_table_size(phys_addr_t total)
+{
+       uint32 max = 23;
+               // 2^23 == 8 MB
+
+       while (max < 32) {
+               if (total <= (1UL << max))
+                       break;
+
+               max++;
+       }
+
+       return 1UL << (max - 7);
+               // 2^(23 - 7) == 64 kB
+}
+
+
+static void
+read_TLB(int i, uint32 tlb[3], uint8 &pid)
+{
+       //FIXME:read pid too
+       asm volatile(
+               "tlbre %0,%3,0\n"
+               "\ttlbre %1,%3,1\n"
+               "\ttlbre %2,%3,2"
+               :   "=r"(tlb[0]),
+                       "=r"(tlb[1]),
+                       "=r"(tlb[2])
+               :       "r"(i)
+       );
+}
+
+
+static void
+write_TLB(int i, uint32 tlb[3], uint8 pid)
+{
+       //FIXME:write pid too
+       asm volatile(
+               "tlbwe %0,%3,0\n"
+               "\ttlbwe %1,%3,1\n"
+               "\ttlbwe %2,%3,2"
+               : : "r"(tlb[0]),
+                       "r"(tlb[1]),
+                       "r"(tlb[2]),
+                       "r"(i)
+       );
+}
+
+
+static void
+dump_TLBs(void)
+{
+       int i;
+       for (i = 0; i < TLB_COUNT; i++) {
+               uint32 tlb[3];// = { 0, 0, 0 };
+               uint8 pid;
+               read_TLB(i, tlb, pid);
+               dprintf("TLB[%02d]: %08lx %08lx %08lx %02x\n",
+                       i, tlb[0], tlb[1], tlb[2], pid);
+       }
+}
+
+
+status_t
+arch_mmu_setup_pinned_tlb_amcc440(phys_addr_t totalRam, size_t &tableSize,
+       size_t &tlbSize)
+{
+       dump_TLBs();
+       tlb_length tlbLength = TLB_LENGTH_16MB;
+//XXX:totalRam = 4LL*1024*1024*1024;
+
+       size_t suggestedTableSize = suggested_page_table_size(totalRam);
+       dprintf("suggested page table size = %" B_PRIuSIZE "\n",
+               suggestedTableSize);
+
+       tableSize = suggestedTableSize;
+
+       // add 4MB for kernel and some more for modules...
+       tlbSize = tableSize + 8 * 1024 * 1024;
+
+       // round up to realistic TLB lengths, either 16MB or 256MB
+       // the unused space will be filled with SLAB areas
+       if (tlbSize < 16 * 1024 * 1024)
+               tlbSize = 16 * 1024 * 1024;
+       else {
+               tlbSize = 256 * 1024 * 1024;
+               tlbLength = TLB_LENGTH_256MB;
+       }
+
+       uint32 tlb[3];
+       uint8 pid;
+       int i;
+
+       // Make sure the last TLB is free, else we are in trouble
+       // XXX: allow using a different TLB entry?
+       read_TLB(TLB_COUNT - 1, tlb, pid);
+       if ((tlb[0] & TLB_V) != 0) {
+               panic("Last TLB already in use. FIXME.");
+               return B_ERROR;
+       }
+
+       // TODO: remove existing mapping from U-Boot at KERNEL_BASE !!!
+       // (on Sam460ex it's pci mem)
+       // for now we just move it to AS1 which we don't use, until calling
+       // the kernel.
+       // we could probably swap it with our own KERNEL_BASE TLB to call U-Boot
+       // if required, but it'd be quite ugly.
+       for (i = 0; i < TLB_COUNT; i++) {
+               read_TLB(i, tlb, pid);
+               //dprintf("tlb[%d][0] = %08lx\n", i, tlb[0]);
+               // TODO: make the test more complete and correct
+               if ((tlb[0] & 0xfffffc00) == KERNEL_BASE) {
+                       tlb[0] |= 0x100; // AS1
+                       write_TLB(i, tlb, pid);
+                       dprintf("Moved existing translation in TLB[%d] to AS1\n", i);
+               }
+       }
+
+       // pin the last TLB
+       //XXX:also maybe skip the FDT + initrd + loader ?
+       phys_addr_t physBase = gKernelArgs.physical_memory_range[0].start;
+       //TODO:make sure 1st range is large enough?
+       i = TLB_COUNT - 1; // last one
+       pid = 0; // the kernel's PID
+       tlb[0] = (KERNEL_BASE | tlbLength << 4 | TLB_V);
+       tlb[1] = ((physBase & 0xfffffc00) | (physBase >> 32));
+       tlb[2] = (0x0000003f); // user:RWX kernel:RWX
+       write_TLB(i, tlb, pid);
+
+       return B_OK;
+}
+
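
To make the sizing above concrete, the following standalone program (an
illustration, not part of the commit) reproduces the arithmetic of
suggested_page_table_size() and the 16 MB / 256 MB rounding of the pinned
entry: 512 MB of RAM suggests a 4 MB hash table and fits the 16 MB TLB
length, while 2 GB suggests 16 MB and forces the 256 MB length.

// Standalone illustration of the sizing logic above; it mirrors
// suggested_page_table_size() and the TLB-length rounding in
// arch_mmu_setup_pinned_tlb_amcc440(). The 8 MB kernel+modules margin and
// the 16/256 MB lengths are taken from the diff, the rest is illustrative.
#include <stdint.h>
#include <stdio.h>

static size_t
suggested_table_size(uint64_t totalRam)
{
	uint32_t max = 23;
		// 2^23 == 8 MB of RAM -> 64 kB table
	while (max < 32 && totalRam > (1ULL << max))
		max++;
	return (size_t)1 << (max - 7);
		// i.e. roughly RAM / 128
}

int
main()
{
	static const uint64_t kRamSizes[] = { 512ULL << 20, 2048ULL << 20 };
	for (uint64_t ram : kRamSizes) {
		size_t table = suggested_table_size(ram);
		// add ~8 MB for the kernel and modules, then round up to a
		// realistic pinned TLB length (16 MB or 256 MB)
		size_t pinned = table + 8 * 1024 * 1024;
		pinned = pinned < 16 * 1024 * 1024
			? 16 * 1024 * 1024 : 256 * 1024 * 1024;
		printf("RAM %5llu MB -> table %2zu MB, pinned TLB %3zu MB\n",
			(unsigned long long)(ram >> 20), table >> 20, pinned >> 20);
	}
	return 0;
}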

############################################################################

Commit:      f106fe34d2a165be37dad87c27e0ccf3193da2bf
Author:      François Revol <revol@xxxxxxx>
Date:        Sat Mar  9 02:50:12 2013 UTC

Merge branch 'master' into sam460ex

----------------------------------------------------------------------------

