From f01472a5ffd03b535e8a66bb00d9a7548a0f61bf Mon Sep 17 00:00:00 2001
From: bunnei <bunneidev@gmail.com>
Date: Fri, 1 Sep 2017 23:10:03 -0400
Subject: [PATCH] core: Various changes to support 64-bit addressing.

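Widens sizes and addresses from u32 to u64 across the kernel VM manager
and the memory subsystem, and raises VMManager::MAX_ADDRESS from
0x40000000 to 0x8000000000 (a 39-bit address space). A flat page table
over a 64-bit space would need 1ULL << 52 entries, so the std::array
members of Memory::PageTable are replaced with sparse std::map
containers keyed by page index. UpdatePageTableForVMA(), the fill()
calls in InitMemoryMap(), and GetCurrentPageTablePointers() are
commented out for now, since they assume the flat-array layout; in
particular, the Dynarmic fast-memory optimization fed by
GetCurrentPageTablePointers() needs to be reworked against the sparse
tables.

One caveat of the std::map switch: a lookup such as
current_page_table->attributes[vaddr >> PAGE_BITS] default-inserts a
value-initialized entry for unmapped pages. That still reads as
PageType::Unmapped (the enum's first enumerator), but it grows the map
on every access to an unmapped page. A find()-based helper avoids
this; a minimal sketch (LookupPagePointer is not part of this patch,
only an illustration of the pattern):

    // Returns the host pointer backing vaddr, or nullptr if the page
    // is unmapped, without inserting a new entry into the map.
    static u8* LookupPagePointer(const PageTable& table, VAddr vaddr) {
        const auto it = table.pointers.find(vaddr >> PAGE_BITS);
        return it != table.pointers.end() ? it->second : nullptr;
    }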
---
 src/core/hle/kernel/vm_manager.cpp | 31 +++++++------
 src/core/hle/kernel/vm_manager.h   | 22 +++++----
 src/core/memory.cpp                | 74 ++++++++++++++++--------------
 src/core/memory.h                  | 17 ++++----
 src/core/memory_setup.h            |  6 ++--
 5 files changed, 77 insertions(+), 73 deletions(-)

diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
index cef1f7fa83..f70c325015 100644
--- a/src/core/hle/kernel/vm_manager.cpp
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -56,7 +56,8 @@ void VMManager::Reset() {
     initial_vma.size = MAX_ADDRESS;
     vma_map.emplace(initial_vma.base, initial_vma);
 
-    UpdatePageTableForVMA(initial_vma);
+    //UpdatePageTableForVMA(initial_vma);
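+    // TODO: Re-enable once UpdatePageTableForVMA() understands the sparse page-table layout.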
 }
 
 VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
@@ -69,7 +70,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
 
 ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
                                                           std::shared_ptr<std::vector<u8>> block,
-                                                          size_t offset, u32 size,
+                                                          size_t offset, u64 size,
                                                           MemoryState state) {
     ASSERT(block != nullptr);
     ASSERT(offset + size <= block->size());
@@ -89,7 +90,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
     return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
 }
 
-ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u32 size,
+ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u64 size,
                                                             MemoryState state) {
     ASSERT(memory != nullptr);
 
@@ -107,7 +108,7 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* me
     return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
 }
 
-ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u32 size,
+ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u64 size,
                                                    MemoryState state,
                                                    Memory::MMIORegionPointer mmio_handler) {
     // This is the appropriately sized VMA that will turn into our allocation.
@@ -141,7 +142,7 @@ VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
     return MergeAdjacent(vma_handle);
 }
 
-ResultCode VMManager::UnmapRange(VAddr target, u32 size) {
+ResultCode VMManager::UnmapRange(VAddr target, u64 size) {
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
     VAddr target_end = target + size;
 
@@ -166,7 +167,7 @@ VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission ne
     return MergeAdjacent(iter);
 }
 
-ResultCode VMManager::ReprotectRange(VAddr target, u32 size, VMAPermission new_perms) {
+ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) {
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
     VAddr target_end = target + size;
 
@@ -209,7 +210,7 @@ VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
     return vma_map.erase(iter, iter); // Erases an empty range of elements
 }
 
-ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
+ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u64 size) {
-    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x%8X", size);
-    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x%08X", base);
+    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x%llX", size);
+    ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x%llX", base);
 
@@ -225,8 +226,8 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
         return ERR_INVALID_ADDRESS_STATE;
     }
 
-    u32 start_in_vma = base - vma.base;
-    u32 end_in_vma = start_in_vma + size;
+    u64 start_in_vma = base - vma.base;
+    u64 end_in_vma = start_in_vma + size;
 
     if (end_in_vma > vma.size) {
         // Requested allocation doesn't fit inside VMA
@@ -245,7 +246,7 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
     return MakeResult<VMAIter>(vma_handle);
 }
 
-ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u32 size) {
+ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u64 size) {
-    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x%8X", size);
-    ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x%08X", target);
+    ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x%llX", size);
+    ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x%llX", target);
 
@@ -274,7 +275,7 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u32 size) {
     return MakeResult<VMAIter>(begin_vma);
 }
 
-VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma) {
+VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
     VirtualMemoryArea& old_vma = vma_handle->second;
     VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA
 
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
index 38e0d74d0d..aa2265ce64 100644
--- a/src/core/hle/kernel/vm_manager.h
+++ b/src/core/hle/kernel/vm_manager.h
@@ -63,7 +63,7 @@ struct VirtualMemoryArea {
     /// Virtual base address of the region.
     VAddr base = 0;
     /// Size of the region.
-    u32 size = 0;
+    u64 size = 0;
 
     VMAType type = VMAType::Free;
     VMAPermission permissions = VMAPermission::None;
@@ -109,7 +109,7 @@ public:
      * used.
-     * @note This is the limit used by the New 3DS kernel. Old 3DS used 0x20000000.
+     * @note 0x8000000000 is 2^39, i.e. a 39-bit address space; the old 3DS limit was 0x40000000.
      */
-    static const u32 MAX_ADDRESS = 0x40000000;
+    static const VAddr MAX_ADDRESS = 0x8000000000;
 
     /**
      * A map covering the entirety of the managed address space, keyed by the `base` field of each
@@ -142,7 +142,7 @@ public:
      * @param state MemoryState tag to attach to the VMA.
      */
     ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
-                                        size_t offset, u32 size, MemoryState state);
+                                        size_t offset, u64 size, MemoryState state);
 
     /**
      * Maps an unmanaged host memory pointer at a given address.
@@ -152,7 +152,7 @@ public:
      * @param size Size of the mapping.
      * @param state MemoryState tag to attach to the VMA.
      */
-    ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state);
+    ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u64 size, MemoryState state);
 
     /**
      * Maps a memory-mapped IO region at a given address.
@@ -163,17 +163,17 @@ public:
      * @param state MemoryState tag to attach to the VMA.
      * @param mmio_handler The handler that will implement read and write for this MMIO region.
      */
-    ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state,
+    ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u64 size, MemoryState state,
                                  Memory::MMIORegionPointer mmio_handler);
 
     /// Unmaps a range of addresses, splitting VMAs as necessary.
-    ResultCode UnmapRange(VAddr target, u32 size);
+    ResultCode UnmapRange(VAddr target, u64 size);
 
     /// Changes the permissions of the given VMA.
     VMAHandle Reprotect(VMAHandle vma, VMAPermission new_perms);
 
     /// Changes the permissions of a range of addresses, splitting VMAs as necessary.
-    ResultCode ReprotectRange(VAddr target, u32 size, VMAPermission new_perms);
+    ResultCode ReprotectRange(VAddr target, u64 size, VMAPermission new_perms);
 
     /**
      * Scans all VMAs and updates the page table range of any that use the given vector as backing
@@ -197,19 +197,19 @@ private:
      * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
      * the appropriate error checking.
      */
-    ResultVal<VMAIter> CarveVMA(VAddr base, u32 size);
+    ResultVal<VMAIter> CarveVMA(VAddr base, u64 size);
 
     /**
      * Splits the edges of the given range of non-Free VMAs so that there is a VMA split at each
      * end of the range.
      */
-    ResultVal<VMAIter> CarveVMARange(VAddr base, u32 size);
+    ResultVal<VMAIter> CarveVMARange(VAddr base, u64 size);
 
     /**
      * Splits a VMA in two, at the specified offset.
      * @returns the right side of the split, with the original iterator becoming the left side.
      */
-    VMAIter SplitVMA(VMAIter vma, u32 offset_in_vma);
+    VMAIter SplitVMA(VMAIter vma, u64 offset_in_vma);
 
     /**
      * Checks for and merges the specified VMA with adjacent ones if possible.
diff --git a/src/core/memory.cpp b/src/core/memory.cpp
index 65649d9d76..ed453d0c1f 100644
--- a/src/core/memory.cpp
+++ b/src/core/memory.cpp
@@ -34,7 +34,7 @@ enum class PageType {
 
 struct SpecialRegion {
     VAddr base;
-    u32 size;
+    u64 size;
     MMIORegionPointer handler;
 };
 
@@ -49,7 +49,7 @@ struct PageTable {
-     * Array of memory pointers backing each page. An entry can only be non-null if the
-     * corresponding entry in the `attributes` array is of type `Memory`.
+     * Map of memory pointers backing each mapped page, keyed by page index. An entry may only
+     * be present if the corresponding entry in the `attributes` map is of type `Memory`.
      */
-    std::array<u8*, PAGE_TABLE_NUM_ENTRIES> pointers;
+    std::map<u64, u8*> pointers;
 
     /**
-     * Contains MMIO handlers that back memory regions whose entries in the `attribute` array is of
+     * Contains MMIO handlers that back memory regions whose entries in the `attributes` map are of
@@ -61,13 +61,13 @@ struct PageTable {
-     * Array of fine grained page attributes. If it is set to any value other than `Memory`, then
-     * the corresponding entry in `pointers` MUST be set to null.
+     * Map of fine grained page attributes, keyed by page index. If an entry is set to any value
+     * other than `Memory`, then the corresponding entry in `pointers` MUST be absent.
      */
-    std::array<PageType, PAGE_TABLE_NUM_ENTRIES> attributes;
+    std::map<u64, PageType> attributes;
 
     /**
      * Indicates the number of externally cached resources touching a page that should be
      * flushed before the memory is accessed
      */
-    std::array<u8, PAGE_TABLE_NUM_ENTRIES> cached_res_count;
+    std::map<u64, u8> cached_res_count;
 };
 
 /// Singular page table used for the singleton process
@@ -75,18 +75,19 @@ static PageTable main_page_table;
 /// Currently active page table
 static PageTable* current_page_table = &main_page_table;
 
-std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers() {
-    return &current_page_table->pointers;
-}
+//std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers() {
+//    return &current_page_table->pointers;
+//}
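+// TODO: Dynarmic's fast page-table lookup expects a flat std::array; rework for the sparse map.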
 
-static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
+static void MapPages(u64 base, u64 size, u8* memory, PageType type) {
-    LOG_DEBUG(HW_Memory, "Mapping %p onto %08X-%08X", memory, base * PAGE_SIZE,
-              (base + size) * PAGE_SIZE);
+    LOG_DEBUG(HW_Memory, "Mapping %p onto %llX-%llX", memory, base * PAGE_SIZE,
+              (base + size) * PAGE_SIZE);
 
     RasterizerFlushVirtualRegion(base << PAGE_BITS, size * PAGE_SIZE,
                                  FlushMode::FlushAndInvalidate);
 
-    u32 end = base + size;
+    u64 end = base + size;
     while (base != end) {
-        ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at %08X", base);
+        ASSERT_MSG(base < PAGE_TABLE_NUM_ENTRIES, "out of range mapping at %llX", base);
 
@@ -101,18 +102,19 @@ static void MapPages(u32 base, u32 size, u8* memory, PageType type) {
 }
 
 void InitMemoryMap() {
-    main_page_table.pointers.fill(nullptr);
-    main_page_table.attributes.fill(PageType::Unmapped);
-    main_page_table.cached_res_count.fill(0);
+    //main_page_table.pointers.fill(nullptr);
+    //main_page_table.attributes.fill(PageType::Unmapped);
+    //main_page_table.cached_res_count.fill(0);
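+    // The std::map-based tables start empty, which now stands in for the old fill() defaults.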
 }
 
-void MapMemoryRegion(VAddr base, u32 size, u8* target) {
+void MapMemoryRegion(VAddr base, u64 size, u8* target) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
+    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %llX", size);
+    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %llX", base);
     MapPages(base / PAGE_SIZE, size / PAGE_SIZE, target, PageType::Memory);
 }
 
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler) {
+void MapIoRegion(VAddr base, u64 size, MMIORegionPointer mmio_handler) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
+    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %llX", size);
+    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %llX", base);
     MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Special);
@@ -120,7 +122,7 @@ void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler) {
     current_page_table->special_regions.emplace_back(SpecialRegion{base, size, mmio_handler});
 }
 
-void UnmapRegion(VAddr base, u32 size) {
+void UnmapRegion(VAddr base, u64 size) {
-    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %08X", size);
-    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %08X", base);
+    ASSERT_MSG((size & PAGE_MASK) == 0, "non-page aligned size: %llX", size);
+    ASSERT_MSG((base & PAGE_MASK) == 0, "non-page aligned base: %llX", base);
     MapPages(base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
@@ -222,7 +224,7 @@ void Write(const VAddr vaddr, const T data) {
     PageType type = current_page_table->attributes[vaddr >> PAGE_BITS];
     switch (type) {
     case PageType::Unmapped:
-        LOG_ERROR(HW_Memory, "unmapped Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data,
+        LOG_ERROR(HW_Memory, "unmapped Write%zu 0x%llX @ 0x%llX", sizeof(data) * 8, (u64)data,
                   vaddr);
         return;
     case PageType::Memory:
@@ -304,12 +306,12 @@ u8* GetPhysicalPointer(PAddr address) {
     return vaddr ? GetPointer(*vaddr) : nullptr;
 }
 
-void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta) {
+void RasterizerMarkRegionCached(PAddr start, u64 size, int count_delta) {
     if (start == 0) {
         return;
     }
 
-    u32 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
+    u64 num_pages = ((start + size - 1) >> PAGE_BITS) - (start >> PAGE_BITS) + 1;
     PAddr paddr = start;
 
     for (unsigned i = 0; i < num_pages; ++i, paddr += PAGE_SIZE) {
@@ -368,13 +370,13 @@ void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta) {
     }
 }
 
-void RasterizerFlushRegion(PAddr start, u32 size) {
+void RasterizerFlushRegion(PAddr start, u64 size) {
     if (VideoCore::g_renderer != nullptr) {
         VideoCore::g_renderer->Rasterizer()->FlushRegion(start, size);
     }
 }
 
-void RasterizerFlushAndInvalidateRegion(PAddr start, u32 size) {
+void RasterizerFlushAndInvalidateRegion(PAddr start, u64 size) {
     // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be
     // null here
     if (VideoCore::g_renderer != nullptr) {
@@ -382,7 +384,7 @@ void RasterizerFlushAndInvalidateRegion(PAddr start, u32 size) {
     }
 }
 
-void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode) {
+void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode) {
     // Since pages are unmapped on shutdown after video core is shutdown, the renderer may be
     // null here
     if (VideoCore::g_renderer != nullptr) {
@@ -398,7 +400,7 @@ void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode) {
             VAddr overlap_end = std::min(end, region_end);
 
             PAddr physical_start = TryVirtualToPhysicalAddress(overlap_start).value();
-            u32 overlap_size = overlap_end - overlap_start;
+            u64 overlap_size = overlap_end - overlap_start;
 
             auto* rasterizer = VideoCore::g_renderer->Rasterizer();
             switch (mode) {
diff --git a/src/core/memory.h b/src/core/memory.h
index c8c56babd9..e8d796d244 100644
--- a/src/core/memory.h
+++ b/src/core/memory.h
@@ -16,10 +16,11 @@ namespace Memory {
  * Page size used by the ARM architecture. This is the smallest granularity with which memory can
  * be mapped.
  */
-const u32 PAGE_SIZE = 0x1000;
-const u32 PAGE_MASK = PAGE_SIZE - 1;
 const int PAGE_BITS = 12;
-const size_t PAGE_TABLE_NUM_ENTRIES = 1 << (32 - PAGE_BITS);
+const u64 PAGE_SIZE = 1 << PAGE_BITS;
+const u64 PAGE_MASK = PAGE_SIZE - 1;
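+// Note: too many entries to back a flat array now; used only as a bound when validating mappings.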
+const size_t PAGE_TABLE_NUM_ENTRIES = 1ULL << (64 - PAGE_BITS);
 
 /// Physical memory regions as seen from the ARM11
 enum : PAddr {
@@ -178,17 +179,17 @@ u8* GetPhysicalPointer(PAddr address);
  * Adds the supplied value to the rasterizer resource cache counter of each
  * page touching the region.
  */
-void RasterizerMarkRegionCached(PAddr start, u32 size, int count_delta);
+void RasterizerMarkRegionCached(PAddr start, u64 size, int count_delta);
 
 /**
  * Flushes any externally cached rasterizer resources touching the given region.
  */
-void RasterizerFlushRegion(PAddr start, u32 size);
+void RasterizerFlushRegion(PAddr start, u64 size);
 
 /**
  * Flushes and invalidates any externally cached rasterizer resources touching the given region.
  */
-void RasterizerFlushAndInvalidateRegion(PAddr start, u32 size);
+void RasterizerFlushAndInvalidateRegion(PAddr start, u64 size);
 
 enum class FlushMode {
     /// Write back modified surfaces to RAM
@@ -201,12 +202,12 @@ enum class FlushMode {
  * Flushes and invalidates any externally cached rasterizer resources touching the given virtual
  * address region.
  */
-void RasterizerFlushVirtualRegion(VAddr start, u32 size, FlushMode mode);
+void RasterizerFlushVirtualRegion(VAddr start, u64 size, FlushMode mode);
 
 /**
  * Dynarmic has an optimization to memory accesses when the pointer to the page exists that
  * can be used by setting up the current page table as a callback. This function is used to
  * retrieve the current page table for that purpose.
  */
-std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers();
+//std::array<u8*, PAGE_TABLE_NUM_ENTRIES>* GetCurrentPageTablePointers();
 }
diff --git a/src/core/memory_setup.h b/src/core/memory_setup.h
index 3fdf3a87dc..fc3fda466b 100644
--- a/src/core/memory_setup.h
+++ b/src/core/memory_setup.h
@@ -18,7 +18,7 @@ void InitMemoryMap();
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param target Buffer with the memory backing the mapping. Must be of length at least `size`.
  */
-void MapMemoryRegion(VAddr base, u32 size, u8* target);
+void MapMemoryRegion(VAddr base, u64 size, u8* target);
 
 /**
  * Maps a region of the emulated process address space as a IO region.
@@ -26,7 +26,7 @@ void MapMemoryRegion(VAddr base, u32 size, u8* target);
  * @param size The amount of bytes to map. Must be page-aligned.
  * @param mmio_handler The handler that backs the mapping.
  */
-void MapIoRegion(VAddr base, u32 size, MMIORegionPointer mmio_handler);
+void MapIoRegion(VAddr base, u64 size, MMIORegionPointer mmio_handler);
 
-void UnmapRegion(VAddr base, u32 size);
+void UnmapRegion(VAddr base, u64 size);
 }
-- 
GitLab