diff --git a/Utilities/VirtualMemory.cpp b/Utilities/VirtualMemory.cpp
index ec34b919164d..631cbf3c7fe3 100644
--- a/Utilities/VirtualMemory.cpp
+++ b/Utilities/VirtualMemory.cpp
@@ -76,6 +76,15 @@ namespace utils
 #endif
 	}
 
+	void memory_release(void* pointer, std::size_t size)
+	{
+#ifdef _WIN32
+		verify(HERE), ::VirtualFree(pointer, 0, MEM_RELEASE);
+#else
+		verify(HERE), ::munmap(pointer, size) != -1;
+#endif
+	}
+
 	void memory_protect(void* pointer, std::size_t size, protection prot)
 	{
 #ifdef _WIN32
@@ -83,6 +92,129 @@ namespace utils
 		verify(HERE), ::VirtualProtect(pointer, size, +prot, &old);
 #else
 		verify(HERE), ::mprotect((void*)((u64)pointer & -4096), ::align(size, 4096), +prot) != -1;
+#endif
+	}
+
+	shm::shm(u32 size)
+		: m_size(::align(size, 0x10000))
+		, m_ptr(nullptr)
+	{
+#ifdef _WIN32
+		m_handle = ::CreateFileMappingW(INVALID_HANDLE_VALUE, NULL, PAGE_EXECUTE_READWRITE, 0, m_size, NULL);
+//#elif __linux__
+//	m_file = ::memfd_create("mem1", 0);
+//	::ftruncate(m_file, m_size);
+#else
+		// Create a named POSIX shared memory object, then unlink it immediately (keeps the fd, drops the name)
+		while ((m_file = ::shm_open("/rpcs3-mem1", O_RDWR | O_CREAT | O_EXCL, S_IWUSR | S_IRUSR)) == -1)
+		{
+			if (errno != EEXIST)
+				return;
+		}
+
+		::shm_unlink("/rpcs3-mem1");
+		::ftruncate(m_file, m_size);
+#endif
+
+		m_ptr = this->map(nullptr);
+	}
+
+	shm::~shm()
+	{
+#ifdef _WIN32
+		::UnmapViewOfFile(m_ptr);
+		::CloseHandle(m_handle);
+#else
+		::munmap(m_ptr, m_size);
+		::close(m_file);
+#endif
+	}
+
+	u8* shm::map(void* ptr, protection prot) const
+	{
+#ifdef _WIN32
+		DWORD access = 0;
+		switch (prot)
+		{
+		case protection::rw: access = FILE_MAP_WRITE; break;
+		case protection::ro: access = FILE_MAP_READ; break;
+		case protection::no: break;
+		case protection::wx: access = FILE_MAP_WRITE | FILE_MAP_EXECUTE; break;
+		case protection::rx: access = FILE_MAP_READ | FILE_MAP_EXECUTE; break;
+		}
+
+		return static_cast<u8*>(::MapViewOfFileEx(m_handle, access, 0, 0, m_size, ptr));
+#else
+		return static_cast<u8*>(::mmap((void*)((u64)ptr & -0x10000), m_size, +prot, MAP_SHARED | (ptr ? MAP_FIXED : 0), m_file, 0));
+#endif
+	}
+
+	u8* shm::map_critical(void* ptr, protection prot)
+	{
+		const auto target = (u8*)((u64)ptr & -0x10000);
+
+#ifdef _WIN32
+		// Win32: release the reserved region, then re-reserve the parts around the target
+		::MEMORY_BASIC_INFORMATION mem;
+		if (!::VirtualQuery(target, &mem, sizeof(mem)) || mem.State != MEM_RESERVE || !::VirtualFree(mem.AllocationBase, 0, MEM_RELEASE))
+		{
+			return nullptr;
+		}
+
+		const auto base = (u8*)mem.AllocationBase;
+		const auto size = mem.RegionSize + (target - base);
+
+		if (base < target && !::VirtualAlloc(base, target - base, MEM_RESERVE, PAGE_NOACCESS))
+		{
+			return nullptr;
+		}
+
+		if (target + m_size < base + size && !::VirtualAlloc(target + m_size, base + size - target - m_size, MEM_RESERVE, PAGE_NOACCESS))
+		{
+			return nullptr;
+		}
+#endif
+
+		return this->map(target, prot);
+	}
+
+	void shm::unmap(void* ptr) const
+	{
+#ifdef _WIN32
+		::UnmapViewOfFile(ptr);
+#else
+		::munmap(ptr, m_size);
+#endif
+	}
+
+	void shm::unmap_critical(void* ptr)
+	{
+		const auto target = (u8*)((u64)ptr & -0x10000);
+
+		this->unmap(target);
+
+#ifdef _WIN32
+		::MEMORY_BASIC_INFORMATION mem, mem2;
+		if (!::VirtualQuery(target - 1, &mem, sizeof(mem)) || !::VirtualQuery(target + m_size, &mem2, sizeof(mem2)))
+		{
+			return;
+		}
+
+		if (mem.State == MEM_RESERVE && !::VirtualFree(mem.AllocationBase, 0, MEM_RELEASE))
+		{
+			return;
+		}
+
+		if (mem2.State == MEM_RESERVE && !::VirtualFree(mem2.AllocationBase, 0, MEM_RELEASE))
+		{
+			return;
+		}
+
+		const auto size1 = mem.State == MEM_RESERVE ? target - (u8*)mem.AllocationBase : 0;
+		const auto size2 = mem2.State == MEM_RESERVE ? mem2.RegionSize : 0;
+
+		if (!::VirtualAlloc(mem.State == MEM_RESERVE ? mem.AllocationBase : target, m_size + size1 + size2, MEM_RESERVE, PAGE_NOACCESS))
+		{
+			return;
+		}
 #endif
 	}
 }
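Editorial note (not part of the patch): a minimal sketch of how the utils::shm API above behaves. It assumes map(nullptr) lets the OS pick the address, which both the MapViewOfFileEx and mmap branches above do; the function and variable names are illustrative only.

	// Two views of one utils::shm object alias the same physical pages -
	// this is the property vm::block_t relies on later in this diff.
	#include "Utilities/VirtualMemory.h"

	void shm_aliasing_example()
	{
		utils::shm mem(0x10000);      // one 64k shared memory object
		u8* view1 = mem.map(nullptr); // first mapping, OS-chosen address
		u8* view2 = mem.map(nullptr); // second mapping of the same pages

		view1[0] = 0x42;              // a store through one view...
		// ...is visible through the other: view2[0] == 0x42

		mem.unmap(view2);
		mem.unmap(view1);
	}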
diff --git a/Utilities/VirtualMemory.h b/Utilities/VirtualMemory.h
index 74f441a598dc..e43c6630784a 100644
--- a/Utilities/VirtualMemory.h
+++ b/Utilities/VirtualMemory.h
@@ -30,6 +30,56 @@ namespace utils
 	*/
 	void memory_decommit(void* pointer, std::size_t size);
 
+	// Free memory previously reserved by memory_reserve (the original size must be specified)
+	void memory_release(void* pointer, std::size_t size);
+
 	// Set memory protection
 	void memory_protect(void* pointer, std::size_t size, protection prot);
+
+	// Shared memory handle
+	class shm
+	{
+#ifdef _WIN32
+		void* m_handle;
+#else
+		int m_file;
+#endif
+		u32 m_size;
+		u8* m_ptr;
+
+	public:
+		explicit shm(u32 size);
+
+		shm(const shm&) = delete;
+
+		~shm();
+
+		// Map shared memory
+		u8* map(void* ptr, protection prot = protection::rw) const;
+
+		// Map shared memory over a reserved memory region, which is unsafe (non-atomic) under Win32
+		u8* map_critical(void* ptr, protection prot = protection::rw);
+
+		// Unmap shared memory
+		void unmap(void* ptr) const;
+
+		// Unmap shared memory, undoing map_critical
+		void unmap_critical(void* ptr);
+
+		// Access memory with a simple range check
+		u8* get(u32 offset, u32 size) const
+		{
+			if (offset >= m_size || m_size - offset < size)
+			{
+				return nullptr;
+			}
+
+			return m_ptr + offset;
+		}
+
+		u32 size() const
+		{
+			return m_size;
+		}
+	};
 }
diff --git a/rpcs3/Emu/Cell/Modules/sys_heap.cpp b/rpcs3/Emu/Cell/Modules/sys_heap.cpp
index 909b6b576f63..624aa0f95cf8 100644
--- a/rpcs3/Emu/Cell/Modules/sys_heap.cpp
+++ b/rpcs3/Emu/Cell/Modules/sys_heap.cpp
@@ -48,7 +48,7 @@ u32 _sys_heap_memalign(u32 heap, u32 align, u32 size)
 {
 	sysPrxForUser.warning("_sys_heap_memalign(heap=0x%x, align=0x%x, size=0x%x)", heap, align, size);
 
-	return vm::alloc(size, vm::main, std::max<u32>(align, 4096));
+	return vm::alloc(size, vm::main, std::max<u32>(align, 0x10000));
 }
 
 s32 _sys_heap_free(u32 heap, u32 addr)
diff --git a/rpcs3/Emu/Cell/Modules/sys_libc_.cpp b/rpcs3/Emu/Cell/Modules/sys_libc_.cpp
index 45a1abf67a04..4f8ccb61fe3c 100644
--- a/rpcs3/Emu/Cell/Modules/sys_libc_.cpp
+++ b/rpcs3/Emu/Cell/Modules/sys_libc_.cpp
@@ -381,7 +381,7 @@ u32 _sys_memalign(u32 align, u32 size)
 {
 	sysPrxForUser.warning("_sys_memalign(align=0x%x, size=0x%x)", align, size);
 
-	return vm::alloc(size, vm::main, std::max<u32>(align, 4096));
+	return vm::alloc(size, vm::main, std::max<u32>(align, 0x10000));
 }
 
 s32 _sys_free(u32 addr)
diff --git a/rpcs3/Emu/Cell/PPUModule.cpp b/rpcs3/Emu/Cell/PPUModule.cpp
index a3d84ed5fab5..e4da47edacb0 100644
--- a/rpcs3/Emu/Cell/PPUModule.cpp
+++ b/rpcs3/Emu/Cell/PPUModule.cpp
@@ -330,7 +330,7 @@ static void ppu_initialize_modules(const std::shared_ptr<ppu_linkage_info>& link
 			// Allocate HLE variable
 			if (variable.second.size >= 4096 || variable.second.align >= 4096)
 			{
-				variable.second.addr = vm::alloc(variable.second.size, vm::main, std::max<u32>(variable.second.align, 4096));
+				variable.second.addr = vm::alloc(variable.second.size, vm::main, std::max<u32>(variable.second.align, 0x10000));
 			}
 			else
 			{
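Editorial note (not part of the patch): the three call sites above all raise the minimum alignment from 4096 to 0x10000. This follows from block_t::alloc later in this diff, which now rounds sizes up to 64k and rejects smaller alignments. A hypothetical sketch of the visible effect:

	// vm::alloc as changed later in this diff: sizes round up to 64k,
	// alignment must be a power of two >= 0x10000.
	const u32 a = vm::alloc(0x1000, vm::main);           // backed by a full 0x10000 shm object
	const u32 b = vm::alloc(0x24000, vm::main, 0x20000); // size rounds up to 0x30000, 128k-aligned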
diff --git a/rpcs3/Emu/Cell/PPUThread.cpp b/rpcs3/Emu/Cell/PPUThread.cpp
index 866387e33e3a..22110dcd7ed1 100644
--- a/rpcs3/Emu/Cell/PPUThread.cpp
+++ b/rpcs3/Emu/Cell/PPUThread.cpp
@@ -400,39 +400,32 @@ extern void ppu_remove_breakpoint(u32 addr)
 
 extern bool ppu_patch(u32 addr, u32 value)
 {
-	// TODO: check executable flag
-	if (vm::check_addr(addr, sizeof(u32)))
-	{
-		if (g_cfg.core.ppu_decoder == ppu_decoder_type::llvm && Emu.GetStatus() != system_state::ready)
-		{
-			// TODO
-			return false;
-		}
-
-		if (!vm::check_addr(addr, sizeof(u32), vm::page_writable))
-		{
-			utils::memory_protect(vm::g_base_addr + addr, sizeof(u32), utils::protection::rw);
-		}
-
-		vm::write32(addr, value);
-
-		const u32 _break = ::narrow<u32>(reinterpret_cast<std::uintptr_t>(&ppu_break));
-		const u32 fallback = ::narrow<u32>(reinterpret_cast<std::uintptr_t>(&ppu_fallback));
-
-		if (ppu_ref(addr) != _break && ppu_ref(addr) != fallback)
-		{
-			ppu_ref(addr) = ppu_cache(addr);
-		}
-
-		if (!vm::check_addr(addr, sizeof(u32), vm::page_writable))
-		{
-			utils::memory_protect(vm::g_base_addr + addr, sizeof(u32), utils::protection::ro);
-		}
-
-		return true;
+	if (g_cfg.core.ppu_decoder == ppu_decoder_type::llvm && Emu.GetStatus() != system_state::ready)
+	{
+		// TODO: support recompilers
+		LOG_FATAL(GENERAL, "Patch failed at 0x%x: LLVM recompiler is used.", addr);
+		return false;
 	}
 
-	return false;
+	const auto ptr = vm::get_super_ptr<u32>(addr);
+
+	if (!ptr)
+	{
+		LOG_FATAL(GENERAL, "Patch failed at 0x%x: invalid memory address.", addr);
+		return false;
+	}
+
+	// Write the opcode through the shared memory view (bypasses page protection)
+	*ptr = value;
+
+	const u32 _break = ::narrow<u32>(reinterpret_cast<std::uintptr_t>(&ppu_break));
+	const u32 fallback = ::narrow<u32>(reinterpret_cast<std::uintptr_t>(&ppu_fallback));
+
+	if (ppu_ref(addr) != _break && ppu_ref(addr) != fallback)
+	{
+		ppu_ref(addr) = ppu_cache(addr);
+	}
+
+	return true;
 }
 
 std::string ppu_thread::get_name() const
diff --git a/rpcs3/Emu/Cell/lv2/sys_memory.cpp b/rpcs3/Emu/Cell/lv2/sys_memory.cpp
index 2c5d5018b32d..984a3c0a9477 100644
--- a/rpcs3/Emu/Cell/lv2/sys_memory.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_memory.cpp
@@ -1,7 +1,8 @@
 #include "stdafx.h"
+#include "Utilities/VirtualMemory.h"
+#include "Emu/IdManager.h"
 #include "sys_memory.h"
-
-
+#include "sys_mmapper.h"
 
 logs::channel sys_memory("sys_memory");
 
@@ -10,33 +11,19 @@ error_code sys_memory_allocate(u32 size, u64 flags, vm::ptr<u32> alloc_addr)
 	sys_memory.warning("sys_memory_allocate(size=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size, flags, alloc_addr);
 
 	// Check allocation size
-	switch (flags)
-	{
-	case 0: // handle "default" value, issue 2510
-	case SYS_MEMORY_PAGE_SIZE_1M:
-	{
-		if (size % 0x100000)
-		{
-			return CELL_EALIGN;
-		}
-
-		break;
-	}
-
-	case SYS_MEMORY_PAGE_SIZE_64K:
-	{
-		if (size % 0x10000)
-		{
-			return CELL_EALIGN;
-		}
-
-		break;
-	}
-
-	default:
-	{
-		return CELL_EINVAL;
-	}
-	}
+	const u32 align =
+		flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 :
+		flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 :
+		flags == 0 ? 0x10000 : 0;
+
+	if (!align)
+	{
+		return {CELL_EINVAL, flags};
+	}
+
+	if (size % align)
+	{
+		return {CELL_EALIGN, size};
+	}
 
 	// Get "default" memory container
@@ -49,7 +36,7 @@ error_code sys_memory_allocate(u32 size, u64 flags, vm::ptr<u32> alloc_addr)
 	}
 
 	// Allocate memory, write back the start address of the allocated area
-	*alloc_addr = verify(HERE, vm::alloc(size, vm::user_space, flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 : 0x10000));
+	*alloc_addr = verify(HERE, vm::alloc(size, vm::user_space, align));
 
 	return CELL_OK;
 }
@@ -59,32 +46,19 @@ error_code sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr)
 	sys_memory.warning("sys_memory_allocate_from_container(size=0x%x, cid=0x%x, flags=0x%llx, alloc_addr=*0x%x)", size, cid, flags, alloc_addr);
 
 	// Check allocation size
-	switch (flags)
-	{
-	case SYS_MEMORY_PAGE_SIZE_1M:
-	{
-		if (size % 0x100000)
-		{
-			return CELL_EALIGN;
-		}
-
-		break;
-	}
-
-	case SYS_MEMORY_PAGE_SIZE_64K:
-	{
-		if (size % 0x10000)
-		{
-			return CELL_EALIGN;
-		}
-
-		break;
-	}
-
-	default:
-	{
-		return CELL_EINVAL;
-	}
-	}
+	const u32 align =
+		flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 :
+		flags == SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 :
+		flags == 0 ? 0x10000 : 0;
+
+	if (!align)
+	{
+		return {CELL_EINVAL, flags};
+	}
+
+	if (size % align)
+	{
+		return {CELL_EALIGN, size};
+	}
 
 	const auto ct = idm::get<lv2_memory_container>(cid, [&](lv2_memory_container& ct) -> CellError
 
@@ -108,8 +82,11 @@ error_code sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr)
 		return ct.ret;
 	}
 
-	// Allocate memory, write back the start address of the allocated area, use cid as the supplementary info
-	*alloc_addr = verify(HERE, vm::alloc(size, vm::user_space, flags == SYS_MEMORY_PAGE_SIZE_1M ? 0x100000 : 0x10000, cid));
+	// Create phantom lv2_memory object (TODO)
+	const auto mem = idm::make_ptr<lv2_obj, lv2_memory>(size, align, flags, ct.ptr);
+
+	// Allocate memory
+	*alloc_addr = verify(HERE, vm::get(vm::user_space)->alloc(size, mem->align, &mem->shm));
 
 	return CELL_OK;
 }
@@ -120,26 +97,50 @@ error_code sys_memory_free(u32 addr)
 
 	const auto area = vm::get(vm::user_space);
 
-	verify(HERE), area;
-
-	// Deallocate memory
-	u32 cid, size = area->dealloc(addr, nullptr, &cid);
+	const auto shm = area->get(addr);
 
-	if (!size)
+	if (!shm.second)
 	{
-		return CELL_EINVAL;
+		return {CELL_EINVAL, addr};
 	}
 
-	// Return "physical memory"
-	if (cid == 0)
+	// Retrieve phantom lv2_memory object (TODO)
+	const auto mem = idm::select<lv2_obj, lv2_memory>([&](u32 id, lv2_memory& mem) -> u32
+	{
+		if (mem.shm.get() == shm.second.get())
+		{
+			return id;
+		}
+
+		return 0;
+	});
+
+	if (!mem)
 	{
-		fxm::get<lv2_memory_container>()->used -= size;
+		// Deallocate memory (simple)
+		if (!area->dealloc(addr))
+		{
+			return {CELL_EINVAL, addr};
+		}
+
+		// Return "physical memory" to the default container
+		fxm::get_always<lv2_memory_container>()->used -= shm.second->size();
+
+		return CELL_OK;
 	}
-	else if (const auto ct = idm::get<lv2_memory_container>(cid))
+
+	// Deallocate memory
+	if (!area->dealloc(addr, &shm.second))
 	{
-		ct->used -= size;
+		return {CELL_EINVAL, addr};
 	}
 
+	// Return "physical memory"
+	mem->ct->used -= mem->size;
+
+	// Remove phantom lv2_memory object
+	verify(HERE), idm::remove<lv2_obj, lv2_memory>(mem.ret);
+
 	return CELL_OK;
 }
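Editorial note (not part of the patch): worked examples of the flag-to-alignment table introduced above. The syscall signature is the one shown in this diff; `addr` stands for a hypothetical vm::ptr<u32> output variable.

	sys_memory_allocate(0x120000, SYS_MEMORY_PAGE_SIZE_64K, addr); // align = 0x10000, 0x120000 % 0x10000 == 0 -> CELL_OK
	sys_memory_allocate(0x123000, SYS_MEMORY_PAGE_SIZE_64K, addr); // 0x123000 % 0x10000 == 0x3000 -> CELL_EALIGN
	sys_memory_allocate(0x100000, 0, addr);                        // flags 0 is treated as 64k pages (issue 2510) -> CELL_OK
	sys_memory_allocate(0x100000, 0xdead, addr);                   // unknown flags -> align = 0 -> CELL_EINVAL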
diff --git a/rpcs3/Emu/Cell/lv2/sys_mmapper.cpp b/rpcs3/Emu/Cell/lv2/sys_mmapper.cpp
index 68b822b04379..1967fb1371fd 100644
--- a/rpcs3/Emu/Cell/lv2/sys_mmapper.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_mmapper.cpp
@@ -1,13 +1,22 @@
 #include "stdafx.h"
-#include "sys_mmapper.h"
 #include "Emu/Cell/PPUThread.h"
 #include "sys_ppu_thread.h"
 #include "Emu/Cell/lv2/sys_event.h"
-
-
+#include "Utilities/VirtualMemory.h"
+#include "sys_memory.h"
+#include "sys_mmapper.h"
 
 logs::channel sys_mmapper("sys_mmapper");
 
+lv2_memory::lv2_memory(u32 size, u32 align, u64 flags, const std::shared_ptr<lv2_memory_container>& ct)
+	: size(size)
+	, align(align)
+	, flags(flags)
+	, ct(ct)
+	, shm(std::make_shared<utils::shm>(size))
+{
+}
+
 error_code sys_mmapper_allocate_address(u64 size, u64 flags, u64 alignment, vm::ptr<u32> alloc_addr)
 {
 	sys_mmapper.error("sys_mmapper_allocate_address(size=0x%llx, flags=0x%llx, alignment=0x%llx, alloc_addr=*0x%x)", size, flags, alignment, alloc_addr);
@@ -233,7 +242,7 @@ error_code sys_mmapper_free_shared_memory(u32 mem_id)
 	// Conditionally remove memory ID
 	const auto mem = idm::withdraw<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory& mem) -> CellError
 	{
-		if (!mem.addr.compare_and_swap_test(0, -1))
+		if (mem.counter)
 		{
 			return CELL_EBUSY;
 		}
@@ -268,31 +277,33 @@ error_code sys_mmapper_map_shared_memory(u32 addr, u32 mem_id, u64 flags)
 		return CELL_EINVAL;
 	}
 
-	const auto mem = idm::get<lv2_obj, lv2_memory>(mem_id);
+	const auto mem = idm::get<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory& mem) -> CellError
+	{
+		if (addr % mem.align)
+		{
+			return CELL_EALIGN;
+		}
+
+		mem.counter++;
+		return {};
+	});
 
 	if (!mem)
 	{
 		return CELL_ESRCH;
 	}
 
-	if (addr % mem->align)
-	{
-		return CELL_EALIGN;
-	}
-
-	if (const u32 old_addr = mem->addr.compare_and_swap(0, -1))
+	if (mem.ret)
 	{
-		sys_mmapper.warning("sys_mmapper_map_shared_memory(): Already mapped (mem_id=0x%x, addr=0x%x)", mem_id, old_addr);
-		return CELL_OK;
+		return mem.ret;
 	}
 
-	if (!area->falloc(addr, mem->size, mem->data.data()))
+	if (!area->falloc(addr, mem->size, &mem->shm))
 	{
-		mem->addr = 0;
+		mem->counter--;
 		return CELL_EBUSY;
 	}
 
-	mem->addr = addr;
 	return CELL_OK;
 }
@@ -304,31 +315,28 @@ error_code sys_mmapper_search_and_map(u32 start_addr, u32 mem_id, u64 flags, vm::ptr<u32> alloc_addr)
 
 	if (!area || start_addr < 0x50000000 || start_addr >= 0xC0000000)
 	{
-		return CELL_EINVAL;
+		return {CELL_EINVAL, start_addr};
 	}
 
-	const auto mem = idm::get<lv2_obj, lv2_memory>(mem_id);
+	const auto mem = idm::get<lv2_obj, lv2_memory>(mem_id, [&](lv2_memory& mem)
+	{
+		mem.counter++;
+	});
 
 	if (!mem)
 	{
 		return CELL_ESRCH;
 	}
 
-	if (const u32 old_addr = mem->addr.compare_and_swap(0, -1))
-	{
-		sys_mmapper.warning("sys_mmapper_search_and_map(): Already mapped (mem_id=0x%x, addr=0x%x)", mem_id, old_addr);
-		return CELL_OK;
-	}
-
-	const u32 addr = area->alloc(mem->size, mem->align, mem->data.data());
+	const u32 addr = area->alloc(mem->size, mem->align, &mem->shm);
 
 	if (!addr)
 	{
-		mem->addr = 0;
+		mem->counter--;
 		return CELL_ENOMEM;
 	}
 
-	*alloc_addr = mem->addr = addr;
+	*alloc_addr = addr;
 	return CELL_OK;
 }
@@ -340,26 +348,39 @@ error_code sys_mmapper_unmap_shared_memory(u32 addr, vm::ptr<u32> mem_id)
 
 	if (!area || addr < 0x50000000 || addr >= 0xC0000000)
 	{
-		return CELL_EINVAL;
+		return {CELL_EINVAL, addr};
 	}
 
-	const auto mem = idm::select<lv2_obj, lv2_memory>([&](u32 id, lv2_memory& mem)
+	const auto shm = area->get(addr);
+
+	if (!shm.second)
 	{
-		if (mem.addr == addr)
+		return {CELL_EINVAL, addr};
+	}
+
+	const auto mem = idm::select<lv2_obj, lv2_memory>([&](u32 id, lv2_memory& mem) -> u32
+	{
+		if (mem.shm.get() == shm.second.get())
 		{
-			*mem_id = id;
-			return true;
+			return id;
 		}
 
-		return false;
+		return 0;
 	});
 
 	if (!mem)
 	{
-		return CELL_EINVAL;
+		return {CELL_EINVAL, addr};
 	}
 
-	verify(HERE), area->dealloc(addr, mem->data.data()), mem->addr.exchange(0) == addr;
+	if (!area->dealloc(addr, &shm.second))
+	{
+		return {CELL_EINVAL, addr};
+	}
+
+	// Write out the ID
+	*mem_id = mem.ret;
+
 	return CELL_OK;
 }
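Editorial note (not part of the patch): a hypothetical guest-side sequence showing the behavioral change above. With the per-object mapping counter replacing the single `addr` field, one mem_id can be mapped at several addresses, where the old code returned early with an "Already mapped" warning. `mem_id` is an illustrative variable.

	sys_mmapper_map_shared_memory(0x50000000, mem_id, 0); // counter 0 -> 1
	sys_mmapper_map_shared_memory(0x60000000, mem_id, 0); // counter 1 -> 2, same utils::shm behind both views
	sys_mmapper_free_shared_memory(mem_id);               // counter != 0 -> CELL_EBUSY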
diff --git a/rpcs3/Emu/Cell/lv2/sys_mmapper.h b/rpcs3/Emu/Cell/lv2/sys_mmapper.h
index 515bd593ac85..977a9de48b08 100644
--- a/rpcs3/Emu/Cell/lv2/sys_mmapper.h
+++ b/rpcs3/Emu/Cell/lv2/sys_mmapper.h
@@ -1,9 +1,9 @@
 #pragma once
 
 #include "sys_sync.h"
-#include "sys_memory.h"
+#include <memory>
 
-#include <vector>
+struct lv2_memory_container;
 
 struct lv2_memory : lv2_obj
 {
@@ -13,19 +13,11 @@ struct lv2_memory : lv2_obj
 	const u32 align; // Alignment required
 	const u64 flags;
 	const std::shared_ptr<lv2_memory_container> ct; // Associated memory container
+	const std::shared_ptr<utils::shm> shm;
 
-	atomic_t<u32> addr{}; // Actual mapping address
+	atomic_t<u32> counter{0};
 
-	std::vector<uchar> data;
-
-	lv2_memory(u32 size, u32 align, u64 flags, const std::shared_ptr<lv2_memory_container>& ct)
-		: size(size)
-		, align(align)
-		, flags(flags)
-		, ct(ct)
-	{
-		data.resize(size);
-	}
+	lv2_memory(u32 size, u32 align, u64 flags, const std::shared_ptr<lv2_memory_container>& ct);
 };
 
 enum : u64
 {
@@ -52,7 +44,7 @@ struct page_fault_notification_entry
 // Used to hold the list of queues to be notified on a page fault event.
 struct page_fault_notification_entries
 {
-	std::list<page_fault_notification_entry> entries;
+	std::vector<page_fault_notification_entry> entries;
 };
 
 struct page_fault_event
@@ -63,7 +55,7 @@ struct page_fault_event
 
 struct page_fault_event_entries
 {
-	std::list<page_fault_event> events;
+	std::vector<page_fault_event> events;
 
 	semaphore<> pf_mutex;
 };
diff --git a/rpcs3/Emu/Memory/vm.cpp b/rpcs3/Emu/Memory/vm.cpp
index f0894d0e0dfa..a00a54eef981 100644
--- a/rpcs3/Emu/Memory/vm.cpp
+++ b/rpcs3/Emu/Memory/vm.cpp
@@ -7,7 +7,6 @@
 #include "Emu/CPU/CPUThread.h"
 #include "Emu/Cell/lv2/sys_memory.h"
 #include "Emu/RSX/GSRender.h"
-
 #include 
 #include 
@@ -299,8 +298,10 @@ namespace vm
 		}
 	}
 
-	void _page_map(u32 addr, u32 size, u8 flags)
+	static void _page_map(u32 addr, u8 flags, utils::shm& shm)
 	{
+		const u32 size = shm.size();
+
 		if (!size || (size | addr) % 4096 || flags & page_allocated)
 		{
 			fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
@@ -314,7 +315,10 @@ namespace vm
 			}
 		}
 
-		utils::memory_commit(g_base_addr + addr, size);
+		if (shm.map_critical(g_base_addr + addr) != g_base_addr + addr)
+		{
+			fmt::throw_exception("Memory mapping failed - blame Windows (addr=0x%x, size=0x%x, flags=0x%x)", addr, size, flags);
+		}
 
 		if (flags & page_executable)
 		{
@@ -393,19 +397,28 @@ namespace vm
 		return true;
 	}
 
-	void _page_unmap(u32 addr, u32 size)
+	static void _page_unmap(u32 addr, utils::shm& shm)
 	{
+		const u32 size = shm.size();
+
 		if (!size || (size | addr) % 4096)
 		{
 			fmt::throw_exception("Invalid arguments (addr=0x%x, size=0x%x)" HERE, addr, size);
 		}
 
+		bool is_exec = false;
+
 		for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
 		{
 			if ((g_pages[i].flags & page_allocated) == 0)
 			{
 				fmt::throw_exception("Memory not mapped (addr=0x%x, size=0x%x, current_addr=0x%x)" HERE, addr, size, i * 4096);
 			}
+
+			if (g_pages[i].flags & page_executable)
+			{
+				is_exec = true;
+			}
 		}
 
 		for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
@@ -416,8 +429,12 @@ namespace vm
 			}
 		}
 
-		utils::memory_decommit(g_base_addr + addr, size);
-		utils::memory_decommit(g_exec_addr + addr, size);
+		shm.unmap_critical(g_base_addr + addr);
+
+		if (is_exec)
+		{
+			utils::memory_decommit(g_exec_addr + addr, size);
+		}
 
 		if (g_cfg.core.ppu_debug)
 		{
@@ -438,7 +455,7 @@ namespace vm
 		return true;
 	}
 
-	u32 alloc(u32 size, memory_location_t location, u32 align, u32 sup)
+	u32 alloc(u32 size, memory_location_t location, u32 align)
 	{
 		const auto block = get(location);
 
@@ -447,10 +464,10 @@ namespace vm
 			fmt::throw_exception("Invalid memory location (%u)" HERE, (uint)location);
 		}
 
-		return block->alloc(size, align, nullptr, sup);
+		return block->alloc(size, align);
 	}
 
-	u32 falloc(u32 addr, u32 size, memory_location_t location, u32 sup)
+	u32 falloc(u32 addr, u32 size, memory_location_t location)
 	{
 		const auto block = get(location, addr);
 
@@ -459,10 +476,10 @@ namespace vm
 			fmt::throw_exception("Invalid memory location (%u, addr=0x%x)" HERE, (uint)location, addr);
 		}
 
-		return block->falloc(addr, size, nullptr, sup);
+		return block->falloc(addr, size);
 	}
 
-	u32 dealloc(u32 addr, memory_location_t location, u32* sup_out)
+	u32 dealloc(u32 addr, memory_location_t location)
 	{
 		const auto block = get(location, addr);
 
@@ -471,7 +488,7 @@ namespace vm
 			fmt::throw_exception("Invalid memory location (%u, addr=0x%x)" HERE, (uint)location, addr);
 		}
 
-		return block->dealloc(addr, nullptr, sup_out);
+		return block->dealloc(addr);
 	}
 
 	void dealloc_verbose_nothrow(u32 addr, memory_location_t location) noexcept
@@ -491,8 +508,10 @@ namespace vm
 		}
 	}
 
-	bool block_t::try_alloc(u32 addr, u32 size, u8 flags, u32 sup)
+	bool block_t::try_alloc(u32 addr, u8 flags, std::shared_ptr<utils::shm>&& shm)
 	{
+		const u32 size = shm->size();
+
 		// Check if memory area is already mapped
 		for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
 		{
@@ -503,13 +522,10 @@ namespace vm
 		}
 
 		// Map "real" memory pages
-		_page_map(addr, size, flags);
+		_page_map(addr, flags, *shm);
 
 		// Add entry
-		m_map[addr] = size;
-
-		// Add supplementary info if necessary
-		if (sup) m_sup[addr] = sup;
+		m_map[addr] = std::move(shm);
 
 		return true;
 	}
@@ -528,24 +544,32 @@ namespace vm
 
 	block_t::~block_t()
 	{
-		vm::writer_lock lock(0);
+		{
+			vm::writer_lock lock(0);
 
-		// Deallocate all memory
-		for (auto& entry : m_map)
+			// Deallocate all memory
+			for (auto& entry : m_map)
+			{
+				_page_unmap(entry.first, *entry.second);
+			}
+		}
+
+		// Notify rsx to invalidate range (TODO)
+		if (const auto rsxthr = fxm::check_unlocked<GSRender>())
 		{
-			_page_unmap(entry.first, entry.second);
+			rsxthr->on_notify_memory_unmapped(addr, size);
 		}
 	}
 
-	u32 block_t::alloc(const u32 orig_size, u32 align, const uchar* data, u32 sup)
+	u32 block_t::alloc(const u32 orig_size, u32 align, const std::shared_ptr<utils::shm>* src)
 	{
 		vm::writer_lock lock(0);
 
 		// Align to minimal page size
-		const u32 size = ::align(orig_size, 4096);
+		const u32 size = ::align(orig_size, 0x10000);
 
 		// Check alignment (it's page allocation, so passing small values there is just silly)
-		if (align < 4096 || align != (0x80000000u >> cntlz32(align, true)))
+		if (align < 0x10000 || align != (0x80000000u >> cntlz32(align, true)))
 		{
 			fmt::throw_exception("Invalid alignment (size=0x%x, align=0x%x)" HERE, size, align);
 		}
@@ -567,16 +591,14 @@ namespace vm
 			pflags |= page_64k_size;
 		}
 
+		// Create or import shared memory object
+		std::shared_ptr<utils::shm> shm = src ? std::shared_ptr<utils::shm>(*src) : std::make_shared<utils::shm>(size);
+
 		// Search for an appropriate place (unoptimized)
 		for (u32 addr = ::align(this->addr, align); addr < this->addr + this->size - 1; addr += align)
 		{
-			if (try_alloc(addr, size, pflags, sup))
+			if (try_alloc(addr, pflags, std::move(shm)))
 			{
-				if (data)
-				{
-					std::memcpy(vm::base(addr), data, orig_size);
-				}
-
 				return addr;
 			}
 		}
@@ -584,12 +606,12 @@ namespace vm
 		return 0;
 	}
 
-	u32 block_t::falloc(u32 addr, const u32 orig_size, const uchar* data, u32 sup)
+	u32 block_t::falloc(u32 addr, const u32 orig_size, const std::shared_ptr<utils::shm>* src)
 	{
 		vm::writer_lock lock(0);
 
 		// align to minimal page size
-		const u32 size = ::align(orig_size, 4096);
+		const u32 size = ::align(orig_size, 0x10000);
 
 		// return if addr or size is invalid
 		if (!size || size > this->size || addr < this->addr || addr + size - 1 > this->addr + this->size - 1)
@@ -608,54 +630,81 @@ namespace vm
 			pflags |= page_64k_size;
 		}
 
-		if (!try_alloc(addr, size, pflags, sup))
+		if (!try_alloc(addr, pflags, src ? std::shared_ptr<utils::shm>(*src) : std::make_shared<utils::shm>(size)))
 		{
 			return 0;
 		}
 
-		if (data)
-		{
-			std::memcpy(vm::base(addr), data, orig_size);
-		}
-
 		return addr;
 	}
 
-	u32 block_t::dealloc(u32 addr, uchar* data_out, u32* sup_out)
+	u32 block_t::dealloc(u32 addr, const std::shared_ptr<utils::shm>* src)
 	{
-		vm::writer_lock lock(0);
-
-		const auto found = m_map.find(addr);
-
-		if (found != m_map.end())
-		{
-			const u32 size = found->second;
-			const auto rsxthr = fxm::get<GSRender>();
-
-			// Remove entry
-			m_map.erase(found);
-
-			if (data_out)
-			{
-				std::memcpy(data_out, vm::base(addr), size);
-			}
-
-			// Unmap "real" memory pages
-			_page_unmap(addr, size);
-
-			// Notify rsx to invalidate range
-			if (rsxthr != nullptr) rsxthr->on_notify_memory_unmapped(addr, size);
-
-			// Write supplementary info if necessary
-			if (sup_out) *sup_out = m_sup[addr];
-
-			// Remove supplementary info
-			m_sup.erase(addr);
-
-			return size;
-		}
-
-		return 0;
+		u32 result = 0;
+		{
+			vm::writer_lock lock(0);
+
+			const auto found = m_map.find(addr);
+
+			if (found == m_map.end())
+			{
+				return 0;
+			}
+
+			if (src && found->second.get() != src->get())
+			{
+				return 0;
+			}
+
+			result = found->second->size();
+
+			// Unmap "real" memory pages
+			_page_unmap(addr, *found->second);
+
+			// Remove entry
+			m_map.erase(found);
+		}
+
+		// Notify rsx to invalidate range (TODO)
+		if (const auto rsxthr = fxm::check_unlocked<GSRender>())
+		{
+			rsxthr->on_notify_memory_unmapped(addr, result);
+		}
+
+		return result;
+	}
+
+	std::pair<u32, std::shared_ptr<utils::shm>> block_t::get(u32 addr, u32 size)
+	{
+		if (addr < this->addr || std::max(size, addr - this->addr + size) >= this->size)
+		{
+			return {addr, nullptr};
+		}
+
+		vm::reader_lock lock;
+
+		const auto upper = m_map.upper_bound(addr);
+
+		if (upper == m_map.begin())
+		{
+			return {addr, nullptr};
+		}
+
+		const auto found = std::prev(upper);
+
+		// Exact address condition (size == 0)
+		if (size == 0 && found->first != addr)
+		{
+			return {addr, nullptr};
+		}
+
+		// Range check
+		if (std::max(size, addr - found->first + size) > found->second->size())
+		{
+			return {addr, nullptr};
+		}
+
+		return *found;
 	}
 
 	u32 block_t::imp_used(const vm::writer_lock&)
@@ -664,7 +713,7 @@ namespace vm
 
 		for (auto& entry : m_map)
 		{
-			result += entry.second;
+			result += entry.second->size();
 		}
 
 		return result;
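Editorial note (not part of the patch): a sketch of how block_t::get above is meant to be used. With size == 0 the address must be the exact start of a mapping (the sys_memory_free path); with a non-zero size, any address inside a mapping resolves, and .first is the mapping's base. Variable names are illustrative.

	const auto area = vm::get(vm::user_space);
	const auto exact = area->get(addr);    // {addr, shm} only if addr starts a mapping
	const auto range = area->get(addr, 4); // mapping containing [addr, addr + 4), if any

	if (range.second)
	{
		const u32 offset = addr - range.first;     // offset of addr within the shm object
		u8* raw = range.second->get(offset, 4);    // range-checked raw access
	}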
diff --git a/rpcs3/Emu/Memory/vm.h b/rpcs3/Emu/Memory/vm.h
index 5282b1206f0b..470ff5c2400d 100644
--- a/rpcs3/Emu/Memory/vm.h
+++ b/rpcs3/Emu/Memory/vm.h
@@ -3,6 +3,7 @@
 #include 
 #include 
 #include 
+#include "Utilities/VirtualMemory.h"
 
 class shared_mutex;
 class named_thread;
@@ -126,14 +127,14 @@ namespace vm
 	// Check flags for specified memory range (unsafe)
 	bool check_addr(u32 addr, u32 size = 1, u8 flags = page_allocated);
 
-	// Search and map memory in specified memory location (don't pass alignment smaller than 4096)
-	u32 alloc(u32 size, memory_location_t location, u32 align = 4096, u32 sup = 0);
+	// Search and map memory in specified memory location (min alignment is 0x10000)
+	u32 alloc(u32 size, memory_location_t location, u32 align = 0x10000);
 
 	// Map memory at specified address (in optionally specified memory location)
-	u32 falloc(u32 addr, u32 size, memory_location_t location = any, u32 sup = 0);
+	u32 falloc(u32 addr, u32 size, memory_location_t location = any);
 
 	// Unmap memory at specified address (in optionally specified memory location), return size
-	u32 dealloc(u32 addr, memory_location_t location = any, u32* sup_out = nullptr);
+	u32 dealloc(u32 addr, memory_location_t location = any);
 
 	// dealloc() with no return value and no exceptions
 	void dealloc_verbose_nothrow(u32 addr, memory_location_t location = any) noexcept;
@@ -141,10 +142,10 @@ namespace vm
 	// Object that handles memory allocations inside specific constant bounds ("location")
 	class block_t final
 	{
-		std::map<u32, u32> m_map; // Mapped memory: addr -> size
-		std::unordered_map<u32, u32> m_sup; // Supplementary info for allocations
+		// Mapped regions: addr -> shm handle
+		std::map<u32, std::shared_ptr<utils::shm>> m_map;
 
-		bool try_alloc(u32 addr, u32 size, u8 flags, u32 sup);
+		bool try_alloc(u32 addr, u8 flags, std::shared_ptr<utils::shm>&&);
 
 	public:
 		block_t(u32 addr, u32 size, u64 flags = 0);
@@ -156,14 +157,17 @@ namespace vm
 		const u32 size; // Total size
 		const u64 flags; // Currently unused
 
-		// Search and map memory (don't pass alignment smaller than 4096)
-		u32 alloc(u32 size, u32 align = 4096, const uchar* data = nullptr, u32 sup = 0);
+		// Search and map memory (min alignment is 0x10000)
+		u32 alloc(u32 size, u32 align = 0x10000, const std::shared_ptr<utils::shm>* = nullptr);
 
 		// Try to map memory at fixed location
-		u32 falloc(u32 addr, u32 size, const uchar* data = nullptr, u32 sup = 0);
+		u32 falloc(u32 addr, u32 size, const std::shared_ptr<utils::shm>* = nullptr);
 
 		// Unmap memory at specified location previously returned by alloc(), return size
-		u32 dealloc(u32 addr, uchar* data_out = nullptr, u32* sup_out = nullptr);
+		u32 dealloc(u32 addr, const std::shared_ptr<utils::shm>* = nullptr);
+
+		// Get memory at specified address (if size = 0, addr assumed exact)
+		std::pair<u32, std::shared_ptr<utils::shm>> get(u32 addr, u32 size = 0);
 
 		// Internal
 		u32 imp_used(const vm::writer_lock&);
@@ -290,6 +294,35 @@ namespace vm
 		return *_ptr<T>(addr);
 	}
 
+	// Access memory bypassing memory protection
+	template <typename T>
+	inline std::shared_ptr<to_be_t<T>> get_super_ptr(u32 addr, u32 count = 1)
+	{
+		const auto area = vm::get(vm::any, addr);
+
+		if (!area || addr + u64{count} * sizeof(T) > UINT32_MAX)
+		{
+			return nullptr;
+		}
+
+		const auto shm = area->get(addr, sizeof(T) * count);
+
+		if (!shm.second || shm.first > addr)
+		{
+			return nullptr;
+		}
+
+		const auto ptr = reinterpret_cast<to_be_t<T>*>(shm.second->get(addr - shm.first, sizeof(T) * count));
+
+		if (!ptr)
+		{
+			return nullptr;
+		}
+
+		// Create a shared pointer using the aliasing constructor
+		return {shm.second, ptr};
+	}
+
 	inline const be_t<u16>& read16(u32 addr)
 	{
 		return _ref<u16>(addr);
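Editorial note (not part of the patch): a sketch of get_super_ptr above, mirroring what ppu_patch does earlier in this diff. Because the returned pointer aliases the shared_ptr that owns the utils::shm object, the backing store cannot be destroyed while the pointer is held. `addr` and `value` are illustrative.

	if (const auto ptr = vm::get_super_ptr<u32>(addr))
	{
		*ptr = value; // big-endian store via be_t<u32>, regardless of page protection
	}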
diff --git a/rpcs3/Emu/Memory/vm_var.h b/rpcs3/Emu/Memory/vm_var.h
index c573bf1fef64..c65cf3208aac 100644
--- a/rpcs3/Emu/Memory/vm_var.h
+++ b/rpcs3/Emu/Memory/vm_var.h
@@ -9,7 +9,7 @@ namespace vm
 	{
 		static inline vm::addr_t alloc(u32 size, u32 align)
 		{
-			return vm::cast(vm::alloc(size, Location, std::max<u32>(align, 4096)));
+			return vm::cast(vm::alloc(size, Location, std::max<u32>(align, 0x10000)));
 		}
 
 		static inline void dealloc(u32 addr, u32 size = 0) noexcept
diff --git a/rpcs3/Emu/RSX/rsx_cache.h b/rpcs3/Emu/RSX/rsx_cache.h
index 00cd01cede84..96ff89c47d44 100644
--- a/rpcs3/Emu/RSX/rsx_cache.h
+++ b/rpcs3/Emu/RSX/rsx_cache.h
@@ -42,7 +42,7 @@ namespace rsx
 		buffered_section() {}
 		~buffered_section() {}
 
-		void reset(u32 base, u32 length, protection_policy protect_policy= protect_policy_full_range)
+		void reset(u32 base, u32 length, protection_policy protect_policy = protect_policy_full_range)
 		{
 			verify(HERE), locked == false;
 
@@ -82,12 +82,26 @@ namespace rsx
 
 		void protect(utils::protection prot)
 		{
-			if (prot == protection) return;
+			if (prot == protection)
+			{
+				return;
+			}
 
 			verify(HERE), locked_address_range > 0;
-			utils::memory_protect(vm::base(locked_address_base), locked_address_range, prot);
-			protection = prot;
-			locked = prot != utils::protection::rw;
+
+			vm::reader_lock lock;
+
+			if (vm::check_addr(locked_address_base, locked_address_range))
+			{
+				utils::memory_protect(vm::base(locked_address_base), locked_address_range, prot);
+				protection = prot;
+				locked = prot != utils::protection::rw;
+			}
+			else
+			{
+				// The range was fully or partially deallocated (TODO)
+				discard();
+			}
 		}
 
 		void unprotect()
@@ -131,7 +145,7 @@ namespace rsx
 
 		/**
 		 * Check if the page containing the address tramples this section. Also compares a former trampled page range to compare
-		 * If true, returns the range with updated invalid range 
+		 * If true, returns the range with updated invalid range
 		 */
		std::tuple<bool, std::pair<u32, u32>> overlaps_page(std::pair<u32, u32> old_range, u32 address, bool full_range_check) const
		{