memmap.cpp (28830B)
// SPDX-FileCopyrightText: 2019-2024 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)

#include "memmap.h"
#include "align.h"
#include "assert.h"
#include "error.h"
#include "log.h"
#include "small_string.h"
#include "string_util.h"

#include "fmt/format.h"

#include <memory>

#if defined(_WIN32)
#include "windows_headers.h"
#include <Psapi.h>
#elif defined(__APPLE__)
#ifdef __aarch64__
#include <pthread.h> // pthread_jit_write_protect_np()
#endif
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <sys/mman.h>
#elif !defined(__ANDROID__)
#include <cerrno>
#include <dlfcn.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#endif

Log_SetChannel(MemMap);

namespace MemMap {
/// Allocates RWX memory at the specified address.
static void* AllocateJITMemoryAt(const void* addr, size_t size);
} // namespace MemMap

#ifdef _WIN32

bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
  DebugAssert((size & (HOST_PAGE_SIZE - 1)) == 0);

  DWORD old_protect;
  if (!VirtualProtect(baseaddr, size, static_cast<DWORD>(mode), &old_protect))
  {
    ERROR_LOG("VirtualProtect() failed with error {}", GetLastError());
    return false;
  }

  return true;
}

std::string MemMap::GetFileMappingName(const char* prefix)
{
  const unsigned pid = GetCurrentProcessId();
  return fmt::format("{}_{}", prefix, pid);
}

void* MemMap::CreateSharedMemory(const char* name, size_t size, Error* error)
{
  const std::wstring mapping_name = name ? StringUtil::UTF8StringToWideString(name) : std::wstring();
  const HANDLE mapping =
    CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE, static_cast<DWORD>(size >> 32),
                       static_cast<DWORD>(size), mapping_name.empty() ? nullptr : mapping_name.c_str());
  if (!mapping)
    Error::SetWin32(error, "CreateFileMappingW() failed: ", GetLastError());

  return static_cast<void*>(mapping);
}

void MemMap::DestroySharedMemory(void* ptr)
{
  CloseHandle(static_cast<HANDLE>(ptr));
}

void MemMap::DeleteSharedMemory(const char* name)
{
  // Automatically freed on close.
}

void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
{
  void* ret = MapViewOfFileEx(static_cast<HANDLE>(handle), FILE_MAP_READ | FILE_MAP_WRITE,
                              static_cast<DWORD>(offset >> 32), static_cast<DWORD>(offset), size, baseaddr);
  if (!ret)
    return nullptr;

  if (mode != PageProtect::ReadWrite)
  {
    DWORD old_prot;
    if (!VirtualProtect(ret, size, static_cast<DWORD>(mode), &old_prot))
      Panic("Failed to protect memory mapping");
  }
  return ret;
}

void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
{
  if (!UnmapViewOfFile(baseaddr))
    Panic("Failed to unmap shared memory");
}

const void* MemMap::GetBaseAddress()
{
  const HMODULE mod = GetModuleHandleW(nullptr);
  if (!mod)
    return nullptr;

  MODULEINFO mi;
  if (!GetModuleInformation(GetCurrentProcess(), mod, &mi, sizeof(mi)))
    return mod;

  return mi.lpBaseOfDll;
}

void* MemMap::AllocateJITMemoryAt(const void* addr, size_t size)
{
  void* ptr = static_cast<u8*>(VirtualAlloc(const_cast<void*>(addr), size,
                                            addr ? (MEM_RESERVE | MEM_COMMIT) : MEM_COMMIT, PAGE_EXECUTE_READWRITE));
  if (!ptr && !addr) [[unlikely]]
    ERROR_LOG("VirtualAlloc(RWX, {}) for internal buffer failed: {}", size, GetLastError());

  return ptr;
}

void MemMap::ReleaseJITMemory(void* ptr, size_t size)
{
  if (!VirtualFree(ptr, 0, MEM_RELEASE))
    ERROR_LOG("Failed to free code pointer {}", static_cast<void*>(ptr));
}

#if defined(CPU_ARCH_ARM32) || defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)

void MemMap::FlushInstructionCache(void* address, size_t size)
{
  ::FlushInstructionCache(GetCurrentProcess(), address, size);
}

#endif

SharedMemoryMappingArea::SharedMemoryMappingArea() = default;

SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
  Destroy();
}

SharedMemoryMappingArea::PlaceholderMap::iterator SharedMemoryMappingArea::FindPlaceholder(size_t offset)
{
  if (m_placeholder_ranges.empty())
    return m_placeholder_ranges.end();

  // this will give us an iterator equal or after page
  auto it = m_placeholder_ranges.lower_bound(offset);
  if (it == m_placeholder_ranges.end())
  {
    // check the last page
    it = (++m_placeholder_ranges.rbegin()).base();
  }

  // it's the one we found?
  if (offset >= it->first && offset < it->second)
    return it;

  // otherwise try the one before
  if (it == m_placeholder_ranges.begin())
    return m_placeholder_ranges.end();

  --it;
  if (offset >= it->first && offset < it->second)
    return it;
  else
    return m_placeholder_ranges.end();
}

bool SharedMemoryMappingArea::Create(size_t size)
{
  Destroy();

  AssertMsg(Common::IsAlignedPow2(size, HOST_PAGE_SIZE), "Size is page aligned");

  m_base_ptr = static_cast<u8*>(VirtualAlloc2(GetCurrentProcess(), nullptr, size,
                                              MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, nullptr, 0));
  if (!m_base_ptr)
    return false;

  m_size = size;
  m_num_pages = size / HOST_PAGE_SIZE;
  m_placeholder_ranges.emplace(0, size);
  return true;
}

void SharedMemoryMappingArea::Destroy()
{
  AssertMsg(m_num_mappings == 0, "No mappings left");

  // hopefully this will be okay, and we don't need to coalesce all the placeholders...
  if (m_base_ptr && !VirtualFreeEx(GetCurrentProcess(), m_base_ptr, 0, MEM_RELEASE))
    Panic("Failed to release shared memory area");

  m_placeholder_ranges.clear();
  m_base_ptr = nullptr;
  m_size = 0;
  m_num_pages = 0;
  m_num_mappings = 0;
}

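// Map() carves a file view out of the reserved region: the placeholder covering the target range is
// split with VirtualFreeEx(MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER) until exactly map_offset..map_offset+map_size
// remains a placeholder, which MapViewOfFile3(MEM_REPLACE_PLACEHOLDER) then replaces with the file mapping.
// m_placeholder_ranges (start offset -> end offset) mirrors the kernel-side placeholder layout so later
// splits and coalesces know the boundaries.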
u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
                                 PageProtect mode)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  const size_t map_offset = static_cast<u8*>(map_base) - m_base_ptr;
  DebugAssert(Common::IsAlignedPow2(map_offset, HOST_PAGE_SIZE));
  DebugAssert(Common::IsAlignedPow2(map_size, HOST_PAGE_SIZE));

  // should be a placeholder. unless there's some other mapping we didn't free.
  PlaceholderMap::iterator phit = FindPlaceholder(map_offset);
  DebugAssertMsg(phit != m_placeholder_ranges.end(), "Page we're mapping is a placeholder");
  DebugAssertMsg(map_offset >= phit->first && map_offset < phit->second, "Page is in returned placeholder range");
  DebugAssertMsg((map_offset + map_size) <= phit->second, "Page range is in returned placeholder range");

  // do we need to split to the left? (i.e. is there a placeholder before this range)
  const size_t old_ph_end = phit->second;
  if (map_offset != phit->first)
  {
    phit->second = map_offset;

    // split it (i.e. left..start and start..end are now separated)
    if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(phit->first), (map_offset - phit->first),
                       MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
    {
      Panic("Failed to left split placeholder for map");
    }
  }
  else
  {
    // start of the placeholder is getting used, we'll split it right below if there's anything left over
    m_placeholder_ranges.erase(phit);
  }

  // do we need to split to the right? (i.e. is there a placeholder after this range)
  if ((map_offset + map_size) != old_ph_end)
  {
    // split out end..ph_end
    m_placeholder_ranges.emplace(map_offset + map_size, old_ph_end);

    if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(map_offset), map_size,
                       MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER))
    {
      Panic("Failed to right split placeholder for map");
    }
  }

  // actually do the mapping, replacing the placeholder on the range
  if (!MapViewOfFile3(static_cast<HANDLE>(file_handle), GetCurrentProcess(), map_base, file_offset, map_size,
                      MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE, nullptr, 0))
  {
    ERROR_LOG("MapViewOfFile3() failed: {}", GetLastError());
    return nullptr;
  }

  if (mode != PageProtect::ReadWrite)
  {
    DWORD old_prot;
    if (!VirtualProtect(map_base, map_size, static_cast<DWORD>(mode), &old_prot))
      Panic("Failed to protect memory mapping");
  }

  m_num_mappings++;
  return static_cast<u8*>(map_base);
}

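// Unmap() is the inverse: the view is released back into a placeholder with
// UnmapViewOfFile2(MEM_PRESERVE_PLACEHOLDER), and adjacent placeholders are merged with
// VirtualFreeEx(MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS) so the bookkeeping map stays minimal.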
bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  const size_t map_offset = static_cast<u8*>(map_base) - m_base_ptr;
  DebugAssert(Common::IsAlignedPow2(map_offset, HOST_PAGE_SIZE));
  DebugAssert(Common::IsAlignedPow2(map_size, HOST_PAGE_SIZE));

  // unmap the specified range
  if (!UnmapViewOfFile2(GetCurrentProcess(), map_base, MEM_PRESERVE_PLACEHOLDER))
  {
    ERROR_LOG("UnmapViewOfFile2() failed: {}", GetLastError());
    return false;
  }

  // can we coalesce to the left?
  PlaceholderMap::iterator left_it = (map_offset > 0) ? FindPlaceholder(map_offset - 1) : m_placeholder_ranges.end();
  if (left_it != m_placeholder_ranges.end())
  {
    // the left placeholder should end at our start
    DebugAssert(map_offset == left_it->second);
    left_it->second = map_offset + map_size;

    // combine placeholders before and the range we're unmapping, i.e. to the left
    if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(left_it->first), left_it->second - left_it->first,
                       MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS))
    {
      Panic("Failed to coalesce placeholders left for unmap");
    }
  }
  else
  {
    // this is a new placeholder
    left_it = m_placeholder_ranges.emplace(map_offset, map_offset + map_size).first;
  }

  // can we coalesce to the right?
  PlaceholderMap::iterator right_it =
    ((map_offset + map_size) < m_size) ? FindPlaceholder(map_offset + map_size) : m_placeholder_ranges.end();
  if (right_it != m_placeholder_ranges.end())
  {
    // should start at our end
    DebugAssert(right_it->first == (map_offset + map_size));
    left_it->second = right_it->second;
    m_placeholder_ranges.erase(right_it);

    // combine our placeholder and the next, i.e. to the right
    if (!VirtualFreeEx(GetCurrentProcess(), OffsetPointer(left_it->first), left_it->second - left_it->first,
                       MEM_RELEASE | MEM_COALESCE_PLACEHOLDERS))
    {
      Panic("Failed to coalesce placeholders right for unmap");
    }
  }

  m_num_mappings--;
  return true;
}

#elif defined(__APPLE__)

bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
  DebugAssertMsg((size & (HOST_PAGE_SIZE - 1)) == 0, "Size is page aligned");

  kern_return_t res = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size, false,
                                      static_cast<vm_prot_t>(mode));
  if (res != KERN_SUCCESS) [[unlikely]]
  {
    ERROR_LOG("mach_vm_protect() failed: {}", res);
    return false;
  }

  return true;
}

std::string MemMap::GetFileMappingName(const char* prefix)
{
  // name actually is not used.
  return {};
}

void* MemMap::CreateSharedMemory(const char* name, size_t size, Error* error)
{
  mach_vm_size_t vm_size = size;
  mach_port_t port;
  const kern_return_t res = mach_make_memory_entry_64(
    mach_task_self(), &vm_size, 0, MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE, &port, MACH_PORT_NULL);
  if (res != KERN_SUCCESS)
  {
    Error::SetStringFmt(error, "mach_make_memory_entry_64() failed: {}", res);
    return nullptr;
  }

  return reinterpret_cast<void*>(static_cast<uintptr_t>(port));
}

void MemMap::DestroySharedMemory(void* ptr)
{
  mach_port_deallocate(mach_task_self(), static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(ptr)));
}

void MemMap::DeleteSharedMemory(const char* name)
{
}

void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
{
  mach_vm_address_t ptr = reinterpret_cast<mach_vm_address_t>(baseaddr);
  const kern_return_t res = mach_vm_map(mach_task_self(), &ptr, size, 0, baseaddr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE,
                                        static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(handle)), offset, FALSE,
                                        static_cast<vm_prot_t>(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
  if (res != KERN_SUCCESS)
  {
    ERROR_LOG("mach_vm_map() failed: {}", res);
    return nullptr;
  }

  return reinterpret_cast<void*>(ptr);
}

void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
{
  const kern_return_t res = mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(baseaddr), size);
  if (res != KERN_SUCCESS)
    Panic("Failed to unmap shared memory");
}

const void* MemMap::GetBaseAddress()
{
  u32 name_buffer_size = 0;
  _NSGetExecutablePath(nullptr, &name_buffer_size);
  if (name_buffer_size > 0) [[likely]]
  {
    std::unique_ptr<char[]> name_buffer = std::make_unique_for_overwrite<char[]>(name_buffer_size + 1);
    if (_NSGetExecutablePath(name_buffer.get(), &name_buffer_size) == 0) [[likely]]
    {
      name_buffer[name_buffer_size] = 0;

      const struct segment_command_64* command = getsegbyname("__TEXT");
      if (command) [[likely]]
      {
        const u8* base = reinterpret_cast<const u8*>(command->vmaddr);
        const u32 image_count = _dyld_image_count();
        for (u32 i = 0; i < image_count; i++)
        {
          if (std::strcmp(_dyld_get_image_name(i), name_buffer.get()) == 0)
            return base + _dyld_get_image_vmaddr_slide(i);
        }
      }
    }
  }

  return reinterpret_cast<const void*>(&GetBaseAddress);
}

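// JIT allocation on macOS takes two paths: on x86-64, mach_vm_allocate() at the hinted address followed
// by mach_vm_protect(RWX); on arm64, an anonymous mmap() with MAP_JIT (required for executable pages on
// Apple Silicon), which cannot honour an address hint, so the caller falls back to allocating anywhere
// and BeginCodeWrite()/EndCodeWrite() toggle per-thread writability.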
void* MemMap::AllocateJITMemoryAt(const void* addr, size_t size)
{
#if !defined(__aarch64__)
  kern_return_t ret = mach_vm_allocate(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&addr), size,
                                       addr ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE);
  if (ret != KERN_SUCCESS)
  {
    ERROR_LOG("mach_vm_allocate() returned {}", ret);
    return nullptr;
  }

  ret = mach_vm_protect(mach_task_self(), reinterpret_cast<mach_vm_address_t>(addr), size, false,
                        VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE);
  if (ret != KERN_SUCCESS)
  {
    ERROR_LOG("mach_vm_protect() returned {}", ret);
    mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(addr), size);
    return nullptr;
  }

  return const_cast<void*>(addr);
#else
  // On ARM64, we need to use MAP_JIT, which means we can't use MAP_FIXED.
  if (addr)
    return nullptr;

  constexpr int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT;
  void* ptr = mmap(const_cast<void*>(addr), size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, -1, 0);
  if (ptr == MAP_FAILED)
  {
    ERROR_LOG("mmap(RWX, {}) for internal buffer failed: {}", size, errno);
    return nullptr;
  }

  return ptr;
#endif
}

void MemMap::ReleaseJITMemory(void* ptr, size_t size)
{
#if !defined(__aarch64__)
  const kern_return_t res = mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(ptr), size);
  if (res != KERN_SUCCESS)
    ERROR_LOG("mach_vm_deallocate() failed: {}", res);
#else
  if (munmap(ptr, size) != 0)
    ERROR_LOG("Failed to free code pointer {}", static_cast<void*>(ptr));
#endif
}

#if defined(CPU_ARCH_ARM32) || defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)

void MemMap::FlushInstructionCache(void* address, size_t size)
{
  __builtin___clear_cache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size);
}

#endif

SharedMemoryMappingArea::SharedMemoryMappingArea() = default;

SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
  Destroy();
}

bool SharedMemoryMappingArea::Create(size_t size)
{
  AssertMsg(Common::IsAlignedPow2(size, HOST_PAGE_SIZE), "Size is page aligned");
  Destroy();

  const kern_return_t res =
    mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&m_base_ptr), size, 0, VM_FLAGS_ANYWHERE,
                MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
  if (res != KERN_SUCCESS)
  {
    ERROR_LOG("mach_vm_map() failed: {}", res);
    return false;
  }

  m_size = size;
  m_num_pages = size / HOST_PAGE_SIZE;
  return true;
}

void SharedMemoryMappingArea::Destroy()
{
  AssertMsg(m_num_mappings == 0, "No mappings left");

  if (m_base_ptr &&
      mach_vm_deallocate(mach_task_self(), reinterpret_cast<mach_vm_address_t>(m_base_ptr), m_size) != KERN_SUCCESS)
  {
    Panic("Failed to release shared memory area");
  }

  m_base_ptr = nullptr;
  m_size = 0;
  m_num_pages = 0;
}

u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
                                 PageProtect mode)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  const kern_return_t res =
    mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
                static_cast<mach_port_t>(reinterpret_cast<uintptr_t>(file_handle)), file_offset, false,
                static_cast<vm_prot_t>(mode), VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
  if (res != KERN_SUCCESS) [[unlikely]]
  {
    ERROR_LOG("mach_vm_map() failed: {}", res);
    return nullptr;
  }

  m_num_mappings++;
  return static_cast<u8*>(map_base);
}

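// Unmapping replaces the file-backed pages with a fresh VM_PROT_NONE anonymous mapping
// (MEMORY_OBJECT_NULL + VM_FLAGS_OVERWRITE), which keeps the reserved address range intact for later re-mapping.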
bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  const kern_return_t res =
    mach_vm_map(mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&map_base), map_size, 0, VM_FLAGS_OVERWRITE,
                MEMORY_OBJECT_NULL, 0, false, VM_PROT_NONE, VM_PROT_NONE, VM_INHERIT_NONE);
  if (res != KERN_SUCCESS) [[unlikely]]
  {
    ERROR_LOG("mach_vm_map() failed: {}", res);
    return false;
  }

  m_num_mappings--;
  return true;
}

#ifdef __aarch64__

static thread_local int s_code_write_depth = 0;

void MemMap::BeginCodeWrite()
{
  // DEBUG_LOG("BeginCodeWrite(): {}", s_code_write_depth);
  if ((s_code_write_depth++) == 0)
  {
    // DEBUG_LOG("  pthread_jit_write_protect_np(0)");
    pthread_jit_write_protect_np(0);
  }
}

void MemMap::EndCodeWrite()
{
  // DEBUG_LOG("EndCodeWrite(): {}", s_code_write_depth);

  DebugAssert(s_code_write_depth > 0);
  if ((--s_code_write_depth) == 0)
  {
    // DEBUG_LOG("  pthread_jit_write_protect_np(1)");
    pthread_jit_write_protect_np(1);
  }
}

#endif

#elif !defined(__ANDROID__)

bool MemMap::MemProtect(void* baseaddr, size_t size, PageProtect mode)
{
  DebugAssertMsg((size & (HOST_PAGE_SIZE - 1)) == 0, "Size is page aligned");

  const int result = mprotect(baseaddr, size, static_cast<int>(mode));
  if (result != 0) [[unlikely]]
  {
    ERROR_LOG("mprotect() for {} at {} failed", size, baseaddr);
    return false;
  }

  return true;
}

std::string MemMap::GetFileMappingName(const char* prefix)
{
  const unsigned pid = static_cast<unsigned>(getpid());
#if defined(__FreeBSD__)
  // FreeBSD's shm_open(3) requires name to be absolute
  return fmt::format("/tmp/{}_{}", prefix, pid);
#else
  return fmt::format("{}_{}", prefix, pid);
#endif
}

void* MemMap::CreateSharedMemory(const char* name, size_t size, Error* error)
{
  const bool is_anonymous = (!name || *name == 0);
#if defined(__linux__) || defined(__FreeBSD__)
  const int fd = is_anonymous ? memfd_create("", 0) : shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
  if (fd < 0)
  {
    Error::SetErrno(error, is_anonymous ? "memfd_create() failed: " : "shm_open() failed: ", errno);
    return nullptr;
  }
#else
  const int fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
  if (fd < 0)
  {
    Error::SetErrno(error, "shm_open() failed: ", errno);
    return nullptr;
  }

  // we're not going to be opening this mapping in other processes, so remove the file
  if (is_anonymous)
    shm_unlink(name);
#endif

  // use fallocate() to ensure we don't SIGBUS later on.
#ifdef __linux__
  if (fallocate(fd, 0, 0, static_cast<off_t>(size)) < 0)
  {
    Error::SetErrno(error, TinyString::from_format("fallocate({}) failed: ", size), errno);
    close(fd);
    if (!is_anonymous)
      shm_unlink(name);
    return nullptr;
  }
#else
  // ensure it's the correct size
  if (ftruncate(fd, static_cast<off_t>(size)) < 0)
  {
    Error::SetErrno(error, TinyString::from_format("ftruncate({}) failed: ", size), errno);
    close(fd);
    if (!is_anonymous)
      shm_unlink(name);
    return nullptr;
  }
#endif

  return reinterpret_cast<void*>(static_cast<intptr_t>(fd));
}

void MemMap::DestroySharedMemory(void* ptr)
{
  close(static_cast<int>(reinterpret_cast<intptr_t>(ptr)));
}

void MemMap::DeleteSharedMemory(const char* name)
{
  shm_unlink(name);
}

void* MemMap::MapSharedMemory(void* handle, size_t offset, void* baseaddr, size_t size, PageProtect mode)
{
  const int flags = (baseaddr != nullptr) ? (MAP_SHARED | MAP_FIXED) : MAP_SHARED;
  void* ptr = mmap(baseaddr, size, static_cast<int>(mode), flags, static_cast<int>(reinterpret_cast<intptr_t>(handle)),
                   static_cast<off_t>(offset));
  if (ptr == MAP_FAILED)
    return nullptr;

  return ptr;
}

void MemMap::UnmapSharedMemory(void* baseaddr, size_t size)
{
  if (munmap(baseaddr, size) != 0)
    Panic("Failed to unmap shared memory");
}

const void* MemMap::GetBaseAddress()
{
#ifndef __APPLE__
  Dl_info info;
  if (dladdr(reinterpret_cast<const void*>(&GetBaseAddress), &info) == 0)
  {
    ERROR_LOG("dladdr() failed");
    return nullptr;
  }

  return info.dli_fbase;
#else
#error Fixme
#endif
}

void* MemMap::AllocateJITMemoryAt(const void* addr, size_t size)
{
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__linux__)
  // Linux does the right thing, allows us to not disturb an existing mapping.
  if (addr)
    flags |= MAP_FIXED_NOREPLACE;
#elif defined(__FreeBSD__)
  // FreeBSD achieves the same with MAP_FIXED and MAP_EXCL.
  if (addr)
    flags |= MAP_FIXED | MAP_EXCL;
#else
  // Targeted mapping not available?
  if (addr)
    return nullptr;
#endif

  void* ptr = mmap(const_cast<void*>(addr), size, PROT_READ | PROT_WRITE | PROT_EXEC, flags, -1, 0);
  if (ptr == MAP_FAILED)
  {
    if (!addr)
      ERROR_LOG("mmap(RWX, {}) for internal buffer failed: {}", size, errno);

    return nullptr;
  }
  else if (addr && ptr != addr) [[unlikely]]
  {
    if (munmap(ptr, size) != 0)
      ERROR_LOG("Failed to munmap() incorrectly hinted allocation: {}", errno);
    return nullptr;
  }

  return ptr;
}

void MemMap::ReleaseJITMemory(void* ptr, size_t size)
{
  if (munmap(ptr, size) != 0)
    ERROR_LOG("Failed to free code pointer {}", static_cast<void*>(ptr));
}

#if defined(CPU_ARCH_ARM32) || defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)

void MemMap::FlushInstructionCache(void* address, size_t size)
{
  __builtin___clear_cache(reinterpret_cast<char*>(address), reinterpret_cast<char*>(address) + size);
}

#endif

SharedMemoryMappingArea::SharedMemoryMappingArea() = default;

SharedMemoryMappingArea::~SharedMemoryMappingArea()
{
  Destroy();
}

bool SharedMemoryMappingArea::Create(size_t size)
{
  AssertMsg(Common::IsAlignedPow2(size, HOST_PAGE_SIZE), "Size is page aligned");
  Destroy();

  void* alloc = mmap(nullptr, size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (alloc == MAP_FAILED)
    return false;

  m_base_ptr = static_cast<u8*>(alloc);
  m_size = size;
  m_num_pages = size / HOST_PAGE_SIZE;
  return true;
}

void SharedMemoryMappingArea::Destroy()
{
  AssertMsg(m_num_mappings == 0, "No mappings left");

  if (m_base_ptr && munmap(m_base_ptr, m_size) != 0)
    Panic("Failed to release shared memory area");

  m_base_ptr = nullptr;
  m_size = 0;
  m_num_pages = 0;
}

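// On POSIX the reservation is just a PROT_NONE anonymous mapping: Map() overlays file-backed pages with
// MAP_FIXED, and Unmap() restores the PROT_NONE anonymous mapping in place, so the overall region never
// has to be recreated.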
u8* SharedMemoryMappingArea::Map(void* file_handle, size_t file_offset, void* map_base, size_t map_size,
                                 PageProtect mode)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  void* const ptr = mmap(map_base, map_size, static_cast<int>(mode), MAP_SHARED | MAP_FIXED,
                         static_cast<int>(reinterpret_cast<intptr_t>(file_handle)), static_cast<off_t>(file_offset));
  if (ptr == MAP_FAILED)
    return nullptr;

  m_num_mappings++;
  return static_cast<u8*>(ptr);
}

bool SharedMemoryMappingArea::Unmap(void* map_base, size_t map_size)
{
  DebugAssert(static_cast<u8*>(map_base) >= m_base_ptr && static_cast<u8*>(map_base) < (m_base_ptr + m_size));

  if (mmap(map_base, map_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED)
    return false;

  m_num_mappings--;
  return true;
}

#endif

void* MemMap::AllocateJITMemory(size_t size)
{
  const u8* base =
    reinterpret_cast<const u8*>(Common::AlignDownPow2(reinterpret_cast<uintptr_t>(GetBaseAddress()), HOST_PAGE_SIZE));
  u8* ptr = nullptr;
#if !defined(CPU_ARCH_ARM64) || !defined(__APPLE__)

#if defined(CPU_ARCH_X64)
  static constexpr size_t assume_binary_size = 64 * 1024 * 1024;
  static constexpr size_t step = 64 * 1024 * 1024;
  static constexpr size_t max_displacement = 0x80000000u;
#elif defined(CPU_ARCH_ARM64) || defined(CPU_ARCH_RISCV64)
  static constexpr size_t assume_binary_size = 16 * 1024 * 1024;
  static constexpr size_t step = 8 * 1024 * 1024;
  static constexpr size_t max_displacement =
    1024 * 1024 * 1024; // technically 4GB, but we don't want to spend that much time trying
#elif defined(CPU_ARCH_ARM32)
  static constexpr size_t assume_binary_size = 8 * 1024 * 1024; // Wishful thinking...
  static constexpr size_t step = 2 * 1024 * 1024;
  static constexpr size_t max_displacement = 32 * 1024 * 1024;
#else
#error Unhandled architecture.
#endif

  const size_t max_displacement_from_start = max_displacement - size;
  Assert(size <= max_displacement);

  // Try to find a region in the max displacement range of the process base address.
  // Assume that the DuckStation binary will at max be some size, release is currently around 12MB on Windows.
  // Therefore the max offset is +/- 12MB + code_size. Try allocating in steps by incrementing the pointer, then if no
  // address range is found, go backwards from the base address (which will probably fail).
  const u8* min_address =
    base - std::min(reinterpret_cast<ptrdiff_t>(base), static_cast<ptrdiff_t>(max_displacement_from_start));
  const u8* max_address = base + max_displacement_from_start;
  VERBOSE_LOG("Base address: {}", static_cast<const void*>(base));
  VERBOSE_LOG("Acceptable address range: {} - {}", static_cast<const void*>(min_address),
              static_cast<const void*>(max_address));

  // Start offset by the expected binary size.
  for (const u8* current_address = base + assume_binary_size;; current_address += step)
  {
    VERBOSE_LOG("Trying {} (displacement 0x{:X})", static_cast<const void*>(current_address),
                static_cast<ptrdiff_t>(current_address - base));
    if ((ptr = static_cast<u8*>(AllocateJITMemoryAt(current_address, size))))
      break;

    if ((reinterpret_cast<uintptr_t>(current_address) + step) > reinterpret_cast<uintptr_t>(max_address) ||
        (reinterpret_cast<uintptr_t>(current_address) + step) < reinterpret_cast<uintptr_t>(current_address))
    {
      break;
    }
  }

  // Try before (will likely fail).
  if (!ptr && reinterpret_cast<uintptr_t>(base) >= step)
  {
    for (const u8* current_address = base - step;; current_address -= step)
    {
      VERBOSE_LOG("Trying {} (displacement 0x{:X})", static_cast<const void*>(current_address),
                  static_cast<ptrdiff_t>(base - current_address));
      if ((ptr = static_cast<u8*>(AllocateJITMemoryAt(current_address, size))))
        break;

      if ((reinterpret_cast<uintptr_t>(current_address) - step) < reinterpret_cast<uintptr_t>(min_address) ||
          (reinterpret_cast<uintptr_t>(current_address) - step) > reinterpret_cast<uintptr_t>(current_address))
      {
        break;
      }
    }
  }

  if (!ptr)
  {
#ifdef CPU_ARCH_X64
    ERROR_LOG("Failed to allocate JIT buffer in range, expect crashes.");
#endif
    if (!(ptr = static_cast<u8*>(AllocateJITMemoryAt(nullptr, size))))
      return ptr;
  }
#else
  // We cannot control where the buffer gets allocated on Apple Silicon. Hope for the best.
  if (!(ptr = static_cast<u8*>(AllocateJITMemoryAt(nullptr, size))))
    return ptr;
#endif

  INFO_LOG("Allocated JIT buffer of size {} at {} (0x{:X} bytes / {} MB away)", size, static_cast<void*>(ptr),
           std::abs(static_cast<ptrdiff_t>(ptr - base)),
           (std::abs(static_cast<ptrdiff_t>(ptr - base)) + (1024 * 1024 - 1)) / (1024 * 1024));

  return ptr;
}
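
// Illustrative usage sketch (not part of this file): a JIT caller such as a recompiler would typically
// pair these helpers roughly as follows. The buffer size, emitted_size, and the shape of the caller are
// hypothetical; BeginCodeWrite()/EndCodeWrite() and FlushInstructionCache() only apply on the platforms
// where they are defined above.
//
//   u8* buffer = static_cast<u8*>(MemMap::AllocateJITMemory(16 * 1024 * 1024));
//   if (buffer)
//   {
//     MemMap::BeginCodeWrite();                              // Apple Silicon: flip JIT pages to writable
//     // ... emit machine code into buffer ...
//     MemMap::EndCodeWrite();                                // back to executable
//     MemMap::FlushInstructionCache(buffer, emitted_size);   // ARM/RISC-V targets only
//     // ... execute, then eventually:
//     MemMap::ReleaseJITMemory(buffer, 16 * 1024 * 1024);
//   }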