page_fault_handler.cpp (8357B)
// SPDX-FileCopyrightText: 2019-2024 Connor McLaughlin <stenzek@gmail.com>
// SPDX-License-Identifier: (GPL-3.0 OR CC-BY-NC-ND-4.0)

#include "page_fault_handler.h"

#include "common/assert.h"
#include "common/crash_handler.h"
#include "common/error.h"
#include "common/log.h"

#include <algorithm>
#include <cstring>
#include <mutex>
#include <vector>

#if defined(_WIN32)
#include "common/windows_headers.h"
#elif defined(__linux__)
#include <signal.h>
#include <ucontext.h>
#include <unistd.h>
#elif defined(__APPLE__) || defined(__FreeBSD__)
#include <signal.h>
#include <unistd.h>
#endif

#ifdef __APPLE__
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/task.h>
#endif

namespace PageFaultHandler {
static std::recursive_mutex s_exception_handler_mutex;
static bool s_in_exception_handler = false;
static bool s_installed = false;
} // namespace PageFaultHandler

#if defined(CPU_ARCH_ARM64)
[[maybe_unused]] static bool IsStoreInstruction(const void* ptr)
{
  u32 bits;
  std::memcpy(&bits, ptr, sizeof(bits));

  // Based on vixl's disassembler Instruction::IsStore().
  // if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed)
  if ((bits & 0x0a000000) != 0x08000000)
    return false;

  // if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed)
  if ((bits & 0x3a000000) == 0x28000000)
  {
    // return Mask(LoadStorePairLBit) == 0
    return (bits & (1 << 22)) == 0;
  }

  switch (bits & 0xC4C00000)
  {
    case 0x00000000: // STRB_w
    case 0x40000000: // STRH_w
    case 0x80000000: // STR_w
    case 0xC0000000: // STR_x
    case 0x04000000: // STR_b
    case 0x44000000: // STR_h
    case 0x84000000: // STR_s
    case 0xC4000000: // STR_d
    case 0x04800000: // STR_q
      return true;

    default:
      return false;
  }
}
#elif defined(CPU_ARCH_RISCV64)
[[maybe_unused]] static bool IsStoreInstruction(const void* ptr)
{
  u32 bits;
  std::memcpy(&bits, ptr, sizeof(bits));

  // All base RV64 store instructions (SB/SH/SW/SD) share the major opcode 0b0100011.
  return ((bits & 0x7Fu) == 0b0100011u);
}
#endif

#if defined(_WIN32)

namespace PageFaultHandler {
static LONG ExceptionHandler(PEXCEPTION_POINTERS exi);
}

LONG PageFaultHandler::ExceptionHandler(PEXCEPTION_POINTERS exi)
{
  // Executing the handler concurrently from multiple threads wouldn't go down well.
  std::unique_lock lock(s_exception_handler_mutex);

  // Prevent recursive exception filtering.
  if (s_in_exception_handler)
    return EXCEPTION_CONTINUE_SEARCH;

  // Only interested in page faults.
  if (exi->ExceptionRecord->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
    return EXCEPTION_CONTINUE_SEARCH;

#if defined(_M_AMD64)
  void* const exception_pc = reinterpret_cast<void*>(exi->ContextRecord->Rip);
#elif defined(_M_ARM64)
  void* const exception_pc = reinterpret_cast<void*>(exi->ContextRecord->Pc);
#else
  void* const exception_pc = nullptr;
#endif

  // For access violations, ExceptionInformation[1] holds the faulting address and
  // ExceptionInformation[0] is 1 when the access was a write.
  void* const exception_address = reinterpret_cast<void*>(exi->ExceptionRecord->ExceptionInformation[1]);
  const bool is_write = exi->ExceptionRecord->ExceptionInformation[0] == 1;

  s_in_exception_handler = true;

  const HandlerResult handled = HandlePageFault(exception_pc, exception_address, is_write);

  s_in_exception_handler = false;

  return (handled == HandlerResult::ContinueExecution) ? EXCEPTION_CONTINUE_EXECUTION : EXCEPTION_CONTINUE_SEARCH;
}

bool PageFaultHandler::Install(Error* error)
{
  std::unique_lock lock(s_exception_handler_mutex);
  AssertMsg(!s_installed, "Page fault handler has already been installed.");

  PVOID handle = AddVectoredExceptionHandler(1, ExceptionHandler);
  if (!handle)
  {
    Error::SetWin32(error, "AddVectoredExceptionHandler() failed: ", GetLastError());
    return false;
  }

  s_installed = true;
  return true;
}

#else

namespace PageFaultHandler {
static void SignalHandler(int sig, siginfo_t* info, void* ctx);
} // namespace PageFaultHandler

void PageFaultHandler::SignalHandler(int sig, siginfo_t* info, void* ctx)
{
#if defined(__linux__)
  void* const exception_address = reinterpret_cast<void*>(info->si_addr);

#if defined(CPU_ARCH_X64)
  void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.gregs[REG_RIP]);
  // Bit 1 of the x86 page fault error code is set when the faulting access was a write.
  const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext.gregs[REG_ERR] & 2) != 0;
#elif defined(CPU_ARCH_ARM32)
  void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.arm_pc);
  const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext.error_code & (1 << 11)) != 0; // DFSR.WnR
#elif defined(CPU_ARCH_ARM64)
  void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.pc);
  const bool is_write = IsStoreInstruction(exception_pc);
#elif defined(CPU_ARCH_RISCV64)
  void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.__gregs[REG_PC]);
  const bool is_write = IsStoreInstruction(exception_pc);
#else
  void* const exception_pc = nullptr;
  const bool is_write = false;
#endif

#elif defined(__APPLE__)

#if defined(CPU_ARCH_X64)
  void* const exception_address =
    reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__faultvaddr);
  void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__ss.__rip);
  const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__err & 2) != 0;
#elif defined(CPU_ARCH_ARM64)
  void* const exception_address = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__es.__far);
  void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext->__ss.__pc);
  const bool is_write = IsStoreInstruction(exception_pc);
#else
  void* const exception_address = reinterpret_cast<void*>(info->si_addr);
  void* const exception_pc = nullptr;
  const bool is_write = false;
#endif

#elif defined(__FreeBSD__)

#if defined(CPU_ARCH_X64)
  void* const exception_address = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_addr);
  void* const exception_pc = reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_rip);
  const bool is_write = (static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_err & 2) != 0;
#elif defined(CPU_ARCH_ARM64)
  // FreeBSD/aarch64 exposes the faulting PC via mc_gpregs.gp_elr; the fault address is reported in si_addr.
  void* const exception_address = reinterpret_cast<void*>(info->si_addr);
  void* const exception_pc =
    reinterpret_cast<void*>(static_cast<ucontext_t*>(ctx)->uc_mcontext.mc_gpregs.gp_elr);
  const bool is_write = IsStoreInstruction(exception_pc);
#else
  void* const exception_address = reinterpret_cast<void*>(info->si_addr);
  void* const exception_pc = nullptr;
  const bool is_write = false;
#endif

#endif

  // Executing the handler concurrently from multiple threads wouldn't go down well.
  s_exception_handler_mutex.lock();

  // Prevent recursive exception filtering.
  HandlerResult result = HandlerResult::ExecuteNextHandler;
  if (!s_in_exception_handler)
  {
    s_in_exception_handler = true;
    result = HandlePageFault(exception_pc, exception_address, is_write);
    s_in_exception_handler = false;
  }

  s_exception_handler_mutex.unlock();

  // Resumes execution right where we left off (re-executes instruction that caused the SIGSEGV).
  if (result == HandlerResult::ContinueExecution)
    return;

  // We couldn't handle it. Pass it off to the crash dumper.
  CrashHandler::CrashSignalHandler(sig, info, ctx);
}

bool PageFaultHandler::Install(Error* error)
{
  std::unique_lock lock(s_exception_handler_mutex);
  AssertMsg(!s_installed, "Page fault handler has already been installed.");

  struct sigaction sa;

  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  sa.sa_sigaction = SignalHandler;
#ifdef __linux__
  // Don't block the signal from executing recursively, we want to fire the original handler.
  sa.sa_flags |= SA_NODEFER;
#endif
  if (sigaction(SIGSEGV, &sa, nullptr) != 0)
  {
    Error::SetErrno(error, "sigaction() for SIGSEGV failed: ", errno);
    return false;
  }
#if defined(__APPLE__) || defined(__aarch64__)
  // MacOS uses SIGBUS for memory permission violations
  if (sigaction(SIGBUS, &sa, nullptr) != 0)
  {
    Error::SetErrno(error, "sigaction() for SIGBUS failed: ", errno);
    return false;
  }
#endif
#ifdef __APPLE__
  // Clear the task-level Mach exception port for bad accesses so they fall through to the signal handler above.
  task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS, MACH_PORT_NULL, EXCEPTION_DEFAULT, 0);
#endif

  s_installed = true;
  return true;
}

#endif
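
// Usage sketch (illustrative only): this translation unit relies on PageFaultHandler::Install(),
// PageFaultHandler::HandlePageFault() and the HandlerResult enum, which it expects "page_fault_handler.h" to
// declare. The MyFastmem helpers below are hypothetical placeholders for however the embedder tracks its
// intentionally protected memory regions; the real implementation lives elsewhere in the codebase.
//
//   PageFaultHandler::HandlerResult PageFaultHandler::HandlePageFault(void* exception_pc, void* fault_address,
//                                                                     bool is_write)
//   {
//     // Only claim faults that hit memory we protected on purpose (e.g. a fastmem arena); anything else goes
//     // to the next handler / crash dumper.
//     if (!MyFastmem::IsInArena(fault_address)) // hypothetical helper
//       return HandlerResult::ExecuteNextHandler;
//
//     // Backpatch the faulting code (or adjust page protections), then retry the instruction.
//     return MyFastmem::Backpatch(exception_pc, fault_address, is_write) ? // hypothetical helper
//              HandlerResult::ContinueExecution :
//              HandlerResult::ExecuteNextHandler;
//   }
//
//   // At startup, before any protected memory can be touched:
//   Error error;
//   if (!PageFaultHandler::Install(&error))
//   {
//     // Report the error and fall back to a path that doesn't rely on fault handling.
//   }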