lj_mcode.c (10565B)
/*
** Machine code management.
** Copyright (C) 2005-2016 Mike Pall. See Copyright Notice in luajit.h
*/

#define lj_mcode_c
#define LUA_CORE

#if defined(__linux__) && !defined(_GNU_SOURCE)
#define _GNU_SOURCE
#endif

#include "lj_obj.h"
#if LJ_HASJIT
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_jit.h"
#include "lj_mcode.h"
#include "lj_trace.h"
#include "lj_dispatch.h"
#endif
#if LJ_HASJIT || LJ_HASFFI
#include "lj_vm.h"
#endif

/* -- OS-specific functions ----------------------------------------------- */

#if LJ_HASJIT || LJ_HASFFI

/* Define this if you want to run LuaJIT with Valgrind. */
#ifdef LUAJIT_USE_VALGRIND
#include <valgrind/valgrind.h>
#endif

#if LJ_TARGET_IOS
void sys_icache_invalidate(void *start, size_t len);
#endif

/* Synchronize data/instruction cache. */
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  UNUSED(start); UNUSED(end);
#elif LJ_TARGET_IOS
  sys_icache_invalidate(start, (char *)end-(char *)start);
#elif LJ_TARGET_PPC
  lj_vm_cachesync(start, end);
#elif defined(__GNUC__)
  __clear_cache(start, end);
#else
#error "Missing builtin to flush instruction cache"
#endif
}

#endif
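/* Illustrative sketch (hypothetical, for exposition only): after new or
** patched machine code has been written, the modified range must be
** synchronized before execution on targets with split instruction/data
** caches. The function below is not part of the API.
*/
#if 0
static void example_flush_after_patch(MCode *p, size_t len)
{
  /* ... overwrite len bytes of machine code at p ... */
  lj_mcode_sync(p, p + len);  /* No-op on x86/x64, cache flush elsewhere. */
}
#endif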
#if LJ_HASJIT

#if LJ_TARGET_WINDOWS

#define WIN32_LEAN_AND_MEAN
#include <windows.h>

#define MCPROT_RW    PAGE_READWRITE
#define MCPROT_RX    PAGE_EXECUTE_READ
#define MCPROT_RWX   PAGE_EXECUTE_READWRITE

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
{
  void *p = VirtualAlloc((void *)hint, sz,
                         MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
  if (!p && !hint)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  return p;
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J); UNUSED(sz);
  VirtualFree(p, 0, MEM_RELEASE);
}

static int mcode_setprot(void *p, size_t sz, DWORD prot)
{
  DWORD oprot;
  return !VirtualProtect(p, sz, prot, &oprot);
}

#elif LJ_TARGET_POSIX

#include <stdlib.h>
#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS  MAP_ANON
#endif

#define MCPROT_RW    (PROT_READ|PROT_WRITE)
#define MCPROT_RX    (PROT_READ|PROT_EXEC)
#define MCPROT_RWX   (PROT_READ|PROT_WRITE|PROT_EXEC)

#if !LJ_4GB
static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) {
    if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL);
    p = NULL;
  }
  return p;
}
#endif

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  UNUSED(J);
#if LJ_4GB
  /* Coalesce the area back to sbrk() VMA */
  mprotect(p, sz, MCPROT_RW);
  free(p);
#else
  munmap(p, sz);
#endif
}

static int mcode_setprot(void *p, size_t sz, int prot)
{
  return mprotect(p, sz, prot);
}

#elif LJ_64

#error "Missing OS support for explicit placement of executable memory"

#else

/* Fallback allocator. This will fail if memory is not executable by default. */
#define LUAJIT_UNPROTECT_MCODE
#define MCPROT_RW    0
#define MCPROT_RX    0
#define MCPROT_RWX   0

static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
{
  UNUSED(hint); UNUSED(prot);
  return lj_mem_new(J->L, sz);
}

static void mcode_free(jit_State *J, void *p, size_t sz)
{
  lj_mem_free(J2G(J), p, sz);
}

#endif

/* -- MCode area protection ----------------------------------------------- */

/* Define this ONLY if page protection twiddling becomes a bottleneck. */
#ifdef LUAJIT_UNPROTECT_MCODE

/* It's generally considered to be a potential security risk to have
** pages with simultaneous write *and* execute access in a process.
**
** Do not even think about using this mode for server processes or
** apps handling untrusted external data (such as a browser).
**
** The security risk is not in LuaJIT itself -- but if an adversary finds
** any *other* flaw in your C application logic, then any RWX memory page
** simplifies writing an exploit considerably.
*/
#define MCPROT_GEN   MCPROT_RWX
#define MCPROT_RUN   MCPROT_RWX

static void mcode_protect(jit_State *J, int prot)
{
  UNUSED(J); UNUSED(prot);
}

#else

/* This is the default behaviour and much safer:
**
** Most of the time the memory pages holding machine code are executable,
** but NONE of them is writable.
**
** The current memory area is marked read-write (but NOT executable) only
** during the short time window while the assembler generates machine code.
*/
#define MCPROT_GEN   MCPROT_RW
#define MCPROT_RUN   MCPROT_RX

/* Protection twiddling failed. Probably due to kernel security. */
static LJ_NOINLINE void mcode_protfail(jit_State *J)
{
  lua_CFunction panic = J2G(J)->panic;
  if (panic) {
    lua_State *L = J->L;
    setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
    panic(L);
  }
}

/* Change protection of MCode area. */
static void mcode_protect(jit_State *J, int prot)
{
  if (J->mcprot != prot) {
    if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot)))
      mcode_protfail(J);
    J->mcprot = prot;
  }
}

#endif
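/* A minimal sketch of the resulting protection cycle, assuming the default
** (non-RWX) scheme above: the current area only ever toggles between two
** states, and J->mcprot caches the active one so redundant mprotect() or
** VirtualProtect() calls are skipped.
**
**   mcode_protect(J, MCPROT_GEN);   RW -- assembler writes machine code
**   mcode_protect(J, MCPROT_RUN);   RX -- code runs, nothing is writable
*/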
/* -- MCode area allocation ----------------------------------------------- */

#if LJ_TARGET_X64
#define mcode_validptr(p)  ((p) && (uintptr_t)(p) < (uintptr_t)1<<47)
#else
#define mcode_validptr(p)  ((p) && (uintptr_t)(p) < 0xffff0000)
#endif

#ifdef LJ_TARGET_JUMPRANGE

/* Get memory within relative jump distance of our code in 64 bit mode. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
  /* Target an address in the static assembler code (64K aligned).
  ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  ** Use half the jump range so every address in the range can reach any other.
  */
#if LJ_TARGET_MIPS
  /* Use the middle of the 256MB-aligned region. */
  uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) +
                     0x08000000u;
#else
  uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
#endif
  const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
#if LJ_4GB
  void *p = aligned_alloc(LJ_PAGESIZE, sz);
  if (mcode_validptr(p) &&
      ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
    return p;
  if (p) free(p);  /* Free badly placed area before giving up. */
#else
  /* First try a contiguous area below the last one. */
  uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  int i;
  for (i = 0; i < 32; i++) {  /* 32 attempts ought to be enough ... */
    if (mcode_validptr(hint)) {
      void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);

      if (mcode_validptr(p) &&
          ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
        return p;
      if (p) mcode_free(J, p, sz);  /* Free badly placed area. */
    }
    /* Next try probing pseudo-random addresses. */
    do {
      hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16;  /* 64K aligned. */
    } while (!(hint + sz < range));
    hint = target + hint - (range>>1);
  }
#endif
  lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  return NULL;
}

#else

/* All memory addresses are reachable by relative jumps. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
#ifdef __OpenBSD__
  /* Allow better executable memory allocation for OpenBSD W^X mode. */
  void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
  if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
    mcode_free(J, p, sz);
    return NULL;
  }
  return p;
#else
  return mcode_alloc_at(J, 0, sz, MCPROT_GEN);
#endif
}

#endif
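/* Worked example for the jump-range math above, assuming x64, where stock
** LuaJIT defines LJ_TARGET_JUMPRANGE as 31 (+-2GB rel32 branches):
**
**   range = (1u << 30) - (1u << 21) = 0x3fe00000  (1GB - 2MB)
**
** An area is accepted if it ends within `range` above `target` or starts
** within `range` below it, so any two accepted addresses differ by less
** than 2GB and remain mutually reachable; the 2MB slack leaves headroom
** for the size of the area itself.
*/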
/* -- MCode area management ----------------------------------------------- */

/* Linked list of MCode areas. */
typedef struct MCLink {
  MCode *next;   /* Next area. */
  size_t size;   /* Size of current area. */
} MCLink;

/* Allocate a new MCode area. */
static void mcode_allocarea(jit_State *J)
{
  MCode *oldarea = J->mcarea;
  size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
  sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  J->mcarea = (MCode *)mcode_alloc(J, sz);
  J->szmcarea = sz;
  J->mcprot = MCPROT_GEN;
  J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
  J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
  ((MCLink *)J->mcarea)->next = oldarea;
  ((MCLink *)J->mcarea)->size = sz;
  J->szallmcarea += sz;
}

/* Free all MCode areas. */
void lj_mcode_free(jit_State *J)
{
  MCode *mc = J->mcarea;
  J->mcarea = NULL;
  J->szallmcarea = 0;
  while (mc) {
    MCode *next = ((MCLink *)mc)->next;
    mcode_free(J, mc, ((MCLink *)mc)->size);
    mc = next;
  }
}

/* -- MCode transactions -------------------------------------------------- */

/* Reserve the remainder of the current MCode area. */
MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
{
  if (!J->mcarea)
    mcode_allocarea(J);
  else
    mcode_protect(J, MCPROT_GEN);
  *lim = J->mcbot;
  return J->mctop;
}

/* Commit the top part of the current MCode area. */
void lj_mcode_commit(jit_State *J, MCode *top)
{
  J->mctop = top;
  mcode_protect(J, MCPROT_RUN);
}

/* Abort the reservation. */
void lj_mcode_abort(jit_State *J)
{
  if (J->mcarea)
    mcode_protect(J, MCPROT_RUN);
}

/* Set/reset protection to allow patching of MCode areas. */
MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
{
#ifdef LUAJIT_UNPROTECT_MCODE
  UNUSED(J); UNUSED(ptr); UNUSED(finish);
  return NULL;
#else
  if (finish) {
    if (J->mcarea == ptr)
      mcode_protect(J, MCPROT_RUN);
    else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
      mcode_protfail(J);
    return NULL;
  } else {
    MCode *mc = J->mcarea;
    /* Try current area first to use the protection cache. */
    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
      mcode_protect(J, MCPROT_GEN);
      return mc;
    }
    /* Otherwise search through the list of MCode areas. */
    for (;;) {
      mc = ((MCLink *)mc)->next;
      lua_assert(mc != NULL);
      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
        if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
          mcode_protfail(J);
        return mc;
      }
    }
  }
#endif
}

/* Limit of MCode reservation reached. */
void lj_mcode_limiterr(jit_State *J, size_t need)
{
  size_t sizemcode, maxmcode;
  lj_mcode_abort(J);
  sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
  sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
  if ((size_t)need > sizemcode)
    lj_trace_err(J, LJ_TRERR_MCODEOV);  /* Too long for any area. */
  if (J->szallmcarea + sizemcode > maxmcode)
    lj_trace_err(J, LJ_TRERR_MCODEAL);
  mcode_allocarea(J);
  lj_trace_err(J, LJ_TRERR_MCODELM);  /* Retry with new area. */
}

#endif
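/* A minimal sketch of how a client such as the trace assembler is expected
** to drive the transaction API above (the function and emit loop are
** hypothetical): LuaJIT's assembler emits machine code backwards, from
** mctop down towards mcbot, with the MCLink header sitting at the bottom
** of each area.
*/
#if 0
static void example_assemble(jit_State *J)
{
  MCode *lim;
  MCode *p = lj_mcode_reserve(J, &lim);  /* Area is now writable (RW). */
  /* ... emit instructions backwards, i.e. *--p = ..., while p > lim ... */
  if (0 /* emission failed? */)
    lj_mcode_abort(J);      /* Drop the reservation, back to RX. */
  else
    lj_mcode_commit(J, p);  /* Publish the new mctop, back to RX. */
}
#endif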