ljx

FORK: LuaJIT with native 5.2 and 5.3 support
git clone https://git.neptards.moe/neptards/ljx.git

lj_record.c (87882B)


      1 /*
      2 ** Trace recorder (bytecode -> SSA IR).
      3 ** Copyright (C) 2005-2016 Mike Pall. See Copyright Notice in luajit.h
      4 */
      5 
      6 #define lj_record_c
      7 #define LUA_CORE
      8 
      9 #include "lj_obj.h"
     10 
     11 #if LJ_HASJIT
     12 
     13 #include "lj_err.h"
     14 #include "lj_str.h"
     15 #include "lj_tab.h"
     16 #include "lj_meta.h"
     17 #include "lj_frame.h"
     18 #if LJ_HASFFI
     19 #include "lj_ctype.h"
     20 #endif
     21 #include "lj_bc.h"
     22 #include "lj_ff.h"
     23 #if LJ_HASPROFILE
     24 #include "lj_debug.h"
     25 #endif
     26 #include "lj_ir.h"
     27 #include "lj_jit.h"
     28 #include "lj_ircall.h"
     29 #include "lj_iropt.h"
     30 #include "lj_trace.h"
     31 #include "lj_record.h"
     32 #include "lj_ffrecord.h"
     33 #include "lj_snap.h"
     34 #include "lj_dispatch.h"
     35 #include "lj_vm.h"
     36 #include "ljx_bitwise.h"
     37 
     38 /* Some local macros to save typing. Undef'd at the end. */
     39 #define IR(ref)			(&J->cur.ir[(ref)])
     40 
     41 /* Pass IR on to next optimization in chain (FOLD). */
     42 #define emitir(ot, a, b)	(lj_ir_set(J, (ot), (a), (b)), lj_opt_fold(J))
     43 
     44 /* Emit raw IR without passing through optimizations. */
     45 #define emitir_raw(ot, a, b)	(lj_ir_set(J, (ot), (a), (b)), lj_ir_emit(J))
     46 
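        /* A usage sketch (the pattern recurs throughout this file): emit a
        ** guarded integer equality of a ref against a constant,
        **
        **   emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0));
        **
        ** The IRT/IRTG/IRTGI macros pack opcode, guard flag and result type
        ** into the 'ot' operand. Because emitir() goes through lj_opt_fold(),
        ** the result may be a constant-folded value or a CSEd existing ref
        ** rather than a freshly emitted instruction.
        */
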
     47 /* -- Sanity checks ------------------------------------------------------- */
     48 
     49 #ifdef LUA_USE_ASSERT
     50 /* Sanity check the whole IR -- sloooow. */
     51 static void rec_check_ir(jit_State *J)
     52 {
     53   IRRef i, nins = J->cur.nins, nk = J->cur.nk;
     54   lua_assert(nk <= REF_BIAS && nins >= REF_BIAS && nins < 65536);
     55   for (i = nk; i < nins; i++) {
     56     IRIns *ir = IR(i);
     57     uint32_t mode = lj_ir_mode[ir->o];
     58     IRRef op1 = ir->op1;
     59     IRRef op2 = ir->op2;
     60     switch (irm_op1(mode)) {
     61     case IRMnone: lua_assert(op1 == 0); break;
     62     case IRMref: lua_assert(op1 >= nk);
     63       lua_assert(i >= REF_BIAS ? op1 < i : op1 > i); break;
     64     case IRMlit: break;
     65     case IRMcst: lua_assert(i < REF_BIAS);
     66       if (irt_is64(ir->t) && ir->o != IR_KNULL)
     67 	i++;
     68       continue;
     69     }
     70     switch (irm_op2(mode)) {
     71     case IRMnone: lua_assert(op2 == 0); break;
     72     case IRMref: lua_assert(op2 >= nk);
     73       lua_assert(i >= REF_BIAS ? op2 < i : op2 > i); break;
     74     case IRMlit: break;
     75     case IRMcst: lua_assert(0); break;
     76     }
     77     if (ir->prev) {
     78       lua_assert(ir->prev >= nk);
     79       lua_assert(i >= REF_BIAS ? ir->prev < i : ir->prev > i);
     80       lua_assert(ir->o == IR_NOP || IR(ir->prev)->o == ir->o);
     81     }
     82   }
     83 }
     84 
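        /* Background for the checks above: the IR is a single biased array.
        ** Constants grow downwards from REF_BIAS, instructions grow upwards,
        ** so any ref below REF_BIAS is a constant. Operands always point at
        ** older IR: below the current index for instructions, above it for
        ** constants (e.g. KSLOT referencing its key). 64 bit constants
        ** occupy two adjacent slots, hence the extra i++ in the IRMcst case.
        */
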
     85 /* Compare stack slots and frames of the recorder and the VM. */
     86 static void rec_check_slots(jit_State *J)
     87 {
     88   BCReg s, nslots = J->baseslot + J->maxslot;
     89   int32_t depth = 0;
     90   cTValue *base = J->L->base - J->baseslot;
     91   lua_assert(J->baseslot >= 1+LJ_FR2 && J->baseslot < LJ_MAX_JSLOTS);
     92   lua_assert(J->baseslot == 1+LJ_FR2 || (J->slot[J->baseslot-1] & TREF_FRAME));
     93   lua_assert(nslots < LJ_MAX_JSLOTS);
     94   for (s = 0; s < nslots; s++) {
     95     TRef tr = J->slot[s];
     96     if (tr) {
     97       cTValue *tv = &base[s];
     98       IRRef ref = tref_ref(tr);
     99       IRIns *ir = NULL;  /* Silence compiler. */
    100       if (!LJ_FR2 || ref || !(tr & (TREF_FRAME | TREF_CONT))) {
    101 	lua_assert(ref >= J->cur.nk && ref < J->cur.nins);
    102 	ir = IR(ref);
    103 	lua_assert(irt_t(ir->t) == tref_t(tr));
    104       }
    105       if (s == 0) {
    106 	lua_assert(tref_isfunc(tr));
    107 #if LJ_FR2
    108       } else if (s == 1) {
    109 	lua_assert(0);
    110 #endif
    111       } else if ((tr & TREF_FRAME)) {
    112 	GCfunc *fn = gco2func(frame_gc(tv));
    113 	BCReg delta = (BCReg)(tv - frame_prev(tv));
    114 #if LJ_FR2
    115 	if (ref)
    116 	  lua_assert(ir_knum(ir)->u64 == tv->u64);
    117 	tr = J->slot[s-1];
    118 	ir = IR(tref_ref(tr));
    119 #endif
    120 	lua_assert(tref_isfunc(tr));
    121 	if (tref_isk(tr)) lua_assert(fn == ir_kfunc(ir));
    122 	lua_assert(s > delta + LJ_FR2 ? (J->slot[s-delta] & TREF_FRAME)
    123 				      : (s == delta + LJ_FR2));
    124 	depth++;
    125       } else if ((tr & TREF_CONT)) {
    126 #if LJ_FR2
    127 	if (ref)
    128 	  lua_assert(ir_knum(ir)->u64 == tv->u64);
    129 #else
    130 	lua_assert(ir_kptr(ir) == gcrefp(tv->gcr, void));
    131 #endif
    132 	lua_assert((J->slot[s+1+LJ_FR2] & TREF_FRAME));
    133 	depth++;
    134       } else {
    135 	if (tvisnumber(tv))
    136 	  lua_assert(tref_isnumber(tr));  /* Could be IRT_INT etc., too. */
    137 	else
    138 	  lua_assert(itype2irt(tv) == tref_type(tr));
    139 	if (tref_isk(tr)) {  /* Compare constants. */
    140 	  TValue tvk;
    141 	  lj_ir_kvalue(J->L, &tvk, ir);
    142 	  if (!(tvisnum(&tvk) && tvisnan(&tvk)))
    143 	    lua_assert(lj_obj_equal(tv, &tvk));
    144 	  else
    145 	    lua_assert(tvisnum(tv) && tvisnan(tv));
    146 	}
    147       }
    148     }
    149   }
    150   lua_assert(J->framedepth == depth);
    151 }
    152 #endif
    153 
    154 /* -- Type handling and specialization ------------------------------------ */
    155 
    156 /* Note: these functions return tagged references (TRef). */
    157 
    158 /* Specialize a slot to a specific type. Note: slot can be negative! */
    159 static TRef sloadt(jit_State *J, int32_t slot, IRType t, int mode)
    160 {
    161   /* Caller may set IRT_GUARD in t. */
    162   TRef ref = emitir_raw(IRT(IR_SLOAD, t), (int32_t)J->baseslot+slot, mode);
    163   J->base[slot] = ref;
    164   return ref;
    165 }
    166 
    167 /* Specialize a slot to the runtime type. Note: slot can be negative! */
    168 static TRef sload(jit_State *J, int32_t slot)
    169 {
    170   IRType t = itype2irt(&J->L->base[slot]);
    171   TRef ref = emitir_raw(IRTG(IR_SLOAD, t), (int32_t)J->baseslot+slot,
    172 			IRSLOAD_TYPECHECK);
    173   if (irtype_ispri(t)) ref = TREF_PRI(t);  /* Canonicalize primitive refs. */
    174   J->base[slot] = ref;
    175   return ref;
    176 }
    177 
    178 /* Get TRef from slot. Load slot and specialize if not done already. */
    179 #define getslot(J, s)	(J->base[(s)] ? J->base[(s)] : sload(J, (int32_t)(s)))
    180 
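        /* Slot caching convention: J->base points into J->slot[] at the
        ** current frame base. Each entry caches the TRef for one stack slot;
        ** 0 means the slot has not been loaded onto the trace yet, and
        ** getslot() lazily emits the (type-checked) SLOAD on first use.
        ** Negative indices address the frame slots below the base, e.g.
        ** J->base[-1-LJ_FR2] holds the currently executing function.
        */
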
    181 /* Get TRef for current function. */
    182 static TRef getcurrf(jit_State *J)
    183 {
    184   if (J->base[-1-LJ_FR2])
    185     return J->base[-1-LJ_FR2];
    186   lua_assert(J->baseslot == 1+LJ_FR2);
    187   return sloadt(J, -1-LJ_FR2, IRT_FUNC, IRSLOAD_READONLY);
    188 }
    189 
    190 /* Compare for raw object equality.
    191 ** Returns 0 if the objects are the same.
    192 ** Returns 1 if they are different, but the same type.
    193 ** Returns 2 for two different types.
    194 ** Comparisons between primitives always return 1 -- no caller cares about it.
    195 */
    196 int lj_record_objcmp(jit_State *J, TRef a, TRef b, cTValue *av, cTValue *bv)
    197 {
    198   int diff = !lj_obj_equal(av, bv);
    199   if (!tref_isk2(a, b)) {  /* Shortcut, also handles primitives. */
    200     IRType ta = tref_isinteger(a) ? IRT_INT : tref_type(a);
    201     IRType tb = tref_isinteger(b) ? IRT_INT : tref_type(b);
    202     if (ta != tb) {
    203       /* Widen mixed number/int comparisons to number/number comparison. */
    204       if (ta == IRT_INT && tb == IRT_NUM) {
    205 	a = emitir(IRTN(IR_CONV), a, IRCONV_NUM_INT);
    206 	ta = IRT_NUM;
    207       } else if (ta == IRT_NUM && tb == IRT_INT) {
    208 	b = emitir(IRTN(IR_CONV), b, IRCONV_NUM_INT);
    209       } else {
    210 	return 2;  /* Two different types are never equal. */
    211       }
    212     }
    213     emitir(IRTG(diff ? IR_NE : IR_EQ, ta), a, b);
    214   }
    215   return diff;
    216 }
    217 
    218 /* Constify a value. Returns 0 for non-representable object types. */
    219 TRef lj_record_constify(jit_State *J, cTValue *o)
    220 {
    221   if (tvisgcv(o))
    222     return lj_ir_kgc(J, gcV(o), itype2irt(o));
    223   else if (tvisint(o))
    224     return lj_ir_kint(J, intV(o));
    225   else if (tvisnum(o))
    226     return lj_ir_knumint(J, numV(o));
    227   else if (tvisbool(o))
    228     return TREF_PRI(itype2irt(o));
    229   else
    230     return 0;  /* Can't represent lightuserdata (pointless). */
    231 }
    232 
    233 /* -- Record loop ops ----------------------------------------------------- */
    234 
    235 /* Loop event. */
    236 typedef enum {
    237   LOOPEV_LEAVE,		/* Loop is left or not entered. */
    238   LOOPEV_ENTERLO,	/* Loop is entered with a low iteration count left. */
    239   LOOPEV_ENTER		/* Loop is entered. */
    240 } LoopEvent;
    241 
    242 /* Canonicalize slots: convert integers to numbers. */
    243 static void canonicalize_slots(jit_State *J)
    244 {
    245   BCReg s;
    246   if (LJ_DUALNUM) return;
    247   for (s = J->baseslot+J->maxslot-1; s >= 1; s--) {
    248     TRef tr = J->slot[s];
    249     if (tref_isinteger(tr)) {
    250       IRIns *ir = IR(tref_ref(tr));
    251       if (!(ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_READONLY)))
    252 	J->slot[s] = emitir(IRTN(IR_CONV), tr, IRCONV_NUM_INT);
    253     }
    254   }
    255 }
    256 
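        /* Rationale: in non-DUALNUM builds the VM stores only doubles in
        ** stack slots, so any value narrowed to an integer on trace must be
        ** widened back before the final snapshot. Read-only SLOADs are
        ** exempt: such slots are never written back on exit, since the VM
        ** slot still holds its original value.
        */
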
    257 /* Stop recording. */
    258 void lj_record_stop(jit_State *J, TraceLink linktype, TraceNo lnk)
    259 {
    260 #ifdef LUAJIT_ENABLE_TABLE_BUMP
    261   if (J->retryrec)
    262     lj_trace_err(J, LJ_TRERR_RETRY);
    263 #endif
    264   lj_trace_end(J);
    265   J->cur.linktype = (uint8_t)linktype;
    266   J->cur.link = (uint16_t)lnk;
    267   /* Looping back at the same stack level? */
    268   if (lnk == J->cur.traceno && J->framedepth + J->retdepth == 0) {
    269     if ((J->flags & JIT_F_OPT_LOOP))  /* Shall we try to create a loop? */
    270       goto nocanon;  /* Do not canonicalize or we lose the narrowing. */
    271     if (J->cur.root)  /* Otherwise ensure we always link to the root trace. */
    272       J->cur.link = J->cur.root;
    273   }
    274   canonicalize_slots(J);
    275 nocanon:
    276   /* Note: all loop ops must set J->pc to the following instruction! */
    277   lj_snap_add(J);  /* Add loop snapshot. */
    278   J->needsnap = 0;
    279   J->mergesnap = 1;  /* In case recording continues. */
    280 }
    281 
    282 /* Search bytecode backwards for a int/num constant slot initializer. */
    283 static TRef find_kinit(jit_State *J, const BCIns *endpc, BCReg slot, IRType t)
    284 {
    285   /* This algorithm is rather simplistic and assumes quite a bit about
    286   ** how the bytecode is generated. It works fine for FORI initializers,
    287   ** but it won't necessarily work in other cases (e.g. iterator arguments).
    288   ** It doesn't do anything fancy, either (like backpropagating MOVs).
    289   */
    290   const BCIns *pc, *startpc = proto_bc(J->pt);
    291   for (pc = endpc-1; pc > startpc; pc--) {
    292     BCIns ins = *pc;
    293     BCOp op = bc_op(ins);
    294     /* First try to find the last instruction that stores to this slot. */
    295     if (bcmode_a(op) == BCMbase && bc_a(ins) <= slot) {
    296       return 0;  /* Multiple results, e.g. from a CALL or KNIL. */
    297     } else if (bcmode_a(op) == BCMdst && bc_a(ins) == slot) {
    298       if (op == BC_KSHORT || op == BC_KNUM) {  /* Found const. initializer. */
    299 	/* Now try to verify there's no forward jump across it. */
    300 	const BCIns *kpc = pc;
    301 	for (; pc > startpc; pc--)
    302 	  if (bc_op(*pc) == BC_JMP) {
    303 	    const BCIns *target = pc+bc_j(*pc)+1;
    304 	    if (target > kpc && target <= endpc)
    305 	      return 0;  /* Conditional assignment. */
    306 	  }
    307 	if (op == BC_KSHORT) {
    308 	  int32_t k = (int32_t)(int16_t)bc_d(ins);
    309 	  return t == IRT_INT ? lj_ir_kint(J, k) : lj_ir_knum(J, (lua_Number)k);
    310 	} else {
    311 	  cTValue *tv = proto_knumtv(J->pt, bc_d(ins));
    312 	  if (t == IRT_INT) {
    313 	    int32_t k = numberVint(tv);
    314 	    if (tvisint(tv) || numV(tv) == (lua_Number)k)  /* -0 is ok here. */
    315 	      return lj_ir_kint(J, k);
    316 	    return 0;  /* Type mismatch. */
    317 	  } else {
    318 	    return lj_ir_knum(J, numberVnum(tv));
    319 	  }
    320 	}
    321       }
    322       return 0;  /* Non-constant initializer. */
    323     }
    324   }
    325   return 0;  /* No assignment to this slot found? */
    326 }
    327 
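        /* Illustrative bytecode shape (shape only, not a verbatim dump) for
        **   for i=1,100 do ... end
        **
        **   KSHORT r+0,   1   ; FORL_IDX init
        **   KSHORT r+1, 100   ; FORL_STOP
        **   KSHORT r+2,   1   ; FORL_STEP
        **   FORI   r, => exit
        **
        ** Scanning backwards from the FORI finds the KSHORT stores, so stop
        ** and step can be treated as constants instead of emitting SLOADs
        ** with type checks.
        */
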
    328 /* Load and optionally convert a FORI argument from a slot. */
    329 static TRef fori_load(jit_State *J, BCReg slot, IRType t, int mode)
    330 {
    331   int conv = (tvisint(&J->L->base[slot]) != (t==IRT_INT)) ? IRSLOAD_CONVERT : 0;
    332   return sloadt(J, (int32_t)slot,
    333 		t + (((mode & IRSLOAD_TYPECHECK) ||
    334 		      (conv && t == IRT_INT && !(mode >> 16))) ?
    335 		     IRT_GUARD : 0),
    336 		mode + conv);
    337 }
    338 
    339 /* Peek before FORI to find a const initializer. Otherwise load from slot. */
    340 static TRef fori_arg(jit_State *J, const BCIns *fori, BCReg slot,
    341 		     IRType t, int mode)
    342 {
    343   TRef tr = J->base[slot];
    344   if (!tr) {
    345     tr = find_kinit(J, fori, slot, t);
    346     if (!tr)
    347       tr = fori_load(J, slot, t, mode);
    348   }
    349   return tr;
    350 }
    351 
    352 /* Return the direction of the FOR loop iterator.
    353 ** It's important to exactly reproduce the semantics of the interpreter.
    354 */
    355 static int rec_for_direction(cTValue *o)
    356 {
    357   return (tvisint(o) ? intV(o) : (int32_t)o->u32.hi) >= 0;
    358 }
    359 
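        /* For a double step this reads the sign bit via the high word of
        ** the TValue, so the test agrees bit-for-bit with the interpreter;
        ** in particular a step of -0.0 counts as a negative direction.
        */
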
    360 /* Simulate the runtime behavior of the FOR loop iterator. */
    361 static LoopEvent rec_for_iter(IROp *op, cTValue *o, int isforl)
    362 {
    363   lua_Number stopv = numberVnum(&o[FORL_STOP]);
    364   lua_Number idxv = numberVnum(&o[FORL_IDX]);
    365   lua_Number stepv = numberVnum(&o[FORL_STEP]);
    366   if (isforl)
    367     idxv += stepv;
    368   if (rec_for_direction(&o[FORL_STEP])) {
    369     if (idxv <= stopv) {
    370       *op = IR_LE;
    371       return idxv + 2*stepv > stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
    372     }
    373     *op = IR_GT; return LOOPEV_LEAVE;
    374   } else {
    375     if (stopv <= idxv) {
    376       *op = IR_GE;
    377       return idxv + 2*stepv < stopv ? LOOPEV_ENTERLO : LOOPEV_ENTER;
    378     }
    379     *op = IR_LT; return LOOPEV_LEAVE;
    380   }
    381 }
    382 
    383 /* Record checks for FOR loop overflow and step direction. */
    384 static void rec_for_check(jit_State *J, IRType t, int dir,
    385 			  TRef stop, TRef step, int init)
    386 {
    387   if (!tref_isk(step)) {
    388     /* Non-constant step: need a guard for the direction. */
    389     TRef zero = (t == IRT_INT) ? lj_ir_kint(J, 0) : lj_ir_knum_zero(J);
    390     emitir(IRTG(dir ? IR_GE : IR_LT, t), step, zero);
    391     /* Add hoistable overflow checks for a narrowed FORL index. */
    392     if (init && t == IRT_INT) {
    393       if (tref_isk(stop)) {
    394 	/* Constant stop: optimize check away or to a range check for step. */
    395 	int32_t k = IR(tref_ref(stop))->i;
    396 	if (dir) {
    397 	  if (k > 0)
    398 	    emitir(IRTGI(IR_LE), step, lj_ir_kint(J, (int32_t)0x7fffffff-k));
    399 	} else {
    400 	  if (k < 0)
    401 	    emitir(IRTGI(IR_GE), step, lj_ir_kint(J, (int32_t)0x80000000-k));
    402 	}
    403       } else {
    404 	/* Stop+step variable: need full overflow check. */
    405 	TRef tr = emitir(IRTGI(IR_ADDOV), step, stop);
    406 	emitir(IRTI(IR_USE), tr, 0);  /* ADDOV is weak. Avoid dead result. */
    407       }
    408     }
    409   } else if (init && t == IRT_INT && !tref_isk(stop)) {
    410     /* Constant step: optimize overflow check to a range check for stop. */
    411     int32_t k = IR(tref_ref(step))->i;
    412     k = (int32_t)(dir ? 0x7fffffff : 0x80000000) - k;
    413     emitir(IRTGI(dir ? IR_LE : IR_GE), stop, lj_ir_kint(J, k));
    414   }
    415 }
    416 
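        /* Worked example for the constant-step branch: with step = +1 the
        ** emitted guard is  stop <= 0x7fffffff - 1 = 0x7ffffffe, which
        ** guarantees idx+step cannot overflow int32 while idx <= stop. The
        ** variable-step cases establish the same property with a direction
        ** guard plus either a range check or a runtime ADDOV.
        */
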
    417 /* Record a FORL instruction. */
    418 static void rec_for_loop(jit_State *J, const BCIns *fori, ScEvEntry *scev,
    419 			 int init)
    420 {
    421   BCReg ra = bc_a(*fori);
    422   cTValue *tv = &J->L->base[ra];
    423   TRef idx = J->base[ra+FORL_IDX];
    424   IRType t = idx ? tref_type(idx) :
    425 	     (init || LJ_DUALNUM) ? lj_opt_narrow_forl(J, tv) : IRT_NUM;
    426   int mode = IRSLOAD_INHERIT +
    427     ((!LJ_DUALNUM || tvisint(tv) == (t == IRT_INT)) ? IRSLOAD_READONLY : 0);
    428   TRef stop = fori_arg(J, fori, ra+FORL_STOP, t, mode);
    429   TRef step = fori_arg(J, fori, ra+FORL_STEP, t, mode);
    430   int tc, dir = rec_for_direction(&tv[FORL_STEP]);
    431   lua_assert(bc_op(*fori) == BC_FORI || bc_op(*fori) == BC_JFORI);
    432   scev->t.irt = t;
    433   scev->dir = dir;
    434   scev->stop = tref_ref(stop);
    435   scev->step = tref_ref(step);
    436   rec_for_check(J, t, dir, stop, step, init);
    437   scev->start = tref_ref(find_kinit(J, fori, ra+FORL_IDX, IRT_INT));
    438   tc = (LJ_DUALNUM &&
    439 	!(scev->start && irref_isk(scev->stop) && irref_isk(scev->step) &&
    440 	  tvisint(&tv[FORL_IDX]) == (t == IRT_INT))) ?
    441 	IRSLOAD_TYPECHECK : 0;
    442   if (tc) {
    443     J->base[ra+FORL_STOP] = stop;
    444     J->base[ra+FORL_STEP] = step;
    445   }
    446   if (!idx)
    447     idx = fori_load(J, ra+FORL_IDX, t,
    448 		    IRSLOAD_INHERIT + tc + (J->scev.start << 16));
    449   if (!init)
    450     J->base[ra+FORL_IDX] = idx = emitir(IRT(IR_ADD, t), idx, step);
    451   J->base[ra+FORL_EXT] = idx;
    452   scev->idx = tref_ref(idx);
    453   setmref(scev->pc, fori);
    454   J->maxslot = ra+FORL_EXT+1;
    455 }
    456 
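        /* FOR loop slot layout relative to ra (FORL_* constants, cf.
        ** lj_bc.h):
        **   ra+FORL_IDX   internal copy of the index
        **   ra+FORL_STOP  loop limit
        **   ra+FORL_STEP  step
        **   ra+FORL_EXT   externally visible loop variable
        ** which is why J->maxslot is bumped to ra+FORL_EXT+1 above.
        */
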
    457 /* Record FORL/JFORL or FORI/JFORI. */
    458 static LoopEvent rec_for(jit_State *J, const BCIns *fori, int isforl)
    459 {
    460   BCReg ra = bc_a(*fori);
    461   TValue *tv = &J->L->base[ra];
    462   TRef *tr = &J->base[ra];
    463   IROp op;
    464   LoopEvent ev;
    465   TRef stop;
    466   IRType t;
    467   if (isforl) {  /* Handle FORL/JFORL opcodes. */
    468     TRef idx = tr[FORL_IDX];
    469     if (mref(J->scev.pc, const BCIns) == fori && tref_ref(idx) == J->scev.idx) {
    470       t = J->scev.t.irt;
    471       stop = J->scev.stop;
    472       idx = emitir(IRT(IR_ADD, t), idx, J->scev.step);
    473       tr[FORL_EXT] = tr[FORL_IDX] = idx;
    474     } else {
    475       ScEvEntry scev;
    476       rec_for_loop(J, fori, &scev, 0);
    477       t = scev.t.irt;
    478       stop = scev.stop;
    479     }
    480   } else {  /* Handle FORI/JFORI opcodes. */
    481     BCReg i;
    482     lj_meta_for(J->L, tv);
    483     t = (LJ_DUALNUM || tref_isint(tr[FORL_IDX])) ? lj_opt_narrow_forl(J, tv) :
    484 						   IRT_NUM;
    485     for (i = FORL_IDX; i <= FORL_STEP; i++) {
    486       if (!tr[i]) sload(J, ra+i);
    487       lua_assert(tref_isnumber_str(tr[i]));
    488       if (tref_isstr(tr[i]))
    489 	tr[i] = emitir(IRTG(IR_STRTO, IRT_NUM), tr[i], 0);
    490       if (t == IRT_INT) {
    491 	if (!tref_isinteger(tr[i]))
    492 	  tr[i] = emitir(IRTGI(IR_CONV), tr[i], IRCONV_INT_NUM|IRCONV_CHECK);
    493       } else {
    494 	if (!tref_isnum(tr[i]))
    495 	  tr[i] = emitir(IRTN(IR_CONV), tr[i], IRCONV_NUM_INT);
    496       }
    497     }
    498     tr[FORL_EXT] = tr[FORL_IDX];
    499     stop = tr[FORL_STOP];
    500     rec_for_check(J, t, rec_for_direction(&tv[FORL_STEP]),
    501 		  stop, tr[FORL_STEP], 1);
    502   }
    503 
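          /* Guard/snapshot ordering: the pc/maxslot set before lj_snap_add()
          ** describe the outcome NOT taken at record time, i.e. the exit
          ** path of the guard emitted below. Afterwards they are flipped to
          ** the outcome that recording actually follows.
          */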
    504   ev = rec_for_iter(&op, tv, isforl);
    505   if (ev == LOOPEV_LEAVE) {
    506     J->maxslot = ra+FORL_EXT+1;
    507     J->pc = fori+1;
    508   } else {
    509     J->maxslot = ra;
    510     J->pc = fori+bc_j(*fori)+1;
    511   }
    512   lj_snap_add(J);
    513 
    514   emitir(IRTG(op, t), tr[FORL_IDX], stop);
    515 
    516   if (ev == LOOPEV_LEAVE) {
    517     J->maxslot = ra;
    518     J->pc = fori+bc_j(*fori)+1;
    519   } else {
    520     J->maxslot = ra+FORL_EXT+1;
    521     J->pc = fori+1;
    522   }
    523   J->needsnap = 1;
    524   return ev;
    525 }
    526 
    527 /* Record ITERL/JITERL. */
    528 static LoopEvent rec_iterl(jit_State *J, const BCIns iterins)
    529 {
    530   BCReg ra = bc_a(iterins);
    531   if (!tref_isnil(getslot(J, ra))) {  /* Looping back? */
    532     J->base[ra-1] = J->base[ra];  /* Copy result of ITERC to control var. */
    533     J->maxslot = ra-1+bc_b(J->pc[-1]);
    534     J->pc += bc_j(iterins)+1;
    535     return LOOPEV_ENTER;
    536   } else {
    537     J->maxslot = ra-3;
    538     J->pc++;
    539     return LOOPEV_LEAVE;
    540   }
    541 }
    542 
    543 /* Record LOOP/JLOOP. Now, that was easy. */
    544 static LoopEvent rec_loop(jit_State *J, BCReg ra)
    545 {
    546   if (ra < J->maxslot) J->maxslot = ra;
    547   J->pc++;
    548   return LOOPEV_ENTER;
    549 }
    550 
    551 /* Check if a loop repeatedly failed to trace because it didn't loop back. */
    552 static int innerloopleft(jit_State *J, const BCIns *pc)
    553 {
    554   ptrdiff_t i;
    555   for (i = 0; i < PENALTY_SLOTS; i++)
    556     if (mref(J->penalty[i].pc, const BCIns) == pc) {
    557       if ((J->penalty[i].reason == LJ_TRERR_LLEAVE ||
    558 	   J->penalty[i].reason == LJ_TRERR_LINNER) &&
    559 	  J->penalty[i].val >= 2*PENALTY_MIN)
    560 	return 1;
    561       break;
    562     }
    563   return 0;
    564 }
    565 
    566 /* Handle the case when an interpreted loop op is hit. */
    567 static void rec_loop_interp(jit_State *J, const BCIns *pc, LoopEvent ev)
    568 {
    569   if (J->parent == 0 && J->exitno == 0) {
    570     if (pc == J->startpc && J->framedepth + J->retdepth == 0) {
    571       /* Same loop? */
    572       if (ev == LOOPEV_LEAVE)  /* Must loop back to form a root trace. */
    573 	lj_trace_err(J, LJ_TRERR_LLEAVE);
    574       lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno);  /* Looping trace. */
    575     } else if (ev != LOOPEV_LEAVE) {  /* Entering inner loop? */
    576       /* It's usually better to abort here and wait until the inner loop
    577       ** is traced. But if the inner loop repeatedly didn't loop back,
    578       ** this indicates a low trip count. In this case try unrolling
    579       ** an inner loop even in a root trace. But it's better to be a bit
    580       ** more conservative here and only do it for very short loops.
    581       */
    582       if (bc_j(*pc) != -1 && !innerloopleft(J, pc))
    583 	lj_trace_err(J, LJ_TRERR_LINNER);  /* Root trace hit an inner loop. */
    584       if ((ev != LOOPEV_ENTERLO &&
    585 	   J->loopref && J->cur.nins - J->loopref > 24) || --J->loopunroll < 0)
    586 	lj_trace_err(J, LJ_TRERR_LUNROLL);  /* Limit loop unrolling. */
    587       J->loopref = J->cur.nins;
    588     }
    589   } else if (ev != LOOPEV_LEAVE) {  /* Side trace enters an inner loop. */
    590     J->loopref = J->cur.nins;
    591     if (--J->loopunroll < 0)
    592       lj_trace_err(J, LJ_TRERR_LUNROLL);  /* Limit loop unrolling. */
    593   }  /* Side trace continues across a loop that's left or not entered. */
    594 }
    595 
    596 /* Handle the case when an already compiled loop op is hit. */
    597 static void rec_loop_jit(jit_State *J, TraceNo lnk, LoopEvent ev)
    598 {
    599   if (J->parent == 0 && J->exitno == 0) {  /* Root trace hit an inner loop. */
    600     /* Better let the inner loop spawn a side trace back here. */
    601     lj_trace_err(J, LJ_TRERR_LINNER);
    602   } else if (ev != LOOPEV_LEAVE) {  /* Side trace enters a compiled loop. */
    603     J->instunroll = 0;  /* Cannot continue across a compiled loop op. */
    604     if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
    605       lj_record_stop(J, LJ_TRLINK_LOOP, J->cur.traceno);  /* Form extra loop. */
    606     else
    607       lj_record_stop(J, LJ_TRLINK_ROOT, lnk);  /* Link to the loop. */
    608   }  /* Side trace continues across a loop that's left or not entered. */
    609 }
    610 
    611 /* -- Record profiler hook checks ----------------------------------------- */
    612 
    613 #if LJ_HASPROFILE
    614 
    615 /* Need to insert profiler hook check? */
    616 static int rec_profile_need(jit_State *J, GCproto *pt, const BCIns *pc)
    617 {
    618   GCproto *ppt;
    619   lua_assert(J->prof_mode == 'f' || J->prof_mode == 'l');
    620   if (!pt)
    621     return 0;
    622   ppt = J->prev_pt;
    623   J->prev_pt = pt;
    624   if (pt != ppt && ppt) {
    625     J->prev_line = -1;
    626     return 1;
    627   }
    628   if (J->prof_mode == 'l') {
    629     BCLine line = lj_debug_line(pt, proto_bcpos(pt, pc));
    630     BCLine pline = J->prev_line;
    631     J->prev_line = line;
    632     if (pline != line)
    633       return 1;
    634   }
    635   return 0;
    636 }
    637 
    638 static void rec_profile_ins(jit_State *J, const BCIns *pc)
    639 {
    640   if (J->prof_mode && rec_profile_need(J, J->pt, pc)) {
    641     emitir(IRTG(IR_PROF, IRT_NIL), 0, 0);
    642     lj_snap_add(J);
    643   }
    644 }
    645 
    646 static void rec_profile_ret(jit_State *J)
    647 {
    648   if (J->prof_mode == 'f') {
    649     emitir(IRTG(IR_PROF, IRT_NIL), 0, 0);
    650     J->prev_pt = NULL;
    651     lj_snap_add(J);
    652   }
    653 }
    654 
    655 #endif
    656 
    657 /* -- Record calls and returns -------------------------------------------- */
    658 
    659 /* Specialize to the runtime value of the called function or its prototype. */
    660 static TRef rec_call_specialize(jit_State *J, GCfunc *fn, TRef tr)
    661 {
    662   TRef kfunc;
    663   if (isluafunc(fn)) {
    664     GCproto *pt = funcproto(fn);
    665     /* Too many closures created? Probably not a monomorphic function. */
    666     if (pt->flags >= PROTO_CLC_POLY) {  /* Specialize to prototype instead. */
    667       TRef trpt = emitir(IRT(IR_FLOAD, IRT_PGC), tr, IRFL_FUNC_PC);
    668       emitir(IRTG(IR_EQ, IRT_PGC), trpt, lj_ir_kptr(J, proto_bc(pt)));
    669       (void)lj_ir_kgc(J, obj2gco(pt), IRT_PROTO);  /* Prevent GC of proto. */
    670       return tr;
    671     }
    672   } else {
    673     /* Don't specialize to non-monomorphic builtins. */
    674     switch (fn->c.ffid) {
    675     case FF_coroutine_wrap_aux:
    676     case FF_string_gmatch_aux:
    677       /* NYI: io_file_iter doesn't have an ffid, yet. */
    678       {  /* Specialize to the ffid. */
    679 	TRef trid = emitir(IRT(IR_FLOAD, IRT_U8), tr, IRFL_FUNC_FFID);
    680 	emitir(IRTG(IR_EQ, IRT_INT), trid, lj_ir_kint(J, fn->c.ffid));
    681       }
    682       return tr;
    683     default:
    684       /* NYI: don't specialize to non-monomorphic C functions. */
    685       break;
    686     }
    687   }
    688   /* Otherwise specialize to the function (closure) value itself. */
    689   kfunc = lj_ir_kfunc(J, fn);
    690   emitir(IRTG(IR_EQ, IRT_FUNC), tr, kfunc);
    691   return kfunc;
    692 }
    693 
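        /* Note the return value: in the monomorphic case the constant
        ** function ref is handed back and replaces the stack slot in
        ** rec_call_setup(), so later recording sees the callee as a
        ** constant (which e.g. lets upvalue accesses constify). For
        ** polymorphic prototypes only the proto is guarded and the
        ** variable ref is kept.
        */
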
    694 /* Record call setup. */
    695 static void rec_call_setup(jit_State *J, BCReg func, ptrdiff_t nargs)
    696 {
    697   RecordIndex ix;
    698   TValue *functv = &J->L->base[func];
    699   TRef kfunc, *fbase = &J->base[func];
    700   ptrdiff_t i;
    701   (void)getslot(J, func); /* Ensure func has a reference. */
    702   for (i = 1; i <= nargs; i++)
    703     (void)getslot(J, func+LJ_FR2+i);  /* Ensure all args have a reference. */
    704   if (!tref_isfunc(fbase[0])) {  /* Resolve __call metamethod. */
    705     ix.tab = fbase[0];
    706     copyTV(J->L, &ix.tabv, functv);
    707     if (!lj_record_mm_lookup(J, &ix, MM_call) || !tref_isfunc(ix.mobj))
    708       lj_trace_err(J, LJ_TRERR_NOMM);
    709     for (i = ++nargs; i > LJ_FR2; i--)  /* Shift arguments up. */
    710       fbase[i+LJ_FR2] = fbase[i+LJ_FR2-1];
    711 #if LJ_FR2
    712     fbase[2] = fbase[0];
    713 #endif
    714     fbase[0] = ix.mobj;  /* Replace function. */
    715     functv = &ix.mobjv;
    716   }
    717   kfunc = rec_call_specialize(J, funcV(functv), fbase[0]);
    718 #if LJ_FR2
    719   fbase[0] = kfunc;
    720   fbase[1] = TREF_FRAME;
    721 #else
    722   fbase[0] = kfunc | TREF_FRAME;
    723 #endif
    724   J->maxslot = (BCReg)nargs;
    725 }
    726 
    727 /* Record call. */
    728 void lj_record_call(jit_State *J, BCReg func, ptrdiff_t nargs)
    729 {
    730   rec_call_setup(J, func, nargs);
    731   /* Bump frame. */
    732   J->framedepth++;
    733   J->base += func+1+LJ_FR2;
    734   J->baseslot += func+1+LJ_FR2;
    735 }
    736 
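        /* Sketch of the recorder stack right after lj_record_call() in the
        ** LJ_FR2 == 0 layout:
        **
        **   old base[func]   = kfunc | TREF_FRAME   <- new J->base[-1]
        **   old base[func+1] = arg1                 <- new J->base[0]
        **
        ** J->framedepth counts frames recorded on-trace; returning below
        ** the initial frame takes the "return to lower frame" paths in
        ** lj_record_ret() instead.
        */
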
    737 /* Record tail call. */
    738 void lj_record_tailcall(jit_State *J, BCReg func, ptrdiff_t nargs)
    739 {
    740   rec_call_setup(J, func, nargs);
    741   if (frame_isvarg(J->L->base - 1)) {
    742     BCReg cbase = (BCReg)frame_delta(J->L->base - 1);
    743     if (--J->framedepth < 0)
    744       lj_trace_err(J, LJ_TRERR_NYIRETL);
    745     J->baseslot -= (BCReg)cbase;
    746     J->base -= cbase;
    747     func += cbase;
    748   }
    749   /* Move func + args down. */
    750   if (LJ_FR2 && J->baseslot == 2)
    751     J->base[func+1] = 0;
    752   memmove(&J->base[-1-LJ_FR2], &J->base[func], sizeof(TRef)*(J->maxslot+1+LJ_FR2));
    753   /* Note: the new TREF_FRAME is now at J->base[-1] (even for slot #0). */
    754   /* Tailcalls can form a loop, so count towards the loop unroll limit. */
    755   if (++J->tailcalled > J->loopunroll)
    756     lj_trace_err(J, LJ_TRERR_LUNROLL);
    757 }
    758 
    759 /* Check unroll limits for down-recursion. */
    760 static int check_downrec_unroll(jit_State *J, GCproto *pt)
    761 {
    762   IRRef ptref;
    763   for (ptref = J->chain[IR_KGC]; ptref; ptref = IR(ptref)->prev)
    764     if (ir_kgc(IR(ptref)) == obj2gco(pt)) {
    765       int count = 0;
    766       IRRef ref;
    767       for (ref = J->chain[IR_RETF]; ref; ref = IR(ref)->prev)
    768 	if (IR(ref)->op1 == ptref)
    769 	  count++;
    770       if (count) {
    771 	if (J->pc == J->startpc) {
    772 	  if (count + J->tailcalled > J->param[JIT_P_recunroll])
    773 	    return 1;
    774 	} else {
    775 	  lj_trace_err(J, LJ_TRERR_DOWNREC);
    776 	}
    777       }
    778     }
    779   return 0;
    780 }
    781 
    782 static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot);
    783 
    784 /* Record return. */
    785 void lj_record_ret(jit_State *J, BCReg rbase, ptrdiff_t gotresults)
    786 {
    787   TValue *frame = J->L->base - 1;
    788   ptrdiff_t i;
    789   for (i = 0; i < gotresults; i++)
    790     (void)getslot(J, rbase+i);  /* Ensure all results have a reference. */
    791   while (frame_ispcall(frame)) {  /* Immediately resolve pcall() returns. */
    792     BCReg cbase = (BCReg)frame_delta(frame);
    793     if (--J->framedepth <= 0)
    794       lj_trace_err(J, LJ_TRERR_NYIRETL);
    795     lua_assert(J->baseslot > 1+LJ_FR2);
    796     gotresults++;
    797     rbase += cbase;
    798     J->baseslot -= (BCReg)cbase;
    799     J->base -= cbase;
    800     J->base[--rbase] = TREF_TRUE;  /* Prepend true to results. */
    801     frame = frame_prevd(frame);
    802   }
    803   /* Return to lower frame via interpreter for unhandled cases. */
    804   if (J->framedepth == 0 && J->pt && bc_isret(bc_op(*J->pc)) &&
    805        (!frame_islua(frame) ||
    806 	(J->parent == 0 && J->exitno == 0 &&
    807 	 !bc_isret(bc_op(J->cur.startins))))) {
    808     /* NYI: specialize to frame type and return directly, not via RET*. */
    809     for (i = 0; i < (ptrdiff_t)rbase; i++)
    810       J->base[i] = 0;  /* Purge dead slots. */
    811     J->maxslot = rbase + (BCReg)gotresults;
    812     lj_record_stop(J, LJ_TRLINK_RETURN, 0);  /* Return to interpreter. */
    813     return;
    814   }
    815   if (frame_isvarg(frame)) {
    816     BCReg cbase = (BCReg)frame_delta(frame);
    817     if (--J->framedepth < 0)  /* NYI: return of vararg func to lower frame. */
    818       lj_trace_err(J, LJ_TRERR_NYIRETL);
    819     lua_assert(J->baseslot > 1+LJ_FR2);
    820     rbase += cbase;
    821     J->baseslot -= (BCReg)cbase;
    822     J->base -= cbase;
    823     frame = frame_prevd(frame);
    824   }
    825   if (frame_islua(frame)) {  /* Return to Lua frame. */
    826     BCIns callins = *(frame_pc(frame)-1);
    827     ptrdiff_t nresults = bc_b(callins) ? (ptrdiff_t)bc_b(callins)-1 :gotresults;
    828     BCReg cbase = bc_a(callins);
    829     GCproto *pt = funcproto(frame_func(frame - (cbase+1+LJ_FR2)));
    830     if ((pt->flags & PROTO_NOJIT))
    831       lj_trace_err(J, LJ_TRERR_CJITOFF);
    832     if (J->framedepth == 0 && J->pt && frame == J->L->base - 1) {
    833       if (check_downrec_unroll(J, pt)) {
    834 	J->maxslot = (BCReg)(rbase + gotresults);
    835 	lj_snap_purge(J);
    836 	lj_record_stop(J, LJ_TRLINK_DOWNREC, J->cur.traceno);  /* Down-rec. */
    837 	return;
    838       }
    839       lj_snap_add(J);
    840     }
    841     for (i = 0; i < nresults; i++)  /* Adjust results. */
    842       J->base[i-1-LJ_FR2] = i < gotresults ? J->base[rbase+i] : TREF_NIL;
    843     J->maxslot = cbase+(BCReg)nresults;
    844     if (J->framedepth > 0) {  /* Return to a frame that is part of the trace. */
    845       J->framedepth--;
    846       lua_assert(J->baseslot > cbase+1+LJ_FR2);
    847       J->baseslot -= cbase+1+LJ_FR2;
    848       J->base -= cbase+1+LJ_FR2;
    849     } else if (J->parent == 0 && J->exitno == 0 &&
    850 	       !bc_isret(bc_op(J->cur.startins))) {
    851       /* Return to lower frame would leave the loop in a root trace. */
    852       lj_trace_err(J, LJ_TRERR_LLEAVE);
    853     } else if (J->needsnap) {  /* Tailcalled to ff with side-effects. */
    854       lj_trace_err(J, LJ_TRERR_NYIRETL);  /* No way to insert snapshot here. */
    855     } else {  /* Return to lower frame. Guard for the target we return to. */
    856       TRef trpt = lj_ir_kgc(J, obj2gco(pt), IRT_PROTO);
    857       TRef trpc = lj_ir_kptr(J, (void *)frame_pc(frame));
    858       emitir(IRTG(IR_RETF, IRT_PGC), trpt, trpc);
    859       J->retdepth++;
    860       J->needsnap = 1;
    861       lua_assert(J->baseslot == 1+LJ_FR2);
    862       /* Shift result slots up and clear the slots of the new frame below. */
    863       memmove(J->base + cbase, J->base-1-LJ_FR2, sizeof(TRef)*nresults);
    864       memset(J->base-1-LJ_FR2, 0, sizeof(TRef)*(cbase+1+LJ_FR2));
    865     }
    866   } else if (frame_iscont(frame)) {  /* Return to continuation frame. */
    867     ASMFunction cont = frame_contf(frame);
    868     BCReg cbase = (BCReg)frame_delta(frame);
    869     if ((J->framedepth -= 2) < 0)
    870       lj_trace_err(J, LJ_TRERR_NYIRETL);
    871     J->baseslot -= (BCReg)cbase;
    872     J->base -= cbase;
    873     J->maxslot = cbase-(2<<LJ_FR2);
    874     if (cont == lj_cont_ra) {
    875       /* Copy result to destination slot. */
    876       BCReg dst = bc_a(*(frame_contpc(frame)-1));
    877       J->base[dst] = gotresults ? J->base[cbase+rbase] : TREF_NIL;
    878       if (dst >= J->maxslot) {
    879 	J->maxslot = dst+1;
    880       }
    881     } else if (cont == lj_cont_nop) {
    882       /* Nothing to do here. */
    883     } else if (cont == lj_cont_cat) {
    884       BCReg bslot = bc_b(*(frame_contpc(frame)-1));
    885       TRef tr = gotresults ? J->base[cbase+rbase] : TREF_NIL;
    886       if (bslot != J->maxslot) {  /* Concatenate the remainder. */
    887 	TValue *b = J->L->base, save;  /* Simulate lower frame and result. */
    888 	J->base[J->maxslot] = tr;
    889 	copyTV(J->L, &save, b-(2<<LJ_FR2));
    890 	if (gotresults)
    891 	  copyTV(J->L, b-(2<<LJ_FR2), b+rbase);
    892 	else
    893 	  setnilV(b-(2<<LJ_FR2));
    894 	J->L->base = b - cbase;
    895 	tr = rec_cat(J, bslot, cbase-(2<<LJ_FR2));
    896 	b = J->L->base + cbase;  /* Undo. */
    897 	J->L->base = b;
    898 	copyTV(J->L, b-(2<<LJ_FR2), &save);
    899       }
    900       if (tr) {  /* Store final result. */
    901 	BCReg dst = bc_a(*(frame_contpc(frame)-1));
    902 	J->base[dst] = tr;
    903 	if (dst >= J->maxslot) {
    904 	  J->maxslot = dst+1;
    905 	}
    906       }  /* Otherwise continue with another __concat call. */
    907     } else {
    908       /* Result type already specialized. */
    909       lua_assert(cont == lj_cont_condf || cont == lj_cont_condt);
    910     }
    911   } else {
    912     lj_trace_err(J, LJ_TRERR_NYIRETL);  /* NYI: handle return to C frame. */
    913   }
    914   lua_assert(J->baseslot >= 1+LJ_FR2);
    915 }
    916 
    917 /* -- Metamethod handling ------------------------------------------------- */
    918 
    919 /* Prepare to record call to metamethod. */
    920 static BCReg rec_mm_prep(jit_State *J, ASMFunction cont)
    921 {
    922   BCReg s, top = cont == lj_cont_cat ? J->maxslot : curr_proto(J->L)->framesize;
    923 #if LJ_FR2
    924   J->base[top] = lj_ir_k64(J, IR_KNUM, u64ptr(contptr(cont)));
    925   J->base[top+1] = TREF_CONT;
    926 #else
    927   J->base[top] = lj_ir_kptr(J, contptr(cont)) | TREF_CONT;
    928 #endif
    929   J->framedepth++;
    930   for (s = J->maxslot; s < top; s++)
    931     J->base[s] = 0;  /* Clear frame gap to avoid resurrecting previous refs. */
    932   return top+1+LJ_FR2;
    933 }
    934 
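        /* Continuation frames: a metamethod call is recorded with a pseudo
        ** frame whose return address is an ASMFunction continuation instead
        ** of a bytecode pc. The frame_iscont() branch of lj_record_ret()
        ** above dispatches on it: lj_cont_ra stores the result to a slot,
        ** lj_cont_condt/condf turn it into a branch condition, lj_cont_cat
        ** resumes a concatenation and lj_cont_nop discards it.
        */
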
    935 /* Record metamethod lookup. */
    936 int lj_record_mm_lookup(jit_State *J, RecordIndex *ix, MMS mm)
    937 {
    938   RecordIndex mix;
    939   GCtab *mt;
    940   if (tref_istab(ix->tab)) {
    941     mt = tabref(tabV(&ix->tabv)->metatable);
    942     mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
    943   } else if (tref_isudata(ix->tab)) {
    944     int udtype = udataV(&ix->tabv)->udtype;
    945     mt = tabref(udataV(&ix->tabv)->metatable);
    946     /* The metatables of special userdata objects are treated as immutable. */
    947     if (udtype != UDTYPE_USERDATA) {
    948       cTValue *mo;
    949       if (LJ_HASFFI && udtype == UDTYPE_FFI_CLIB) {
    950 	/* Specialize to the C library namespace object. */
    951 	emitir(IRTG(IR_EQ, IRT_PGC), ix->tab, lj_ir_kptr(J, udataV(&ix->tabv)));
    952       } else {
    953 	/* Specialize to the type of userdata. */
    954 	TRef tr = emitir(IRT(IR_FLOAD, IRT_U8), ix->tab, IRFL_UDATA_UDTYPE);
    955 	emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, udtype));
    956       }
    957   immutable_mt:
    958       mo = lj_tab_getstr(mt, mmname_str(J2G(J), mm));
    959       if (!mo || tvisnil(mo))
    960 	return 0;  /* No metamethod. */
    961       /* Treat metamethod or index table as immutable, too. */
    962       if (!(tvisfunc(mo) || tvistab(mo)))
    963 	lj_trace_err(J, LJ_TRERR_BADTYPE);
    964       copyTV(J->L, &ix->mobjv, mo);
    965       ix->mobj = lj_ir_kgc(J, gcV(mo), tvisfunc(mo) ? IRT_FUNC : IRT_TAB);
    966       ix->mtv = mt;
    967       ix->mt = TREF_NIL;  /* Dummy value for comparison semantics. */
    968       return 1;  /* Got metamethod or index table. */
    969     }
    970     mix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_UDATA_META);
    971   } else {
    972     /* Specialize to base metatable. Must flush mcode in lua_setmetatable(). */
    973     mt = tabref(basemt_obj(J2G(J), &ix->tabv));
    974     if (mt == NULL) {
    975       ix->mt = TREF_NIL;
    976       return 0;  /* No metamethod. */
    977     }
    978     /* The cdata metatable is treated as immutable. */
    979     if (LJ_HASFFI && tref_iscdata(ix->tab)) goto immutable_mt;
    980 #if LJ_GC64
    981     /* TODO: fix ARM32 asm_fload(), so we can use this for all archs. */
    982     ix->mt = mix.tab = lj_ir_ggfload(J, IRT_TAB,
    983       GG_OFS(g.gcroot[GCROOT_BASEMT+itypemap(&ix->tabv)]));
    984 #else
    985     ix->mt = mix.tab = lj_ir_ktab(J, mt);
    986 #endif
    987     goto nocheck;
    988   }
    989   ix->mt = mt ? mix.tab : TREF_NIL;
    990   emitir(IRTG(mt ? IR_NE : IR_EQ, IRT_TAB), mix.tab, lj_ir_knull(J, IRT_TAB));
    991 nocheck:
    992   if (mt) {
    993     GCstr *mmstr = mmname_str(J2G(J), mm);
    994     cTValue *mo = lj_tab_getstr(mt, mmstr);
    995     if (mo && !tvisnil(mo))
    996       copyTV(J->L, &ix->mobjv, mo);
    997     ix->mtv = mt;
    998     settabV(J->L, &mix.tabv, mt);
    999     setstrV(J->L, &mix.keyv, mmstr);
   1000     mix.key = lj_ir_kstr(J, mmstr);
   1001     mix.val = 0;
   1002     mix.idxchain = 0;
   1003     ix->mobj = lj_record_idx(J, &mix);
   1004     return !tref_isnil(ix->mobj);  /* 1 if metamethod found, 0 if not. */
   1005   }
   1006   return 0;  /* No metamethod. */
   1007 }
   1008 
   1009 /* Record call to arithmetic metamethod. */
   1010 static TRef rec_mm_arith(jit_State *J, RecordIndex *ix, MMS mm)
   1011 {
   1012   /* Set up metamethod call first to save ix->tab and ix->tabv. */
   1013   BCReg func = rec_mm_prep(J, mm == MM_concat ? lj_cont_cat : lj_cont_ra);
   1014   TRef *base = J->base + func;
   1015   TValue *basev = J->L->base + func;
   1016   base[1+LJ_FR2] = ix->tab; base[2+LJ_FR2] = ix->key;
   1017   copyTV(J->L, basev+1+LJ_FR2, &ix->tabv);
   1018   copyTV(J->L, basev+2+LJ_FR2, &ix->keyv);
   1019   if (!lj_record_mm_lookup(J, ix, mm)) {  /* Lookup mm on 1st operand. */
   1020     if (mm != MM_unm) {
   1021       ix->tab = ix->key;
   1022       copyTV(J->L, &ix->tabv, &ix->keyv);
   1023       if (lj_record_mm_lookup(J, ix, mm))  /* Lookup mm on 2nd operand. */
   1024 	goto ok;
   1025     }
   1026     lj_trace_err(J, LJ_TRERR_NOMM);
   1027   }
   1028 ok:
   1029   base[0] = ix->mobj;
   1030 #if LJ_FR2
   1031   base[1] = 0;
   1032 #endif
   1033   copyTV(J->L, basev+0, &ix->mobjv);
   1034   lj_record_call(J, func, 2);
   1035   return 0;  /* No result yet. */
   1036 }
   1037 
   1038 /* Record call to __len metamethod. */
   1039 static TRef rec_mm_len(jit_State *J, TRef tr, TValue *tv)
   1040 {
   1041   RecordIndex ix;
   1042   ix.tab = tr;
   1043   copyTV(J->L, &ix.tabv, tv);
   1044   if (lj_record_mm_lookup(J, &ix, MM_len)) {
   1045     BCReg func = rec_mm_prep(J, lj_cont_ra);
   1046     TRef *base = J->base + func;
   1047     TValue *basev = J->L->base + func;
   1048     base[0] = ix.mobj; copyTV(J->L, basev+0, &ix.mobjv);
   1049     base += LJ_FR2;
   1050     basev += LJ_FR2;
   1051     base[1] = tr; copyTV(J->L, basev+1, tv);
   1052 #if LJ_51
   1053     base[2] = TREF_NIL; setnilV(basev+2);
   1054 #else
   1055     base[2] = tr; copyTV(J->L, basev+2, tv);
   1056 #endif
   1057     lj_record_call(J, func, 2);
   1058   } else {
   1059     if (!LJ_51 && tref_istab(tr))
   1060       return lj_ir_call(J, IRCALL_lj_tab_len, tr);
   1061     lj_trace_err(J, LJ_TRERR_NOMM);
   1062   }
   1063   return 0;  /* No result yet. */
   1064 }
   1065 
   1066 /* Call a comparison metamethod. */
   1067 static void rec_mm_callcomp(jit_State *J, RecordIndex *ix, int op)
   1068 {
   1069   BCReg func = rec_mm_prep(J, (op&1) ? lj_cont_condf : lj_cont_condt);
   1070   TRef *base = J->base + func + LJ_FR2;
   1071   TValue *tv = J->L->base + func + LJ_FR2;
   1072   base[-LJ_FR2] = ix->mobj; base[1] = ix->val; base[2] = ix->key;
   1073   copyTV(J->L, tv-LJ_FR2, &ix->mobjv);
   1074   copyTV(J->L, tv+1, &ix->valv);
   1075   copyTV(J->L, tv+2, &ix->keyv);
   1076   lj_record_call(J, func, 2);
   1077 }
   1078 
   1079 /* Record call to equality comparison metamethod (for tab and udata only). */
   1080 static void rec_mm_equal(jit_State *J, RecordIndex *ix, int op)
   1081 {
   1082   ix->tab = ix->val;
   1083   copyTV(J->L, &ix->tabv, &ix->valv);
   1084   if (lj_record_mm_lookup(J, ix, MM_eq)) {  /* Lookup mm on 1st operand. */
   1085     cTValue *bv;
   1086     TRef mo1 = ix->mobj;
   1087     TValue mo1v;
   1088     copyTV(J->L, &mo1v, &ix->mobjv);
   1089     /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
   1090     bv = &ix->keyv;
   1091     if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
   1092       TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
   1093       emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
   1094     } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
   1095       TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
   1096       emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
   1097     } else {  /* Lookup metamethod on 2nd operand and compare both. */
   1098       ix->tab = ix->key;
   1099       copyTV(J->L, &ix->tabv, bv);
   1100       if (!lj_record_mm_lookup(J, ix, MM_eq) ||
   1101 	  lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
   1102 	return;
   1103     }
   1104     rec_mm_callcomp(J, ix, op);
   1105   }
   1106 }
   1107 
   1108 /* Record call to ordered comparison metamethods (for arbitrary objects). */
   1109 static void rec_mm_comp(jit_State *J, RecordIndex *ix, int op)
   1110 {
   1111   ix->tab = ix->val;
   1112   copyTV(J->L, &ix->tabv, &ix->valv);
   1113   while (1) {
   1114     MMS mm = (op & 2) ? MM_le : MM_lt;  /* Try __le + __lt or only __lt. */
   1115 #if LJ_51
   1116     if (lj_record_mm_lookup(J, ix, mm)) {  /* Lookup mm on 1st operand. */
   1117       cTValue *bv;
   1118       TRef mo1 = ix->mobj;
   1119       TValue mo1v;
   1120       copyTV(J->L, &mo1v, &ix->mobjv);
   1121       /* Avoid the 2nd lookup and the objcmp if the metatables are equal. */
   1122       bv = &ix->keyv;
   1123       if (tvistab(bv) && tabref(tabV(bv)->metatable) == ix->mtv) {
   1124         TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_TAB_META);
   1125         emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
   1126       } else if (tvisudata(bv) && tabref(udataV(bv)->metatable) == ix->mtv) {
   1127         TRef mt2 = emitir(IRT(IR_FLOAD, IRT_TAB), ix->key, IRFL_UDATA_META);
   1128         emitir(IRTG(IR_EQ, IRT_TAB), mt2, ix->mt);
   1129       } else {  /* Lookup metamethod on 2nd operand and compare both. */
   1130         ix->tab = ix->key;
   1131         copyTV(J->L, &ix->tabv, bv);
   1132         if (!lj_record_mm_lookup(J, ix, mm) ||
   1133             lj_record_objcmp(J, mo1, ix->mobj, &mo1v, &ix->mobjv))
   1134           goto nomatch;
   1135       }
   1136       rec_mm_callcomp(J, ix, op);
   1137       return;
   1138     }
   1139 #else
   1140     if (!lj_record_mm_lookup(J, ix, mm)) {  /* Lookup mm on 1st operand. */
   1141       ix->tab = ix->key;
   1142       copyTV(J->L, &ix->tabv, &ix->keyv);
   1143       if (!lj_record_mm_lookup(J, ix, mm))  /* Lookup mm on 2nd operand. */
   1144 	goto nomatch;
   1145     }
   1146     rec_mm_callcomp(J, ix, op);
   1147     return;
   1148 #endif
   1149   nomatch:
   1150     /* Lookup failed. Retry with __lt and swapped operands. */
   1151     if (!(op & 2)) break;  /* Already at __lt. Interpreter will throw. */
   1152     ix->tab = ix->key; ix->key = ix->val; ix->val = ix->tab;
   1153     copyTV(J->L, &ix->tabv, &ix->keyv);
   1154     copyTV(J->L, &ix->keyv, &ix->valv);
   1155     copyTV(J->L, &ix->valv, &ix->tabv);
   1156     op ^= 3;
   1157   }
   1158 }
   1159 
   1160 #if LJ_HASFFI
   1161 /* Setup call to cdata comparison metamethod. */
   1162 static void rec_mm_comp_cdata(jit_State *J, RecordIndex *ix, int op, MMS mm)
   1163 {
   1164   lj_snap_add(J);
   1165   if (tref_iscdata(ix->val)) {
   1166     ix->tab = ix->val;
   1167     copyTV(J->L, &ix->tabv, &ix->valv);
   1168   } else {
   1169     lua_assert(tref_iscdata(ix->key));
   1170     ix->tab = ix->key;
   1171     copyTV(J->L, &ix->tabv, &ix->keyv);
   1172   }
   1173   lj_record_mm_lookup(J, ix, mm);
   1174   rec_mm_callcomp(J, ix, op);
   1175 }
   1176 #endif
   1177 
   1178 /* -- Indexed access ------------------------------------------------------ */
   1179 
   1180 #ifdef LUAJIT_ENABLE_TABLE_BUMP
   1181 /* Bump table allocations in bytecode when they grow during recording. */
   1182 static void rec_idx_bump(jit_State *J, RecordIndex *ix)
   1183 {
   1184   RBCHashEntry *rbc = &J->rbchash[(ix->tab & (RBCHASH_SLOTS-1))];
   1185   if (tref_ref(ix->tab) == rbc->ref) {
   1186     const BCIns *pc = mref(rbc->pc, const BCIns);
   1187     GCtab *tb = tabV(&ix->tabv);
   1188     uint32_t nhbits;
   1189     IRIns *ir;
   1190     if (!tvisnil(&ix->keyv))
   1191       (void)lj_tab_set(J->L, tb, &ix->keyv);  /* Grow table right now. */
   1192     nhbits = tb->hmask > 0 ? lj_fls(tb->hmask)+1 : 0;
   1193     ir = IR(tref_ref(ix->tab));
   1194     if (ir->o == IR_TNEW) {
   1195       uint32_t ah = bc_d(*pc);
   1196       uint32_t asize = ah & 0x7ff, hbits = ah >> 11;
   1197       if (nhbits > hbits) hbits = nhbits;
   1198       if (tb->asize > asize) {
   1199 	asize = tb->asize <= 0x7ff ? tb->asize : 0x7ff;
   1200       }
   1201       if ((asize | (hbits<<11)) != ah) {  /* Has the size changed? */
   1202 	/* Patch bytecode, but continue recording (for more patching). */
   1203 	setbc_d(pc, (asize | (hbits<<11)));
   1204 	/* Patching TNEW operands is only safe if the trace is aborted. */
   1205 	ir->op1 = asize; ir->op2 = hbits;
   1206 	J->retryrec = 1;  /* Abort the trace at the end of recording. */
   1207       }
   1208     } else if (ir->o == IR_TDUP) {
   1209       GCtab *tpl = gco2tab(proto_kgc(&gcref(rbc->pt)->pt, ~(ptrdiff_t)bc_d(*pc)));
   1210       /* Grow template table, but preserve keys with nil values. */
   1211       if ((tb->asize > tpl->asize && (1u << nhbits)-1 == tpl->hmask) ||
   1212 	  (tb->asize == tpl->asize && (1u << nhbits)-1 > tpl->hmask)) {
   1213 	Node *node = noderef(tpl->node);
   1214 	uint32_t i, hmask = tpl->hmask, asize;
   1215 	TValue *array;
   1216 	for (i = 0; i <= hmask; i++) {
   1217 	  if (!tvisnil(&node[i].key) && tvisnil(&node[i].val))
   1218 	    settabV(J->L, &node[i].val, tpl);
   1219 	}
   1220 	if (!tvisnil(&ix->keyv) && tref_isk(ix->key)) {
   1221 	  TValue *o = lj_tab_set(J->L, tpl, &ix->keyv);
   1222 	  if (tvisnil(o)) settabV(J->L, o, tpl);
   1223 	}
   1224 	lj_tab_resize(J->L, tpl, tb->asize, nhbits);
   1225 	node = noderef(tpl->node);
   1226 	hmask = tpl->hmask;
   1227 	for (i = 0; i <= hmask; i++) {
   1228 	  /* This is safe, since template tables only hold immutable values. */
   1229 	  if (tvistab(&node[i].val))
   1230 	    setnilV(&node[i].val);
   1231 	}
   1232 	/* The shape of the table may have changed. Clean up array part, too. */
   1233 	asize = tpl->asize;
   1234 	array = tvref(tpl->array);
   1235 	for (i = 0; i < asize; i++) {
   1236 	  if (tvistab(&array[i]))
   1237 	    setnilV(&array[i]);
   1238 	}
   1239 	J->retryrec = 1;  /* Abort the trace at the end of recording. */
   1240       }
   1241     }
   1242   }
   1243 }
   1244 #endif
   1245 
   1246 /* Record bounds-check. */
   1247 static void rec_idx_abc(jit_State *J, TRef asizeref, TRef ikey, uint32_t asize)
   1248 {
   1249   /* Try to emit invariant bounds checks. */
   1250   if ((J->flags & (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) ==
   1251       (JIT_F_OPT_LOOP|JIT_F_OPT_ABC)) {
   1252     IRRef ref = tref_ref(ikey);
   1253     IRIns *ir = IR(ref);
   1254     int32_t ofs = 0;
   1255     IRRef ofsref = 0;
   1256     /* Handle constant offsets. */
   1257     if (ir->o == IR_ADD && irref_isk(ir->op2)) {
   1258       ofsref = ir->op2;
   1259       ofs = IR(ofsref)->i;
   1260       ref = ir->op1;
   1261       ir = IR(ref);
   1262     }
   1263     /* Got scalar evolution analysis results for this reference? */
   1264     if (ref == J->scev.idx) {
   1265       int32_t stop;
   1266       lua_assert(irt_isint(J->scev.t) && ir->o == IR_SLOAD);
   1267       stop = numberVint(&(J->L->base - J->baseslot)[ir->op1 + FORL_STOP]);
   1268       /* Runtime value for stop of loop is within bounds? */
   1269       if ((uint64_t)stop + ofs < (uint64_t)asize) {
   1270 	/* Emit invariant bounds check for stop. */
   1271 	emitir(IRTG(IR_ABC, IRT_P32), asizeref, ofs == 0 ? J->scev.stop :
   1272 	       emitir(IRTI(IR_ADD), J->scev.stop, ofsref));
   1273 	/* Emit invariant bounds check for start, if not const or negative. */
   1274 	if (!(J->scev.dir && J->scev.start &&
   1275 	      (int64_t)IR(J->scev.start)->i + ofs >= 0))
   1276 	  emitir(IRTG(IR_ABC, IRT_P32), asizeref, ikey);
   1277 	return;
   1278       }
   1279     }
   1280   }
   1281   emitir(IRTGI(IR_ABC), asizeref, ikey);  /* Emit regular bounds check. */
   1282 }
   1283 
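        /* Example of the invariant-check idea: for  t[i]  inside
        **   for i=1,n do ... end
        ** where the runtime value of n is below the array size, a single
        ** check against the loop stop (plus one for the start, unless it is
        ** provably non-negative) replaces the per-iteration bounds check,
        ** and LOOP optimization can then hoist it out of the loop body.
        */
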
   1284 /* Record indexed key lookup. */
   1285 static TRef rec_idx_key(jit_State *J, RecordIndex *ix, IRRef *rbref,
   1286 			IRType1 *rbguard)
   1287 {
   1288   TRef key;
   1289   GCtab *t = tabV(&ix->tabv);
   1290   ix->oldv = lj_tab_get(J->L, t, &ix->keyv);  /* Lookup previous value. */
   1291   *rbref = 0;
   1292   rbguard->irt = 0;
   1293 
   1294   /* Integer keys are looked up in the array part first. */
   1295   key = ix->key;
   1296   if (tref_isnumber(key)) {
   1297     int32_t k = numberVint(&ix->keyv);
   1298     if (!tvisint(&ix->keyv) && numV(&ix->keyv) != (lua_Number)k)
   1299       k = LJ_MAX_ASIZE;
   1300     if ((MSize)k < LJ_MAX_ASIZE) {  /* Potential array key? */
   1301       TRef ikey = lj_opt_narrow_index(J, key);
   1302       TRef asizeref = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
   1303       if ((MSize)k < t->asize) {  /* Currently an array key? */
   1304 	TRef arrayref;
   1305 	rec_idx_abc(J, asizeref, ikey, t->asize);
   1306 	arrayref = emitir(IRT(IR_FLOAD, IRT_PGC), ix->tab, IRFL_TAB_ARRAY);
   1307 	return emitir(IRT(IR_AREF, IRT_PGC), arrayref, ikey);
   1308       } else {  /* Currently not in array (may be an array extension)? */
   1309 	emitir(IRTGI(IR_ULE), asizeref, ikey);  /* Inv. bounds check. */
   1310 	if (k == 0 && tref_isk(key))
   1311 	  key = lj_ir_knum_zero(J);  /* Canonicalize 0 or +-0.0 to +0.0. */
   1312 	/* And continue with the hash lookup. */
   1313       }
   1314     } else if (!tref_isk(key)) {
   1315       /* We can rule out const numbers which failed the integerness test
   1316       ** above. But all other numbers are potential array keys.
   1317       */
   1318       if (t->asize == 0) {  /* True sparse tables have an empty array part. */
   1319 	/* Guard that the array part stays empty. */
   1320 	TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_ASIZE);
   1321 	emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
   1322       } else {
   1323 	lj_trace_err(J, LJ_TRERR_NYITMIX);
   1324       }
   1325     }
   1326   }
   1327 
   1328   /* Otherwise the key is located in the hash part. */
   1329   if (t->hmask == 0) {  /* Shortcut for empty hash part. */
   1330     /* Guard that the hash part stays empty. */
   1331     TRef tmp = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
   1332     emitir(IRTGI(IR_EQ), tmp, lj_ir_kint(J, 0));
   1333     return lj_ir_kkptr(J, niltvg(J2G(J)));
   1334   }
   1335   if (tref_isinteger(key))  /* Hash keys are based on numbers, not ints. */
   1336     key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
   1337   if (tref_isk(key)) {
   1338     /* Optimize lookup of constant hash keys. */
   1339     MSize hslot = (MSize)((char *)ix->oldv - (char *)&noderef(t->node)[0].val);
   1340     if (t->hmask > 0 && hslot <= t->hmask*(MSize)sizeof(Node) &&
   1341 	hslot <= 65535*(MSize)sizeof(Node)) {
   1342       TRef node, kslot, hm;
   1343       *rbref = J->cur.nins;  /* Mark possible rollback point. */
   1344       *rbguard = J->guardemit;
   1345       hm = emitir(IRTI(IR_FLOAD), ix->tab, IRFL_TAB_HMASK);
   1346       emitir(IRTGI(IR_EQ), hm, lj_ir_kint(J, (int32_t)t->hmask));
   1347       node = emitir(IRT(IR_FLOAD, IRT_PGC), ix->tab, IRFL_TAB_NODE);
   1348       kslot = lj_ir_kslot(J, key, hslot / sizeof(Node));
   1349       return emitir(IRTG(IR_HREFK, IRT_PGC), node, kslot);
   1350     }
   1351   }
   1352   /* Fall back to a regular hash lookup. */
   1353   return emitir(IRT(IR_HREF, IRT_PGC), ix->tab, key);
   1354 }
   1355 
   1356 /* Determine whether a key is NOT one of the fast metamethod names. */
   1357 static int nommstr(jit_State *J, TRef key)
   1358 {
   1359   if (tref_isstr(key)) {
   1360     if (tref_isk(key)) {
   1361       GCstr *str = ir_kstr(IR(tref_ref(key)));
   1362       uint32_t mm;
   1363       for (mm = 0; mm <= MM_FAST; mm++)
   1364 	if (mmname_str(J2G(J), mm) == str)
    1365 	  return 0;  /* MUST be one of the fast metamethod names. */
   1366     } else {
   1367       return 0;  /* Variable string key MAY be a metamethod name. */
   1368     }
   1369   }
   1370   return 1;  /* CANNOT be a metamethod name. */
   1371 }
   1372 
   1373 /* Record indexed load/store. */
   1374 TRef lj_record_idx(jit_State *J, RecordIndex *ix)
   1375 {
   1376   TRef xref;
   1377   IROp xrefop, loadop;
   1378   IRRef rbref;
   1379   IRType1 rbguard;
   1380   cTValue *oldv;
   1381 
   1382   /* Remember original table before overwriting it. */
   1383   ix->otab = ix->tab;
   1384   ix->otabv = ix->tabv;
   1385 
   1386   while (!tref_istab(ix->tab)) { /* Handle non-table lookup. */
   1387     /* Never call raw lj_record_idx() on non-table. */
   1388     lua_assert(ix->idxchain != 0);
   1389     if (!lj_record_mm_lookup(J, ix, ix->val ? MM_newindex : MM_index))
   1390       lj_trace_err(J, LJ_TRERR_NOMM);
   1391   handlemm:
   1392     if (tref_isfunc(ix->mobj)) {  /* Handle metamethod call. */
   1393       BCReg func = rec_mm_prep(J, ix->val ? lj_cont_nop : lj_cont_ra);
   1394       TRef *base = J->base + func + LJ_FR2;
   1395       TValue *tv = J->L->base + func + LJ_FR2;
   1396       base[-LJ_FR2] = ix->mobj; base[1] = ix->tab; base[2] = ix->key;
   1397       setfuncV(J->L, tv-LJ_FR2, funcV(&ix->mobjv));
   1398       copyTV(J->L, tv+1, &ix->tabv);
   1399       copyTV(J->L, tv+2, &ix->keyv);
   1400       if (ix->val) {
   1401 	base[3] = ix->val;
   1402 	copyTV(J->L, tv+3, &ix->valv);
   1403 	lj_record_call(J, func, 3);  /* mobj(tab, key, val) */
   1404 	return 0;
   1405       } else {
    1406 	base[3] = ix->otab;
    1407 	copyTV(J->L, tv+3, &ix->otabv);
    1408 	lj_record_call(J, func, 3);  /* res = mobj(tab, key, origtab) */
   1409 	return 0;  /* No result yet. */
   1410       }
   1411     }
   1412     /* Otherwise retry lookup with metaobject. */
   1413     ix->tab = ix->mobj;
   1414     copyTV(J->L, &ix->tabv, &ix->mobjv);
   1415     if (--ix->idxchain == 0)
   1416       lj_trace_err(J, LJ_TRERR_IDXLOOP);
   1417   }
   1418 
   1419   /* First catch nil and NaN keys for tables. */
   1420   if (tvisnil(&ix->keyv) || (tvisnum(&ix->keyv) && tvisnan(&ix->keyv))) {
   1421     if (ix->val)  /* Better fail early. */
   1422       lj_trace_err(J, LJ_TRERR_STORENN);
   1423     if (tref_isk(ix->key)) {
   1424       if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
   1425 	goto handlemm;
   1426       return TREF_NIL;
   1427     }
   1428   }
   1429 
   1430   /* Record the key lookup. */
   1431   xref = rec_idx_key(J, ix, &rbref, &rbguard);
   1432   xrefop = IR(tref_ref(xref))->o;
   1433   loadop = xrefop == IR_AREF ? IR_ALOAD : IR_HLOAD;
   1434   /* The lj_meta_tset() inconsistency is gone, but better play safe. */
   1435   oldv = xrefop == IR_KKPTR ? (cTValue *)ir_kptr(IR(tref_ref(xref))) : ix->oldv;
   1436 
   1437   if (ix->val == 0) {  /* Indexed load */
   1438     IRType t = itype2irt(oldv);
   1439     TRef res;
   1440     if (oldv == niltvg(J2G(J))) {
   1441       emitir(IRTG(IR_EQ, IRT_PGC), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
   1442       res = TREF_NIL;
   1443     } else {
   1444       res = emitir(IRTG(loadop, t), xref, 0);
   1445     }
   1446     if (tref_ref(res) < rbref) {  /* HREFK + load forwarded? */
   1447       lj_ir_rollback(J, rbref);  /* Rollback to eliminate hmask guard. */
   1448       J->guardemit = rbguard;
   1449     }
   1450     if (t == IRT_NIL && ix->idxchain && lj_record_mm_lookup(J, ix, MM_index))
   1451       goto handlemm;
   1452     if (irtype_ispri(t)) res = TREF_PRI(t);  /* Canonicalize primitives. */
   1453     return res;
   1454   } else {  /* Indexed store. */
   1455     GCtab *mt = tabref(tabV(&ix->tabv)->metatable);
   1456     int keybarrier = tref_isgcv(ix->key) && !tref_isnil(ix->val);
   1457     if (tref_ref(xref) < rbref) {  /* HREFK forwarded? */
   1458       lj_ir_rollback(J, rbref);  /* Rollback to eliminate hmask guard. */
   1459       J->guardemit = rbguard;
   1460     }
   1461     if (tvisnil(oldv)) {  /* Previous value was nil? */
   1462       /* Need to duplicate the hasmm check for the early guards. */
   1463       int hasmm = 0;
   1464       if (ix->idxchain && mt) {
   1465 	cTValue *mo = lj_tab_getstr(mt, mmname_str(J2G(J), MM_newindex));
   1466 	hasmm = mo && !tvisnil(mo);
   1467       }
   1468       if (hasmm)
   1469 	emitir(IRTG(loadop, IRT_NIL), xref, 0);  /* Guard for nil value. */
   1470       else if (xrefop == IR_HREF)
   1471 	emitir(IRTG(oldv == niltvg(J2G(J)) ? IR_EQ : IR_NE, IRT_PGC),
   1472 	       xref, lj_ir_kkptr(J, niltvg(J2G(J))));
   1473       if (ix->idxchain && lj_record_mm_lookup(J, ix, MM_newindex)) {
   1474 	lua_assert(hasmm);
   1475 	goto handlemm;
   1476       }
   1477       lua_assert(!hasmm);
   1478       if (oldv == niltvg(J2G(J))) {  /* Need to insert a new key. */
   1479 	TRef key = ix->key;
   1480 	if (tref_isinteger(key))  /* NEWREF needs a TValue as a key. */
   1481 	  key = emitir(IRTN(IR_CONV), key, IRCONV_NUM_INT);
   1482 	xref = emitir(IRT(IR_NEWREF, IRT_PGC), ix->tab, key);
   1483 	keybarrier = 0;  /* NEWREF already takes care of the key barrier. */
   1484 #ifdef LUAJIT_ENABLE_TABLE_BUMP
   1485 	if ((J->flags & JIT_F_OPT_SINK))  /* Avoid a separate flag. */
   1486 	  rec_idx_bump(J, ix);
   1487 #endif
   1488       }
   1489     } else if (!lj_opt_fwd_wasnonnil(J, loadop, tref_ref(xref))) {
   1490       /* Cannot derive that the previous value was non-nil, must do checks. */
   1491       if (xrefop == IR_HREF)  /* Guard against store to niltv. */
   1492 	emitir(IRTG(IR_NE, IRT_PGC), xref, lj_ir_kkptr(J, niltvg(J2G(J))));
   1493       if (ix->idxchain) {  /* Metamethod lookup required? */
   1494 	/* A check for NULL metatable is cheaper (hoistable) than a load. */
   1495 	if (!mt) {
   1496 	  TRef mtref = emitir(IRT(IR_FLOAD, IRT_TAB), ix->tab, IRFL_TAB_META);
   1497 	  emitir(IRTG(IR_EQ, IRT_TAB), mtref, lj_ir_knull(J, IRT_TAB));
   1498 	} else {
   1499 	  IRType t = itype2irt(oldv);
   1500 	  emitir(IRTG(loadop, t), xref, 0);  /* Guard for non-nil value. */
   1501 	}
   1502       }
   1503     } else {
   1504       keybarrier = 0;  /* Previous non-nil value kept the key alive. */
   1505     }
   1506     /* Convert int to number before storing. */
   1507     if (!LJ_DUALNUM && tref_isinteger(ix->val))
   1508       ix->val = emitir(IRTN(IR_CONV), ix->val, IRCONV_NUM_INT);
   1509     emitir(IRT(loadop+IRDELTA_L2S, tref_type(ix->val)), xref, ix->val);
   1510     if (keybarrier || tref_isgcv(ix->val))
   1511       emitir(IRT(IR_TBAR, IRT_NIL), ix->tab, 0);
   1512     /* Invalidate neg. metamethod cache for stores with certain string keys. */
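             /* E.g. a store like t.__index = v may introduce a metamethod,
             ** so the cached "no metamethod" flags must be cleared; a plain
             ** key like t.foo is proven harmless by nommstr().
             */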
   1513     if (!nommstr(J, ix->key)) {
   1514       TRef fref = emitir(IRT(IR_FREF, IRT_PGC), ix->tab, IRFL_TAB_NOMM);
   1515       emitir(IRT(IR_FSTORE, IRT_U8), fref, lj_ir_kint(J, 0));
   1516     }
   1517     J->needsnap = 1;
   1518     return 0;
   1519   }
   1520 }
   1521 
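         /* Record a table multi-store, e.g. for the tail of t = {1, f()}:
         ** BC_TSETM stores all results of f() at consecutive integer keys,
         ** starting at index i (2 in this example).
         */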
   1522 static void rec_tsetm(jit_State *J, BCReg ra, BCReg rn, int32_t i)
   1523 {
   1524   RecordIndex ix;
   1525   cTValue *basev = J->L->base;
   1526   GCtab *t = tabV(&basev[ra-1]);
   1527   settabV(J->L, &ix.tabv, t);
   1528   ix.tab = getslot(J, ra-1);
   1529   ix.idxchain = 0;
   1530 #ifdef LUAJIT_ENABLE_TABLE_BUMP
   1531   if ((J->flags & JIT_F_OPT_SINK)) {
   1532     if (t->asize < i+rn-ra)
   1533       lj_tab_reasize(J->L, t, i+rn-ra);
   1534     setnilV(&ix.keyv);
   1535     rec_idx_bump(J, &ix);
   1536   }
   1537 #endif
   1538   for (; ra < rn; i++, ra++) {
   1539     setintV(&ix.keyv, i);
   1540     ix.key = lj_ir_kint(J, i);
   1541     copyTV(J->L, &ix.valv, &basev[ra]);
   1542     ix.val = getslot(J, ra);
   1543     lj_record_idx(J, &ix);
   1544   }
   1545 }
   1546 
   1547 /* -- Upvalue access ------------------------------------------------------ */
   1548 
   1549 /* Check whether upvalue is immutable and ok to constify. */
   1550 static int rec_upvalue_constify(jit_State *J, GCupval *uvp)
   1551 {
   1552   if (uvp->flags & UV_IMMUTABLE) {
   1553     cTValue *o = uvval(uvp);
   1554     /* Don't constify objects that may retain large amounts of memory. */
   1555 #if LJ_HASFFI
   1556     if (tviscdata(o)) {
   1557       GCcdata *cd = cdataV(o);
   1558       if (!cdataisv(cd) && !(cd->marked & LJ_GC_FINALIZED)) {
   1559 	CType *ct = ctype_raw(ctype_ctsG(J2G(J)), cd->ctypeid);
   1560 	if (!ctype_hassize(ct->info) || ct->size <= 16)
   1561 	  return 1;
   1562       }
   1563       return 0;
   1564     }
   1565 #else
   1566     UNUSED(J);
   1567 #endif
   1568     if (!(tvistab(o) || tvisudata(o) || tvisthread(o)))
   1569       return 1;
   1570   }
   1571   return 0;
   1572 }
   1573 
   1574 /* Record upvalue load/store. */
   1575 static TRef rec_upvalue(jit_State *J, uint32_t uv, TRef val)
   1576 {
   1577   GCupval *uvp = &gcref(J->fn->l.uvptr[uv])->uv;
   1578   TRef fn = getcurrf(J);
   1579   IRRef uref;
   1580   int needbarrier = 0;
   1581   if (rec_upvalue_constify(J, uvp)) {  /* Try to constify immutable upvalue. */
   1582     TRef tr, kfunc;
   1583     lua_assert(val == 0);
   1584     if (!tref_isk(fn)) {  /* Late specialization of current function. */
   1585       if (J->pt->flags >= PROTO_CLC_POLY)
   1586 	goto noconstify;
   1587       kfunc = lj_ir_kfunc(J, J->fn);
   1588       emitir(IRTG(IR_EQ, IRT_FUNC), fn, kfunc);
   1589 #if LJ_FR2
   1590       J->base[-2] = kfunc;
   1591 #else
   1592       J->base[-1] = kfunc | TREF_FRAME;
   1593 #endif
   1594       fn = kfunc;
   1595     }
   1596     tr = lj_record_constify(J, uvval(uvp));
   1597     if (tr)
   1598       return tr;
   1599   }
   1600 noconstify:
   1601   /* Note: this effectively limits LJ_MAX_UPVAL to 127. */
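           /* The low byte is a disambiguation hash: UREFs with different
           ** hashes cannot alias, which alias analysis relies on.
           */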
   1602   uv = (uv << 8) | (hashrot(uvp->dhash, uvp->dhash + HASH_BIAS) & 0xff);
   1603   if (!uvp->closed) {
   1604     uref = tref_ref(emitir(IRTG(IR_UREFO, IRT_PGC), fn, uv));
   1605     /* In current stack? */
   1606     if (uvval(uvp) >= tvref(J->L->stack) &&
   1607 	uvval(uvp) < tvref(J->L->maxstack)) {
   1608       int32_t slot = (int32_t)(uvval(uvp) - (J->L->base - J->baseslot));
   1609       if (slot >= 0) {  /* Aliases an SSA slot? */
   1610 	emitir(IRTG(IR_EQ, IRT_PGC),
   1611 	       REF_BASE,
   1612 	       emitir(IRT(IR_ADD, IRT_PGC), uref,
   1613 		      lj_ir_kint(J, (slot - 1 - LJ_FR2) * -8)));
   1614 	slot -= (int32_t)J->baseslot;  /* Note: slot number may be negative! */
   1615 	if (val == 0) {
   1616 	  return getslot(J, slot);
   1617 	} else {
   1618 	  J->base[slot] = val;
   1619 	  if (slot >= (int32_t)J->maxslot) J->maxslot = (BCReg)(slot+1);
   1620 	  return 0;
   1621 	}
   1622       }
   1623     }
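             /* Otherwise guard that the upvalue does not alias a slot at or
             ** above the trace base. The unsigned compare lets addresses
             ** below the base wrap around and pass.
             */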
   1624     emitir(IRTG(IR_UGT, IRT_PGC),
   1625 	   emitir(IRT(IR_SUB, IRT_PGC), uref, REF_BASE),
   1626 	   lj_ir_kint(J, (J->baseslot + J->maxslot) * 8));
   1627   } else {
   1628     needbarrier = 1;
   1629     uref = tref_ref(emitir(IRTG(IR_UREFC, IRT_PGC), fn, uv));
   1630   }
   1631   if (val == 0) {  /* Upvalue load */
   1632     IRType t = itype2irt(uvval(uvp));
   1633     TRef res = emitir(IRTG(IR_ULOAD, t), uref, 0);
   1634     if (irtype_ispri(t)) res = TREF_PRI(t);  /* Canonicalize primitive refs. */
   1635     return res;
   1636   } else {  /* Upvalue store. */
   1637     /* Convert int to number before storing. */
   1638     if (!LJ_DUALNUM && tref_isinteger(val))
   1639       val = emitir(IRTN(IR_CONV), val, IRCONV_NUM_INT);
   1640     emitir(IRT(IR_USTORE, tref_type(val)), uref, val);
   1641     if (needbarrier && tref_isgcv(val))
   1642       emitir(IRT(IR_OBAR, IRT_NIL), uref, val);
   1643     J->needsnap = 1;
   1644     return 0;
   1645   }
   1646 }
   1647 
   1648 /* -- Record calls to Lua functions --------------------------------------- */
   1649 
   1650 /* Check unroll limits for calls. */
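         /* Roughly: a self-recursive call, e.g.
         **   local function f(n) if n > 0 then return f(n-1) end end
         ** stacks frames with the same prototype. Recursion back to the
         ** trace start is bounded by recunroll (and turned into a tail- or
         ** up-recursion link); inlining at any other call site is bounded
         ** by callunroll.
         */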
   1651 static void check_call_unroll(jit_State *J, TraceNo lnk)
   1652 {
   1653   cTValue *frame = J->L->base - 1;
   1654   void *pc = mref(frame_func(frame)->l.pc, void);
   1655   int32_t depth = J->framedepth;
   1656   int32_t count = 0;
   1657   if ((J->pt->flags & PROTO_VARARG)) depth--;  /* Vararg frame still missing. */
   1658   for (; depth > 0; depth--) {  /* Count frames with same prototype. */
   1659     if (frame_iscont(frame)) depth--;
   1660     frame = frame_prev(frame);
   1661     if (mref(frame_func(frame)->l.pc, void) == pc)
   1662       count++;
   1663   }
   1664   if (J->pc == J->startpc) {
   1665     if (count + J->tailcalled > J->param[JIT_P_recunroll]) {
   1666       J->pc++;
   1667       if (J->framedepth + J->retdepth == 0)
   1668 	lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno);  /* Tail-rec. */
   1669       else
   1670 	lj_record_stop(J, LJ_TRLINK_UPREC, J->cur.traceno);  /* Up-recursion. */
   1671     }
   1672   } else {
   1673     if (count > J->param[JIT_P_callunroll]) {
   1674       if (lnk) {  /* Possible tail- or up-recursion. */
   1675 	lj_trace_flush(J, lnk);  /* Flush trace that only returns. */
   1676 	/* Set a small, pseudo-random hotcount for a quick retry of JFUNC*. */
   1677 	hotcount_set(J2GG(J), J->pc+1, LJ_PRNG_BITS(J, 4));
   1678       }
   1679       lj_trace_err(J, LJ_TRERR_CUNROLL);
   1680     }
   1681   }
   1682 }
   1683 
   1684 /* Record Lua function setup. */
   1685 static void rec_func_setup(jit_State *J)
   1686 {
   1687   GCproto *pt = J->pt;
   1688   BCReg s, numparams = pt->numparams;
   1689   if ((pt->flags & PROTO_NOJIT))
   1690     lj_trace_err(J, LJ_TRERR_CJITOFF);
   1691   if (J->baseslot + pt->framesize >= LJ_MAX_JSLOTS)
   1692     lj_trace_err(J, LJ_TRERR_STACKOV);
   1693   /* Fill up missing parameters with nil. */
   1694   for (s = J->maxslot; s < numparams; s++)
   1695     J->base[s] = TREF_NIL;
   1696   /* The remaining slots should never be read before they are written. */
   1697   J->maxslot = numparams;
   1698 }
   1699 
   1700 /* Record Lua vararg function setup. */
   1701 static void rec_func_vararg(jit_State *J)
   1702 {
   1703   GCproto *pt = J->pt;
   1704   BCReg s, fixargs, vframe = J->maxslot+1+LJ_FR2;
   1705   lua_assert((pt->flags & PROTO_VARARG));
   1706   if (J->baseslot + vframe + pt->framesize >= LJ_MAX_JSLOTS)
   1707     lj_trace_err(J, LJ_TRERR_STACKOV);
   1708   J->base[vframe-1-LJ_FR2] = J->base[-1-LJ_FR2];  /* Copy function up. */
   1709 #if LJ_FR2
   1710   J->base[vframe-1] = TREF_FRAME;
   1711 #endif
   1712   /* Copy fixarg slots up and set their original slots to nil. */
   1713   fixargs = pt->numparams < J->maxslot ? pt->numparams : J->maxslot;
   1714   for (s = 0; s < fixargs; s++) {
   1715     J->base[vframe+s] = J->base[s];
   1716     J->base[s] = TREF_NIL;
   1717   }
   1718   J->maxslot = fixargs;
   1719   J->framedepth++;
   1720   J->base += vframe;
   1721   J->baseslot += vframe;
   1722 }
   1723 
   1724 /* Record entry to a Lua function. */
   1725 static void rec_func_lua(jit_State *J)
   1726 {
   1727   rec_func_setup(J);
   1728   check_call_unroll(J, 0);
   1729 }
   1730 
   1731 /* Record entry to an already compiled function. */
   1732 static void rec_func_jit(jit_State *J, TraceNo lnk)
   1733 {
   1734   GCtrace *T;
   1735   rec_func_setup(J);
   1736   T = traceref(J, lnk);
   1737   if (T->linktype == LJ_TRLINK_RETURN) {  /* Trace returns to interpreter? */
   1738     check_call_unroll(J, lnk);
   1739     /* Temporarily unpatch JFUNC* to continue recording across function. */
   1740     J->patchins = *J->pc;
   1741     J->patchpc = (BCIns *)J->pc;
   1742     *J->patchpc = T->startins;
   1743     return;
   1744   }
   1745   J->instunroll = 0;  /* Cannot continue across a compiled function. */
   1746   if (J->pc == J->startpc && J->framedepth + J->retdepth == 0)
   1747     lj_record_stop(J, LJ_TRLINK_TAILREC, J->cur.traceno);  /* Extra tail-rec. */
   1748   else
   1749     lj_record_stop(J, LJ_TRLINK_ROOT, lnk);  /* Link to the function. */
   1750 }
   1751 
   1752 /* -- Vararg handling ----------------------------------------------------- */
   1753 
   1754 /* Detect y = select(x, ...) idiom. */
   1755 static int select_detect(jit_State *J)
   1756 {
   1757   BCIns ins = J->pc[1];
   1758   if (bc_op(ins) == BC_CALLM && bc_b(ins) == 2 && bc_c(ins) == 1) {
   1759     cTValue *func = &J->L->base[bc_a(ins)];
   1760     if (tvisfunc(func) && funcV(func)->c.ffid == FF_select) {
   1761       TRef kfunc = lj_ir_kfunc(J, funcV(func));
   1762       emitir(IRTG(IR_EQ, IRT_FUNC), getslot(J, bc_a(ins)), kfunc);
   1763       return 1;
   1764     }
   1765   }
   1766   return 0;
   1767 }
   1768 
   1769 /* Record vararg instruction. */
   1770 static void rec_varg(jit_State *J, BCReg dst, ptrdiff_t nresults)
   1771 {
   1772   int32_t numparams = J->pt->numparams;
   1773   ptrdiff_t nvararg = frame_delta(J->L->base-1) - numparams - 1 - LJ_FR2;
   1774   lua_assert(frame_isvarg(J->L->base-1));
   1775   if (J->framedepth > 0) {  /* Simple case: varargs defined on-trace. */
   1776     ptrdiff_t i;
   1777     if (nvararg < 0) nvararg = 0;
   1778     if (nresults == -1) {
   1779       nresults = nvararg;
   1780       J->maxslot = dst + (BCReg)nvararg;
   1781     } else if (dst + nresults > J->maxslot) {
   1782       J->maxslot = dst + (BCReg)nresults;
   1783     }
   1784     for (i = 0; i < nresults; i++)
   1785       J->base[dst+i] = i < nvararg ? getslot(J, i - nvararg - 1 - LJ_FR2) : TREF_NIL;
   1786   } else {  /* Unknown number of varargs passed to trace. */
   1787     TRef fr = emitir(IRTI(IR_SLOAD), LJ_FR2, IRSLOAD_READONLY|IRSLOAD_FRAME);
   1788     int32_t frofs = 8*(1+LJ_FR2+numparams)+FRAME_VARG;
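             /* fr holds the vararg frame's size+type word; frofs is its
             ** value for zero varargs, so fr - frofs == 8*nvararg. The
             ** guards below specialize the trace to a vararg count range.
             */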
   1789     if (nresults >= 0) {  /* Known fixed number of results. */
   1790       ptrdiff_t i;
   1791       if (nvararg > 0) {
   1792 	ptrdiff_t nload = nvararg >= nresults ? nresults : nvararg;
   1793 	TRef vbase;
   1794 	if (nvararg >= nresults)
   1795 	  emitir(IRTGI(IR_GE), fr, lj_ir_kint(J, frofs+8*(int32_t)nresults));
   1796 	else
   1797 	  emitir(IRTGI(IR_EQ), fr,
   1798 		 lj_ir_kint(J, (int32_t)frame_ftsz(J->L->base-1)));
   1799 	vbase = emitir(IRT(IR_SUB, IRT_IGC), REF_BASE, fr);
   1800 	vbase = emitir(IRT(IR_ADD, IRT_PGC), vbase, lj_ir_kint(J, frofs-8));
   1801 	for (i = 0; i < nload; i++) {
   1802 	  IRType t = itype2irt(&J->L->base[i-1-LJ_FR2-nvararg]);
   1803 	  TRef aref = emitir(IRT(IR_AREF, IRT_PGC),
   1804 			     vbase, lj_ir_kint(J, (int32_t)i));
   1805 	  TRef tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
   1806 	  if (irtype_ispri(t)) tr = TREF_PRI(t);  /* Canonicalize primitives. */
   1807 	  J->base[dst+i] = tr;
   1808 	}
   1809       } else {
   1810 	emitir(IRTGI(IR_LE), fr, lj_ir_kint(J, frofs));
   1811 	nvararg = 0;
   1812       }
   1813       for (i = nvararg; i < nresults; i++)
   1814 	J->base[dst+i] = TREF_NIL;
   1815       if (dst + (BCReg)nresults > J->maxslot)
   1816 	J->maxslot = dst + (BCReg)nresults;
   1817     } else if (select_detect(J)) {  /* y = select(x, ...) */
   1818       TRef tridx = J->base[dst-1];
   1819       TRef tr = TREF_NIL;
   1820       ptrdiff_t idx = lj_ffrecord_select_mode(J, tridx, &J->L->base[dst-1]);
   1821       if (idx < 0) goto nyivarg;
   1822       if (idx != 0 && !tref_isinteger(tridx))
   1823 	tridx = emitir(IRTGI(IR_CONV), tridx, IRCONV_INT_NUM|IRCONV_INDEX);
   1824       if (idx != 0 && tref_isk(tridx)) {
   1825 	emitir(IRTGI(idx <= nvararg ? IR_GE : IR_LT),
   1826 	       fr, lj_ir_kint(J, frofs+8*(int32_t)idx));
   1827 	frofs -= 8;  /* Bias for 1-based index. */
   1828       } else if (idx <= nvararg) {  /* Compute size. */
   1829 	TRef tmp = emitir(IRTI(IR_ADD), fr, lj_ir_kint(J, -frofs));
   1830 	if (numparams)
   1831 	  emitir(IRTGI(IR_GE), tmp, lj_ir_kint(J, 0));
   1832 	tr = emitir(IRTI(IR_BSHR), tmp, lj_ir_kint(J, 3));
   1833 	if (idx != 0) {
   1834 	  tridx = emitir(IRTI(IR_ADD), tridx, lj_ir_kint(J, -1));
   1835 	  rec_idx_abc(J, tr, tridx, (uint32_t)nvararg);
   1836 	}
   1837       } else {
   1838 	TRef tmp = lj_ir_kint(J, frofs);
   1839 	if (idx != 0) {
   1840 	  TRef tmp2 = emitir(IRTI(IR_BSHL), tridx, lj_ir_kint(J, 3));
   1841 	  tmp = emitir(IRTI(IR_ADD), tmp2, tmp);
   1842 	} else {
   1843 	  tr = lj_ir_kint(J, 0);
   1844 	}
   1845 	emitir(IRTGI(IR_LT), fr, tmp);
   1846       }
   1847       if (idx != 0 && idx <= nvararg) {
   1848 	IRType t;
   1849 	TRef aref, vbase = emitir(IRT(IR_SUB, IRT_IGC), REF_BASE, fr);
   1850 	vbase = emitir(IRT(IR_ADD, IRT_PGC), vbase,
   1851 		       lj_ir_kint(J, frofs-(8<<LJ_FR2)));
   1852 	t = itype2irt(&J->L->base[idx-2-LJ_FR2-nvararg]);
   1853 	aref = emitir(IRT(IR_AREF, IRT_PGC), vbase, tridx);
   1854 	tr = emitir(IRTG(IR_VLOAD, t), aref, 0);
   1855 	if (irtype_ispri(t)) tr = TREF_PRI(t);  /* Canonicalize primitives. */
   1856       }
   1857       J->base[dst-2-LJ_FR2] = tr;
   1858       J->maxslot = dst-1-LJ_FR2;
   1859       J->bcskip = 2;  /* Skip CALLM + select. */
   1860     } else {
   1861     nyivarg:
   1862       setintV(&J->errinfo, BC_VARG);
   1863       lj_trace_err_info(J, LJ_TRERR_NYIBC);
   1864     }
   1865   }
   1866 }
   1867 
   1868 /* -- Record allocations -------------------------------------------------- */
   1869 
   1870 static TRef rec_tnew(jit_State *J, uint32_t ah)
   1871 {
   1872   uint32_t asize = ah & 0x7ff;
   1873   uint32_t hbits = ah >> 11;
   1874   TRef tr;
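           /* asize 0x7ff is the escape hint for an array part that did not
           ** fit the 11-bit operand (this likely mirrors the VM's handling
           ** of the same hint); request 0x801 slots instead.
           */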
   1875   if (asize == 0x7ff) asize = 0x801;
   1876   tr = emitir(IRTG(IR_TNEW, IRT_TAB), asize, hbits);
   1877 #ifdef LUAJIT_ENABLE_TABLE_BUMP
   1878   J->rbchash[(tr & (RBCHASH_SLOTS-1))].ref = tref_ref(tr);
   1879   setmref(J->rbchash[(tr & (RBCHASH_SLOTS-1))].pc, J->pc);
   1880   setgcref(J->rbchash[(tr & (RBCHASH_SLOTS-1))].pt, obj2gco(J->pt));
   1881 #endif
   1882   return tr;
   1883 }
   1884 
   1885 /* -- Concatenation ------------------------------------------------------- */
   1886 
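         /* Record concatenation over a slot range, e.g. a..b..c. A run of
         ** number/string operands becomes a single buffer append sequence;
         ** any other operand defers to the __concat metamethod.
         */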
   1887 static TRef rec_cat(jit_State *J, BCReg baseslot, BCReg topslot)
   1888 {
   1889   TRef *top = &J->base[topslot];
   1890   TValue savetv[5];
   1891   BCReg s;
   1892   RecordIndex ix;
   1893   lua_assert(baseslot < topslot);
   1894   for (s = baseslot; s <= topslot; s++)
   1895     (void)getslot(J, s);  /* Ensure all arguments have a reference. */
   1896   if (tref_isnumber_str(top[0]) && tref_isnumber_str(top[-1])) {
   1897     TRef tr, hdr, *trp, *xbase, *base = &J->base[baseslot];
   1898     /* First convert numbers to strings. */
   1899     for (trp = top; trp >= base; trp--) {
   1900       if (tref_isnumber(*trp))
   1901 	*trp = emitir(IRT(IR_TOSTR, IRT_STR), *trp,
   1902 		      tref_isnum(*trp) ? IRTOSTR_NUM : IRTOSTR_INT);
   1903       else if (!tref_isstr(*trp))
   1904 	break;
   1905     }
   1906     xbase = ++trp;
   1907     tr = hdr = emitir(IRT(IR_BUFHDR, IRT_PGC),
   1908 		      lj_ir_kptr(J, &J2G(J)->tmpbuf), IRBUFHDR_RESET);
   1909     do {
   1910       tr = emitir(IRT(IR_BUFPUT, IRT_PGC), tr, *trp++);
   1911     } while (trp <= top);
   1912     tr = emitir(IRT(IR_BUFSTR, IRT_STR), tr, hdr);
   1913     J->maxslot = (BCReg)(xbase - J->base);
   1914     if (xbase == base) return tr;  /* Return simple concatenation result. */
   1915     /* Pass partial result. */
   1916     topslot = J->maxslot--;
   1917     *xbase = tr;
   1918     top = xbase;
   1919     setstrV(J->L, &ix.keyv, &J2G(J)->strempty);  /* Simulate string result. */
   1920   } else {
   1921     J->maxslot = topslot-1;
   1922     copyTV(J->L, &ix.keyv, &J->L->base[topslot]);
   1923   }
   1924   copyTV(J->L, &ix.tabv, &J->L->base[topslot-1]);
   1925   ix.tab = top[-1];
   1926   ix.key = top[0];
   1927   memcpy(savetv, &J->L->base[topslot-1], sizeof(savetv));  /* Save slots. */
   1928   rec_mm_arith(J, &ix, MM_concat);  /* Call __concat metamethod. */
   1929   memcpy(&J->L->base[topslot-1], savetv, sizeof(savetv));  /* Restore slots. */
   1930   return 0;  /* No result yet. */
   1931 }
   1932 
   1933 /* -- Record bytecode ops ------------------------------------------------- */
   1934 
   1935 /* Prepare for comparison. */
   1936 static void rec_comp_prep(jit_State *J)
   1937 {
   1938   /* Prevent merging with snapshot #0 (GC exit) since we fixup the PC. */
   1939   if (J->cur.nsnap == 1 && J->cur.snap[0].ref == J->cur.nins)
   1940     emitir_raw(IRT(IR_NOP, IRT_NIL), 0, 0);
   1941   lj_snap_add(J);
   1942 }
   1943 
   1944 /* Fixup comparison. */
   1945 static void rec_comp_fixup(jit_State *J, const BCIns *pc, int cond)
   1946 {
   1947   BCIns jmpins = pc[1];
   1948   const BCIns *npc = pc + 2 + (cond ? bc_j(jmpins) : 0);
   1949   SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
   1950   /* Set PC to opposite target to avoid re-recording the comp. in side trace. */
   1951 #if LJ_FR2
   1952   SnapEntry *flink = &J->cur.snapmap[snap->mapofs + snap->nent];
   1953   uint64_t pcbase;
   1954   memcpy(&pcbase, flink, sizeof(uint64_t));
   1955   pcbase = (pcbase & 0xff) | (u64ptr(npc) << 8);
   1956   memcpy(flink, &pcbase, sizeof(uint64_t));
   1957 #else
   1958   J->cur.snapmap[snap->mapofs + snap->nent] = SNAP_MKPC(npc);
   1959 #endif
   1960   J->needsnap = 1;
   1961   if (bc_a(jmpins) < J->maxslot) J->maxslot = bc_a(jmpins);
   1962   lj_snap_shrink(J);  /* Shrink last snapshot if possible. */
   1963 }
   1964 
   1965 /* Record the next bytecode instruction (_before_ it's executed). */
   1966 void lj_record_ins(jit_State *J)
   1967 {
   1968   cTValue *lbase;
   1969   RecordIndex ix;
   1970   const BCIns *pc;
   1971   BCIns ins;
   1972   BCOp op;
   1973   TRef ra, rb, rc;
   1974 
   1975   /* Perform post-processing action before recording the next instruction. */
   1976   if (LJ_UNLIKELY(J->postproc != LJ_POST_NONE)) {
   1977     switch (J->postproc) {
   1978     case LJ_POST_FIXCOMP:  /* Fixup comparison. */
   1979       pc = (const BCIns *)(uintptr_t)J2G(J)->tmptv.u64;
   1980       rec_comp_fixup(J, pc, (!tvistruecond(&J2G(J)->tmptv2) ^ (bc_op(*pc)&1)));
   1981       /* fallthrough */
   1982     case LJ_POST_FIXGUARD:  /* Fixup and emit pending guard. */
   1983     case LJ_POST_FIXGUARDSNAP:  /* Fixup and emit pending guard and snapshot. */
   1984       if (!tvistruecond(&J2G(J)->tmptv2)) {
   1985 	J->fold.ins.o ^= 1;  /* Flip guard to opposite. */
   1986 	if (J->postproc == LJ_POST_FIXGUARDSNAP) {
   1987 	  SnapShot *snap = &J->cur.snap[J->cur.nsnap-1];
   1988 	  J->cur.snapmap[snap->mapofs+snap->nent-1]--;  /* False -> true. */
   1989 	}
   1990       }
   1991       lj_opt_fold(J);  /* Emit pending guard. */
   1992       /* fallthrough */
   1993     case LJ_POST_FIXBOOL:
   1994       if (!tvistruecond(&J2G(J)->tmptv2)) {
   1995 	BCReg s;
   1996 	TValue *tv = J->L->base;
   1997 	for (s = 0; s < J->maxslot; s++)  /* Fixup stack slot (if any). */
   1998 	  if (J->base[s] == TREF_TRUE && tvisfalse(&tv[s])) {
   1999 	    J->base[s] = TREF_FALSE;
   2000 	    break;
   2001 	  }
   2002       }
   2003       break;
   2004     case LJ_POST_FIXCONST:
   2005       {
   2006 	BCReg s;
   2007 	TValue *tv = J->L->base;
   2008 	for (s = 0; s < J->maxslot; s++)  /* Constify stack slots (if any). */
   2009 	  if (J->base[s] == TREF_NIL && !tvisnil(&tv[s]))
   2010 	    J->base[s] = lj_record_constify(J, &tv[s]);
   2011       }
   2012       break;
   2013     case LJ_POST_FFRETRY:  /* Suppress recording of retried fast function. */
   2014       if (bc_op(*J->pc) >= BC__MAX)
   2015 	return;
   2016       break;
   2017     default: lua_assert(0); break;
   2018     }
   2019     J->postproc = LJ_POST_NONE;
   2020   }
   2021 
   2022   /* Need snapshot before recording next bytecode (e.g. after a store). */
   2023   if (J->needsnap) {
   2024     J->needsnap = 0;
   2025     lj_snap_purge(J);
   2026     lj_snap_add(J);
   2027     J->mergesnap = 1;
   2028   }
   2029 
   2030   /* Skip some bytecodes. */
   2031   if (LJ_UNLIKELY(J->bcskip > 0)) {
   2032     J->bcskip--;
   2033     return;
   2034   }
   2035 
   2036   /* Record only closed loops for root traces. */
   2037   pc = J->pc;
   2038   if (J->framedepth == 0 &&
   2039      (MSize)((char *)pc - (char *)J->bc_min) >= J->bc_extent)
   2040     lj_trace_err(J, LJ_TRERR_LLEAVE);
   2041 
   2042 #ifdef LUA_USE_ASSERT
   2043   rec_check_slots(J);
   2044   rec_check_ir(J);
   2045 #endif
   2046 
   2047 #if LJ_HASPROFILE
   2048   rec_profile_ins(J, pc);
   2049 #endif
   2050 
   2051   /* Keep a copy of the runtime values of var/num/str operands. */
   2052 #define rav	(&ix.valv)
   2053 #define rbv	(&ix.tabv)
   2054 #define rcv	(&ix.keyv)
   2055 
   2056   lbase = J->L->base;
   2057   ins = *pc;
   2058   op = bc_op(ins);
   2059   ra = bc_a(ins);
   2060   ix.val = 0;
   2061   switch (bcmode_a(op)) {
   2062   case BCMvar:
   2063     copyTV(J->L, rav, &lbase[ra]); ix.val = ra = getslot(J, ra); break;
   2064   default: break;  /* Handled later. */
   2065   }
   2066   rb = bc_b(ins);
   2067   rc = bc_c(ins);
   2068   switch (bcmode_b(op)) {
   2069   case BCMnone: rb = 0; rc = bc_d(ins); break;  /* Upgrade rc to 'rd'. */
   2070   case BCMvar:
   2071     copyTV(J->L, rbv, &lbase[rb]); ix.tab = rb = getslot(J, rb); break;
   2072   default: break;  /* Handled later. */
   2073   }
   2074   switch (bcmode_c(op)) {
   2075   case BCMvar:
   2076     copyTV(J->L, rcv, &lbase[rc]); ix.key = rc = getslot(J, rc); break;
   2077   case BCMpri: setpriV(rcv, ~rc); ix.key = rc = TREF_PRI(IRT_NIL+rc); break;
   2078   case BCMnum: { cTValue *tv = proto_knumtv(J->pt, rc);
   2079     copyTV(J->L, rcv, tv); ix.key = rc = tvisint(tv) ? lj_ir_kint(J, intV(tv)) :
   2080     lj_ir_knumint(J, numV(tv)); } break;
   2081   case BCMstr: { GCstr *s = gco2str(proto_kgc(J->pt, ~(ptrdiff_t)rc));
   2082     setstrV(J->L, rcv, s); ix.key = rc = lj_ir_kstr(J, s); } break;
   2083   default: break;  /* Handled later. */
   2084   }
   2085 
   2086   switch (op) {
   2087 
   2088   /* -- Comparison ops ---------------------------------------------------- */
   2089 
   2090   case BC_ISLT: case BC_ISGE: case BC_ISLE: case BC_ISGT:
   2091 #if LJ_HASFFI
   2092     if (tref_iscdata(ra) || tref_iscdata(rc)) {
   2093       rec_mm_comp_cdata(J, &ix, op, ((int)op & 2) ? MM_le : MM_lt);
   2094       break;
   2095     }
   2096 #endif
   2097     /* Emit nothing for two numeric or string consts. */
   2098     if (!(tref_isk2(ra,rc) && tref_isnumber_str(ra) && tref_isnumber_str(rc))) {
   2099       IRType ta = tref_isinteger(ra) ? IRT_INT : tref_type(ra);
   2100       IRType tc = tref_isinteger(rc) ? IRT_INT : tref_type(rc);
   2101       int irop;
   2102       if (ta != tc) {
   2103 	/* Widen mixed number/int comparisons to number/number comparison. */
   2104 	if (ta == IRT_INT && tc == IRT_NUM) {
   2105 	  ra = emitir(IRTN(IR_CONV), ra, IRCONV_NUM_INT);
   2106 	  ta = IRT_NUM;
   2107 	} else if (ta == IRT_NUM && tc == IRT_INT) {
   2108 	  rc = emitir(IRTN(IR_CONV), rc, IRCONV_NUM_INT);
    2109 	} else if (!LJ_51) {
    2110 	  ta = IRT_NIL;  /* Force metamethod for different types. */
    2111 	} else if (!((ta == IRT_FALSE || ta == IRT_TRUE) &&
    2112 		     (tc == IRT_FALSE || tc == IRT_TRUE))) {
    2113 	  break;  /* Interpreter will throw for two different types. */
    2114 	}
   2115       }
   2116       rec_comp_prep(J);
   2117       irop = (int)op - (int)BC_ISLT + (int)IR_LT;
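               /* Emit a guard for the outcome actually observed: if the
               ** runtime comparison is false, the condition is negated. The
               ** unordered variants keep NaN operands on the correct side
               ** of the guard.
               */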
   2118       if (ta == IRT_NUM) {
   2119 	if ((irop & 1)) irop ^= 4;  /* ISGE/ISGT are unordered. */
   2120 	if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
   2121 	  irop ^= 5;
   2122       } else if (ta == IRT_INT) {
   2123 	if (!lj_ir_numcmp(numberVnum(rav), numberVnum(rcv), (IROp)irop))
   2124 	  irop ^= 1;
   2125       } else if (ta == IRT_STR) {
   2126 	if (!lj_ir_strcmp(strV(rav), strV(rcv), (IROp)irop)) irop ^= 1;
   2127 	ra = lj_ir_call(J, IRCALL_lj_str_cmp, ra, rc);
   2128 	rc = lj_ir_kint(J, 0);
   2129 	ta = IRT_INT;
   2130       } else {
   2131 	rec_mm_comp(J, &ix, (int)op);
   2132 	break;
   2133       }
   2134       emitir(IRTG(irop, ta), ra, rc);
   2135       rec_comp_fixup(J, J->pc, ((int)op ^ irop) & 1);
   2136     }
   2137     break;
   2138 
   2139   case BC_ISEQV: case BC_ISNEV:
   2140   case BC_ISEQS: case BC_ISNES:
   2141   case BC_ISEQN: case BC_ISNEN:
   2142   case BC_ISEQP: case BC_ISNEP:
   2143 #if LJ_HASFFI
   2144     if (tref_iscdata(ra) || tref_iscdata(rc)) {
   2145       rec_mm_comp_cdata(J, &ix, op, MM_eq);
   2146       break;
   2147     }
   2148 #endif
   2149     /* Emit nothing for two non-table, non-udata consts. */
   2150     if (!(tref_isk2(ra, rc) && !(tref_istab(ra) || tref_isudata(ra)))) {
   2151       int diff;
   2152       rec_comp_prep(J);
   2153       diff = lj_record_objcmp(J, ra, rc, rav, rcv);
   2154       if (diff == 2 || !(tref_istab(ra) || tref_isudata(ra)))
   2155 	rec_comp_fixup(J, J->pc, ((int)op & 1) == !diff);
   2156       else if (diff == 1)  /* Only check __eq if different, but same type. */
   2157 	rec_mm_equal(J, &ix, (int)op);
   2158     }
   2159     break;
   2160 
   2161   /* -- Unary test and copy ops ------------------------------------------- */
   2162 
   2163   case BC_ISTC: case BC_ISFC:
   2164     if ((op & 1) == tref_istruecond(rc))
   2165       rc = 0;  /* Don't store if condition is not true. */
   2166     /* fallthrough */
   2167   case BC_IST: case BC_ISF:  /* Type specialization suffices. */
   2168     if (bc_a(pc[1]) < J->maxslot)
   2169       J->maxslot = bc_a(pc[1]);  /* Shrink used slots. */
   2170     break;
   2171 
   2172   case BC_ISTYPE: case BC_ISNUM:
   2173     /* These coercions need to correspond with lj_meta_istype(). */
   2174     if (LJ_DUALNUM && rc == ~LJ_TNUMX+1)
   2175       ra = lj_opt_narrow_toint(J, ra);
   2176     else if (rc == ~LJ_TNUMX+2)
   2177       ra = lj_ir_tonum(J, ra);
   2178     else if (rc == ~LJ_TSTR+1)
   2179       ra = lj_ir_tostr(J, ra);
   2180     /* else: type specialization suffices. */
   2181     J->base[bc_a(ins)] = ra;
   2182     break;
   2183 
   2184   /* -- Unary ops --------------------------------------------------------- */
   2185 
   2186   case BC_NOT:
   2187     /* Type specialization already forces const result. */
   2188     rc = tref_istruecond(rc) ? TREF_FALSE : TREF_TRUE;
   2189     break;
   2190 
   2191   case BC_LEN:
   2192     if (tref_isstr(rc))
   2193       rc = emitir(IRTI(IR_FLOAD), rc, IRFL_STR_LEN);
   2194     else if (LJ_51 && tref_istab(rc))
   2195       rc = lj_ir_call(J, IRCALL_lj_tab_len, rc);
   2196     else
   2197       rc = rec_mm_len(J, rc, rcv);
   2198     break;
   2199 #if LJ_53
    2200 /* -- Bitwise ops -------------------------------------------------------- */
   2201   case BC_BNOT:
   2202     ix.tab = rc;
    2203     copyTV(J->L, rbv, rcv);
             /* fallthrough */
   2204   case BC_IDIV:
   2205   case BC_BAND:
   2206   case BC_BOR:
   2207   case BC_BXOR:
   2208   case BC_SHL:
   2209   case BC_SHR:
   2210   {
   2211     MMS opmm = bcmode_mm(op);
   2212     if (!(rc = ljx_rec_bitwise(J, rb, rc, rbv, rcv, opmm)))
    2213       rc = rec_mm_arith(J, &ix, opmm);  /* Fall back to metamethod. */
   2214     break;
   2215   }
   2216 #endif
   2217 
   2218   /* -- Arithmetic ops ---------------------------------------------------- */
   2219 
   2220   case BC_UNM:
   2221     if (tref_isnumber_str(rc)) {
   2222       rc = lj_opt_narrow_unm(J, rc, rcv);
   2223     } else {
   2224       ix.tab = rc;
   2225       copyTV(J->L, &ix.tabv, rcv);
   2226       rc = rec_mm_arith(J, &ix, MM_unm);
   2227     }
   2228     break;
   2229 
   2230   case BC_ADDNV: case BC_SUBNV: case BC_MULNV: case BC_DIVNV: case BC_MODNV:
   2231     /* Swap rb/rc and rbv/rcv. rav is temp. */
   2232     ix.tab = rc; ix.key = rc = rb; rb = ix.tab;
   2233     copyTV(J->L, rav, rbv);
   2234     copyTV(J->L, rbv, rcv);
   2235     copyTV(J->L, rcv, rav);
   2236     if (op == BC_MODNV)
   2237       goto recmod;
   2238     /* fallthrough */
   2239   case BC_ADDVN: case BC_SUBVN: case BC_MULVN: case BC_DIVVN:
   2240   case BC_ADDVV: case BC_SUBVV: case BC_MULVV: case BC_DIVVV: {
   2241     MMS mm = bcmode_mm(op);
   2242     if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
   2243       rc = lj_opt_narrow_arith(J, rb, rc, rbv, rcv,
   2244 			       (int)mm - (int)MM_add + (int)IR_ADD);
   2245     else
   2246       rc = rec_mm_arith(J, &ix, mm);
   2247     break;
   2248     }
   2249 
   2250   case BC_MODVN: case BC_MODVV:
   2251   recmod:
   2252     if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
   2253       rc = lj_opt_narrow_mod(J, rb, rc, rbv, rcv);
   2254     else
   2255       rc = rec_mm_arith(J, &ix, MM_mod);
   2256     break;
   2257 
   2258   case BC_POW:
   2259     if (tref_isnumber_str(rb) && tref_isnumber_str(rc))
   2260       rc = lj_opt_narrow_pow(J, rb, rc, rbv, rcv);
   2261     else
   2262       rc = rec_mm_arith(J, &ix, MM_pow);
   2263     break;
   2264 
   2265   /* -- Miscellaneous ops ------------------------------------------------- */
   2266 
   2267   case BC_CAT:
   2268     rc = rec_cat(J, rb, rc);
   2269     break;
   2270 
   2271   /* -- Constant and move ops --------------------------------------------- */
   2272 
   2273   case BC_MOV:
   2274     /* Clear gap of method call to avoid resurrecting previous refs. */
   2275     if (ra > J->maxslot) {
   2276 #if LJ_FR2
   2277       memset(J->base + J->maxslot, 0, (ra - J->maxslot) * sizeof(TRef));
   2278 #else
   2279       J->base[ra-1] = 0;
   2280 #endif
   2281     }
   2282     break;
   2283   case BC_KSTR: case BC_KNUM: case BC_KPRI:
   2284     break;
   2285   case BC_KSHORT:
   2286     rc = lj_ir_kint(J, (int32_t)(int16_t)rc);
   2287     break;
   2288   case BC_KNIL:
   2289     while (ra <= rc)
   2290       J->base[ra++] = TREF_NIL;
   2291     if (rc >= J->maxslot) J->maxslot = rc+1;
   2292     break;
   2293 #if LJ_HASFFI
   2294   case BC_KCDATA:
   2295     rc = lj_ir_kgc(J, proto_kgc(J->pt, ~(ptrdiff_t)rc), IRT_CDATA);
   2296     break;
   2297 #endif
   2298 
   2299   /* -- Upvalue and function ops ------------------------------------------ */
   2300 
   2301   case BC_UGET:
   2302     rc = rec_upvalue(J, rc, 0);
   2303     break;
   2304   case BC_USETV: case BC_USETS: case BC_USETN: case BC_USETP:
   2305     rec_upvalue(J, ra, rc);
   2306     break;
   2307 
   2308   /* -- Table ops --------------------------------------------------------- */
   2309 
   2310   case BC_GGET: case BC_GSET:
   2311     settabV(J->L, &ix.tabv, tabref(J->fn->l.env));
   2312     ix.tab = emitir(IRT(IR_FLOAD, IRT_TAB), getcurrf(J), IRFL_FUNC_ENV);
   2313     ix.idxchain = LJ_MAX_IDXCHAIN;
   2314     rc = lj_record_idx(J, &ix);
   2315     break;
   2316 
   2317   case BC_TGETB: case BC_TSETB:
   2318     setintV(&ix.keyv, (int32_t)rc);
   2319     ix.key = lj_ir_kint(J, (int32_t)rc);
   2320     /* fallthrough */
   2321   case BC_TGETV: case BC_TGETS: case BC_TSETV: case BC_TSETS:
   2322     ix.idxchain = LJ_MAX_IDXCHAIN;
   2323     rc = lj_record_idx(J, &ix);
   2324     break;
   2325   case BC_TGETR: case BC_TSETR:
   2326     ix.idxchain = 0;
   2327     rc = lj_record_idx(J, &ix);
   2328     break;
   2329 
   2330   case BC_TSETM:
   2331     rec_tsetm(J, ra, (BCReg)(J->L->top - J->L->base), (int32_t)rcv->u32.lo);
   2332     break;
   2333 
   2334   case BC_TNEW:
   2335     rc = rec_tnew(J, rc);
   2336     break;
   2337   case BC_TDUP:
   2338     rc = emitir(IRTG(IR_TDUP, IRT_TAB),
   2339 		lj_ir_ktab(J, gco2tab(proto_kgc(J->pt, ~(ptrdiff_t)rc))), 0);
   2340 #ifdef LUAJIT_ENABLE_TABLE_BUMP
   2341     J->rbchash[(rc & (RBCHASH_SLOTS-1))].ref = tref_ref(rc);
   2342     setmref(J->rbchash[(rc & (RBCHASH_SLOTS-1))].pc, pc);
   2343     setgcref(J->rbchash[(rc & (RBCHASH_SLOTS-1))].pt, obj2gco(J->pt));
   2344 #endif
   2345     break;
   2346 
   2347   /* -- Calls and vararg handling ----------------------------------------- */
   2348 
   2349   case BC_ITERC:
   2350     J->base[ra] = getslot(J, ra-3);
   2351     J->base[ra+1+LJ_FR2] = getslot(J, ra-2);
   2352     J->base[ra+2+LJ_FR2] = getslot(J, ra-1);
   2353     { /* Do the actual copy now because lj_record_call needs the values. */
   2354       TValue *b = &J->L->base[ra];
   2355       copyTV(J->L, b, b-3);
   2356       copyTV(J->L, b+1+LJ_FR2, b-2);
   2357       copyTV(J->L, b+2+LJ_FR2, b-1);
   2358     }
   2359     lj_record_call(J, ra, (ptrdiff_t)rc-1);
   2360     break;
   2361 
   2362   /* L->top is set to L->base+ra+rc+NARGS-1+1. See lj_dispatch_ins(). */
   2363   case BC_CALLM:
   2364     rc = (BCReg)(J->L->top - J->L->base) - ra - LJ_FR2;
   2365     /* fallthrough */
   2366   case BC_CALL:
   2367     lj_record_call(J, ra, (ptrdiff_t)rc-1);
   2368     break;
   2369 
   2370   case BC_CALLMT:
   2371     rc = (BCReg)(J->L->top - J->L->base) - ra - LJ_FR2;
   2372     /* fallthrough */
   2373   case BC_CALLT:
   2374     lj_record_tailcall(J, ra, (ptrdiff_t)rc-1);
   2375     break;
   2376 
   2377   case BC_VARG:
   2378     rec_varg(J, ra, (ptrdiff_t)rb-1);
   2379     break;
   2380 
   2381   /* -- Returns ----------------------------------------------------------- */
   2382 
   2383   case BC_RETM:
   2384     /* L->top is set to L->base+ra+rc+NRESULTS-1, see lj_dispatch_ins(). */
   2385     rc = (BCReg)(J->L->top - J->L->base) - ra + 1;
   2386     /* fallthrough */
   2387   case BC_RET: case BC_RET0: case BC_RET1:
   2388 #if LJ_HASPROFILE
   2389     rec_profile_ret(J);
   2390 #endif
   2391     lj_record_ret(J, ra, (ptrdiff_t)rc-1);
   2392     break;
   2393 
   2394   /* -- Loops and branches ------------------------------------------------ */
   2395 
   2396   case BC_FORI:
   2397     if (rec_for(J, pc, 0) != LOOPEV_LEAVE)
   2398       J->loopref = J->cur.nins;
   2399     break;
   2400   case BC_JFORI:
   2401     lua_assert(bc_op(pc[(ptrdiff_t)rc-BCBIAS_J]) == BC_JFORL);
   2402     if (rec_for(J, pc, 0) != LOOPEV_LEAVE)  /* Link to existing loop. */
   2403       lj_record_stop(J, LJ_TRLINK_ROOT, bc_d(pc[(ptrdiff_t)rc-BCBIAS_J]));
   2404     /* Continue tracing if the loop is not entered. */
   2405     break;
   2406 
   2407   case BC_FORL:
   2408     rec_loop_interp(J, pc, rec_for(J, pc+((ptrdiff_t)rc-BCBIAS_J), 1));
   2409     break;
   2410   case BC_ITERL:
   2411     rec_loop_interp(J, pc, rec_iterl(J, *pc));
   2412     break;
   2413   case BC_LOOP:
   2414     rec_loop_interp(J, pc, rec_loop(J, ra));
   2415     break;
   2416 
   2417   case BC_JFORL:
   2418     rec_loop_jit(J, rc, rec_for(J, pc+bc_j(traceref(J, rc)->startins), 1));
   2419     break;
   2420   case BC_JITERL:
   2421     rec_loop_jit(J, rc, rec_iterl(J, traceref(J, rc)->startins));
   2422     break;
   2423   case BC_JLOOP:
   2424     rec_loop_jit(J, rc, rec_loop(J, ra));
   2425     break;
   2426 
   2427   case BC_IFORL:
   2428   case BC_IITERL:
   2429   case BC_ILOOP:
   2430   case BC_IFUNCF:
   2431   case BC_IFUNCV:
   2432     lj_trace_err(J, LJ_TRERR_BLACKL);
   2433     break;
   2434 
   2435   case BC_JMP:
   2436     if (ra < J->maxslot)
   2437       J->maxslot = ra;  /* Shrink used slots. */
   2438     break;
   2439 
   2440   /* -- Function headers -------------------------------------------------- */
   2441 
   2442   case BC_FUNCF:
   2443     rec_func_lua(J);
   2444     break;
   2445   case BC_JFUNCF:
   2446     rec_func_jit(J, rc);
   2447     break;
   2448 
   2449   case BC_FUNCV:
   2450     rec_func_vararg(J);
   2451     rec_func_lua(J);
   2452     break;
   2453   case BC_JFUNCV:
    2454     lua_assert(0);  /* Cannot happen. No hotcall counting for vararg funcs. */
   2455     break;
   2456 
   2457   case BC_FUNCC:
   2458   case BC_FUNCCW:
   2459     lj_ffrecord_func(J);
   2460     break;
   2461 
   2462   default:
   2463     if (op >= BC__MAX) {
   2464       lj_ffrecord_func(J);
   2465       break;
    2466     }
             /* fallthrough */
   2467   case BC_UCLO:
    2468     /* No upvalues are open, or none at or above slot ra -> NOP. */
   2469     if ((!gcref(J->L->openupval)) ||
   2470         (uvval((gco2uv(gcref(J->L->openupval)))) < (lbase + ra))) {
   2471       if (ra < J->maxslot)
   2472         J->maxslot = ra;  /* Shrink used slots. */
   2473       break;
   2474     }
   2475     /* TBD: Need to close UV -> call lj_func_closeuv */
   2476     /* fallthrough */
   2477   case BC_ITERN:
   2478   case BC_ISNEXT:
   2479   case BC_FNEW:
   2480   case BC_ESETV:
   2481     setintV(&J->errinfo, (int32_t)op);
   2482     lj_trace_err_info(J, LJ_TRERR_NYIBC);
   2483     break;
   2484   }
   2485 
   2486   /* rc == 0 if we have no result yet, e.g. pending __index metamethod call. */
   2487   if (bcmode_a(op) == BCMdst && rc) {
   2488     J->base[ra] = rc;
   2489     if (ra >= J->maxslot) {
   2490 #if LJ_FR2
   2491       if (ra > J->maxslot) J->base[ra-1] = 0;
   2492 #endif
   2493       J->maxslot = ra+1;
   2494     }
   2495   }
   2496 
   2497 #undef rav
   2498 #undef rbv
   2499 #undef rcv
   2500 
   2501   /* Limit the number of recorded IR instructions. */
   2502   if (J->cur.nins > REF_FIRST+(IRRef)J->param[JIT_P_maxrecord])
   2503     lj_trace_err(J, LJ_TRERR_TRACEOV);
   2504 }
   2505 
   2506 /* -- Recording setup ----------------------------------------------------- */
   2507 
   2508 /* Setup recording for a root trace started by a hot loop. */
   2509 static const BCIns *rec_setup_root(jit_State *J)
   2510 {
   2511   /* Determine the next PC and the bytecode range for the loop. */
   2512   const BCIns *pcj, *pc = J->pc;
   2513   BCIns ins = *pc;
   2514   BCReg ra = bc_a(ins);
   2515   switch (bc_op(ins)) {
   2516   case BC_FORL:
   2517     J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
   2518     pc += 1+bc_j(ins);
   2519     J->bc_min = pc;
   2520     break;
   2521   case BC_ITERL:
   2522     lua_assert(bc_op(pc[-1]) == BC_ITERC);
   2523     J->maxslot = ra + bc_b(pc[-1]) - 1;
   2524     J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
   2525     pc += 1+bc_j(ins);
   2526     lua_assert(bc_op(pc[-1]) == BC_JMP);
   2527     J->bc_min = pc;
   2528     break;
   2529   case BC_LOOP:
   2530     /* Only check BC range for real loops, but not for "repeat until true". */
   2531     pcj = pc + bc_j(ins);
   2532     ins = *pcj;
   2533     if (bc_op(ins) == BC_JMP && bc_j(ins) < 0) {
   2534       J->bc_min = pcj+1 + bc_j(ins);
   2535       J->bc_extent = (MSize)(-bc_j(ins))*sizeof(BCIns);
   2536     }
   2537     J->maxslot = ra;
   2538     pc++;
   2539     break;
   2540   case BC_RET:
   2541   case BC_RET0:
   2542   case BC_RET1:
   2543     /* No bytecode range check for down-recursive root traces. */
   2544     J->maxslot = ra + bc_d(ins) - 1;
   2545     break;
   2546   case BC_FUNCF:
   2547     /* No bytecode range check for root traces started by a hot call. */
   2548     J->maxslot = J->pt->numparams;
   2549     pc++;
   2550     break;
   2551   case BC_CALLM:
   2552   case BC_CALL:
   2553   case BC_ITERC:
   2554     /* No bytecode range check for stitched traces. */
   2555     pc++;
   2556     break;
   2557   default:
   2558     lua_assert(0);
   2559     break;
   2560   }
   2561   return pc;
   2562 }
   2563 
   2564 /* Setup for recording a new trace. */
   2565 void lj_record_setup(jit_State *J)
   2566 {
   2567   uint32_t i;
   2568 
   2569   /* Initialize state related to current trace. */
   2570   memset(J->slot, 0, sizeof(J->slot));
   2571   memset(J->chain, 0, sizeof(J->chain));
   2572 #ifdef LUAJIT_ENABLE_TABLE_BUMP
   2573   memset(J->rbchash, 0, sizeof(J->rbchash));
   2574 #endif
   2575   memset(J->bpropcache, 0, sizeof(J->bpropcache));
   2576   J->scev.idx = REF_NIL;
   2577   setmref(J->scev.pc, NULL);
   2578 
   2579   J->baseslot = 1+LJ_FR2;  /* Invoking function is at base[-1-LJ_FR2]. */
   2580   J->base = J->slot + J->baseslot;
   2581   J->maxslot = 0;
   2582   J->framedepth = 0;
   2583   J->retdepth = 0;
   2584 
   2585   J->instunroll = J->param[JIT_P_instunroll];
   2586   J->loopunroll = J->param[JIT_P_loopunroll];
   2587   J->tailcalled = 0;
   2588   J->loopref = 0;
   2589 
   2590   J->bc_min = NULL;  /* Means no limit. */
   2591   J->bc_extent = ~(MSize)0;
   2592 
   2593   /* Emit instructions for fixed references. Also triggers initial IR alloc. */
   2594   emitir_raw(IRT(IR_BASE, IRT_PGC), J->parent, J->exitno);
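           /* Seed the primitive constants nil/false/true as KPRI
           ** instructions at REF_NIL, REF_FALSE and REF_TRUE.
           */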
   2595   for (i = 0; i <= 2; i++) {
   2596     IRIns *ir = IR(REF_NIL-i);
   2597     ir->i = 0;
   2598     ir->t.irt = (uint8_t)(IRT_NIL+i);
   2599     ir->o = IR_KPRI;
   2600     ir->prev = 0;
   2601   }
   2602   J->cur.nk = REF_TRUE;
   2603 
   2604   J->startpc = J->pc;
   2605   setmref(J->cur.startpc, J->pc);
   2606   if (J->parent) {  /* Side trace. */
   2607     GCtrace *T = traceref(J, J->parent);
   2608     TraceNo root = T->root ? T->root : J->parent;
   2609     J->cur.root = (uint16_t)root;
   2610     J->cur.startins = BCINS_AD(BC_JMP, 0, 0);
   2611     /* Check whether we could at least potentially form an extra loop. */
   2612     if (J->exitno == 0 && T->snap[0].nent == 0) {
   2613       /* We can narrow a FORL for some side traces, too. */
   2614       if (J->pc > proto_bc(J->pt) && bc_op(J->pc[-1]) == BC_JFORI &&
   2615 	  bc_d(J->pc[bc_j(J->pc[-1])-1]) == root) {
   2616 	lj_snap_add(J);
   2617 	rec_for_loop(J, J->pc-1, &J->scev, 1);
   2618 	goto sidecheck;
   2619       }
   2620     } else {
   2621       J->startpc = NULL;  /* Prevent forming an extra loop. */
   2622     }
   2623     lj_snap_replay(J, T);
   2624   sidecheck:
   2625     if (traceref(J, J->cur.root)->nchild >= J->param[JIT_P_maxside] ||
   2626 	T->snap[J->exitno].count >= J->param[JIT_P_hotexit] +
   2627 				    J->param[JIT_P_tryside]) {
   2628       lj_record_stop(J, LJ_TRLINK_INTERP, 0);
   2629     }
   2630   } else {  /* Root trace. */
   2631     J->cur.root = 0;
   2632     J->cur.startins = *J->pc;
   2633     J->pc = rec_setup_root(J);
   2634     /* Note: the loop instruction itself is recorded at the end and not
   2635     ** at the start! So snapshot #0 needs to point to the *next* instruction.
   2636     */
   2637     lj_snap_add(J);
   2638     if (bc_op(J->cur.startins) == BC_FORL)
   2639       rec_for_loop(J, J->pc-1, &J->scev, 1);
   2640     else if (bc_op(J->cur.startins) == BC_ITERC)
   2641       J->startpc = NULL;
   2642     if (1 + J->pt->framesize >= LJ_MAX_JSLOTS)
   2643       lj_trace_err(J, LJ_TRERR_STACKOV);
   2644   }
   2645 #if LJ_HASPROFILE
   2646   J->prev_pt = NULL;
   2647   J->prev_line = -1;
   2648 #endif
   2649 #ifdef LUAJIT_ENABLE_CHECKHOOK
   2650   /* Regularly check for instruction/line hooks from compiled code and
   2651   ** exit to the interpreter if the hooks are set.
   2652   **
   2653   ** This is a compile-time option and disabled by default, since the
   2654   ** hook checks may be quite expensive in tight loops.
   2655   **
   2656   ** Note this is only useful if hooks are *not* set most of the time.
   2657   ** Use this only if you want to *asynchronously* interrupt the execution.
   2658   **
   2659   ** You can set the instruction hook via lua_sethook() with a count of 1
   2660   ** from a signal handler or another native thread. Please have a look
   2661   ** at the first few functions in luajit.c for an example (Ctrl-C handler).
   2662   */
   2663   {
   2664     TRef tr = emitir(IRT(IR_XLOAD, IRT_U8),
   2665 		     lj_ir_kptr(J, &J2G(J)->hookmask), IRXLOAD_VOLATILE);
   2666     tr = emitir(IRTI(IR_BAND), tr, lj_ir_kint(J, (LUA_MASKLINE|LUA_MASKCOUNT)));
   2667     emitir(IRTGI(IR_EQ), tr, lj_ir_kint(J, 0));
   2668   }
   2669 #endif
   2670 }
   2671 
   2672 #undef IR
   2673 #undef emitir_raw
   2674 #undef emitir
   2675 
   2676 #endif