ljx

FORK: LuaJIT with native 5.2 and 5.3 support
git clone https://git.neptards.moe/neptards/ljx.git

lj_gc.c (28231B)


      1 /*
      2 ** Garbage collector.
      3 ** Copyright (C) 2005-2016 Mike Pall. See Copyright Notice in luajit.h
      4 **
      5 ** Major portions taken verbatim or adapted from the Lua interpreter.
      6 ** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
      7 */
      8 
      9 #define lj_gc_c
     10 #define LUA_CORE
     11 
     12 #include "lj_obj.h"
     13 #include "lj_gc.h"
     14 #include "lj_err.h"
     15 #include "lj_buf.h"
     16 #include "lj_str.h"
     17 #include "lj_tab.h"
     18 #include "lj_func.h"
     19 #include "lj_udata.h"
     20 #include "lj_meta.h"
     21 #include "lj_state.h"
     22 #include "lj_frame.h"
     23 #if LJ_HASFFI
     24 #include "lj_ctype.h"
     25 #include "lj_cdata.h"
     26 #endif
     27 #include "lj_trace.h"
     28 #include "lj_vm.h"
     29 
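        /*
        ** Tuning constants for the incremental collector: GCSTEPSIZE is the base
        ** step size used by lj_gc_step() below, which performs about
        ** (GCSTEPSIZE/100)*stepmul cost units of work per invocation and advances
        ** the debt/threshold in GCSTEPSIZE increments. GCSWEEPMAX caps the number
        ** of objects visited per sweep step; GCSWEEPCOST and GCFINALIZECOST are
        ** the cost estimates gc_onestep() charges for sweeping and finalization.
        */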
     30 #define GCSTEPSIZE	1024u
     31 #define GCSWEEPMAX	40
     32 #define GCSWEEPCOST	10
     33 #define GCFINALIZECOST	100
     34 
     35 /* Macros to set GCobj colors and flags. */
     36 #define white2gray(x)		((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
     37 #define gray2black(x)		((x)->gch.marked |= LJ_GC_BLACK)
     38 #define isfinalized(u)		((u)->marked & LJ_GC_FINALIZED)
     39 
     40 /* -- Mark phase ---------------------------------------------------------- */
     41 
     42 /* Mark a TValue (if needed). */
     43 #define gc_marktv(g, tv) \
     44   { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \
     45     if (tviswhite(tv)) gc_mark(g, gcV(tv)); }
     46 
     47 /* Mark a GCobj (if needed). */
     48 #define gc_markobj(g, o) \
     49   { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }
     50 
     51 /* Mark a string object. */
     52 #define gc_mark_str(s)		((s)->marked &= (uint8_t)~LJ_GC_WHITES)
     53 
     54 static int gc_traverse_tab(global_State *g, GCtab *t);
     55 
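        /*
        ** Tri-color marking: white objects are unvisited, gray objects are
        ** reachable but their children have not been traversed yet, black objects
        ** are fully traversed. Strings and cdata carry no GC references, so they
        ** never enter the gray list; userdata and closed upvalues are blackened
        ** immediately, everything else is queued on g->gc.gray for
        ** propagatemark().
        */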
     56 /* Mark a white GCobj. */
     57 static void gc_mark(global_State *g, GCobj *o)
     58 {
     59   int gct = o->gch.gct;
     60   lua_assert(iswhite(o) && !isdead(g, o));
     61   white2gray(o);
     62   if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
     63     GCtab *mt = tabref(gco2ud(o)->metatable);
     64     gray2black(o);  /* Userdata are never gray. */
     65     if (mt) gc_markobj(g, mt);
     66     gc_markobj(g, gcref(gco2ud(o)->env));
     67   } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
     68     GCupval *uv = gco2uv(o);
     69     gc_marktv(g, uvval(uv));
     70     if (uv->closed)
     71       gray2black(o);  /* Closed upvalues are never gray. */
     72   } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
     73     lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
     74 	       gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO || gct == ~LJ_TTRACE);
     75     if (gct == ~LJ_TTAB && isfinalized(gco2tab(o))) {
     76       /* UNSURE: A table registered for __gc finalization always stays gray. */
     77 //      gray2black(o);
     78       gc_traverse_tab(g, gco2tab(o));
     79     } else {
     80       setgcrefr(o->gch.gclist, g->gc.gray);
     81       setgcref(g->gc.gray, o);
     82     }
     83   }
     84 }
     85 
     86 /* Mark GC roots. */
     87 static void gc_mark_gcroot(global_State *g)
     88 {
     89   ptrdiff_t i;
     90   for (i = 0; i < GCROOT_MAX; i++)
     91     if (gcref(g->gcroot[i]) != NULL)
     92       gc_markobj(g, gcref(g->gcroot[i]));
     93 }
     94 
     95 /* Start a GC cycle and mark the root set. */
     96 static void gc_mark_start(global_State *g)
     97 {
     98   setgcrefnull(g->gc.gray);
     99   setgcrefnull(g->gc.grayagain);
    100   setgcrefnull(g->gc.weak);
    101   gc_markobj(g, mainthread(g));
    102   gc_markobj(g, tabref(mainthread(g)->env));
    103   gc_marktv(g, &g->registrytv);
    104   gc_mark_gcroot(g);
    105   g->gc.state = GCSpropagate;
    106 }
    107 
    108 /* Mark open upvalues. */
    109 static void gc_mark_uv(global_State *g)
    110 {
    111   GCupval *uv;
    112   for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
    113     lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
    114     if (isgray(obj2gco(uv)))
    115       gc_marktv(g, uvval(uv));
    116   }
    117 }
    118 
    119 /* Mark userdata and tables in mmudata list. */
    120 static void gc_mark_mmudata(global_State *g)
    121 {
    122   GCobj *root = gcref(g->gc.mmudata);
    123   GCobj *u = root;
    124   if (u) {
    125     do {
    126       u = gcnext(u);
    127       makewhite(g, u);  /* Could be from previous GC. */
    128       gc_mark(g, u);
    129     } while (u != root);
    130   }
    131 }
    132 
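        /*
        ** Scans the chain hanging off mainthread->nextgc (userdata, plus tables
        ** registered via lj_gc_tab_finalized() in this fork). Objects that are
        ** white (or all of them when 'all' is set, at state close) and not yet
        ** finalized are moved to the circular mmudata list; userdata without a
        ** __gc metamethod are merely flagged as finalized. The returned userdata
        ** size is subtracted from the memory estimate in atomic().
        */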
    133 /* Separate userdata objects to be finalized to mmudata list. */
    134 size_t lj_gc_separateudata(global_State *g, int all)
    135 {
    136   size_t m = 0;
    137   GCRef *p = &mainthread(g)->nextgc;
    138   GCobj *o;
    139   while ((o = gcref(*p)) != NULL) {
    140     int isud = o->gch.gct == ~LJ_TUDATA;
    141     lua_assert(isud || o->gch.gct == ~LJ_TTAB);
    142     if (!(iswhite(o) || all) || (isud && isfinalized(&(o->gch)))) {
    143       p = &o->gch.nextgc;  /* Nothing to do. */
    144     } else if (isud && !lj_meta_fastg(g, tabref(o->ud.metatable), MM_gc)) {
    145       markfinalized(o);  /* Done, as there's no __gc metamethod. */
    146       p = &o->gch.nextgc;
    147     } else {  /* Otherwise move it to mmudata list. */
    148       if (isud)
    149         m += sizeudata(gco2ud(o));
    150       *p = o->gch.nextgc; /* Advance */
    151       if (gcref(g->gc.mmudata)) {  /* Link to end of mmudata list. */
    152 	GCobj *root = gcref(g->gc.mmudata);
    153 	setgcrefr(o->gch.nextgc, root->gch.nextgc);
    154 	setgcref(root->gch.nextgc, o);
    155 	setgcref(g->gc.mmudata, o);
    156       } else {  /* Create circular list. */
    157 	setgcref(o->gch.nextgc, o);
    158 	setgcref(g->gc.mmudata, o);
    159       }
    160     }
    161   }
    162   return m;
    163 }
    164 
    165 /* -- Propagation phase --------------------------------------------------- */
    166 
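        /*
        ** Table traversal honors the __mode metafield: 'k' makes keys weak and
        ** 'v' makes values weak. Weak tables are queued on g->gc.weak and their
        ** dead entries are cleared by gc_clearweak() during the atomic phase.
        ** A non-zero return value tells propagatemark() to keep the table gray.
        */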
    167 /* Traverse a table. */
    168 static int gc_traverse_tab(global_State *g, GCtab *t)
    169 {
    170   int weak = 0;
    171   cTValue *mode;
    172   GCtab *mt = tabref(t->metatable);
    173   int tofin = isfinalized(t);
    174   if (mt)
    175     gc_markobj(g, mt);
    176   mode = lj_meta_fastg(g, mt, MM_mode);
    177   if (mode && tvisstr(mode)) {  /* Valid __mode field? */
    178     const char *modestr = strVdata(mode);
    179     int c;
    180     while ((c = *modestr++)) {
    181       if (c == 'k') weak |= LJ_GC_WEAKKEY;
    182       else if (c == 'v') weak |= LJ_GC_WEAKVAL;
    183       else if (c == 'K') weak = (int)(~0u & ~LJ_GC_WEAKVAL);
    184     }
    185     if (weak > 0) {  /* Weak tables are cleared in the atomic phase. */
    186       t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
    187     if (!tofin) { /* UNSURE: Don't queue it on the weak list until it has been finalized. */
    188         setgcrefr(t->gclist, g->gc.weak);
    189         setgcref(g->gc.weak, obj2gco(t));
    190       }
    191     }
    192   }
    193   if (weak == LJ_GC_WEAK)  /* Nothing to mark if both keys/values are weak. */
    194     return 1;
    195   if (!(weak & LJ_GC_WEAKVAL)) {  /* Mark array part. */
    196     MSize i, asize = t->asize;
    197     for (i = 0; i < asize; i++)
    198       gc_marktv(g, arrayslot(t, i));
    199   }
    200   if (t->hmask > 0) {  /* Mark hash part. */
    201     Node *node = noderef(t->node);
    202     MSize i, hmask = t->hmask;
    203     for (i = 0; i <= hmask; i++) {
    204       Node *n = &node[i];
    205       if (!tvisnil(&n->val)) {  /* Mark non-empty slot. */
    206 	lua_assert(!tvisnil(&n->key));
    207 	if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
    208 	if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
    209       }
    210     }
    211   }
    212   return weak || tofin; /* UNSURE: Keep gray if __gc */
    213 }
    214 
    215 /* Traverse a function. */
    216 static void gc_traverse_func(global_State *g, GCfunc *fn)
    217 {
    218   gc_markobj(g, tabref(fn->c.env));
    219   if (isluafunc(fn)) {
    220     uint32_t i;
    221     lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv);
    222     gc_markobj(g, funcproto(fn));
    223     for (i = 0; i < fn->l.nupvalues; i++)  /* Mark Lua function upvalues. */
    224       gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
    225   } else {
    226     uint32_t i;
    227     for (i = 0; i < fn->c.nupvalues; i++)  /* Mark C function upvalues. */
    228       gc_marktv(g, &fn->c.upvalue[i]);
    229   }
    230 }
    231 
    232 #if LJ_HASJIT
    233 /* Mark a trace. */
    234 static void gc_marktrace(global_State *g, TraceNo traceno)
    235 {
    236   GCobj *o = obj2gco(traceref(G2J(g), traceno));
    237   lua_assert(traceno != G2J(g)->cur.traceno);
    238   if (iswhite(o)) {
    239     white2gray(o);
    240     setgcrefr(o->gch.gclist, g->gc.gray);
    241     setgcref(g->gc.gray, o);
    242   }
    243 }
    244 
    245 /* Traverse a trace. */
    246 static void gc_traverse_trace(global_State *g, GCtrace *T)
    247 {
    248   IRRef ref;
    249   if (T->traceno == 0) return;
    250   for (ref = T->nk; ref < REF_TRUE; ref++) {
    251     IRIns *ir = &T->ir[ref];
    252     if (ir->o == IR_KGC)
    253       gc_markobj(g, ir_kgc(ir));
    254     if (irt_is64(ir->t) && ir->o != IR_KNULL)
    255       ref++;
    256   }
    257   if (T->link) gc_marktrace(g, T->link);
    258   if (T->nextroot) gc_marktrace(g, T->nextroot);
    259   if (T->nextside) gc_marktrace(g, T->nextside);
    260   gc_markobj(g, gcref(T->startpt));
    261 }
    262 
    263 /* The current trace is a GC root while not anchored in the prototype (yet). */
    264 #define gc_traverse_curtrace(g)	gc_traverse_trace(g, &G2J(g)->cur)
    265 #else
    266 #define gc_traverse_curtrace(g)	UNUSED(g)
    267 #endif
    268 
    269 /* Traverse a prototype. */
    270 static void gc_traverse_proto(global_State *g, GCproto *pt)
    271 {
    272   ptrdiff_t i;
    273   gc_mark_str(proto_chunkname(pt));
    274   for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++)  /* Mark collectable consts. */
    275     gc_markobj(g, proto_kgc(pt, i));
    276 #if LJ_HASJIT
    277   if (pt->trace) gc_marktrace(g, pt->trace);
    278 #endif
    279 }
    280 
    281 /* Traverse the frame structure of a stack. */
    282 static MSize gc_traverse_frames(global_State *g, lua_State *th)
    283 {
    284   TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
    285   /* Note: extra vararg frame not skipped, marks function twice (harmless). */
    286   for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
    287     GCfunc *fn = frame_func(frame);
    288     TValue *ftop = frame;
    289     if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
    290     if (ftop > top) top = ftop;
    291     if (!LJ_FR2) gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
    292   }
    293   top++;  /* Correct bias of -1 (frame == base-1). */
    294   if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
    295   return (MSize)(top - bot);  /* Return minimum needed stack size. */
    296 }
    297 
    298 /* Traverse a thread object. */
    299 static void gc_traverse_thread(global_State *g, lua_State *th)
    300 {
    301   TValue *o, *top = th->top;
    302   for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)
    303     gc_marktv(g, o);
    304   if (g->gc.state == GCSatomic) {
    305     top = tvref(th->stack) + th->stacksize;
    306     for (; o < top; o++)  /* Clear unmarked slots. */
    307       setnilV(o);
    308   }
    309   gc_markobj(g, tabref(th->env));
    310   lj_state_shrinkstack(th, gc_traverse_frames(g, th));
    311 }
    312 
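        /*
        ** Pops one object off the gray list, blackens it, traverses its children
        ** and returns an approximate object size, which gc_onestep() reports as
        ** the cost of this step. Threads, weak tables and (in this fork) tables
        ** pending __gc finalization are flipped back to gray so they get
        ** rescanned later.
        */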
    313 /* Propagate one gray object. Traverse it and turn it black. */
    314 static size_t propagatemark(global_State *g)
    315 {
    316   GCobj *o = gcref(g->gc.gray);
    317   int gct = o->gch.gct;
    318   lua_assert(isgray(o));
    319   gray2black(o);
    320   setgcrefr(g->gc.gray, o->gch.gclist);  /* Remove from gray list. */
    321   if (LJ_LIKELY(gct == ~LJ_TTAB)) {
    322     GCtab *t = gco2tab(o);
    323     if (gc_traverse_tab(g, t) > 0)
    324       black2gray(o);  /* Keep weak tables gray. */
    325     return sizeof(GCtab) + sizeof(TValue) * t->asize +
    326 			   sizeof(Node) * (t->hmask + 1);
    327   } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
    328     GCfunc *fn = gco2func(o);
    329     gc_traverse_func(g, fn);
    330     return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
    331 			   sizeCfunc((MSize)fn->c.nupvalues);
    332   } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
    333     GCproto *pt = gco2pt(o);
    334     gc_traverse_proto(g, pt);
    335     return pt->sizept;
    336   } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
    337     lua_State *th = gco2th(o);
    338     setgcrefr(th->gclist, g->gc.grayagain);
    339     setgcref(g->gc.grayagain, o);
    340     black2gray(o);  /* Threads are never black. */
    341     gc_traverse_thread(g, th);
    342     return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
    343   } else {
    344 #if LJ_HASJIT
    345     GCtrace *T = gco2trace(o);
    346     gc_traverse_trace(g, T);
    347     return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
    348 	   T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
    349 #else
    350     lua_assert(0);
    351     return 0;
    352 #endif
    353   }
    354 }
    355 
    356 /* Propagate all gray objects. */
    357 static size_t gc_propagate_gray(global_State *g)
    358 {
    359   size_t m = 0;
    360   while (gcref(g->gc.gray) != NULL)
    361     m += propagatemark(g);
    362   return m;
    363 }
    364 
    365 /* -- Sweep phase --------------------------------------------------------- */
    366 
    367 /* Type of GC free functions. */
    368 typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);
    369 
    370 /* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
    371 static const GCFreeFunc gc_freefunc[] = {
    372   (GCFreeFunc)lj_str_free,
    373   (GCFreeFunc)lj_func_freeuv,
    374   (GCFreeFunc)lj_state_free,
    375   (GCFreeFunc)lj_func_freeproto,
    376   (GCFreeFunc)lj_func_free,
    377 #if LJ_HASJIT
    378   (GCFreeFunc)lj_trace_free,
    379 #else
    380   (GCFreeFunc)0,
    381 #endif
    382 #if LJ_HASFFI
    383   (GCFreeFunc)lj_cdata_free,
    384 #else
    385   (GCFreeFunc)0,
    386 #endif
    387   (GCFreeFunc)lj_tab_free,
    388   (GCFreeFunc)lj_udata_free
    389 };
    390 
    391 /* Full sweep of a GC list. */
    392 #define gc_fullsweep(g, p)	gc_sweep(g, (p), ~(uint32_t)0)
    393 
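        /*
        ** Two-white scheme: objects still carrying the "other" white were not
        ** reached during the cycle that just finished marking and are dead;
        ** objects allocated during the sweep already carry the current white and
        ** survive. Live objects are repainted to the current white, dead ones are
        ** unlinked and released through gc_freefunc[]. Threads also sweep their
        ** list of open upvalues.
        */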
    394 /* Partial sweep of a GC list. */
    395 static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
    396 {
    397   /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
    398   int ow = otherwhite(g);
    399   GCobj *o;
    400   while ((o = gcref(*p)) != NULL && lim-- > 0) {
    401     if (o->gch.gct == ~LJ_TTHREAD)  /* Need to sweep open upvalues, too. */
    402       gc_fullsweep(g, &gco2th(o)->openupval);
    403     if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
    404       lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED));
    405       makewhite(g, o);  /* Value is alive, change to the current white. */
    406       p = &o->gch.nextgc;
    407     } else {  /* Otherwise value is dead, free it. */
    408       lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED);
    409       setgcrefr(*p, o->gch.nextgc);
    410       if (o == gcref(g->gc.root))
    411 	setgcrefr(g->gc.root, o->gch.nextgc);  /* Adjust list anchor. */
    412       gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
    413     }
    414   }
    415   return p;
    416 }
    417 
    418 /* Check whether we can clear a key or a value slot from a table. */
    419 static int gc_mayclear(cTValue *o, int val)
    420 {
    421   if (tvisgcv(o)) {  /* Only collectable objects can be weak references. */
    422     if (tvisstr(o)) {  /* But strings cannot be used as weak references. */
    423       gc_mark_str(strV(o));  /* And need to be marked. */
    424       return 0;
    425     }
    426     if (iswhite(gcV(o)))
    427       return 1;  /* Object is about to be collected. */
    428     if (tvisudata(o) && val && isfinalized(udataV(o)))
    429       return 1;  /* Finalized userdata is dropped only from values. */
    430   }
    431   return 0;  /* Cannot clear. */
    432 }
    433 
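        /*
        ** Walks the weak tables collected during marking and nils out array and
        ** hash slots whose key or value is about to be collected. Strings are
        ** never treated as weak references (gc_mayclear() marks them instead),
        ** and finalized userdata is dropped from value slots only.
        */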
    434 /* Clear collected entries from weak tables. */
    435 static void gc_clearweak(GCobj *o)
    436 {
    437   while (o) {
    438     GCtab *t = gco2tab(o);
    439     lua_assert((t->marked & LJ_GC_WEAK));
    440     if ((t->marked & LJ_GC_WEAKVAL)) {
    441       MSize i, asize = t->asize;
    442       for (i = 0; i < asize; i++) {
    443 	/* Clear array slot when value is about to be collected. */
    444 	TValue *tv = arrayslot(t, i);
    445 	if (gc_mayclear(tv, 1))
    446 	  setnilV(tv);
    447       }
    448     }
    449     if (t->hmask > 0) {
    450       Node *node = noderef(t->node);
    451       MSize i, hmask = t->hmask;
    452       for (i = 0; i <= hmask; i++) {
    453 	Node *n = &node[i];
    454 	/* Clear hash slot when key or value is about to be collected. */
    455 	if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
    456 				  gc_mayclear(&n->val, 1)))
    457 	  setnilV(&n->val);
    458       }
    459     }
    460     o = gcref(t->gclist);
    461   }
    462 }
    463 
    464 /* Call a userdata or cdata finalizer. */
    465 static void gc_call_finalizer(global_State *g, lua_State *L,
    466 			      cTValue *mo, GCobj *o)
    467 {
    468   /* Save and restore lots of state around the __gc callback. */
    469   uint8_t oldh = hook_save(g);
    470   GCSize oldt = g->gc.threshold;
    471   int errcode;
    472   TValue *top;
    473   lj_trace_abort(g);
    474   hook_entergc(g);  /* Disable hooks and new traces during __gc. */
    475   g->gc.threshold = LJ_MAX_MEM;  /* Prevent GC steps. */
    476   top = L->top;
    477   copyTV(L, top++, mo);
    478   if (LJ_FR2) setnilV(top++);
    479   setgcV(L, top, o, ~o->gch.gct);
    480   L->top = top+1;
    481   errcode = lj_vm_pcall(L, top, 1+0, -1);  /* Stack: |mo|o| -> | */
    482   hook_restore(g, oldh);
    483   g->gc.threshold = oldt;  /* Restore GC threshold. */
    484   if (errcode)
    485     lj_err_throw(L, errcode);  /* Propagate errors. */
    486 }
    487 
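        /*
        ** Finalization resurrects the object: it is unchained from the mmudata
        ** list, relinked onto the appropriate GC list, made white again and only
        ** then its __gc handler is run via gc_call_finalizer(). For cdata the
        ** handler is fetched from (and cleared in) the FFI finalizer table.
        */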
    488 /* Finalize one userdata or cdata object from the mmudata list. */
    489 static void gc_finalize(lua_State *L)
    490 {
    491   global_State *g = G(L);
    492   GCobj *o = gcnext(gcref(g->gc.mmudata));
    493   cTValue *mo;
    494   int gct = o->gch.gct;
    495   lua_assert(tvref(g->jit_base) == NULL);  /* Must not be called on trace. */
    496   /* Unchain from list of userdata to be finalized. */
    497   if (o == gcref(g->gc.mmudata))
    498     setgcrefnull(g->gc.mmudata);
    499   else
    500     setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
    501 #if LJ_HASFFI
    502   if (gct == ~LJ_TCDATA) {
    503     TValue tmp, *tv;
    504     /* Add cdata back to the GC list and make it white. */
    505     setgcrefr(o->gch.nextgc, g->gc.root);
    506     setgcref(g->gc.root, o);
    507     makewhite(g, o);
    508     o->gch.marked &= (uint8_t)~LJ_GC_FINALIZED;
    509     /* Resolve finalizer. */
    510     setcdataV(L, &tmp, gco2cd(o));
    511     tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
    512     if (!tvisnil(tv)) {
    513       g->gc.nocdatafin = 0;
    514       copyTV(L, &tmp, tv);
    515       setnilV(tv);  /* Clear entry in finalizer table. */
    516       gc_call_finalizer(g, L, &tmp, o);
    517     }
    518     return;
    519   }
    520 #endif
    521   if (gct == ~LJ_TTAB) {
    522     /* Add table back to the GC root list and make it white. */
    523     setgcrefr(o->gch.nextgc, g->gc.root);
    524     setgcref(g->gc.root, o);
    525     clearfinalized(o); /* This stops it from being finalized again.  */
    526   } else {
    527     /* Add userdata back to the main userdata list and make it white. */
    528     setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
    529     setgcref(mainthread(g)->nextgc, o);
    530     markfinalized(o); /* This stops it from being finalized again.  */
    531   }
    532   makewhite(g, o);
    533   /* Resolve the __gc metamethod. */
    534   mo = lj_meta_fastg(g, tabref(o->ud.metatable), MM_gc);
    535   if (mo)
    536     gc_call_finalizer(g, L, mo, o);
    537 }
    538 
    539 /* Finalize all userdata objects from mmudata list. */
    540 void lj_gc_finalize_udata(lua_State *L)
    541 {
    542   while (gcref(G(L)->gc.mmudata) != NULL)
    543     gc_finalize(L);
    544 }
    545 
    546 void lj_gc_checkfinalizer(lua_State *L, GCobj *o)
    547 {
    548   /* TBD */
    549 }
    550 
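        /*
        ** Fork addition: registers a table for finalization, giving tables with
        ** a __gc metamethod Lua 5.2+ semantics. Presumably called when such a
        ** metatable is installed on the table. The table is unlinked from the GC
        ** root list (nudging the sweep pointer past it first if necessary) and
        ** chained onto mainthread->nextgc, where lj_gc_separateudata() finds it.
        */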
    551 void lj_gc_tab_finalized(lua_State *L, GCobj *o)
    552 {
    553   global_State *g = G(L);
    554   GCobj *p, *t = obj2gco(mainthread(g));
    555   GCRef *ref = &g->gc.root;
    556 
    557   /* Already marked for finalization. */
    558   if (isfinalized(gco2tab(o)))
    559     return;
    560 
    561   /* UNSURE: Sweep points to current object, skip it.
    562    * TBD: gclist?
    563    * We could advance GC to some safe state, but wouldn't that be slow? */
    564   if (mref(g->gc.sweep, GCRef) == &o->gch.nextgc)
    565     setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), 1));
    566 
    567   /* Find object in gc root list. */
    568   for (p = gcref(g->gc.root); p != o; p = gcnext(p))
    569     ref = &p->gch.nextgc;
    570 
    571   /* Unlink. */
    572   setgcref(*ref, gcnext(o));
    573 
    574   /* And put it into userdata list. */
    575   setgcrefr(o->gch.nextgc, t->gch.nextgc);
    576   setgcref(t->gch.nextgc, o);
    577 
    578   /* Mark as such. This is cleared just before __gc is called. */
    579   markfinalized(o);
    580 
    581   /* UNSURE: GC invariant. */
    582   if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    583     makewhite(g, o);
    584 }
    585 
    586 #if LJ_HASFFI
    587 /* Finalize all cdata objects from finalizer table. */
    588 void lj_gc_finalize_cdata(lua_State *L)
    589 {
    590   global_State *g = G(L);
    591   CTState *cts = ctype_ctsG(g);
    592   if (cts) {
    593     GCtab *t = cts->finalizer;
    594     Node *node = noderef(t->node);
    595     ptrdiff_t i;
    596     setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
    597     for (i = (ptrdiff_t)t->hmask; i >= 0; i--)
    598       if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
    599 	GCobj *o = gcV(&node[i].key);
    600 	TValue tmp;
    601 	makewhite(g, o);
    602 	o->gch.marked &= (uint8_t)~LJ_GC_FINALIZED;
    603 	copyTV(L, &tmp, &node[i].val);
    604 	setnilV(&node[i].val);
    605 	gc_call_finalizer(g, L, &tmp, o);
    606       }
    607   }
    608 }
    609 #endif
    610 
    611 /* Free all remaining GC objects. */
    612 void lj_gc_freeall(global_State *g)
    613 {
    614   MSize i, strmask;
    615   /* Free everything, except super-fixed objects (the main thread). */
    616   g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
    617   gc_fullsweep(g, &g->gc.root);
    618   strmask = g->strmask;
    619   for (i = 0; i <= strmask; i++)  /* Free all string hash chains. */
    620     gc_fullsweep(g, &g->strhash[i]);
    621 }
    622 
    623 /* -- Collector ----------------------------------------------------------- */
    624 
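        /*
        ** The atomic phase runs to completion in one go: it re-marks everything
        ** that may have changed during incremental propagation (open upvalues,
        ** the running thread, the grayagain list), separates objects that need
        ** finalization, clears weak tables and finally flips the current white,
        ** turning every object still carrying the old white into garbage for the
        ** sweep phase.
        */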
    625 /* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
    626 static void atomic(global_State *g, lua_State *L)
    627 {
    628   size_t udsize;
    629 
    630   gc_mark_uv(g);  /* Need to remark open upvalues (the thread may be dead). */
    631   gc_propagate_gray(g);  /* Propagate any left-overs. */
    632 
    633   setgcrefr(g->gc.gray, g->gc.weak);  /* Empty the list of weak tables. */
    634   setgcrefnull(g->gc.weak);
    635   lua_assert(!iswhite(obj2gco(mainthread(g))));
    636   gc_markobj(g, L);  /* Mark running thread. */
    637   gc_traverse_curtrace(g);  /* Traverse current trace. */
    638   gc_mark_gcroot(g);  /* Mark GC roots (again). */
    639   gc_propagate_gray(g);  /* Propagate all of the above. */
    640 
    641   setgcrefr(g->gc.gray, g->gc.grayagain);  /* Empty the 2nd chance list. */
    642   setgcrefnull(g->gc.grayagain);
    643   gc_propagate_gray(g);  /* Propagate it. */
    644 
    645   udsize = lj_gc_separateudata(g, 0);  /* Separate userdata to be finalized. */
    646   gc_mark_mmudata(g);  /* Mark them. */
    647   udsize += gc_propagate_gray(g);  /* And propagate the marks. */
    648 
    649   /* All marking done, clear weak tables. */
    650   gc_clearweak(gcref(g->gc.weak));
    651 
    652   lj_buf_shrink(L, &g->tmpbuf);  /* Shrink temp buffer. */
    653 
    654   /* Prepare for sweep phase. */
    655   g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
    656   g->strempty.marked = g->gc.currentwhite;
    657   setmref(g->gc.sweep, &g->gc.root);
    658   g->gc.estimate = g->gc.total - (GCSize)udsize;  /* Initial estimate. */
    659 }
    660 
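        /*
        ** Phase order: GCSpause -> GCSpropagate -> GCSatomic -> GCSsweepstring ->
        ** GCSsweep -> GCSfinalize -> GCSpause (GCSfinalize is skipped when there
        ** is nothing to finalize). The string table is swept one hash chain per
        ** step and the main object list up to GCSWEEPMAX objects per step, while
        ** finalizers run one object per step. Phases that must not run on a trace
        ** return LJ_MAX_MEM so the step loop in lj_gc_step() backs off.
        */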
    661 /* GC state machine. Returns a cost estimate for each step performed. */
    662 static size_t gc_onestep(lua_State *L)
    663 {
    664   global_State *g = G(L);
    665   switch (g->gc.state) {
    666   case GCSpause:
    667     gc_mark_start(g);  /* Start a new GC cycle by marking all GC roots. */
    668     return 0;
    669   case GCSpropagate:
    670     if (gcref(g->gc.gray) != NULL)
    671       return propagatemark(g);  /* Propagate one gray object. */
    672     g->gc.state = GCSatomic;  /* End of mark phase. */
    673     return 0;
    674   case GCSatomic:
    675     if (tvref(g->jit_base))  /* Don't run atomic phase on trace. */
    676       return LJ_MAX_MEM;
    677     atomic(g, L);
    678     g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
    679     g->gc.sweepstr = 0;
    680     return 0;
    681   case GCSsweepstring: {
    682     GCSize old = g->gc.total;
    683     gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]);  /* Sweep one chain. */
    684     if (g->gc.sweepstr > g->strmask)
    685       g->gc.state = GCSsweep;  /* All string hash chains swept. */
    686     lua_assert(old >= g->gc.total);
    687     g->gc.estimate -= old - g->gc.total;
    688     return GCSWEEPCOST;
    689     }
    690   case GCSsweep: {
    691     GCSize old = g->gc.total;
    692     setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
    693     lua_assert(old >= g->gc.total);
    694     g->gc.estimate -= old - g->gc.total;
    695     if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
    696       if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
    697 	lj_str_resize(L, g->strmask >> 1);  /* Shrink string table. */
    698       if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
    699 	g->gc.state = GCSfinalize;
    700 #if LJ_HASFFI
    701 	g->gc.nocdatafin = 1;
    702 #endif
    703       } else {  /* Otherwise skip this phase to help the JIT. */
    704 	g->gc.state = GCSpause;  /* End of GC cycle. */
    705 	g->gc.debt = 0;
    706       }
    707     }
    708     return GCSWEEPMAX*GCSWEEPCOST;
    709     }
    710   case GCSfinalize:
    711     if (gcref(g->gc.mmudata) != NULL) {
    712       if (tvref(g->jit_base))  /* Don't call finalizers on trace. */
    713 	return LJ_MAX_MEM;
    714       gc_finalize(L);  /* Finalize one userdata object. */
    715       if (g->gc.estimate > GCFINALIZECOST)
    716 	g->gc.estimate -= GCFINALIZECOST;
    717       return GCFINALIZECOST;
    718     }
    719 #if LJ_HASFFI
    720     if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
    721 #endif
    722     g->gc.state = GCSpause;  /* End of GC cycle. */
    723     g->gc.debt = 0;
    724     return 0;
    725   default:
    726     lua_assert(0);
    727     return 0;
    728   }
    729 }
    730 
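        /*
        ** Each call performs (GCSTEPSIZE/100)*stepmul cost units of work; with
        ** the default stepmul of 200 that is roughly 2000 units. When a cycle
        ** completes, the next threshold is set to (estimate/100)*pause, so the
        ** default pause of 200 lets memory grow to about twice the live estimate
        ** before a new cycle starts.
        */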
    731 /* Perform a limited amount of incremental GC steps. */
    732 int LJ_FASTCALL lj_gc_step(lua_State *L)
    733 {
    734   global_State *g = G(L);
    735   int64_t lim;
    736   int32_t ostate = g->vmstate;
    737   setvmstate(g, GC);
    738   lim = (GCSTEPSIZE/100) * g->gc.stepmul;
    739   if (lim == 0)
    740     lim = LJ_MAX_MEM;
    741   if (g->gc.total > g->gc.threshold)
    742     g->gc.debt += g->gc.total - g->gc.threshold;
    743   do {
    744     lim -= (GCSize)gc_onestep(L);
    745     if (g->gc.state == GCSpause) {
    746       int64_t nt = (g->gc.estimate/100) * g->gc.pause;
    747       if (nt > LJ_MAX_MEM)
    748         nt = LJ_MAX_MEM;
    749       g->gc.threshold = nt;
    750       g->vmstate = ostate;
    751       return 1;  /* Finished a GC cycle. */
    752     }
    753   } while (lim > 0);
    754   if (g->gc.debt < GCSTEPSIZE) {
    755     g->gc.threshold = g->gc.total + GCSTEPSIZE;
    756     g->vmstate = ostate;
    757     return -1;
    758   } else {
    759     g->gc.debt -= GCSTEPSIZE;
    760     g->gc.threshold = g->gc.total;
    761     g->vmstate = ostate;
    762     return 0;
    763   }
    764 }
    765 
    766 /* Ditto, but fix the stack top first. */
    767 void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
    768 {
    769   if (curr_funcisL(L)) L->top = curr_topL(L);
    770   lj_gc_step(L);
    771 }
    772 
    773 #if LJ_HASJIT
    774 /* Perform multiple GC steps. Called from JIT-compiled code. */
    775 int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
    776 {
    777   lua_State *L = gco2th(gcref(g->cur_L));
    778   L->base = tvref(G(L)->jit_base);
    779   L->top = curr_topL(L);
    780   while (steps-- > 0 && lj_gc_step(L) == 0)
    781     ;
    782   /* Return 1 to force a trace exit. */
    783   return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
    784 }
    785 #endif
    786 
    787 /* Perform a full GC cycle. */
    788 void lj_gc_fullgc(lua_State *L)
    789 {
    790   global_State *g = G(L);
    791   int32_t ostate = g->vmstate;
    792   setvmstate(g, GC);
    793   if (g->gc.state <= GCSatomic) {  /* Caught somewhere in the middle. */
    794     setmref(g->gc.sweep, &g->gc.root);  /* Sweep everything (preserving it). */
    795     setgcrefnull(g->gc.gray);  /* Reset lists from partial propagation. */
    796     setgcrefnull(g->gc.grayagain);
    797     setgcrefnull(g->gc.weak);
    798     g->gc.state = GCSsweepstring;  /* Fast forward to the sweep phase. */
    799     g->gc.sweepstr = 0;
    800   }
    801   while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
    802     gc_onestep(L);  /* Finish sweep. */
    803   lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause);
    804   /* Now perform a full GC. */
    805   g->gc.state = GCSpause;
    806   do { gc_onestep(L); } while (g->gc.state != GCSpause);
    807   g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
    808   g->vmstate = ostate;
    809 }
    810 
    811 /* -- Write barriers ------------------------------------------------------ */
    812 
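        /*
        ** Forward barrier: when a black object is about to reference a white one,
        ** the white object is marked right away during propagation/atomic, or the
        ** black object is simply made white again in any other phase. Tables use
        ** a backward barrier instead (the black table is turned gray and queued
        ** on grayagain, see lj_gc.h), hence the assertion that o is not a table.
        */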
    813 /* Move the GC propagation frontier forward. */
    814 void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
    815 {
    816   lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
    817   lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
    818   lua_assert(o->gch.gct != ~LJ_TTAB);
    819   /* Preserve invariant during propagation. Otherwise it doesn't matter. */
    820   if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    821     gc_mark(g, v);  /* Move frontier forward. */
    822   else
    823     makewhite(g, o);  /* Make it white to avoid the following barrier. */
    824 }
    825 
    826 /* Specialized barrier for closed upvalue. Pass &uv->tv. */
    827 void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
    828 {
    829 #define TV2MARKED(x) \
    830   (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
    831   if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    832     gc_mark(g, gcV(tv));
    833   else
    834     TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
    835 #undef TV2MARKED
    836 }
    837 
    838 /* Close upvalue. Also needs a write barrier. */
    839 void lj_gc_closeuv(global_State *g, GCupval *uv)
    840 {
    841   GCobj *o = obj2gco(uv);
    842   /* Copy stack slot to upvalue itself and point to the copy. */
    843   copyTV(mainthread(g), &uv->tv, uvval(uv));
    844   setmref(uv->v, &uv->tv);
    845   uv->closed = 1;
    846   setgcrefr(o->gch.nextgc, g->gc.root);
    847   setgcref(g->gc.root, o);
    848   if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
    849     if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
    850       gray2black(o);  /* Make it black and preserve invariant. */
    851       if (tviswhite(&uv->tv))
    852 	lj_gc_barrierf(g, o, gcV(&uv->tv));
    853     } else {
    854       makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
    855       lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
    856     }
    857   }
    858 }
    859 
    860 #if LJ_HASJIT
    861 /* Mark a trace if it's saved during the propagation phase. */
    862 void lj_gc_barriertrace(global_State *g, uint32_t traceno)
    863 {
    864   if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    865     gc_marktrace(g, traceno);
    866 }
    867 #endif
    868 
    869 /* -- Allocator ----------------------------------------------------------- */
    870 
    871 /* Call pluggable memory allocator to allocate or resize a fragment. */
    872 void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
    873 {
    874   global_State *g = G(L);
    875   lua_assert((osz == 0) == (p == NULL));
    876   p = g->allocf(g->allocd, p, osz, nsz);
    877   if (nsz > 0 && (!p || !checkptrGC(p)))
    878     lj_err_mem(L);
    879   lua_assert((nsz == 0) == (p == NULL));
    880   g->gc.total = (g->gc.total - osz) + nsz;
    881   return p;
    882 }
    883 
    884 /* Allocate new GC object and link it to the root set. */
    885 void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
    886 {
    887   global_State *g = G(L);
    888   GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
    889   if (o == NULL)
    890     lj_err_mem(L);
    891   lua_assert(checkptrGC(o));
    892   g->gc.total += size;
    893   setgcrefr(o->gch.nextgc, g->gc.root);
    894   setgcref(g->gc.root, o);
    895   newwhite(g, o);
    896   return o;
    897 }
    898 
    899 /* Resize growable vector. */
    900 void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
    901 {
    902   MSize sz = (*szp) << 1;
    903   if (sz < LJ_MIN_VECSZ)
    904     sz = LJ_MIN_VECSZ;
    905   if (sz > lim)
    906     sz = lim;
    907   p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
    908   *szp = sz;
    909   return p;
    910 }
    911