lj_tab.c (18823B)
/*
** Table handling.
** Copyright (C) 2005-2016 Mike Pall. See Copyright Notice in luajit.h
**
** Major portions taken verbatim or adapted from the Lua interpreter.
** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
*/

#define lj_tab_c
#define LUA_CORE

#include "lj_obj.h"
#include "lj_gc.h"
#include "lj_err.h"
#include "lj_tab.h"

/* -- Object hashing ------------------------------------------------------ */

/* Hash values are masked with the table hash mask and used as an index. */
static LJ_AINLINE Node *hashmask(const GCtab *t, uint32_t hash)
{
  Node *n = noderef(t->node);
  return &n[hash & t->hmask];
}

/* String hashes are precomputed when they are interned. */
#define hashstr(t, s)		hashmask(t, (s)->hash)

#define hashlohi(t, lo, hi)	hashmask((t), hashrot((lo), (hi)))
#define hashnum(t, o)		hashlohi((t), (o)->u32.lo, ((o)->u32.hi << 1))
#define hashptr(t, p)		hashlohi((t), u32ptr(p), u32ptr(p) + HASH_BIAS)
#if LJ_GC64
#define hashgcref(t, r) \
  hashlohi((t), (uint32_t)gcrefu(r), (uint32_t)(gcrefu(r) >> 32))
#else
#define hashgcref(t, r)		hashlohi((t), gcrefu(r), gcrefu(r) + HASH_BIAS)
#endif

/* Hash an arbitrary key and return its anchor position in the hash table. */
static Node *hashkey(const GCtab *t, cTValue *key)
{
  lua_assert(!tvisint(key));
  if (tvisstr(key))
    return hashstr(t, strV(key));
  else if (tvisnum(key))
    return hashnum(t, key);
  else if (tvisbool(key))
    return hashmask(t, boolV(key));
  else
    return hashgcref(t, key->gcr);
  /* Only hash 32 bits of lightuserdata on a 64 bit CPU. Good enough? */
}
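
/*
** Example: with hbits=3 the hash part has 2^3 = 8 nodes and t->hmask is 7
** (set up in newhpart() below), so a key whose hash is 0x2A is anchored at
** node[0x2A & 7] == node[2]. Keys whose hashes collide on the same anchor
** are found by following the per-node next chain, as in lj_tab_getstr()
** further down.
*/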

/* -- Table creation and destruction -------------------------------------- */

/* Create new hash part for table. */
static LJ_AINLINE void newhpart(lua_State *L, GCtab *t, uint32_t hbits)
{
  uint32_t hsize;
  Node *node;
  lua_assert(hbits != 0);
  if (hbits > LJ_MAX_HBITS)
    lj_err_msg(L, LJ_ERR_TABOV);
  hsize = 1u << hbits;
  node = lj_mem_newvec(L, hsize, Node);
  setmref(t->node, node);
  setfreetop(t, node, &node[hsize]);
  t->hmask = hsize-1;
}

/*
** Q: Why all of these copies of t->hmask, t->node etc. to local variables?
** A: Because alias analysis for C is _really_ tough.
**    Even state-of-the-art C compilers won't produce good code without this.
*/

/* Clear hash part of table. */
static LJ_AINLINE void clearhpart(GCtab *t)
{
  uint32_t i, hmask = t->hmask;
  Node *node = noderef(t->node);
  lua_assert(t->hmask != 0);
  for (i = 0; i <= hmask; i++) {
    Node *n = &node[i];
    setmref(n->next, NULL);
    setnilV(&n->key);
    setnilV(&n->val);
  }
}

/* Clear array part of table. */
static LJ_AINLINE void clearapart(GCtab *t)
{
  uint32_t i, asize = t->asize;
  TValue *array = tvref(t->array);
  for (i = 0; i < asize; i++)
    setnilV(&array[i]);
}

/* Create a new table. Note: the slots are not initialized (yet). */
static GCtab *newtab(lua_State *L, uint32_t asize, uint32_t hbits)
{
  GCtab *t;
  /* First try to colocate the array part. */
  if (LJ_MAX_COLOSIZE != 0 && asize > 0 && asize <= LJ_MAX_COLOSIZE) {
    Node *nilnode;
    lua_assert((sizeof(GCtab) & 7) == 0);
    t = (GCtab *)lj_mem_newgco(L, sizetabcolo(asize));
    t->gct = ~LJ_TTAB;
    t->nomm = (uint8_t)~0;
    t->colo = (int8_t)asize;
    setmref(t->array, (TValue *)((char *)t + sizeof(GCtab)));
    setgcrefnull(t->metatable);
    t->asize = asize;
    t->hmask = 0;
    nilnode = &G(L)->nilnode;
    setmref(t->node, nilnode);
#if LJ_GC64
    setmref(t->freetop, nilnode);
#endif
  } else {  /* Otherwise separately allocate the array part. */
    Node *nilnode;
    t = lj_mem_newobj(L, GCtab);
    t->gct = ~LJ_TTAB;
    t->nomm = (uint8_t)~0;
    t->colo = 0;
    setmref(t->array, NULL);
    setgcrefnull(t->metatable);
    t->asize = 0;  /* In case the array allocation fails. */
    t->hmask = 0;
    nilnode = &G(L)->nilnode;
    setmref(t->node, nilnode);
#if LJ_GC64
    setmref(t->freetop, nilnode);
#endif
    if (asize > 0) {
      if (asize > LJ_MAX_ASIZE)
        lj_err_msg(L, LJ_ERR_TABOV);
      setmref(t->array, lj_mem_newvec(L, asize, TValue));
      t->asize = asize;
    }
  }
  if (hbits)
    newhpart(L, t, hbits);
  return t;
}

/* Create a new table.
**
** IMPORTANT NOTE: The API differs from lua_createtable()!
**
** The array size is non-inclusive. E.g. asize=128 creates array slots
** for 0..127, but not for 128. If you need slots 1..128, pass asize=129
** (slot 0 is wasted in this case).
**
** The hash size is given in hash bits. hbits=0 means no hash part.
** hbits=1 creates 2 hash slots, hbits=2 creates 4 hash slots and so on.
*/
GCtab *lj_tab_new(lua_State *L, uint32_t asize, uint32_t hbits)
{
  GCtab *t = newtab(L, asize, hbits);
  clearapart(t);
  if (t->hmask > 0) clearhpart(t);
  return t;
}

/* The API of this function conforms to lua_createtable(). */
GCtab *lj_tab_new_ah(lua_State *L, int32_t a, int32_t h)
{
  return lj_tab_new(L, (uint32_t)(a > 0 ? a+1 : 0), hsize2hbits(h));
}
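
/*
** Usage sketch for the asize/hbits conventions described above (the calls
** are illustrative, not taken from this file):
**
**   lj_tab_new(L, 129, 0);     array slots 0..128, no hash part, i.e. room
**                              for keys 1..128 with slot 0 unused.
**   lj_tab_new(L, 0, 2);       no array part, 2^2 = 4 hash slots.
**   lj_tab_new_ah(L, 128, 4);  lua_createtable()-style arguments; the hash
**                              element count 4 is converted to hash bits by
**                              hsize2hbits().
*/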

#if LJ_HASJIT
GCtab * LJ_FASTCALL lj_tab_new1(lua_State *L, uint32_t ahsize)
{
  GCtab *t;
  t = newtab(L, ahsize & 0xffffff, ahsize >> 24);
  clearapart(t);
  if (t->hmask > 0) clearhpart(t);
  return t;
}
#endif
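
/*
** lj_tab_new1() takes both sizes packed into a single operand: the array
** size in the low 24 bits and the hash bits in the top 8 bits. One way a
** caller might pack it (illustrative, not a macro from this file):
**
**   uint32_t ahsize = asize | (hbits << 24);
*/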

/* Duplicate a table. */
GCtab * LJ_FASTCALL lj_tab_dup(lua_State *L, const GCtab *kt)
{
  GCtab *t;
  uint32_t asize, hmask;
  t = newtab(L, kt->asize, kt->hmask > 0 ? lj_fls(kt->hmask)+1 : 0);
  lua_assert(kt->asize == t->asize && kt->hmask == t->hmask);
  t->nomm = 0;  /* Keys with metamethod names may be present. */
  asize = kt->asize;
  if (asize > 0) {
    TValue *array = tvref(t->array);
    TValue *karray = tvref(kt->array);
    if (asize < 64) {  /* An inlined loop beats memcpy for < 512 bytes. */
      uint32_t i;
      for (i = 0; i < asize; i++)
        copyTV(L, &array[i], &karray[i]);
    } else {
      memcpy(array, karray, asize*sizeof(TValue));
    }
  }
  hmask = kt->hmask;
  if (hmask > 0) {
    uint32_t i;
    Node *node = noderef(t->node);
    Node *knode = noderef(kt->node);
    ptrdiff_t d = (char *)node - (char *)knode;
    setfreetop(t, node, (Node *)((char *)getfreetop(kt, knode) + d));
    for (i = 0; i <= hmask; i++) {
      Node *kn = &knode[i];
      Node *n = &node[i];
      Node *next = nextnode(kn);
      /* Don't use copyTV here, since it asserts on a copy of a dead key. */
      n->val = kn->val; n->key = kn->key;
      setmref(n->next, next == NULL? next : (Node *)((char *)next + d));
    }
  }
  return t;
}

/* Clear a table. */
void LJ_FASTCALL lj_tab_clear(GCtab *t)
{
  clearapart(t);
  if (t->hmask > 0) {
    Node *node = noderef(t->node);
    setfreetop(t, node, &node[t->hmask+1]);
    clearhpart(t);
  }
}

/* Free a table. */
void LJ_FASTCALL lj_tab_free(global_State *g, GCtab *t)
{
  if (t->hmask > 0)
    lj_mem_freevec(g, noderef(t->node), t->hmask+1, Node);
  if (t->asize > 0 && LJ_MAX_COLOSIZE != 0 && t->colo <= 0)
    lj_mem_freevec(g, tvref(t->array), t->asize, TValue);
  if (LJ_MAX_COLOSIZE != 0 && t->colo)
    lj_mem_free(g, t, sizetabcolo((uint32_t)t->colo & 0x7f));
  else
    lj_mem_freet(g, t);
}

/* -- Table resizing ------------------------------------------------------ */

/* Resize a table to fit the new array/hash part sizes. */
void lj_tab_resize(lua_State *L, GCtab *t, uint32_t asize, uint32_t hbits)
{
  Node *oldnode = noderef(t->node);
  uint32_t oldasize = t->asize;
  uint32_t oldhmask = t->hmask;
  if (asize > oldasize) {  /* Array part grows? */
    TValue *array;
    uint32_t i;
    if (asize > LJ_MAX_ASIZE)
      lj_err_msg(L, LJ_ERR_TABOV);
    if (LJ_MAX_COLOSIZE != 0 && t->colo > 0) {
      /* A colocated array must be separated and copied. */
      TValue *oarray = tvref(t->array);
      array = lj_mem_newvec(L, asize, TValue);
      t->colo = (int8_t)(t->colo | 0x80);  /* Mark as separated (colo < 0). */
      for (i = 0; i < oldasize; i++)
        copyTV(L, &array[i], &oarray[i]);
    } else {
      array = (TValue *)lj_mem_realloc(L, tvref(t->array),
                                       oldasize*sizeof(TValue),
                                       asize*sizeof(TValue));
    }
    setmref(t->array, array);
    t->asize = asize;
    for (i = oldasize; i < asize; i++)  /* Clear newly allocated slots. */
      setnilV(&array[i]);
  }
  /* Create new (empty) hash part. */
  if (hbits) {
    newhpart(L, t, hbits);
    clearhpart(t);
  } else {
    global_State *g = G(L);
    setmref(t->node, &g->nilnode);
#if LJ_GC64
    setmref(t->freetop, &g->nilnode);
#endif
    t->hmask = 0;
  }
  if (asize < oldasize) {  /* Array part shrinks? */
    TValue *array = tvref(t->array);
    uint32_t i;
    t->asize = asize;  /* Note: This 'shrinks' even colocated arrays. */
    for (i = asize; i < oldasize; i++)  /* Reinsert old array values. */
      if (!tvisnil(&array[i]))
        copyTV(L, lj_tab_setinth(L, t, (int32_t)i), &array[i]);
    /* Physically shrink only separated arrays. */
    if (LJ_MAX_COLOSIZE != 0 && t->colo <= 0)
      setmref(t->array, lj_mem_realloc(L, array,
                                       oldasize*sizeof(TValue),
                                       asize*sizeof(TValue)));
  }
  if (oldhmask > 0) {  /* Reinsert pairs from old hash part. */
    global_State *g;
    uint32_t i;
    for (i = 0; i <= oldhmask; i++) {
      Node *n = &oldnode[i];
      if (!tvisnil(&n->val))
        copyTV(L, lj_tab_set(L, t, &n->key), &n->val);
    }
    g = G(L);
    lj_mem_freevec(g, oldnode, oldhmask+1, Node);
  }
}

static uint32_t countint(cTValue *key, uint32_t *bins)
{
  lua_assert(!tvisint(key));
  if (tvisnum(key)) {
    lua_Number nk = numV(key);
    int32_t k = lj_num2int(nk);
    if ((uint32_t)k < LJ_MAX_ASIZE && nk == (lua_Number)k) {
      bins[(k > 2 ? lj_fls((uint32_t)(k-1)) : 0)]++;
      return 1;
    }
  }
  return 0;
}

static uint32_t countarray(const GCtab *t, uint32_t *bins)
{
  uint32_t na, b, i;
  if (t->asize == 0) return 0;
  for (na = i = b = 0; b < LJ_MAX_ABITS; b++) {
    uint32_t n, top = 2u << b;
    TValue *array;
    if (top >= t->asize) {
      top = t->asize-1;
      if (i > top)
        break;
    }
    array = tvref(t->array);
    for (n = 0; i <= top; i++)
      if (!tvisnil(&array[i]))
        n++;
    bins[b] += n;
    na += n;
  }
  return na;
}

static uint32_t counthash(const GCtab *t, uint32_t *bins, uint32_t *narray)
{
  uint32_t total, na, i, hmask = t->hmask;
  Node *node = noderef(t->node);
  for (total = na = 0, i = 0; i <= hmask; i++) {
    Node *n = &node[i];
    if (!tvisnil(&n->val)) {
      na += countint(&n->key, bins);
      total++;
    }
  }
  *narray += na;
  return total;
}

static uint32_t bestasize(uint32_t bins[], uint32_t *narray)
{
  uint32_t b, sum, na = 0, sz = 0, nn = *narray;
  for (b = 0, sum = 0; 2*nn > (1u<<b) && sum != nn; b++)
    if (bins[b] > 0 && 2*(sum += bins[b]) > (1u<<b)) {
      sz = (2u<<b)+1;
      na = sum;
    }
  *narray = sz;
  return na;
}

static void rehashtab(lua_State *L, GCtab *t, cTValue *ek)
{
  uint32_t bins[LJ_MAX_ABITS];
  uint32_t total, asize, na, i;
  for (i = 0; i < LJ_MAX_ABITS; i++) bins[i] = 0;
  asize = countarray(t, bins);
  total = 1 + asize;
  total += counthash(t, bins, &asize);
  asize += countint(ek, bins);
  na = bestasize(bins, &asize);
  total -= na;
  lj_tab_resize(L, t, asize, hsize2hbits(total));
}
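
/*
** Worked example for bestasize(), with a made-up key set: if the integer
** keys 1..5 and 100 are counted, then bins[0] = 2 (keys 1..2), bins[1] = 2
** (keys 3..4), bins[2] = 1 (key 5), bins[6] = 1 (key 100) and *narray = 6.
** The loop keeps the largest b whose cumulative count sum still satisfies
** 2*sum > 1<<b, here b = 2 with sum = 5, so *narray becomes (2<<2)+1 = 9
** and 5 keys are destined for the array part; key 100 is left to the hash
** part, which rehashtab() sizes via hsize2hbits().
*/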

#if LJ_HASFFI
void lj_tab_rehash(lua_State *L, GCtab *t)
{
  rehashtab(L, t, niltv(L));
}
#endif

void lj_tab_reasize(lua_State *L, GCtab *t, uint32_t nasize)
{
  lj_tab_resize(L, t, nasize+1, t->hmask > 0 ? lj_fls(t->hmask)+1 : 0);
}

/* -- Table getters ------------------------------------------------------- */

cTValue * LJ_FASTCALL lj_tab_getinth(GCtab *t, int32_t key)
{
  TValue k;
  Node *n;
  k.n = (lua_Number)key;
  n = hashnum(t, &k);
  do {
    if (tvisnum(&n->key) && n->key.n == k.n)
      return &n->val;
  } while ((n = nextnode(n)));
  return NULL;
}

cTValue *lj_tab_getstr(GCtab *t, GCstr *key)
{
  Node *n = hashstr(t, key);
  do {
    if (tvisstr(&n->key) && strV(&n->key) == key)
      return &n->val;
  } while ((n = nextnode(n)));
  return NULL;
}

cTValue *lj_tab_get(lua_State *L, GCtab *t, cTValue *key)
{
  if (tvisstr(key)) {
    cTValue *tv = lj_tab_getstr(t, strV(key));
    if (tv)
      return tv;
  } else if (tvisint(key)) {
    cTValue *tv = lj_tab_getint(t, intV(key));
    if (tv)
      return tv;
  } else if (tvisnum(key)) {
    lua_Number nk = numV(key);
    int32_t k = lj_num2int(nk);
    if (nk == (lua_Number)k) {
      cTValue *tv = lj_tab_getint(t, k);
      if (tv)
        return tv;
    } else {
      goto genlookup;  /* Else use the generic lookup. */
    }
  } else if (!tvisnil(key)) {
    Node *n;
  genlookup:
    n = hashkey(t, key);
    do {
      if (lj_obj_equal(&n->key, key))
        return &n->val;
    } while ((n = nextnode(n)));
  }
  return niltv(L);
}

/* -- Table setters ------------------------------------------------------- */

/* Insert new key. Use Brent's variation to optimize the chain length. */
TValue *lj_tab_newkey(lua_State *L, GCtab *t, cTValue *key)
{
  Node *n = hashkey(t, key);
  if (!tvisnil(&n->val) || t->hmask == 0) {
    Node *nodebase = noderef(t->node);
    Node *collide, *freenode = getfreetop(t, nodebase);
    lua_assert(freenode >= nodebase && freenode <= nodebase+t->hmask+1);
    do {
      if (freenode == nodebase) {  /* No free node found? */
        rehashtab(L, t, key);  /* Rehash table. */
        return lj_tab_set(L, t, key);  /* Retry key insertion. */
      }
    } while (!tvisnil(&(--freenode)->key));
    setfreetop(t, nodebase, freenode);
    lua_assert(freenode != &G(L)->nilnode);
    collide = hashkey(t, &n->key);
    if (collide != n) {  /* Colliding node not the main node? */
      while (noderef(collide->next) != n)  /* Find predecessor. */
        collide = nextnode(collide);
      setmref(collide->next, freenode);  /* Relink chain. */
      /* Copy colliding node into free node and free main node. */
      freenode->val = n->val;
      freenode->key = n->key;
      freenode->next = n->next;
      setmref(n->next, NULL);
      setnilV(&n->val);
      /* Rechain pseudo-resurrected string keys with colliding hashes. */
      while (nextnode(freenode)) {
        Node *nn = nextnode(freenode);
        if (tvisstr(&nn->key) && !tvisnil(&nn->val) &&
            hashstr(t, strV(&nn->key)) == n) {
          freenode->next = nn->next;
          nn->next = n->next;
          setmref(n->next, nn);
        } else {
          freenode = nn;
        }
      }
    } else {  /* Otherwise use free node. */
      setmrefr(freenode->next, n->next);  /* Insert into chain. */
      setmref(n->next, freenode);
      n = freenode;
    }
  }
  n->key.u64 = key->u64;
  if (LJ_UNLIKELY(tvismzero(&n->key)))
    n->key.u64 = 0;
  lj_gc_anybarriert(L, t);
  lua_assert(tvisnil(&n->val));
  return &n->val;
}

TValue *lj_tab_setinth(lua_State *L, GCtab *t, int32_t key)
{
  TValue k;
  Node *n;
  k.n = (lua_Number)key;
  n = hashnum(t, &k);
  do {
    if (tvisnum(&n->key) && n->key.n == k.n)
      return &n->val;
  } while ((n = nextnode(n)));
  return lj_tab_newkey(L, t, &k);
}

TValue *lj_tab_setstr(lua_State *L, GCtab *t, GCstr *key)
{
  TValue k;
  Node *n = hashstr(t, key);
  do {
    if (tvisstr(&n->key) && strV(&n->key) == key)
      return &n->val;
  } while ((n = nextnode(n)));
  setstrV(L, &k, key);
  return lj_tab_newkey(L, t, &k);
}

TValue *lj_tab_set(lua_State *L, GCtab *t, cTValue *key)
{
  Node *n;
  t->nomm = 0;  /* Invalidate negative metamethod cache. */
  if (tvisstr(key)) {
    return lj_tab_setstr(L, t, strV(key));
  } else if (tvisint(key)) {
    return lj_tab_setint(L, t, intV(key));
  } else if (tvisnum(key)) {
    lua_Number nk = numV(key);
    int32_t k = lj_num2int(nk);
    if (nk == (lua_Number)k)
      return lj_tab_setint(L, t, k);
    if (tvisnan(key))
      lj_err_msg(L, LJ_ERR_NANIDX);
    /* Else use the generic lookup. */
  } else if (tvisnil(key)) {
    lj_err_msg(L, LJ_ERR_NILIDX);
  }
  n = hashkey(t, key);
  do {
    if (lj_obj_equal(&n->key, key))
      return &n->val;
  } while ((n = nextnode(n)));
  return lj_tab_newkey(L, t, key);
}

/* -- Table traversal ----------------------------------------------------- */

/* Get the traversal index of a key. */
static uint32_t keyindex(lua_State *L, GCtab *t, cTValue *key)
{
  TValue tmp;
  if (tvisint(key)) {
    int32_t k = intV(key);
    if ((uint32_t)k < t->asize)
      return (uint32_t)k;  /* Array key indexes: [0..t->asize-1] */
    setnumV(&tmp, (lua_Number)k);
    key = &tmp;
  } else if (tvisnum(key)) {
    lua_Number nk = numV(key);
    int32_t k = lj_num2int(nk);
    if ((uint32_t)k < t->asize && nk == (lua_Number)k)
      return (uint32_t)k;  /* Array key indexes: [0..t->asize-1] */
  }
  if (!tvisnil(key)) {
    Node *n = hashkey(t, key);
    do {
      if (lj_obj_equal(&n->key, key))
        return t->asize + (uint32_t)(n - noderef(t->node));
        /* Hash key indexes: [t->asize..t->asize+t->hmask] */
    } while ((n = nextnode(n)));
    if (key->u32.hi == 0xfffe7fff)  /* ITERN was despecialized while running. */
      return key->u32.lo - 1;
    lj_err_msg(L, LJ_ERR_NEXTIDX);
    return 0;  /* unreachable */
  }
  return ~0u;  /* A nil key starts the traversal. */
}

/* Advance to the next step in a table traversal. */
int lj_tab_next(lua_State *L, GCtab *t, TValue *key)
{
  uint32_t i = keyindex(L, t, key);  /* Find predecessor key index. */
  for (i++; i < t->asize; i++)  /* First traverse the array keys. */
    if (!tvisnil(arrayslot(t, i))) {
      setintV(key, i);
      copyTV(L, key+1, arrayslot(t, i));
      return 1;
    }
  for (i -= t->asize; i <= t->hmask; i++) {  /* Then traverse the hash keys. */
    Node *n = &noderef(t->node)[i];
    if (!tvisnil(&n->val)) {
      copyTV(L, key, &n->key);
      copyTV(L, key+1, &n->val);
      return 1;
    }
  }
  return 0;  /* End of traversal. */
}
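
/*
** Example of the traversal indexes used above, for a hypothetical table with
** t->asize = 4 and t->hmask = 3: array keys 0..3 map to indexes 0..3 and the
** four hash nodes map to indexes 4..7. Starting from key 2, lj_tab_next()
** first checks array slot 3 and then the hash nodes in node order; a nil key
** (index ~0u) restarts the scan at array slot 0.
*/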

/* -- Table length calculation -------------------------------------------- */

static MSize unbound_search(GCtab *t, MSize j)
{
  cTValue *tv;
  MSize i = j;  /* i is zero or a present index */
  j++;
  /* find `i' and `j' such that i is present and j is not */
  while ((tv = lj_tab_getint(t, (int32_t)j)) && !tvisnil(tv)) {
    i = j;
    j *= 2;
    if (j > (MSize)(INT_MAX-2)) {  /* overflow? */
      /* table was built with bad purposes: resort to linear search */
      i = 1;
      while ((tv = lj_tab_getint(t, (int32_t)i)) && !tvisnil(tv)) i++;
      return i - 1;
    }
  }
  /* now do a binary search between them */
  while (j - i > 1) {
    MSize m = (i+j)/2;
    cTValue *tvb = lj_tab_getint(t, (int32_t)m);
    if (tvb && !tvisnil(tvb)) i = m; else j = m;
  }
  return i;
}

/*
** Try to find a boundary in table `t'. A `boundary' is an integer index
** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil).
*/
MSize LJ_FASTCALL lj_tab_len(GCtab *t)
{
  MSize j = (MSize)t->asize;
  if (j > 1 && tvisnil(arrayslot(t, j-1))) {
    MSize i = 1;
    while (j - i > 1) {
      MSize m = (i+j)/2;
      if (tvisnil(arrayslot(t, m-1))) j = m; else i = m;
    }
    return i-1;
  }
  if (j) j--;
  if (t->hmask <= 0)
    return j;
  return unbound_search(t, j);
}
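
/*
** Example of the boundary semantics above, for a made-up table holding
** t[1] = 1, t[2] = 2, t[3] = 3 and t[5] = 5: both 3 and 5 are valid
** boundaries, and depending on how the table is internally sized either one
** may be returned. unbound_search() is only used when the last array slot is
** non-nil and a hash part exists, i.e. the boundary may lie beyond the
** array part.
*/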