capnproto

FORK: Cap'n Proto serialization/RPC system - core tools and C++ library
git clone https://git.neptards.moe/neptards/capnproto.git

mutex-test.c++ (26558B)


      1 // Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
      2 // Licensed under the MIT License:
      3 //
      4 // Permission is hereby granted, free of charge, to any person obtaining a copy
      5 // of this software and associated documentation files (the "Software"), to deal
      6 // in the Software without restriction, including without limitation the rights
      7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      8 // copies of the Software, and to permit persons to whom the Software is
      9 // furnished to do so, subject to the following conditions:
     10 //
     11 // The above copyright notice and this permission notice shall be included in
     12 // all copies or substantial portions of the Software.
     13 //
     14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
     17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
     20 // THE SOFTWARE.
     21 
     22 #if _WIN32
     23 #include "win32-api-version.h"
     24 #define NOGDI  // NOGDI is needed to make EXPECT_EQ(123u, *lock) compile for some reason
     25 #endif
     26 
     27 #include "time.h"
     28 
     29 #define KJ_MUTEX_TEST 1
     30 
     31 #include "mutex.h"
     32 #include "debug.h"
     33 #include "thread.h"
     34 #include <kj/compat/gtest.h>
     35 #include <stdlib.h>
     36 
     37 #if _WIN32
     38 #include <windows.h>
     39 #undef NOGDI
     40 #else
     41 #include <pthread.h>
     42 #include <unistd.h>
     43 #endif
     44 
     45 #ifdef KJ_CONTENTION_WARNING_THRESHOLD
     46 #include <vector>
     47 #endif
     48 
     49 #if KJ_TRACK_LOCK_BLOCKING
     50 #include <syscall.h>
     51 #include <signal.h>
     52 #include <time.h>
     53 #include <atomic>
     54 #endif
     55 
     56 namespace kj {
     57 namespace {
     58 
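        // Sleep for roughly 10 ms, giving competing threads time to run.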
     59 #if _WIN32
     60 inline void delay() { Sleep(10); }
     61 #else
     62 inline void delay() { usleep(10000); }
     63 #endif
     64 
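        // Basic MutexGuarded behavior: an exclusive lock blocks other writers, multiple shared
        // locks may be held at once, and getAlreadyLocked*()/getWithoutLock() expose the value
        // under the expected conditions. The KJ_USE_FUTEX sections additionally exercise the
        // timed lock variants.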
     65 TEST(Mutex, MutexGuarded) {
     66   MutexGuarded<uint> value(123);
     67 
     68   {
     69     Locked<uint> lock = value.lockExclusive();
     70     EXPECT_EQ(123u, *lock);
     71     EXPECT_EQ(123u, value.getAlreadyLockedExclusive());
     72 
     73 #if KJ_USE_FUTEX
     74     auto timeout = MILLISECONDS * 50;
     75 
     76     auto startTime = systemPreciseMonotonicClock().now();
     77     EXPECT_TRUE(value.lockExclusiveWithTimeout(timeout) == nullptr);
     78     auto duration = systemPreciseMonotonicClock().now() - startTime;
     79     EXPECT_TRUE(duration >= timeout);
     80 
     81     startTime = systemPreciseMonotonicClock().now();
     82     EXPECT_TRUE(value.lockSharedWithTimeout(timeout) == nullptr);
     83     duration = systemPreciseMonotonicClock().now() - startTime;
     84     EXPECT_TRUE(duration >= timeout);
     85 
     86     // Originally, upon timing out, the exclusive-requested flag would be removed
     87     // from the futex state. If the implementation still removed that flag on timeout,
     88     // this test would hang.
     89     Thread lockTimeoutThread([&]() {
     90       // Try to time out during the 10 ms delay below.
     91       Maybe<Locked<uint>> maybeLock = value.lockExclusiveWithTimeout(MILLISECONDS * 8);
     92       EXPECT_TRUE(maybeLock == nullptr);
     93     });
     94 #endif
     95 
     96     Thread thread([&]() {
     97       Locked<uint> threadLock = value.lockExclusive();
     98       EXPECT_EQ(456u, *threadLock);
     99       *threadLock = 789;
    100     });
    101 
    102     delay();
    103     EXPECT_EQ(123u, *lock);
    104     *lock = 456;
    105     auto earlyRelease = kj::mv(lock);
    106   }
    107 
    108 #if KJ_USE_FUTEX
    109   EXPECT_EQ(789u, *KJ_ASSERT_NONNULL(value.lockExclusiveWithTimeout(MILLISECONDS * 50)));
    110   EXPECT_EQ(789u, *KJ_ASSERT_NONNULL(value.lockSharedWithTimeout(MILLISECONDS * 50)));
    111 #endif
    112 
    113   EXPECT_EQ(789u, *value.lockExclusive());
    114 
    115   {
    116     auto rlock1 = value.lockShared();
    117     EXPECT_EQ(789u, *rlock1);
    118     EXPECT_EQ(789u, value.getAlreadyLockedShared());
    119 
    120     {
    121       auto rlock2 = value.lockShared();
    122       EXPECT_EQ(789u, *rlock2);
    123       auto rlock3 = value.lockShared();
    124       EXPECT_EQ(789u, *rlock3);
    125       auto rlock4 = value.lockShared();
    126       EXPECT_EQ(789u, *rlock4);
    127     }
    128 
    129     Thread thread2([&]() {
    130       Locked<uint> threadLock = value.lockExclusive();
    131       *threadLock = 321;
    132     });
    133 
    134 #if KJ_USE_FUTEX
    135     // So, it turns out that pthread_rwlock on BSD "prioritizes" writers over readers.  The result
    136     // is that if one thread tries to take multiple read locks, but another thread happens to
    137     // request a write lock in between, you get a deadlock.  This seems to contradict the man pages
    138     // and common sense, but this is how it is.  The futex-based implementation doesn't currently
    139     // have this problem because it does not prioritize writers.  Perhaps it will in the future,
    140     // but we'll leave this test here until then to make sure we notice the change.
    141 
    142     delay();
    143     EXPECT_EQ(789u, *rlock1);
    144 
    145     {
    146       auto rlock2 = value.lockShared();
    147       EXPECT_EQ(789u, *rlock2);
    148       auto rlock3 = value.lockShared();
    149       EXPECT_EQ(789u, *rlock3);
    150       auto rlock4 = value.lockShared();
    151       EXPECT_EQ(789u, *rlock4);
    152     }
    153 #endif
    154 
    155     delay();
    156     EXPECT_EQ(789u, *rlock1);
    157     auto earlyRelease = kj::mv(rlock1);
    158   }
    159 
    160   EXPECT_EQ(321u, *value.lockExclusive());
    161 
    162 #if !_WIN32 && !__CYGWIN__  // Not checked on win32.
    163   EXPECT_DEBUG_ANY_THROW(value.getAlreadyLockedExclusive());
    164   EXPECT_DEBUG_ANY_THROW(value.getAlreadyLockedShared());
    165 #endif
    166   EXPECT_EQ(321u, value.getWithoutLock());
    167 }
    168 
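        // when(predicate, callback) blocks until predicate(value) returns true, then invokes
        // callback(value) with the lock still held and returns its result.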
    169 TEST(Mutex, When) {
    170   MutexGuarded<uint> value(123);
    171 
    172   {
    173     uint m = value.when([](uint n) { return n < 200; }, [](uint& n) {
    174       ++n;
    175       return n + 2;
    176     });
    177     KJ_EXPECT(m == 126);
    178 
    179     KJ_EXPECT(*value.lockShared() == 124);
    180   }
    181 
    182   {
    183     kj::Thread thread([&]() {
    184       delay();
    185       *value.lockExclusive() = 321;
    186     });
    187 
    188     uint m = value.when([](uint n) { return n > 200; }, [](uint& n) {
    189       ++n;
    190       return n + 2;
    191     });
    192     KJ_EXPECT(m == 324);
    193 
    194     KJ_EXPECT(*value.lockShared() == 322);
    195   }
    196 
    197   {
    198     // Stress test. 100 threads each wait for a value and then set the next value.
    199     *value.lockExclusive() = 0;
    200 
    201     auto threads = kj::heapArrayBuilder<kj::Own<kj::Thread>>(100);
    202     for (auto i: kj::zeroTo(100)) {
    203       threads.add(kj::heap<kj::Thread>([i,&value]() {
    204         if (i % 2 == 0) delay();
    205         uint m = value.when([i](const uint& n) { return n == i; },
    206             [](uint& n) { return n++; });
    207         KJ_ASSERT(m == i);
    208       }));
    209     }
    210 
    211     uint m = value.when([](uint n) { return n == 100; }, [](uint& n) {
    212       return n++;
    213     });
    214     KJ_EXPECT(m == 100);
    215 
    216     KJ_EXPECT(*value.lockShared() == 101);
    217   }
    218 
    219 #if !KJ_NO_EXCEPTIONS
    220   {
    221     // Throw from predicate.
    222     KJ_EXPECT_THROW_MESSAGE("oops threw", value.when([](uint n) -> bool {
    223       KJ_FAIL_ASSERT("oops threw");
    224     }, [](uint& n) {
    225       KJ_FAIL_EXPECT("shouldn't get here");
    226     }));
    227 
    228     // Throw from predicate later on.
    229     kj::Thread thread([&]() {
    230       delay();
    231       *value.lockExclusive() = 321;
    232     });
    233 
    234     KJ_EXPECT_THROW_MESSAGE("oops threw", value.when([](uint n) -> bool {
    235       KJ_ASSERT(n != 321, "oops threw");
    236       return false;
    237     }, [](uint& n) {
    238       KJ_FAIL_EXPECT("shouldn't get here");
    239     }));
    240   }
    241 
    242   {
    243     // Verify the exceptions didn't break the mutex.
    244     uint m = value.when([](uint n) { return n > 0; }, [](uint& n) {
    245       return n;
    246     });
    247     KJ_EXPECT(m == 321);
    248 
    249     kj::Thread thread([&]() {
    250       delay();
    251       *value.lockExclusive() = 654;
    252     });
    253 
    254     m = value.when([](uint n) { return n > 500; }, [](uint& n) {
    255       return n;
    256     });
    257     KJ_EXPECT(m == 654);
    258   }
    259 #endif
    260 }
    261 
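        // Same as when(), but with a timeout: if the predicate never becomes true before the
        // timeout expires, the callback is invoked anyway on the current (non-matching) value.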
    262 TEST(Mutex, WhenWithTimeout) {
    263   auto& clock = systemPreciseMonotonicClock();
    264   MutexGuarded<uint> value(123);
    265 
    266   // A timeout that won't expire.
    267   static constexpr Duration LONG_TIMEOUT = 10 * kj::SECONDS;
    268 
    269   {
    270     uint m = value.when([](uint n) { return n < 200; }, [](uint& n) {
    271       ++n;
    272       return n + 2;
    273     }, LONG_TIMEOUT);
    274     KJ_EXPECT(m == 126);
    275 
    276     KJ_EXPECT(*value.lockShared() == 124);
    277   }
    278 
    279   {
    280     kj::Thread thread([&]() {
    281       delay();
    282       *value.lockExclusive() = 321;
    283     });
    284 
    285     uint m = value.when([](uint n) { return n > 200; }, [](uint& n) {
    286       ++n;
    287       return n + 2;
    288     }, LONG_TIMEOUT);
    289     KJ_EXPECT(m == 324);
    290 
    291     KJ_EXPECT(*value.lockShared() == 322);
    292   }
    293 
    294   {
    295     // Stress test. 100 threads each wait for a value and then set the next value.
    296     *value.lockExclusive() = 0;
    297 
    298     auto threads = kj::heapArrayBuilder<kj::Own<kj::Thread>>(100);
    299     for (auto i: kj::zeroTo(100)) {
    300       threads.add(kj::heap<kj::Thread>([i,&value]() {
    301         if (i % 2 == 0) delay();
    302         uint m = value.when([i](const uint& n) { return n == i; },
    303             [](uint& n) { return n++; }, LONG_TIMEOUT);
    304         KJ_ASSERT(m == i);
    305       }));
    306     }
    307 
    308     uint m = value.when([](uint n) { return n == 100; }, [](uint& n) {
    309       return n++;
    310     }, LONG_TIMEOUT);
    311     KJ_EXPECT(m == 100);
    312 
    313     KJ_EXPECT(*value.lockShared() == 101);
    314   }
    315 
    316   {
    317     auto start = clock.now();
    318     uint m = value.when([](uint n) { return n == 0; }, [&](uint& n) {
    319       KJ_ASSERT(n == 101);
    320       auto t = clock.now() - start;
    321       KJ_EXPECT(t >= 10 * kj::MILLISECONDS, t);
    322       return 12;
    323     }, 10 * kj::MILLISECONDS);
    324     KJ_EXPECT(m == 12);
    325 
    326     m = value.when([](uint n) { return n == 0; }, [&](uint& n) {
    327       KJ_ASSERT(n == 101);
    328       auto t = clock.now() - start;
    329       KJ_EXPECT(t >= 20 * kj::MILLISECONDS, t);
    330       return 34;
    331     }, 10 * kj::MILLISECONDS);
    332     KJ_EXPECT(m == 34);
    333 
    334     m = value.when([](uint n) { return n > 0; }, [&](uint& n) {
    335       KJ_ASSERT(n == 101);
    336       return 56;
    337     }, LONG_TIMEOUT);
    338     KJ_EXPECT(m == 56);
    339   }
    340 
    341 #if !KJ_NO_EXCEPTIONS
    342   {
    343     // Throw from predicate.
    344     KJ_EXPECT_THROW_MESSAGE("oops threw", value.when([](uint n) -> bool {
    345       KJ_FAIL_ASSERT("oops threw");
    346     }, [](uint& n) {
    347       KJ_FAIL_EXPECT("shouldn't get here");
    348     }, LONG_TIMEOUT));
    349 
    350     // Throw from predicate later on.
    351     kj::Thread thread([&]() {
    352       delay();
    353       *value.lockExclusive() = 321;
    354     });
    355 
    356     KJ_EXPECT_THROW_MESSAGE("oops threw", value.when([](uint n) -> bool {
    357       KJ_ASSERT(n != 321, "oops threw");
    358       return false;
    359     }, [](uint& n) {
    360       KJ_FAIL_EXPECT("shouldn't get here");
    361     }, LONG_TIMEOUT));
    362   }
    363 
    364   {
    365     // Verify the exceptions didn't break the mutex.
    366     uint m = value.when([](uint n) { return n > 0; }, [](uint& n) {
    367       return n;
    368     }, LONG_TIMEOUT);
    369     KJ_EXPECT(m == 321);
    370 
    371     auto start = clock.now();
    372     m = value.when([](uint n) { return n == 0; }, [&](uint& n) {
    373       KJ_EXPECT(clock.now() - start >= 10 * kj::MILLISECONDS);
    374       return n + 1;
    375     }, 10 * kj::MILLISECONDS);
    376     KJ_EXPECT(m == 322);
    377 
    378     kj::Thread thread([&]() {
    379       delay();
    380       *value.lockExclusive() = 654;
    381     });
    382 
    383     m = value.when([](uint n) { return n > 500; }, [](uint& n) {
    384       return n;
    385     }, LONG_TIMEOUT);
    386     KJ_EXPECT(m == 654);
    387   }
    388 #endif
    389 }
    390 
    391 TEST(Mutex, WhenWithTimeoutPreciseTiming) {
    392   // Test that MutexGuarded::when() with a timeout sleeps for precisely the right amount of time.
    393 
    394   auto& clock = systemPreciseMonotonicClock();
    395 
    396   for (uint retryCount = 0; retryCount < 20; retryCount++) {
    397     MutexGuarded<uint> value(123);
    398 
    399     auto start = clock.now();
    400     uint m = value.when([&value](uint n) {
    401       // HACK: Reset the value as a way of testing what happens when the waiting thread is woken
    402       //   up but then finds it's not ready yet.
    403       value.getWithoutLock() = 123;
    404       return n == 321;
    405     }, [](uint& n) {
    406       return 456;
    407     }, 100 * kj::MILLISECONDS);
    408 
    409     KJ_EXPECT(m == 456);
    410 
    411     auto t = clock.now() - start;
    412     KJ_EXPECT(t >= 100 * kj::MILLISECONDS);
    413     // Provide a large margin of error here because some operating systems (e.g. Windows) can have
    414     // long timeslices (13ms) and won't schedule more precisely than a timeslice.
    415     if (t <= 120 * kj::MILLISECONDS) {
    416       return;
    417     }
    418   }
    419   KJ_FAIL_ASSERT("time not within expected bounds even after retries");
    420 }
    421 
    422 TEST(Mutex, WhenWithTimeoutPreciseTimingAfterInterrupt) {
    423   // Test that MutexGuarded::when() with a timeout sleeps for precisely the right amount of time,
    424   // even if the thread is spuriously woken in the middle.
    425 
    426   auto& clock = systemPreciseMonotonicClock();
    427 
    428   for (uint retryCount = 0; retryCount < 20; retryCount++) {
    429     MutexGuarded<uint> value(123);
    430 
    431     kj::Thread thread([&]() {
    432       delay();
    433       value.lockExclusive().induceSpuriousWakeupForTest();
    434     });
    435 
    436     auto start = clock.now();
    437     uint m = value.when([](uint n) {
    438       return n == 321;
    439     }, [](uint& n) {
    440       return 456;
    441     }, 100 * kj::MILLISECONDS);
    442 
    443     KJ_EXPECT(m == 456);
    444 
    445     auto t = clock.now() - start;
    446     KJ_EXPECT(t >= 100 * kj::MILLISECONDS, t / kj::MILLISECONDS);
    447     // Provide a large margin of error here because some operating systems (e.g. Windows) can have
    448     // long timeslices (13ms) and won't schedule more precisely than a timeslice.
    449     if (t <= 120 * kj::MILLISECONDS) {
    450       return;
    451     }
    452   }
    453   KJ_FAIL_ASSERT("time not within expected bounds even after retries");
    454 }
    455 
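        // Two threads ping-pong a counter: each increments the value and then wait()s for the
        // other thread's increment, verifying that a waiter's update wakes the other waiter.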
    456 KJ_TEST("wait()s wake each other") {
    457   MutexGuarded<uint> value(0);
    458 
    459   {
    460     kj::Thread thread([&]() {
    461       auto lock = value.lockExclusive();
    462       ++*lock;
    463       lock.wait([](uint value) { return value == 2; });
    464       ++*lock;
    465       lock.wait([](uint value) { return value == 4; });
    466     });
    467 
    468     {
    469       auto lock = value.lockExclusive();
    470       lock.wait([](uint value) { return value == 1; });
    471       ++*lock;
    472       lock.wait([](uint value) { return value == 3; });
    473       ++*lock;
    474     }
    475   }
    476 }
    477 
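        // Lazy<T>::get() runs its initializer exactly once. A caller that arrives while
        // initialization is in progress blocks until it finishes, and later initializers are
        // ignored in favor of the already-constructed value.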
    478 TEST(Mutex, Lazy) {
    479   Lazy<uint> lazy;
    480   volatile bool initStarted = false;
    481 
    482   Thread thread([&]() {
    483     EXPECT_EQ(123u, lazy.get([&](SpaceFor<uint>& space) -> Own<uint> {
    484       initStarted = true;
    485       delay();
    486       return space.construct(123);
    487     }));
    488   });
    489 
    490   // Spin until the initializer has been entered in the thread.
    491   while (!initStarted) {
    492 #if _WIN32
    493     Sleep(0);
    494 #else
    495     sched_yield();
    496 #endif
    497   }
    498 
    499   EXPECT_EQ(123u, lazy.get([](SpaceFor<uint>& space) { return space.construct(456); }));
    500   EXPECT_EQ(123u, lazy.get([](SpaceFor<uint>& space) { return space.construct(789); }));
    501 }
    502 
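        // An initializer that fails should not wedge the Lazy<T>: a later get() still succeeds.
        // (The observed value differs depending on exception support; see below.)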
    503 TEST(Mutex, LazyException) {
    504   Lazy<uint> lazy;
    505 
    506   auto exception = kj::runCatchingExceptions([&]() {
    507     lazy.get([&](SpaceFor<uint>& space) -> Own<uint> {
    508           KJ_FAIL_ASSERT("foo") { break; }
    509           return space.construct(123);
    510         });
    511   });
    512   EXPECT_TRUE(exception != nullptr);
    513 
    514   uint i = lazy.get([&](SpaceFor<uint>& space) -> Own<uint> {
    515         return space.construct(456);
    516       });
    517 
    518   // Unfortunately, the results differ depending on whether exceptions are enabled.
    519   // TODO(someday):  Fix this?  Does it matter?
    520 #if KJ_NO_EXCEPTIONS
    521   EXPECT_EQ(123, i);
    522 #else
    523   EXPECT_EQ(456, i);
    524 #endif
    525 }
    526 
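        // Test helper for the ExternalMutexGuarded tests below. Every lifecycle event
        // (construction from a MutexGuarded, move, destruction, frob()) calls
        // getAlreadyLockedExclusive(), so in debug builds any touch made without holding the
        // lock fails; the guarded counter is incremented on construction, destruction, and frob().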
    527 class OnlyTouchUnderLock {
    528 public:
    529   OnlyTouchUnderLock(): ptr(nullptr) {}
    530   OnlyTouchUnderLock(MutexGuarded<uint>& ref): ptr(&ref) {
    531     ptr->getAlreadyLockedExclusive()++;
    532   }
    533   OnlyTouchUnderLock(OnlyTouchUnderLock&& other): ptr(other.ptr) {
    534     other.ptr = nullptr;
    535     if (ptr) {
    536       // Just verify it's locked. Don't increment because different compilers may or may not
    537       // elide moves.
    538       ptr->getAlreadyLockedExclusive();
    539     }
    540   }
    541   OnlyTouchUnderLock& operator=(OnlyTouchUnderLock&& other) {
    542     if (ptr) {
    543       ptr->getAlreadyLockedExclusive()++;
    544     }
    545     ptr = other.ptr;
    546     other.ptr = nullptr;
    547     if (ptr) {
    548       // Just verify it's locked. Don't increment because different compilers may or may not
    549       // elide moves.
    550       ptr->getAlreadyLockedExclusive();
    551     }
    552     return *this;
    553   }
    554   ~OnlyTouchUnderLock() noexcept(false) {
    555     if (ptr != nullptr) {
    556       ptr->getAlreadyLockedExclusive()++;
    557     }
    558   }
    559 
    560   void frob() {
    561     ptr->getAlreadyLockedExclusive()++;
    562   }
    563 
    564 private:
    565   MutexGuarded<uint>* ptr;
    566 };
    567 
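        // Exercise set()/get()/release(): the wrapped value is created, used, and released back
        // out strictly while the mutex is held; after release() the ExternalMutexGuarded no
        // longer owns a value to destroy.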
    568 KJ_TEST("ExternalMutexGuarded<T> destroy after release") {
    569   MutexGuarded<uint> guarded(0);
    570 
    571   {
    572     ExternalMutexGuarded<OnlyTouchUnderLock> ext;
    573 
    574     {
    575       auto lock = guarded.lockExclusive();
    576       ext.set(lock, guarded);
    577       KJ_EXPECT(*lock == 1, *lock);
    578       ext.get(lock).frob();
    579       KJ_EXPECT(*lock == 2, *lock);
    580     }
    581 
    582     {
    583       auto lock = guarded.lockExclusive();
    584       auto released = ext.release(lock);
    585       KJ_EXPECT(*lock == 2, *lock);
    586       released.frob();
    587       KJ_EXPECT(*lock == 3, *lock);
    588     }
    589   }
    590 
    591   {
    592     auto lock = guarded.lockExclusive();
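            // Expected count: construction (1), frob() (2), frob() on the released copy (3),
            // and the released copy's destructor at the end of the previous block (4). Moves
            // deliberately don't increment.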
    593     KJ_EXPECT(*lock == 4, *lock);
    594   }
    595 }
    596 
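        // Here the ExternalMutexGuarded is destroyed while it still owns a value. Since
        // OnlyTouchUnderLock's destructor calls getAlreadyLockedExclusive(), this only passes if
        // ExternalMutexGuarded's destructor itself re-acquires the lock before destroying the
        // value, bumping the counter to 3.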
    597 KJ_TEST("ExternalMutexGuarded<T> destroy without release") {
    598   MutexGuarded<uint> guarded(0);
    599 
    600   {
    601     ExternalMutexGuarded<OnlyTouchUnderLock> ext;
    602 
    603     {
    604       auto lock = guarded.lockExclusive();
    605       ext.set(lock, guarded);
    606       KJ_EXPECT(*lock == 1);
    607       ext.get(lock).frob();
    608       KJ_EXPECT(*lock == 2);
    609     }
    610   }
    611 
    612   {
    613     auto lock = guarded.lockExclusive();
    614     KJ_EXPECT(*lock == 3);
    615   }
    616 }
    617 
    618 KJ_TEST("condvar wait with flapping predicate") {
    619   // This used to deadlock under some implementations due to a wait() checking its own predicate
    620   // as part of unlock()ing the mutex. Adding `waiterToSkip` fixed this (and also eliminated a
    621   // redundant call to the predicate).
    622 
    623   MutexGuarded<uint> guarded(0);
    624 
    625   Thread thread([&]() {
    626     delay();
    627     *guarded.lockExclusive() = 1;
    628   });
    629 
    630   {
    631     auto lock = guarded.lockExclusive();
    632     bool flap = true;
    633     lock.wait([&](uint i) {
    634       flap = !flap;
    635       return i == 1 || flap;
    636     });
    637   }
    638 }
    639 
    640 #if KJ_TRACK_LOCK_BLOCKING
    641 #if !__GLIBC_PREREQ(2, 30)
    642 #ifndef SYS_gettid
    643 #error SYS_gettid is unavailable on this system
    644 #endif
    645 
    646 #define gettid() ((pid_t)syscall(SYS_gettid))
    647 #endif
    648 
    649 KJ_TEST("tracking blocking on mutex acquisition") {
    650   // SIGEV_THREAD is supposed to be "private" to the pthreads implementation, but, as
    651   // usual, the higher-level POSIX API that we're supposed to use sucks: the "handler" runs on
    652   // some other thread, which means the stack trace it prints won't be useful.
    653   //
    654   // So, we cheat and work around libc.
    655   MutexGuarded<int> foo(5);
    656   auto lock = foo.lockExclusive();
    657 
    658   struct BlockDetected {
    659     volatile bool blockedOnMutexAcquisition;
    660     SourceLocation blockLocation;
    661   } blockingInfo = {};
    662 
    663   struct sigaction handler;
    664   memset(&handler, 0, sizeof(handler));
    665   handler.sa_sigaction = [](int, siginfo_t* info, void*) {
    666     auto& blockage = *reinterpret_cast<BlockDetected *>(info->si_value.sival_ptr);
    667     KJ_IF_MAYBE(r, blockedReason()) {
    668       KJ_SWITCH_ONEOF(*r) {
    669         KJ_CASE_ONEOF(b, BlockedOnMutexAcquisition) {
    670           blockage.blockedOnMutexAcquisition = true;
    671           blockage.blockLocation = b.origin;
    672         }
    673         KJ_CASE_ONEOF_DEFAULT {}
    674       }
    675     }
    676   };
    677   handler.sa_flags = SA_SIGINFO | SA_RESTART;
    678 
    679   sigaction(SIGINT, &handler, nullptr);
    680 
    681   timer_t timer;
    682   struct sigevent event;
    683   memset(&event, 0, sizeof(event));
    684   event.sigev_notify = SIGEV_THREAD_ID;
    685   event.sigev_signo = SIGINT;
    686   event.sigev_value.sival_ptr = &blockingInfo;
    687   KJ_SYSCALL(event._sigev_un._tid = gettid());
    688   KJ_SYSCALL(timer_create(CLOCK_MONOTONIC, &event, &timer));
    689   KJ_DEFER(timer_delete(timer));
    690 
    691   kj::Duration timeout = 50 * MILLISECONDS;
    692   struct itimerspec spec;
    693   memset(&spec, 0, sizeof(spec));
    694   spec.it_value.tv_sec = timeout / kj::SECONDS;
    695   spec.it_value.tv_nsec = timeout % kj::SECONDS / kj::NANOSECONDS;
    696   // We can't use KJ_SYSCALL() because it is not async-signal-safe.
    697   KJ_REQUIRE(-1 != timer_settime(timer, 0, &spec, nullptr));
    698 
    699   kj::SourceLocation expectedBlockLocation;
    700   KJ_REQUIRE(foo.lockSharedWithTimeout(100 * MILLISECONDS, expectedBlockLocation) == nullptr);
    701 
    702   KJ_EXPECT(blockingInfo.blockedOnMutexAcquisition);
    703   KJ_EXPECT(blockingInfo.blockLocation == expectedBlockLocation);
    704 }
    705 
    706 KJ_TEST("tracking blocked on CondVar::wait") {
    707   // SIGEV_THREAD is supposed to be "private" to the pthreads implementation, but, as
    708   // usual, the higher-level POSIX API that we're supposed to use sucks: the "handler" runs on
    709   // some other thread, which means the stack trace it prints won't be useful.
    710   //
    711   // So, we cheat and work around libc.
    712   MutexGuarded<int> foo(5);
    713   auto lock = foo.lockExclusive();
    714 
    715   struct BlockDetected {
    716     volatile bool blockedOnCondVar;
    717     SourceLocation blockLocation;
    718   } blockingInfo = {};
    719 
    720   struct sigaction handler;
    721   memset(&handler, 0, sizeof(handler));
    722   handler.sa_sigaction = [](int, siginfo_t* info, void*) {
    723     auto& blockage = *reinterpret_cast<BlockDetected *>(info->si_value.sival_ptr);
    724     KJ_IF_MAYBE(r, blockedReason()) {
    725       KJ_SWITCH_ONEOF(*r) {
    726         KJ_CASE_ONEOF(b, BlockedOnCondVarWait) {
    727           blockage.blockedOnCondVar = true;
    728           blockage.blockLocation = b.origin;
    729         }
    730         KJ_CASE_ONEOF_DEFAULT {}
    731       }
    732     }
    733   };
    734   handler.sa_flags = SA_SIGINFO | SA_RESTART;
    735 
    736   sigaction(SIGINT, &handler, nullptr);
    737 
    738   timer_t timer;
    739   struct sigevent event;
    740   memset(&event, 0, sizeof(event));
    741   event.sigev_notify = SIGEV_THREAD_ID;
    742   event.sigev_signo = SIGINT;
    743   event.sigev_value.sival_ptr = &blockingInfo;
    744   KJ_SYSCALL(event._sigev_un._tid = gettid());
    745   KJ_SYSCALL(timer_create(CLOCK_MONOTONIC, &event, &timer));
    746   KJ_DEFER(timer_delete(timer));
    747 
    748   kj::Duration timeout = 50 * MILLISECONDS;
    749   struct itimerspec spec;
    750   memset(&spec, 0, sizeof(spec));
    751   spec.it_value.tv_sec = timeout / kj::SECONDS;
    752   spec.it_value.tv_nsec = timeout % kj::SECONDS / kj::NANOSECONDS;
    753   // We can't use KJ_SYSCALL() because it is not async-signal-safe.
    754   KJ_REQUIRE(-1 != timer_settime(timer, 0, &spec, nullptr));
    755 
    756   SourceLocation waitLocation;
    757 
    758   lock.wait([](const int& value) {
    759     return false;
    760   }, 100 * MILLISECONDS, waitLocation);
    761 
    762   KJ_EXPECT(blockingInfo.blockedOnCondVar);
    763   KJ_EXPECT(blockingInfo.blockLocation == waitLocation);
    764 }
    765 
    766 KJ_TEST("tracking blocked on Once::init") {
    767   // SIGEV_THREAD is supposed to be "private" to the pthreads implementation, but, as
    768   // usual, the higher-level POSIX API that we're supposed to use sucks: the "handler" runs on
    769   // some other thread, which means the stack trace it prints won't be useful.
    770   //
    771   // So, we cheat and work around libc.
    772   struct BlockDetected {
    773     volatile bool blockedOnOnceInit;
    774     SourceLocation blockLocation;
    775   } blockingInfo = {};
    776 
    777   struct sigaction handler;
    778   memset(&handler, 0, sizeof(handler));
    779   handler.sa_sigaction = [](int, siginfo_t* info, void*) {
    780     auto& blockage = *reinterpret_cast<BlockDetected *>(info->si_value.sival_ptr);
    781     KJ_IF_MAYBE(r, blockedReason()) {
    782       KJ_SWITCH_ONEOF(*r) {
    783         KJ_CASE_ONEOF(b, BlockedOnOnceInit) {
    784           blockage.blockedOnOnceInit = true;
    785           blockage.blockLocation = b.origin;
    786         }
    787         KJ_CASE_ONEOF_DEFAULT {}
    788       }
    789     }
    790   };
    791   handler.sa_flags = SA_SIGINFO | SA_RESTART;
    792 
    793   sigaction(SIGINT, &handler, nullptr);
    794 
    795   timer_t timer;
    796   struct sigevent event;
    797   memset(&event, 0, sizeof(event));
    798   event.sigev_notify = SIGEV_THREAD_ID;
    799   event.sigev_signo = SIGINT;
    800   event.sigev_value.sival_ptr = &blockingInfo;
    801   KJ_SYSCALL(event._sigev_un._tid = gettid());
    802   KJ_SYSCALL(timer_create(CLOCK_MONOTONIC, &event, &timer));
    803   KJ_DEFER(timer_delete(timer));
    804 
    805   Lazy<int> once;
    806   MutexGuarded<bool> onceInitializing(false);
    807 
    808   Thread backgroundInit([&] {
    809     once.get([&](SpaceFor<int>& x) {
    810       *onceInitializing.lockExclusive() = true;
    811       usleep(100 * 1000);  // 100 ms
    812       return x.construct(5);
    813     });
    814   });
    815 
    816   kj::Duration timeout = 50 * MILLISECONDS;
    817   struct itimerspec spec;
    818   memset(&spec, 0, sizeof(spec));
    819   spec.it_value.tv_sec = timeout / kj::SECONDS;
    820   spec.it_value.tv_nsec = timeout % kj::SECONDS / kj::NANOSECONDS;
    821   // We can't use KJ_SYSCALL() because it is not async-signal-safe.
    822   KJ_REQUIRE(-1 != timer_settime(timer, 0, &spec, nullptr));
    823 
    824   kj::SourceLocation onceInitializingBlocked;
    825 
    826   onceInitializing.lockExclusive().wait([](const bool& initializing) {
    827     return initializing;
    828   });
    829 
    830   once.get([](SpaceFor<int>& x) {
    831       return x.construct(5);
    832   }, onceInitializingBlocked);
    833 
    834   KJ_EXPECT(blockingInfo.blockedOnOnceInit);
    835   KJ_EXPECT(blockingInfo.blockLocation == onceInitializingBlocked);
    836 }
    837 
    838 #if KJ_SAVE_ACQUIRED_LOCK_INFO
    839 KJ_TEST("get location of exclusive mutex") {
    840   _::Mutex mutex;
    841   kj::SourceLocation lockAcquisition;
    842   mutex.lock(_::Mutex::EXCLUSIVE, nullptr, lockAcquisition);
    843   KJ_DEFER(mutex.unlock(_::Mutex::EXCLUSIVE));
    844 
    845   const auto& lockedInfo = mutex.lockedInfo();
    846   const auto& lockInfo = lockedInfo.get<_::HoldingExclusively>();
    847   EXPECT_EQ(gettid(), lockInfo.threadHoldingLock());
    848   KJ_EXPECT(lockInfo.lockAcquiredAt() == lockAcquisition);
    849 }
    850 
    851 KJ_TEST("get location of shared mutex") {
    852   _::Mutex mutex;
    853   kj::SourceLocation lockLocation;
    854   mutex.lock(_::Mutex::SHARED, nullptr, lockLocation);
    855   KJ_DEFER(mutex.unlock(_::Mutex::SHARED));
    856 
    857   const auto& lockedInfo = mutex.lockedInfo();
    858   const auto& lockInfo = lockedInfo.get<_::HoldingShared>();
    859   KJ_EXPECT(lockInfo.lockAcquiredAt() == lockLocation);
    860 }
    861 #endif
    862 
    863 #endif
    864 
    865 #ifdef KJ_CONTENTION_WARNING_THRESHOLD
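        // Hold an exclusive lock while KJ_CONTENTION_WARNING_THRESHOLD reader threads pile up
        // behind it, then unlock. The unlocking thread should warn about excessively many
        // waiting readers, and at least one reader should log that it acquired a contended lock.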
    866 KJ_TEST("make sure contended mutex warns") {
    867   class Expectation final: public ExceptionCallback {
    868   public:
    869     Expectation(LogSeverity severity, StringPtr substring) :
    870         severity(severity), substring(substring), seen(false) {}
    871 
    872     void logMessage(LogSeverity severity, const char* file, int line, int contextDepth,
    873                     String&& text) override {
    874       if (!seen && severity == this->severity) {
    875         if (_::hasSubstring(text, substring)) {
    876           // Match. Ignore it.
    877           seen = true;
    878           return;
    879         }
    880       }
    881 
    882       // Pass up the chain.
    883       ExceptionCallback::logMessage(severity, file, line, contextDepth, kj::mv(text));
    884     }
    885 
    886     bool hasSeen() const {
    887       return seen;
    888     }
    889 
    890   private:
    891     LogSeverity severity;
    892     StringPtr substring;
    893     bool seen;
    894     UnwindDetector unwindDetector;
    895   };
    896 
    897   _::Mutex mutex;
    898   LockSourceLocation exclusiveLockLocation;
    899   mutex.lock(_::Mutex::EXCLUSIVE, nullptr, exclusiveLockLocation);
    900 
    901   bool seenContendedLockLog = false;
    902 
    903   auto threads = kj::heapArrayBuilder<kj::Own<kj::Thread>>(KJ_CONTENTION_WARNING_THRESHOLD);
    904   for (auto i: kj::zeroTo(KJ_CONTENTION_WARNING_THRESHOLD)) {
    905     (void)i;
    906     threads.add(kj::heap<kj::Thread>([&mutex, &seenContendedLockLog]() {
    907       Expectation expectation(LogSeverity::WARNING, "Acquired contended lock");
    908       LockSourceLocation sharedLockLocation;
    909       mutex.lock(_::Mutex::SHARED, nullptr, sharedLockLocation);
    910       seenContendedLockLog = seenContendedLockLog || expectation.hasSeen();
    911       mutex.unlock(_::Mutex::SHARED);
    912     }));
    913   }
    914 
    915   while (mutex.numReadersWaitingForTest() < KJ_CONTENTION_WARNING_THRESHOLD) {
    916     usleep(5 * kj::MILLISECONDS / kj::MICROSECONDS);
    917   }
    918 
    919   {
    920     KJ_EXPECT_LOG(WARNING, "excessively many readers were waiting on this lock");
    921     mutex.unlock(_::Mutex::EXCLUSIVE);
    922   }
    923 
    924   threads.clear();
    925 
    926   KJ_ASSERT(seenContendedLockLog);
    927 }
    928 #endif
    929 }  // namespace
    930 }  // namespace kj