capnproto

FORK: Cap'n Proto serialization/RPC system - core tools and C++ library
git clone https://git.neptards.moe/neptards/capnproto.git
Log | Files | Refs | README | LICENSE

arena.c++ (14452B)


      1 // Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
      2 // Licensed under the MIT License:
      3 //
      4 // Permission is hereby granted, free of charge, to any person obtaining a copy
      5 // of this software and associated documentation files (the "Software"), to deal
      6 // in the Software without restriction, including without limitation the rights
      7 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
      8 // copies of the Software, and to permit persons to whom the Software is
      9 // furnished to do so, subject to the following conditions:
     10 //
     11 // The above copyright notice and this permission notice shall be included in
     12 // all copies or substantial portions of the Software.
     13 //
     14 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     15 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     16 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
     17 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     18 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     19 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
     20 // THE SOFTWARE.
     21 
     22 #define CAPNP_PRIVATE
     23 #include "arena.h"
     24 #include "message.h"
     25 #include <kj/debug.h>
     26 #include <kj/refcount.h>
     27 #include <vector>
     28 #include <string.h>
     29 #include <stdio.h>
     30 #include <stdlib.h>
     31 
     32 #if !CAPNP_LITE
     33 #include "capability.h"
     34 #endif  // !CAPNP_LITE
     35 
     36 namespace capnp {
     37 namespace _ {  // private
     38 
// Out-of-line destructor anchors the vtable in this translation unit; noexcept(false) because
// KJ-style destructors are permitted to throw.
Arena::~Arena() noexcept(false) {}
     40 
     41 void ReadLimiter::unread(WordCount64 amount) {
     42   // Be careful not to overflow here.  Since ReadLimiter has no thread-safety, it's possible that
     43   // the limit value was not updated correctly for one or more reads, and therefore unread() could
     44   // overflow it even if it is only unreading bytes that were actually read.
     45   uint64_t oldValue = readLimit();
     46   uint64_t newValue = oldValue + unbound(amount / WORDS);
     47   if (newValue > oldValue) {
     48     setLimit(newValue);
     49   }
     50 }
     51 
void SegmentReader::abortCheckObjectFault() {
  // Called when checkObject() detects a pointer outside the segment's bounds -- an internal
  // invariant violation that would be a segfault in optimized builds.  Log fatally and abort()
  // rather than continue with out-of-bounds memory.
  KJ_LOG(FATAL, "checkObject()'s parameter is not in-range; this would segfault in opt mode",
                "this is a serious bug in Cap'n Proto; please notify security@sandstorm.io");
  abort();
}
     57 
void SegmentBuilder::throwNotWritable() {
  // Error path for attempts to obtain a Builder into a read-only (externally referenced) segment.
  KJ_FAIL_REQUIRE(
      "Tried to form a Builder to an external data segment referenced by the MessageBuilder.  "
      "When you use Orphanage::reference*(), you are not allowed to obtain Builders to the "
      "referenced data, only Readers, because that data is const.");
}
     64 
     65 // =======================================================================================
     66 
     67 static SegmentWordCount verifySegmentSize(size_t size) {
     68   auto gsize = bounded(size) * WORDS;
     69   return assertMaxBits<SEGMENT_WORD_COUNT_BITS>(gsize, [&]() {
     70     KJ_FAIL_REQUIRE("segment is too large", size);
     71   });
     72 }
     73 
     74 static SegmentWordCount verifySegment(kj::ArrayPtr<const word> segment) {
     75 #if !CAPNP_ALLOW_UNALIGNED
     76   KJ_REQUIRE(reinterpret_cast<uintptr_t>(segment.begin()) % sizeof(void*) == 0,
     77       "Detected unaligned data in Cap'n Proto message. Messages must be aligned to the "
     78       "architecture's word size. Yes, even on x86: Unaligned access is undefined behavior "
     79       "under the C/C++ language standard, and compilers can and do assume alignment for the "
     80       "purpose of optimizations. Unaligned access may lead to crashes or subtle corruption. "
     81       "For example, GCC will use SIMD instructions in optimizations, and those instrsuctions "
     82       "require alignment. If you really insist on taking your changes with unaligned data, "
     83       "compile the Cap'n Proto library with -DCAPNP_ALLOW_UNALIGNED to remove this check.") {
     84     break;
     85   }
     86 #endif
     87   return verifySegmentSize(segment.size());
     88 }
     89 
// Primary constructor: seeds the read limiter from the message's traversal limit and constructs
// segment 0 in place from the given pointer/size.
inline ReaderArena::ReaderArena(MessageReader* message, const word* firstSegment,
                                SegmentWordCount firstSegmentSize)
    : message(message),
      readLimiter(bounded(message->getOptions().traversalLimitInWords) * WORDS),
      segment0(this, SegmentId(0), firstSegment, firstSegmentSize, &readLimiter) {}
     95 
// Delegates to the primary constructor after alignment/size validation of the first segment.
inline ReaderArena::ReaderArena(MessageReader* message, kj::ArrayPtr<const word> firstSegment)
    : ReaderArena(message, firstSegment.begin(), verifySegment(firstSegment)) {}
     98 
// Public constructor: pulls segment 0 from the MessageReader and delegates.
ReaderArena::ReaderArena(MessageReader* message)
    : ReaderArena(message, message->getSegment(0)) {}
    101 
// Out-of-line destructor; noexcept(false) matches the KJ convention used throughout this file.
ReaderArena::~ReaderArena() noexcept(false) {}
    103 
    104 size_t ReaderArena::sizeInWords() {
    105   size_t total = segment0.getArray().size();
    106 
    107   for (uint i = 0; ; i++) {
    108     SegmentReader* segment = tryGetSegment(SegmentId(i));
    109     if (segment == nullptr) return total;
    110     total += unboundAs<size_t>(segment->getSize() / WORDS);
    111   }
    112 }
    113 
SegmentReader* ReaderArena::tryGetSegment(SegmentId id) {
  // Look up a segment by ID, lazily constructing and caching a SegmentReader on first access.
  // Returns nullptr if the message has no such segment.

  if (id == SegmentId(0)) {
    // Segment 0 lives inline in the arena rather than in the map.
    if (segment0.getArray() == nullptr) {
      return nullptr;
    } else {
      return &segment0;
    }
  }

  auto lock = moreSegments.lockExclusive();

  SegmentMap* segments = nullptr;
  KJ_IF_MAYBE(s, *lock) {
    KJ_IF_MAYBE(segment, s->find(id.value)) {
      // Already constructed on a previous call; return the cached reader.
      return *segment;
    }
    segments = s;
  }

  // Not cached yet -- ask the MessageReader for the raw segment data.
  kj::ArrayPtr<const word> newSegment = message->getSegment(id.value);
  if (newSegment == nullptr) {
    return nullptr;
  }

  // Validate alignment and size before exposing the segment.
  SegmentWordCount newSegmentSize = verifySegment(newSegment);

  if (*lock == nullptr) {
    // OK, the segment exists, so allocate the map.
    segments = &lock->emplace();
  }

  // Construct the reader, cache it under this ID, and return the raw pointer (ownership stays
  // with the map, which lives as long as the arena).
  auto segment = kj::heap<SegmentReader>(
      this, id, newSegment.begin(), newSegmentSize, &readLimiter);
  SegmentReader* result = segment;
  segments->insert(id.value, kj::mv(segment));
  return result;
}
    151 
void ReaderArena::reportReadLimitReached() {
  // Raised when the traversal limit is exhausted.  The `{ return; }` recovery block means that
  // when exceptions are disabled, execution continues after reporting (reads then see defaults).
  KJ_FAIL_REQUIRE("Exceeded message traversal limit.  See capnp::ReaderOptions.") {
    return;
  }
}
    157 
    158 // =======================================================================================
    159 
// Default construction: segment0 starts out empty (null arena pointer); real storage is
// allocated lazily on the first call to allocate().
BuilderArena::BuilderArena(MessageBuilder* message)
    : message(message), segment0(nullptr, SegmentId(0), nullptr, nullptr) {}
    162 
// Construct an arena over caller-provided segment memory.  segments[0] becomes the inline
// segment0; any additional segments get heap-allocated SegmentBuilders in MultiSegmentState.
BuilderArena::BuilderArena(MessageBuilder* message,
                           kj::ArrayPtr<MessageBuilder::SegmentInit> segments)
    : message(message),
      segment0(this, SegmentId(0), segments[0].space.begin(),
               verifySegment(segments[0].space),
               &this->dummyLimiter, verifySegmentSize(segments[0].wordsUsed)) {
  if (segments.size() > 1) {
    kj::Vector<kj::Own<SegmentBuilder>> builders(segments.size() - 1);

    // Segment IDs for the extra segments start at 1 (0 is the inline segment0).
    uint i = 1;
    for (auto& segment: segments.slice(1, segments.size())) {
      builders.add(kj::heap<SegmentBuilder>(
          this, SegmentId(i++), segment.space.begin(), verifySegment(segment.space),
          &this->dummyLimiter, verifySegmentSize(segment.wordsUsed)));
    }

    // Pre-size forOutput (one slot per segment, including segment0) so that
    // getSegmentsForOutput() never has to reallocate.
    kj::Vector<kj::ArrayPtr<const word>> forOutput;
    forOutput.resize(segments.size());

    // Try the last provided segment first when allocating new objects.
    segmentWithSpace = builders.back();

    this->moreSegments = kj::heap<MultiSegmentState>(
        MultiSegmentState { kj::mv(builders), kj::mv(forOutput) });

  } else {
    segmentWithSpace = &segment0;
  }
}
    191 
// Out-of-line destructor; noexcept(false) matches the KJ convention used throughout this file.
BuilderArena::~BuilderArena() noexcept(false) {}
    193 
    194 size_t BuilderArena::sizeInWords() {
    195   KJ_IF_MAYBE(segmentState, moreSegments) {
    196     size_t total = segment0.currentlyAllocated().size();
    197     for (auto& builder: segmentState->get()->builders) {
    198       total += builder->currentlyAllocated().size();
    199     }
    200     return total;
    201   } else {
    202     if (segment0.getArena() == nullptr) {
    203       // We haven't actually allocated any segments yet.
    204       return 0;
    205     } else {
    206       // We have only one segment so far.
    207       return segment0.currentlyAllocated().size();
    208     }
    209   }
    210 }
    211 
SegmentBuilder* BuilderArena::getSegment(SegmentId id) {
  // This method is allowed to fail if the segment ID is not valid.
  if (id == SegmentId(0)) {
    return &segment0;
  } else {
    KJ_IF_MAYBE(s, moreSegments) {
      // IDs > 0 index the builders vector offset by one, since segment0 is stored inline.
      KJ_REQUIRE(id.value - 1 < s->get()->builders.size(), "invalid segment id", id.value);
      return const_cast<SegmentBuilder*>(s->get()->builders[id.value - 1].get());
    } else {
      KJ_FAIL_REQUIRE("invalid segment id", id.value);
    }
  }
}
    225 
// Allocate `amount` words somewhere in the message, adding a new segment if necessary.
// Returns the segment allocated from plus a pointer to the first allocated word.
BuilderArena::AllocateResult BuilderArena::allocate(SegmentWordCount amount) {
  if (segment0.getArena() == nullptr) {
    // We're allocating the first segment.
    kj::ArrayPtr<word> ptr = message->allocateSegment(unbound(amount / WORDS));
    auto actualSize = verifySegment(ptr);

    // Re-allocate segment0 in-place.  This is a bit of a hack, but we have not returned any
    // pointers to this segment yet, so it should be fine.
    kj::dtor(segment0);
    kj::ctor(segment0, this, SegmentId(0), ptr.begin(), actualSize, &this->dummyLimiter);

    segmentWithSpace = &segment0;
    return AllocateResult { &segment0, segment0.allocate(amount) };
  } else {
    if (segmentWithSpace != nullptr) {
      // Check if there is space in an existing segment.
      // TODO(perf):  Check for available space in more than just the last segment.  We don't
      //   want this to be O(n), though, so we'll need to maintain some sort of table.  Complicating
      //   matters, we want SegmentBuilders::allocate() to be fast, so we can't update any such
      //   table when allocation actually happens.  Instead, we could have a priority queue based
      //   on the last-known available size, and then re-check the size when we pop segments off it
      //   and shove them to the back of the queue if they have become too small.
      word* attempt = segmentWithSpace->allocate(amount);
      if (attempt != nullptr) {
        return AllocateResult { segmentWithSpace, attempt };
      }
    }

    // Need to allocate a new segment.
    SegmentBuilder* result = addSegmentInternal(message->allocateSegment(unbound(amount / WORDS)));

    // Check this new segment first the next time we need to allocate.
    segmentWithSpace = result;

    // Allocating from the new segment is guaranteed to succeed since we made it big enough.
    return AllocateResult { result, result->allocate(amount) };
  }
}
    264 
// Attach a caller-owned, read-only data block as a new segment (used by Orphanage::reference*()).
// Thin wrapper selecting the const instantiation of addSegmentInternal().
SegmentBuilder* BuilderArena::addExternalSegment(kj::ArrayPtr<const word> content) {
  return addSegmentInternal(content);
}
    268 
// Register `content` as a new segment and return its builder.  Instantiated for both const
// (external, read-only) and non-const (freshly allocated) word arrays.
template <typename T>
SegmentBuilder* BuilderArena::addSegmentInternal(kj::ArrayPtr<T> content) {
  // This check should never fail in practice, since you can't get an Orphanage without allocating
  // the root segment.
  KJ_REQUIRE(segment0.getArena() != nullptr,
      "Can't allocate external segments before allocating the root segment.");

  auto contentSize = verifySegmentSize(content.size());

  // Fetch the multi-segment state, creating it on first use.
  MultiSegmentState* segmentState;
  KJ_IF_MAYBE(s, moreSegments) {
    segmentState = *s;
  } else {
    auto newSegmentState = kj::heap<MultiSegmentState>();
    segmentState = newSegmentState;
    moreSegments = kj::mv(newSegmentState);
  }

  // The new segment's ID is builders.size() + 1 because segment0 lives outside the vector.
  kj::Own<SegmentBuilder> newBuilder = kj::heap<SegmentBuilder>(
      this, SegmentId(segmentState->builders.size() + 1),
      content.begin(), contentSize, &this->dummyLimiter);
  SegmentBuilder* result = newBuilder.get();
  segmentState->builders.add(kj::mv(newBuilder));

  // Keep forOutput the right size so that we don't have to re-allocate during
  // getSegmentsForOutput(), which callers might reasonably expect is a thread-safe method.
  segmentState->forOutput.resize(segmentState->builders.size() + 1);

  return result;
}
    299 
// Return the message's segments as a flat array of word arrays, suitable for serialization.
kj::ArrayPtr<const kj::ArrayPtr<const word>> BuilderArena::getSegmentsForOutput() {
  // Although this is a read-only method, we shouldn't need to lock a mutex here because if this
  // is called multiple times simultaneously, we should only be overwriting the array with the
  // exact same data.  If the number or size of segments is actually changing due to an activity
  // in another thread, then the caller has a problem regardless of locking here.

  KJ_IF_MAYBE(segmentState, moreSegments) {
    KJ_DASSERT(segmentState->get()->forOutput.size() == segmentState->get()->builders.size() + 1,
        "segmentState->forOutput wasn't resized correctly when the last builder was added.",
        segmentState->get()->forOutput.size(), segmentState->get()->builders.size());

    // Fill the pre-sized forOutput array in ID order: segment0 first, then each extra builder.
    kj::ArrayPtr<kj::ArrayPtr<const word>> result(
        &segmentState->get()->forOutput[0], segmentState->get()->forOutput.size());
    uint i = 0;
    result[i++] = segment0.currentlyAllocated();
    for (auto& builder: segmentState->get()->builders) {
      result[i++] = builder->currentlyAllocated();
    }
    return result;
  } else {
    if (segment0.getArena() == nullptr) {
      // We haven't actually allocated any segments yet.
      return nullptr;
    } else {
      // We have only one segment so far.  Stash its bounds in a member so the returned
      // ArrayPtr points at storage that outlives this call.
      segment0ForOutput = segment0.currentlyAllocated();
      return kj::arrayPtr(&segment0ForOutput, 1);
    }
  }
}
    330 
// Reader-interface segment lookup; returns nullptr for unknown IDs instead of failing.
SegmentReader* BuilderArena::tryGetSegment(SegmentId id) {
  if (id == SegmentId(0)) {
    if (segment0.getArena() == nullptr) {
      // We haven't allocated any segments yet.
      return nullptr;
    } else {
      return &segment0;
    }
  } else {
    KJ_IF_MAYBE(segmentState, moreSegments) {
      // IDs > 0 index the builders vector offset by one (segment0 is stored inline).
      if (id.value <= segmentState->get()->builders.size()) {
        // TODO(cleanup):  Return a const SegmentReader and tediously constify all SegmentBuilder
        //   pointers throughout the codebase.
        return const_cast<SegmentReader*>(kj::implicitCast<const SegmentReader*>(
            segmentState->get()->builders[id.value - 1].get()));
      }
    }
    return nullptr;
  }
}
    351 
void BuilderArena::reportReadLimitReached() {
  // Builders use dummyLimiter, which should never trip; hitting this indicates a bug.  The
  // `{ return; }` recovery block lets execution continue when exceptions are disabled.
  KJ_FAIL_ASSERT("Read limit reached for BuilderArena, but it should have been unlimited.") {
    return;
  }
}
    357 
    358 kj::Maybe<kj::Own<ClientHook>> BuilderArena::LocalCapTable::extractCap(uint index) {
    359 #if CAPNP_LITE
    360   KJ_UNIMPLEMENTED("no cap tables in lite mode");
    361 #else
    362   if (index < capTable.size()) {
    363     return capTable[index].map([](kj::Own<ClientHook>& cap) { return cap->addRef(); });
    364   } else {
    365     return nullptr;
    366   }
    367 #endif
    368 }
    369 
    370 uint BuilderArena::LocalCapTable::injectCap(kj::Own<ClientHook>&& cap) {
    371 #if CAPNP_LITE
    372   KJ_UNIMPLEMENTED("no cap tables in lite mode");
    373 #else
    374   uint result = capTable.size();
    375   capTable.add(kj::mv(cap));
    376   return result;
    377 #endif
    378 }
    379 
void BuilderArena::LocalCapTable::dropCap(uint index) {
  // Release the capability at `index` by nulling its slot; slots are never removed, so the
  // indices of other capabilities remain stable.
#if CAPNP_LITE
  KJ_UNIMPLEMENTED("no cap tables in lite mode");
#else
  KJ_ASSERT(index < capTable.size(), "Invalid capability descriptor in message.") {
    // Recovery block: when exceptions are disabled, ignore the bogus index and do nothing.
    return;
  }
  capTable[index] = nullptr;
#endif
}
    390 
    391 }  // namespace _ (private)
    392 }  // namespace capnp