capnproto

FORK: Cap'n Proto serialization/RPC system - core tools and C++ library
git clone https://git.neptards.moe/neptards/capnproto.git

rpc.h (29585B)


// Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#pragma once

#include "capability.h"
#include "rpc-prelude.h"

CAPNP_BEGIN_HEADER

namespace kj { class AutoCloseFd; }

namespace capnp {

template <typename VatId, typename ProvisionId, typename RecipientId,
          typename ThirdPartyCapId, typename JoinResult>
class VatNetwork;
template <typename SturdyRefObjectId>
class SturdyRefRestorer;

template <typename VatId>
class BootstrapFactory: public _::BootstrapFactoryBase {
  // Interface that constructs per-client bootstrap interfaces. Use this if you want each client
  // who connects to see a different bootstrap interface based on their (authenticated) VatId.
  // This allows an application to bootstrap off of the authentication performed at the VatNetwork
  // level. (Typically VatId is some sort of public key.)
  //
  // This is only useful for multi-party networks. For TwoPartyVatNetwork, there's no reason to
  // use a BootstrapFactory; just specify a single bootstrap capability in this case.

public:
  virtual Capability::Client createFor(typename VatId::Reader clientId) = 0;
  // Create a bootstrap capability appropriate for exposing to the given client. VatNetwork will
  // have authenticated the client VatId before this is called.

private:
  Capability::Client baseCreateFor(AnyStruct::Reader clientId) override;
};
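
// For illustration, a minimal BootstrapFactory implementation might look like this sketch
// (`KeyVatId`, `checkIsAdmin()`, and the Impl classes are hypothetical):
//
//    class MyBootstrapFactory final: public BootstrapFactory<KeyVatId> {
//    public:
//      Capability::Client createFor(KeyVatId::Reader clientId) override {
//        // The VatNetwork has already authenticated clientId, so it can be used
//        // directly for authorization decisions.
//        if (checkIsAdmin(clientId)) {
//          return kj::heap<AdminInterfaceImpl>();
//        } else {
//          return kj::heap<UserInterfaceImpl>();
//        }
//      }
//    };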

template <typename VatId>
class RpcSystem: public _::RpcSystemBase {
  // Represents the RPC system, which is the portal to objects available on the network.
  //
  // The RPC implementation sits on top of an implementation of `VatNetwork`.  The `VatNetwork`
  // determines how to form connections between vats -- specifically, two-way, private, reliable,
  // sequenced datagram connections.  The RPC implementation determines how to use such connections
  // to manage object references and make method calls.
  //
  // See `makeRpcServer()` and `makeRpcClient()` below for convenient syntax for setting up an
  // `RpcSystem` given a `VatNetwork`.
  //
  // See `ez-rpc.h` for an even simpler interface for setting up RPC in a typical two-party
  // client/server scenario.

public:
  template <typename ProvisionId, typename RecipientId,
            typename ThirdPartyCapId, typename JoinResult>
  RpcSystem(
      VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network,
      kj::Maybe<Capability::Client> bootstrapInterface);

  template <typename ProvisionId, typename RecipientId,
            typename ThirdPartyCapId, typename JoinResult>
  RpcSystem(
      VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network,
      BootstrapFactory<VatId>& bootstrapFactory);

  template <typename ProvisionId, typename RecipientId,
            typename ThirdPartyCapId, typename JoinResult,
            typename LocalSturdyRefObjectId>
  RpcSystem(
      VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network,
      SturdyRefRestorer<LocalSturdyRefObjectId>& restorer);

  RpcSystem(RpcSystem&& other) = default;

  Capability::Client bootstrap(typename VatId::Reader vatId);
  // Connect to the given vat and return its bootstrap interface.

  Capability::Client restore(typename VatId::Reader hostId, AnyPointer::Reader objectId)
      CAPNP_DEPRECATED("Please transition to using a bootstrap interface instead.");
  // ** DEPRECATED **
  //
  // Restores the given SturdyRef from the network and returns the capability representing it.
  //
  // `hostId` identifies the host from which to request the ref, in the format specified by the
  // `VatNetwork` in use.  `objectId` is the object ID in whatever format is expected by said host.
  //
  // This method will be removed in a future version of Cap'n Proto. Instead, please transition
  // to using bootstrap(), which is equivalent to calling restore() with a null `objectId`.
  // You may emulate the old concept of object IDs by exporting a bootstrap interface which has
  // methods that can be used to obtain other capabilities by ID.

  void setFlowLimit(size_t words);
  // Sets the incoming call flow limit. If more than `words` worth of call messages have not yet
  // received responses, the RpcSystem will not read further messages from the stream. This can be
  // used as a crude way to prevent a resource exhaustion attack (or bug) in which a peer makes an
  // excessive number of simultaneous calls that consume the receiver's RAM.
  //
  // There are some caveats. When over the flow limit, all messages are blocked, including returns.
  // If the outstanding calls are themselves waiting on calls going in the opposite direction, the
  // flow limit may prevent those calls from completing, leading to deadlock. However, a
  // sufficiently high limit should make this unlikely.
  //
  // Note that a call's parameter size counts against the flow limit until the call returns, even
  // if the recipient calls releaseParams() to free the parameter memory early. This is because
  // releaseParams() may simply indicate that the parameters have been forwarded to another
  // machine, but are still in-memory there. For illustration, say that Alice made a call to Bob
  // who forwarded the call to Carol. Bob has imposed a flow limit on Alice. Alice's calls are
  // being forwarded to Carol, so Bob never keeps the parameters in-memory for more than a brief
  // period. However, the flow limit counts all calls that haven't returned, even if Bob has
  // already freed the memory they consumed. You might argue that the right solution here is
  // instead for Carol to impose her own flow limit on Bob. This has a serious problem, though:
  // Bob might be forwarding requests to Carol on behalf of many different parties, not just Alice.
  // If Alice can pump enough data to hit the Bob -> Carol flow limit, then those other parties
  // will be disrupted. Thus, we can only really impose the limit on the Alice -> Bob link, which
  // only affects Alice. We need that one flow limit to limit Alice's impact on the whole system,
  // so it has to count all in-flight calls.
  //
  // In Sandstorm, flow limits are imposed by the supervisor on calls coming out of a grain, in
  // order to prevent a grain from inundating the system with in-flight calls. In practice, the
  // main time this happens is when a grain is pushing a large file download and doesn't implement
  // proper cooperative flow control.

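  // As a sketch, to cap in-flight incoming call parameters at roughly 64 MiB (the limit is
  // given in words, i.e. 8-byte units; the particular value is illustrative, not a
  // recommendation):
  //
  //    rpcSystem.setFlowLimit(64 * 1024 * 1024 / sizeof(capnp::word));
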
  // void setTraceEncoder(kj::Function<kj::String(const kj::Exception&)> func);
  //
  // (Inherited from _::RpcSystemBase)
  //
  // Set a function to call to encode exception stack traces for transmission to remote parties.
  // By default, traces are not transmitted at all. If a callback is provided, then the returned
  // string will be sent with the exception. If the remote end is KJ/C++ based, then this trace
  // text ends up being accessible as kj::Exception::getRemoteTrace().
  //
  // Stack traces can sometimes contain sensitive information, so you should think carefully about
  // what information you are willing to reveal to the remote party.

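  // For example, a minimal sketch that transmits the exception's own string representation
  // (whether this reveals too much is for the application to judge):
  //
  //    rpcSystem.setTraceEncoder([](const kj::Exception& e) {
  //      return kj::str(e);
  //    });
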
  kj::Promise<void> run() { return RpcSystemBase::run(); }
  // Listens for incoming RPC connections and handles them. Never returns normally, but could throw
  // an exception if the system becomes unable to accept new connections (e.g. because the
  // underlying listen socket becomes broken somehow).
  //
  // For historical reasons, the RpcSystem will actually run itself even if you do not call this.
  // However, if an exception is thrown, the RpcSystem will log the exception to the console and
  // then cease accepting new connections. In this case, your server may be left in a broken
  // state that it cannot recover from without restarting. All servers should therefore call
  // run() and handle failures in some way.
};
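
// A sketch of driving run() from a server's main function (assuming `server` and `waitScope`
// as set up in the makeRpcServer() example below):
//
//    auto runTask = server.run().eagerlyEvaluate([](kj::Exception&& e) {
//      KJ_LOG(FATAL, "RpcSystem can no longer accept connections", e);
//    });
//    kj::NEVER_DONE.wait(waitScope);  // run forever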

template <typename VatId, typename ProvisionId, typename RecipientId,
          typename ThirdPartyCapId, typename JoinResult>
RpcSystem<VatId> makeRpcServer(
    VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network,
    Capability::Client bootstrapInterface);
// Make an RPC server.  Typical usage (e.g. in a main() function):
//
//    MyEventLoop eventLoop;
//    kj::WaitScope waitScope(eventLoop);
//    MyNetwork network;
//    MyMainInterface::Client bootstrap = makeMain();
//    auto server = makeRpcServer(network, bootstrap);
//    kj::NEVER_DONE.wait(waitScope);  // run forever
//
// See also ez-rpc.h, which has simpler instructions for the common case of a two-party
// client-server RPC connection.

template <typename VatId, typename ProvisionId, typename RecipientId,
          typename ThirdPartyCapId, typename JoinResult>
RpcSystem<VatId> makeRpcServer(
    VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network,
    BootstrapFactory<VatId>& bootstrapFactory);
// Make an RPC server that can serve different bootstrap interfaces to different clients via a
// BootstrapFactory.

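// For illustration, wiring in the hypothetical MyBootstrapFactory sketched earlier:
//
//    MyEventLoop eventLoop;
//    kj::WaitScope waitScope(eventLoop);
//    MyNetwork network;
//    MyBootstrapFactory bootstrapFactory;
//    auto server = makeRpcServer(network, bootstrapFactory);
//    kj::NEVER_DONE.wait(waitScope);  // run forever
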
template <typename VatId, typename LocalSturdyRefObjectId,
          typename ProvisionId, typename RecipientId, typename ThirdPartyCapId, typename JoinResult>
RpcSystem<VatId> makeRpcServer(
    VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network,
    SturdyRefRestorer<LocalSturdyRefObjectId>& restorer)
    CAPNP_DEPRECATED("Please transition to using a bootstrap interface instead.");
// ** DEPRECATED **
//
// Create an RPC server which exports multiple main interfaces by object ID. The `restorer` object
// can be used to look up objects by ID.
//
// Please transition to exporting only one interface, which is known as the "bootstrap" interface.
// For backwards-compatibility with old clients, continue to implement SturdyRefRestorer, but
// return the new bootstrap interface when the requested object ID is null. When new clients
// connect and request the bootstrap interface, they will get that interface. Eventually, once all
// clients are updated to request only the bootstrap interface, stop implementing
// SturdyRefRestorer and switch to passing the bootstrap capability itself as the second parameter
// to `makeRpcServer()`.

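// The migration described above might look like this sketch (MyBootstrapImpl and the legacy
// lookup function are hypothetical):
//
//    class CompatRestorer: public SturdyRefRestorer<AnyPointer> {
//    public:
//      Capability::Client restore(AnyPointer::Reader objectId) override {
//        if (objectId.isNull()) {
//          // A new client requesting the bootstrap interface.
//          return kj::heap<MyBootstrapImpl>();
//        }
//        return restoreLegacyObject(objectId);  // old clients using object IDs
//      }
//    };
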
template <typename VatId, typename ProvisionId,
          typename RecipientId, typename ThirdPartyCapId, typename JoinResult>
RpcSystem<VatId> makeRpcClient(
    VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network);
// Make an RPC client.  Typical usage (e.g. in a main() function):
//
//    MyEventLoop eventLoop;
//    kj::WaitScope waitScope(eventLoop);
//    MyNetwork network;
//    auto client = makeRpcClient(network);
//    MyCapability::Client cap = client.bootstrap(hostId).castAs<MyCapability>();
//    auto response = cap.fooRequest().send().wait(waitScope);
//    handleMyResponse(response);
//
// See also ez-rpc.h, which has simpler instructions for the common case of a two-party
// client-server RPC connection.

template <typename SturdyRefObjectId>
class SturdyRefRestorer: public _::SturdyRefRestorerBase {
  // ** DEPRECATED **
  //
  // In Cap'n Proto 0.4.x, applications could export multiple main interfaces identified by
  // object IDs. The callback used to map object IDs to objects was `SturdyRefRestorer`, as we
  // imagined this would eventually be used for restoring SturdyRefs as well. In practice, it was
  // never used for real SturdyRefs, only for exporting singleton objects under well-known names.
  //
  // The new preferred strategy is to export only a _single_ such interface, called the
  // "bootstrap interface". That interface can itself have methods for obtaining other objects, of
  // course, but that is up to the app. `SturdyRefRestorer` exists for backwards-compatibility.
  //
  // Hint:  Use SturdyRefRestorer<capnp::Text> to define a server that exports services under
  //   string names.

public:
  virtual Capability::Client restore(typename SturdyRefObjectId::Reader ref) CAPNP_DEPRECATED(
      "Please transition to using bootstrap interfaces instead of SturdyRefRestorer.") = 0;
  // Restore the given object, returning a capability representing it.

private:
  Capability::Client baseRestore(AnyPointer::Reader ref) override final;
};
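
// Following the hint above, a sketch of a restorer that exports services under string names
// (the service implementations are hypothetical):
//
//    class NamedServiceRestorer final: public SturdyRefRestorer<capnp::Text> {
//    public:
//      Capability::Client restore(capnp::Text::Reader name) override {
//        if (name == "logger") return kj::heap<LoggerImpl>();
//        if (name == "clock") return kj::heap<ClockImpl>();
//        KJ_FAIL_REQUIRE("no such service", name);
//      }
//    };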

// =======================================================================================
// VatNetwork

class OutgoingRpcMessage {
  // A message to be sent by a `VatNetwork`.

public:
  virtual AnyPointer::Builder getBody() = 0;
  // Get the message body, which the caller may fill in any way it wants.  (The standard RPC
  // implementation initializes it as a Message as defined in rpc.capnp.)

  virtual void setFds(kj::Array<int> fds) {}
  // Set the list of file descriptors to send along with this message, if FD passing is supported.
  // An implementation may ignore this.

  virtual void send() = 0;
  // Send the message, or at least put it in a queue to be sent later.  Note that the builder
  // returned by `getBody()` remains valid at least until the `OutgoingRpcMessage` is destroyed.

  virtual size_t sizeInWords() = 0;
  // Get the total size of the message, for flow control purposes. Although the caller could
  // also call getBody().targetSize(), doing that would walk the message tree, whereas typical
  // implementations can compute the size more cheaply by summing segment sizes.
};
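
// A sketch of how a sender typically uses this interface (assuming `conn` is a
// VatNetwork::Connection and rpc.capnp.h is included for the rpc::Message type):
//
//    auto msg = conn->newOutgoingMessage(0);
//    auto root = msg->getBody().initAs<rpc::Message>();
//    root.initAbort().setReason("shutting down");
//    msg->send();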

class IncomingRpcMessage {
  // A message received from a `VatNetwork`.

public:
  virtual AnyPointer::Reader getBody() = 0;
  // Get the message body, to be interpreted by the caller.  (The standard RPC implementation
  // interprets it as a Message as defined in rpc.capnp.)

  virtual kj::ArrayPtr<kj::AutoCloseFd> getAttachedFds() { return nullptr; }
  // If the transport supports attached file descriptors and some were attached to this message,
  // returns them. Otherwise returns an empty array. It is intended that the caller will move the
  // FDs out of this table when they are consumed, possibly leaving behind a null slot. Callers
  // should be careful to check if an FD was already consumed by comparing the slot with `nullptr`.
  // (We don't use Maybe here because moving from a Maybe doesn't make it null, so it would only
  // add confusion. Moving from an AutoCloseFd does in fact make it null.)

  virtual size_t sizeInWords() = 0;
  // Get the total size of the message, for flow control purposes. Although the caller could
  // also call getBody().targetSize(), doing that would walk the message tree, whereas typical
  // implementations can compute the size more cheaply by summing segment sizes.
};
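
// A sketch of consuming an attached FD using the null-slot convention described above:
//
//    auto fds = msg->getAttachedFds();
//    if (fds.size() > 0 && fds[0] != nullptr) {
//      kj::AutoCloseFd fd = kj::mv(fds[0]);  // moving out leaves the slot null
//      // ... fd now owns the descriptor ...
//    }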

class RpcFlowController {
  // Tracks a particular RPC stream in order to implement a flow control algorithm.

public:
  virtual kj::Promise<void> send(kj::Own<OutgoingRpcMessage> message, kj::Promise<void> ack) = 0;
  // Like calling message->send(), but the promise resolves when it's a good time to send the
  // next message.
  //
  // `ack` is a promise that resolves when the message has been acknowledged from the other side.
  // In practice, `message` is typically a `Call` message and `ack` is a `Return`. Note that this
  // means `ack` counts not only time to transmit the message but also time for the remote
  // application to process the message. The flow controller is expected to apply backpressure if
  // the remote application responds slowly. If `ack` rejects, then all outstanding and future
  // sends will propagate the exception.
  //
  // Note that messages sent with this method must still be delivered in the same order as if they
  // had been sent with `message->send()`; they cannot be delayed until later. This is important
  // because the message may introduce state changes in the RPC system that later messages rely on,
  // such as introducing a new Question ID that a later message may reference. Thus, the controller
  // can only create backpressure by having the returned promise resolve slowly.
  //
  // Dropping the returned promise does not cancel the send. Once send() is called, there's no way
  // to stop it.

  virtual kj::Promise<void> waitAllAcked() = 0;
  // Wait for all `ack`s previously passed to send() to finish. It is an error to call send() again
  // after this.

  // ---------------------------------------------------------------------------
  // Common implementations.

  static kj::Own<RpcFlowController> newFixedWindowController(size_t windowSize);
  // Constructs a flow controller that implements a strict fixed window of the given size. In other
  // words, the controller will throttle the stream when the total bytes in-flight exceeds the
  // window.

  class WindowGetter {
  public:
    virtual size_t getWindow() = 0;
  };

  static kj::Own<RpcFlowController> newVariableWindowController(WindowGetter& getter);
  // Like newFixedWindowController(), but the window size is allowed to vary over time. Useful if
  // you have a technique for estimating one good window size for the connection as a whole but not
  // for individual streams. Keep in mind, though, that in situations where the other end of the
  // connection is merely proxying capabilities from a variety of final destinations across a
  // variety of networks, no single window will be appropriate for all streams.

  static constexpr size_t DEFAULT_WINDOW_SIZE = 65536;
  // The window size used by the default implementation of Connection::newStream().
};
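
// As an illustration of newVariableWindowController(), a WindowGetter whose window is
// updated over time by other code (how the estimate is computed is up to the application):
//
//    class MeasuredWindow final: public RpcFlowController::WindowGetter {
//    public:
//      size_t getWindow() override { return window; }
//      size_t window = RpcFlowController::DEFAULT_WINDOW_SIZE;
//    };
//
//    MeasuredWindow window;  // must outlive the controller
//    auto controller = RpcFlowController::newVariableWindowController(window);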

template <typename VatId, typename ProvisionId, typename RecipientId,
          typename ThirdPartyCapId, typename JoinResult>
class VatNetwork: public _::VatNetworkBase {
  // Cap'n Proto RPC operates between vats, where a "vat" is some sort of host of objects.
  // Typically one Cap'n Proto process (in the Unix sense) is one vat.  The RPC system is what
  // allows calls between objects hosted in different vats.
  //
  // The RPC implementation sits on top of an implementation of `VatNetwork`.  The `VatNetwork`
  // determines how to form connections between vats -- specifically, two-way, private, reliable,
  // sequenced datagram connections.  The RPC implementation determines how to use such connections
  // to manage object references and make method calls.
  //
  // The most common implementation of VatNetwork is TwoPartyVatNetwork (rpc-twoparty.h).  Most
  // simple client-server apps will want to use it.  (You may even want to use the EZ RPC
  // interfaces in `ez-rpc.h` and avoid all of this.)
  //
  // TODO(someday):  Provide a standard implementation for the public internet.

public:
  class Connection;

  struct ConnectionAndProvisionId {
    // Result of connecting to a vat introduced by another vat.

    kj::Own<Connection> connection;
    // Connection to the new vat.

    kj::Own<OutgoingRpcMessage> firstMessage;
    // An already-allocated `OutgoingRpcMessage` associated with `connection`.  The RPC system will
    // construct this as an `Accept` message and send it.

    Orphan<ProvisionId> provisionId;
    // A `ProvisionId` already allocated inside `firstMessage`, which the RPC system will use to
    // build the `Accept` message.
  };

  class Connection: public _::VatNetworkBase::Connection {
    // A two-way RPC connection.
    //
    // This object may represent a connection that doesn't exist yet, but is expected to exist
    // in the future.  In this case, sent messages will automatically be queued and sent once the
    // connection is ready, so that the caller doesn't need to know the difference.

  public:
    virtual kj::Own<RpcFlowController> newStream() override {
      return RpcFlowController::newFixedWindowController(RpcFlowController::DEFAULT_WINDOW_SIZE);
    }
    // Construct a flow controller for a new stream on this connection. Messages on the stream
    // can then be sent through the controller's send() method.
    //
    // The default implementation returns a dummy stream controller that just applies a fixed
    // window of 64k to everything. This always works but may constrain throughput on networks
    // where the bandwidth-delay product is high, while conversely providing too much buffer when
    // the bandwidth-delay product is low.
    //
    // WARNING: The RPC system may keep the `RpcFlowController` object alive past the lifetime of
    //   the `Connection` itself. However, it will not call `send()` any more after the
    //   `Connection` is destroyed.
    //
    // TODO(perf): We should introduce a flow controller implementation that uses a clock to
    //   measure RTT and bandwidth and dynamically update the window size, like BBR.

    // Level 0 features ----------------------------------------------

    virtual typename VatId::Reader getPeerVatId() = 0;
    // Returns the connected vat's authenticated VatId. It is the VatNetwork's responsibility to
    // authenticate this, so that the caller can be assured that they are really talking to the
    // identified vat and not an imposter.

    virtual kj::Own<OutgoingRpcMessage> newOutgoingMessage(uint firstSegmentWordSize) override = 0;
    // Allocate a new message to be sent on this connection.
    //
    // If `firstSegmentWordSize` is non-zero, it should be treated as a hint suggesting how large
    // to make the first segment.  This is entirely a hint and the connection may adjust it up or
    // down.  If it is zero, the connection should choose the size itself.
    //
    // WARNING: The RPC system may keep the `OutgoingRpcMessage` object alive past the lifetime of
    //   the `Connection` itself. However, it will not call `send()` any more after the
    //   `Connection` is destroyed.

    virtual kj::Promise<kj::Maybe<kj::Own<IncomingRpcMessage>>> receiveIncomingMessage() override = 0;
    // Wait for a message to be received and return it.  If the read stream cleanly terminates,
    // return null.  If any other problem occurs, throw an exception.
    //
    // WARNING: The RPC system may keep the `IncomingRpcMessage` object alive past the lifetime of
    //   the `Connection` itself.

    virtual kj::Promise<void> shutdown() override KJ_WARN_UNUSED_RESULT = 0;
    // Waits until all outgoing messages have been sent, then shuts down the outgoing stream. The
    // returned promise resolves after shutdown is complete.

  private:
    AnyStruct::Reader baseGetPeerVatId() override;
  };

  // Level 0 features ------------------------------------------------

  virtual kj::Maybe<kj::Own<Connection>> connect(typename VatId::Reader hostId) = 0;
  // Connect to a VatId.  Note that this method immediately returns a `Connection`, even
  // if the network connection has not yet been established.  Messages can be queued to this
  // connection and will be delivered once it is open.  The caller must attempt to read from the
  // connection to verify that it actually succeeded; the read will fail if the connection
  // couldn't be opened.  Some network implementations may actually start sending messages before
  // hearing back from the server at all, to avoid a round trip.
  //
  // Returns nullptr if `hostId` refers to the local host.

  virtual kj::Promise<kj::Own<Connection>> accept() = 0;
  // Wait for the next incoming connection and return it.

  // Level 4 features ------------------------------------------------
  // TODO(someday)

private:
  kj::Maybe<kj::Own<_::VatNetworkBase::Connection>>
      baseConnect(AnyStruct::Reader hostId) override final;
  kj::Promise<kj::Own<_::VatNetworkBase::Connection>> baseAccept() override final;
};
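
// A client-side sketch of using a VatNetwork directly (most applications let RpcSystem do
// this for them):
//
//    KJ_IF_MAYBE(conn, network.connect(hostId)) {
//      auto msg = (*conn)->newOutgoingMessage(0);
//      // ... build the first message, then msg->send() ...
//    } else {
//      // hostId refers to this vat itself; no connection is needed.
//    }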

// =======================================================================================
// ***************************************************************************************
// Inline implementation details start here
// ***************************************************************************************
// =======================================================================================

template <typename VatId>
Capability::Client BootstrapFactory<VatId>::baseCreateFor(AnyStruct::Reader clientId) {
  return createFor(clientId.as<VatId>());
}

template <typename SturdyRef, typename ProvisionId, typename RecipientId,
          typename ThirdPartyCapId, typename JoinResult>
kj::Maybe<kj::Own<_::VatNetworkBase::Connection>>
    VatNetwork<SturdyRef, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>::
    baseConnect(AnyStruct::Reader ref) {
  auto maybe = connect(ref.as<SturdyRef>());
  return maybe.map([](kj::Own<Connection>& conn) -> kj::Own<_::VatNetworkBase::Connection> {
    return kj::mv(conn);
  });
}

template <typename SturdyRef, typename ProvisionId, typename RecipientId,
          typename ThirdPartyCapId, typename JoinResult>
kj::Promise<kj::Own<_::VatNetworkBase::Connection>>
    VatNetwork<SturdyRef, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>::baseAccept() {
  return accept().then(
      [](kj::Own<Connection>&& connection) -> kj::Own<_::VatNetworkBase::Connection> {
    return kj::mv(connection);
  });
}

template <typename SturdyRef, typename ProvisionId, typename RecipientId,
          typename ThirdPartyCapId, typename JoinResult>
AnyStruct::Reader VatNetwork<
    SturdyRef, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>::
    Connection::baseGetPeerVatId() {
  return getPeerVatId();
}

template <typename SturdyRef>
Capability::Client SturdyRefRestorer<SturdyRef>::baseRestore(AnyPointer::Reader ref) {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
  return restore(ref.getAs<SturdyRef>());
#pragma GCC diagnostic pop
}

template <typename VatId>
template <typename ProvisionId, typename RecipientId,
          typename ThirdPartyCapId, typename JoinResult>
RpcSystem<VatId>::RpcSystem(
      VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network,
      kj::Maybe<Capability::Client> bootstrap)
    : _::RpcSystemBase(network, kj::mv(bootstrap)) {}

template <typename VatId>
template <typename ProvisionId, typename RecipientId,
          typename ThirdPartyCapId, typename JoinResult>
RpcSystem<VatId>::RpcSystem(
      VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network,
      BootstrapFactory<VatId>& bootstrapFactory)
    : _::RpcSystemBase(network, bootstrapFactory) {}

template <typename VatId>
template <typename ProvisionId, typename RecipientId,
          typename ThirdPartyCapId, typename JoinResult,
          typename LocalSturdyRefObjectId>
RpcSystem<VatId>::RpcSystem(
      VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network,
      SturdyRefRestorer<LocalSturdyRefObjectId>& restorer)
    : _::RpcSystemBase(network, restorer) {}

template <typename VatId>
Capability::Client RpcSystem<VatId>::bootstrap(typename VatId::Reader vatId) {
  return baseBootstrap(_::PointerHelpers<VatId>::getInternalReader(vatId));
}

template <typename VatId>
Capability::Client RpcSystem<VatId>::restore(
    typename VatId::Reader hostId, AnyPointer::Reader objectId) {
  return baseRestore(_::PointerHelpers<VatId>::getInternalReader(hostId), objectId);
}

template <typename VatId>
inline void RpcSystem<VatId>::setFlowLimit(size_t words) {
  baseSetFlowLimit(words);
}

template <typename VatId, typename ProvisionId, typename RecipientId,
          typename ThirdPartyCapId, typename JoinResult>
RpcSystem<VatId> makeRpcServer(
    VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network,
    Capability::Client bootstrapInterface) {
  return RpcSystem<VatId>(network, kj::mv(bootstrapInterface));
}

template <typename VatId, typename ProvisionId, typename RecipientId,
          typename ThirdPartyCapId, typename JoinResult>
RpcSystem<VatId> makeRpcServer(
    VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network,
    BootstrapFactory<VatId>& bootstrapFactory) {
  return RpcSystem<VatId>(network, bootstrapFactory);
}

template <typename VatId, typename LocalSturdyRefObjectId,
          typename ProvisionId, typename RecipientId, typename ThirdPartyCapId, typename JoinResult>
RpcSystem<VatId> makeRpcServer(
    VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network,
    SturdyRefRestorer<LocalSturdyRefObjectId>& restorer) {
  return RpcSystem<VatId>(network, restorer);
}

template <typename VatId, typename ProvisionId,
          typename RecipientId, typename ThirdPartyCapId, typename JoinResult>
RpcSystem<VatId> makeRpcClient(
    VatNetwork<VatId, ProvisionId, RecipientId, ThirdPartyCapId, JoinResult>& network) {
  return RpcSystem<VatId>(network, nullptr);
}

}  // namespace capnp

CAPNP_END_HEADER