schema-loader.c++
// Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#define CAPNP_PRIVATE
#include "schema-loader.h"
#include "message.h"
#include "arena.h"
#include <kj/debug.h>
#include <kj/exception.h>
#include <kj/arena.h>
#include <kj/vector.h>
#include <algorithm>
#include <kj/map.h>
#include <capnp/stream.capnp.h>

#if _MSC_VER && !defined(__clang__)
#include <atomic>
#endif

namespace capnp {

namespace {

struct SchemaBindingsPair {
  const _::RawSchema* schema;
  const _::RawBrandedSchema::Scope* scopeBindings;

  inline bool operator==(const SchemaBindingsPair& other) const {
    return schema == other.schema && scopeBindings == other.scopeBindings;
  }
  inline uint hashCode() const {
    return kj::hashCode(schema, scopeBindings);
  }
};

}  // namespace

bool hasDiscriminantValue(const schema::Field::Reader& reader) {
  return reader.getDiscriminantValue() != schema::Field::NO_DISCRIMINANT;
}

class SchemaLoader::InitializerImpl: public _::RawSchema::Initializer {
public:
  inline explicit InitializerImpl(const SchemaLoader& loader): loader(loader), callback(nullptr) {}
  inline InitializerImpl(const SchemaLoader& loader, const LazyLoadCallback& callback)
      : loader(loader), callback(callback) {}

  inline kj::Maybe<const LazyLoadCallback&> getCallback() const { return callback; }

  void init(const _::RawSchema* schema) const override;

  inline bool operator==(decltype(nullptr)) const { return callback == nullptr; }

private:
  const SchemaLoader& loader;
  kj::Maybe<const LazyLoadCallback&> callback;
};

class SchemaLoader::BrandedInitializerImpl: public _::RawBrandedSchema::Initializer {
public:
  inline explicit BrandedInitializerImpl(const SchemaLoader& loader): loader(loader) {}

  void init(const _::RawBrandedSchema* schema) const override;

private:
  const SchemaLoader& loader;
};

class SchemaLoader::Impl {
public:
  inline explicit Impl(const SchemaLoader& loader)
      : initializer(loader), brandedInitializer(loader) {}
  inline Impl(const SchemaLoader& loader, const LazyLoadCallback& callback)
      : initializer(loader, callback), brandedInitializer(loader) {}

  _::RawSchema* load(const schema::Node::Reader& reader, bool isPlaceholder);

  _::RawSchema* loadNative(const _::RawSchema* nativeSchema);
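  // Load a schema that was compiled into the binary, copying it (and, recursively, its
  // dependencies) into the loader's arena. The returned copy's `canCastTo` points back at
  // `nativeSchema` to record that casting between the two is safe.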

  _::RawSchema* loadEmpty(uint64_t id, kj::StringPtr name, schema::Node::Which kind,
                          bool isPlaceholder);
  // Create a dummy empty schema of the given kind for the given id and load it.

  const _::RawBrandedSchema* makeBranded(
      const _::RawSchema* schema, schema::Brand::Reader proto,
      kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> clientBrand);

  struct TryGetResult {
    _::RawSchema* schema;
    kj::Maybe<const LazyLoadCallback&> callback;
  };

  TryGetResult tryGet(uint64_t typeId) const;

  const _::RawBrandedSchema* getUnbound(const _::RawSchema* schema);

  kj::Array<Schema> getAllLoaded() const;

  void requireStructSize(uint64_t id, uint dataWordCount, uint pointerCount);
  // Require any struct nodes loaded with this ID -- in the past and in the future -- to have at
  // least the given sizes. Struct nodes that don't comply will simply be rewritten to comply.
  // This is used to ensure that parents of group nodes have at least the size of the group node,
  // so that allocating a struct that contains a group then getting the group node and setting
  // its fields can't possibly write outside of the allocated space.

  kj::Arena arena;

private:
  kj::HashSet<kj::ArrayPtr<const byte>> dedupTable;
  // Records raw segments of memory in the arena against which we may want to de-dupe later
  // additions. Specifically, RawBrandedSchema binding tables are de-duped.

  kj::HashMap<uint64_t, _::RawSchema*> schemas;
  kj::HashMap<SchemaBindingsPair, _::RawBrandedSchema*> brands;
  kj::HashMap<const _::RawSchema*, _::RawBrandedSchema*> unboundBrands;

  struct RequiredSize {
    uint16_t dataWordCount;
    uint16_t pointerCount;
  };
  kj::HashMap<uint64_t, RequiredSize> structSizeRequirements;

  InitializerImpl initializer;
  BrandedInitializerImpl brandedInitializer;

  kj::ArrayPtr<word> makeUncheckedNode(schema::Node::Reader node);
  // Construct a copy of the given schema node, allocated as a single-segment ("unchecked") node
  // within the loader's arena.

  kj::ArrayPtr<word> makeUncheckedNodeEnforcingSizeRequirements(schema::Node::Reader node);
  // Like makeUncheckedNode() but if structSizeRequirements has a requirement for this node which
  // is larger than the node claims to be, the size will be edited to comply. This should be rare.
  // If the incoming node is not a struct, any struct size requirements will be ignored, but if
  // such requirements exist, this indicates an inconsistency that could cause exceptions later on
  // (but at least can't cause memory corruption).

  kj::ArrayPtr<word> rewriteStructNodeWithSizes(
      schema::Node::Reader node, uint dataWordCount, uint pointerCount);
  // Make a copy of the given node (which must be a struct node) and set its sizes to be the max
  // of what it said already and the given sizes.

  // If the encoded node does not meet the given struct size requirements, make a new copy that
  // does.
  void applyStructSizeRequirement(_::RawSchema* raw, uint dataWordCount, uint pointerCount);

  const _::RawBrandedSchema* makeBranded(const _::RawSchema* schema,
      kj::ArrayPtr<const _::RawBrandedSchema::Scope> scopes);

  kj::ArrayPtr<const _::RawBrandedSchema::Dependency> makeBrandedDependencies(
      const _::RawSchema* schema,
      kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> bindings);

  void makeDep(_::RawBrandedSchema::Binding& result,
      schema::Type::Reader type, kj::StringPtr scopeName,
      kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings);
  void makeDep(_::RawBrandedSchema::Binding& result,
      uint64_t typeId, schema::Type::Which whichType, schema::Node::Which expectedKind,
      schema::Brand::Reader brand, kj::StringPtr scopeName,
      kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings);
  // Looks up the schema and brand for a dependency, or creates lazily-evaluated placeholders if
  // they don't already exist, and fills in `result`. `scopeName` is a human-readable name of the
  // place where the type appeared.
  //
  // Note that we don't simply return a Binding because we need to be careful about initialization
  // to ensure that our byte-based de-duplication works. If we constructed a Binding on the stack
  // and returned it, padding bytes in that Binding could go uninitialized, causing it to appear
  // unique when it's not. It is expected that `result` has been zero'd via memset() before these
  // methods are called.

  const _::RawBrandedSchema* makeDepSchema(
      schema::Type::Reader type, kj::StringPtr scopeName,
      kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings);
  const _::RawBrandedSchema* makeDepSchema(
      uint64_t typeId, schema::Type::Which whichType, schema::Node::Which expectedKind,
      schema::Brand::Reader brand, kj::StringPtr scopeName,
      kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings);
  // Invoke makeDep() then return the result's schema, or nullptr if it's a primitive type.

  template <typename T>
  kj::ArrayPtr<const T> copyDeduped(kj::ArrayPtr<const T> values);
  template <typename T>
  kj::ArrayPtr<const T> copyDeduped(kj::ArrayPtr<T> values);
  // Copy the given array into the arena and return the copy -- unless an identical array
  // was copied previously, in which case the existing copy is returned.

  friend class SchemaLoader::BrandedInitializerImpl;
};

// =======================================================================================

inline static void verifyVoid(Void value) {}
// Calls to this will break if the parameter type changes to non-void. We use this to detect
// when the code needs updating.
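
// The Validator checks a single loaded node for internal consistency -- unique member names,
// in-range code orders and discriminants, field offsets within the declared struct size, and
// type references of the expected kind -- collecting dependencies as it goes. Nodes that fail
// validation are not loaded; Impl::load() substitutes an empty schema of the same kind instead.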

class SchemaLoader::Validator {
public:
  Validator(SchemaLoader::Impl& loader): loader(loader) {}

  bool validate(const schema::Node::Reader& node) {
    isValid = true;
    nodeName = node.getDisplayName();
    dependencies.clear();

    KJ_CONTEXT("validating schema node", nodeName, (uint)node.which());

    if (node.getParameters().size() > 0) {
      KJ_REQUIRE(node.getIsGeneric(), "if parameter list is non-empty, isGeneric must be true") {
        isValid = false;
        return false;
      }
    }

    switch (node.which()) {
      case schema::Node::FILE:
        verifyVoid(node.getFile());
        break;
      case schema::Node::STRUCT:
        validate(node.getStruct(), node.getScopeId());
        break;
      case schema::Node::ENUM:
        validate(node.getEnum());
        break;
      case schema::Node::INTERFACE:
        validate(node.getInterface());
        break;
      case schema::Node::CONST:
        validate(node.getConst());
        break;
      case schema::Node::ANNOTATION:
        validate(node.getAnnotation());
        break;
    }

    // We accept and pass through node types we don't recognize.
    return isValid;
  }

  const _::RawSchema** makeDependencyArray(uint32_t* count) {
    *count = dependencies.size();
    kj::ArrayPtr<const _::RawSchema*> result =
        loader.arena.allocateArray<const _::RawSchema*>(*count);
    uint pos = 0;
    for (auto& dep: dependencies) {
      result[pos++] = dep.value;
    }
    KJ_DASSERT(pos == *count);
    return result.begin();
  }

  const uint16_t* makeMemberInfoArray(uint32_t* count) {
    *count = members.size();
    kj::ArrayPtr<uint16_t> result = loader.arena.allocateArray<uint16_t>(*count);
    uint pos = 0;
    for (auto& member: members) {
      result[pos++] = member.value;
    }
    KJ_DASSERT(pos == *count);
    return result.begin();
  }

  const uint16_t* makeMembersByDiscriminantArray() {
    return membersByDiscriminant.begin();
  }

private:
  SchemaLoader::Impl& loader;
  Text::Reader nodeName;
  bool isValid;

  // Maps type IDs -> compiled schemas for each dependency.
  // Order is important because makeDependencyArray() compiles a sorted array.
  kj::TreeMap<uint64_t, _::RawSchema*> dependencies;

  // Maps name -> index for each member.
  // Order is important because makeMemberInfoArray() compiles a sorted array.
  kj::TreeMap<Text::Reader, uint> members;

  kj::ArrayPtr<uint16_t> membersByDiscriminant;

#define VALIDATE_SCHEMA(condition, ...) \
  KJ_REQUIRE(condition, ##__VA_ARGS__) { isValid = false; return; }
#define FAIL_VALIDATE_SCHEMA(...) \
  KJ_FAIL_REQUIRE(__VA_ARGS__) { isValid = false; return; }

  void validateMemberName(kj::StringPtr name, uint index) {
    members.upsert(name, index, [&](auto&, auto&&) {
      FAIL_VALIDATE_SCHEMA("duplicate name", name);
    });
  }

  void validate(const schema::Node::Struct::Reader& structNode, uint64_t scopeId) {
    uint dataSizeInBits = structNode.getDataWordCount() * 64;
    uint pointerCount = structNode.getPointerCount();

    auto fields = structNode.getFields();

    KJ_STACK_ARRAY(bool, sawCodeOrder, fields.size(), 32, 256);
    memset(sawCodeOrder.begin(), 0, sawCodeOrder.size() * sizeof(sawCodeOrder[0]));

    KJ_STACK_ARRAY(bool, sawDiscriminantValue, structNode.getDiscriminantCount(), 32, 256);
    memset(sawDiscriminantValue.begin(), 0,
           sawDiscriminantValue.size() * sizeof(sawDiscriminantValue[0]));

    if (structNode.getDiscriminantCount() > 0) {
      VALIDATE_SCHEMA(structNode.getDiscriminantCount() != 1,
                      "union must have at least two members");
      VALIDATE_SCHEMA(structNode.getDiscriminantCount() <= fields.size(),
                      "struct can't have more union fields than total fields");

      VALIDATE_SCHEMA((structNode.getDiscriminantOffset() + 1) * 16 <= dataSizeInBits,
                      "union discriminant is out-of-bounds");
    }

    membersByDiscriminant = loader.arena.allocateArray<uint16_t>(fields.size());
    uint discriminantPos = 0;
    uint nonDiscriminantPos = structNode.getDiscriminantCount();

    uint index = 0;
    uint nextOrdinal = 0;
    for (auto field: fields) {
      KJ_CONTEXT("validating struct field", field.getName());

      validateMemberName(field.getName(), index);
      VALIDATE_SCHEMA(field.getCodeOrder() < sawCodeOrder.size() &&
                      !sawCodeOrder[field.getCodeOrder()],
                      "invalid codeOrder");
      sawCodeOrder[field.getCodeOrder()] = true;

      auto ordinal = field.getOrdinal();
      if (ordinal.isExplicit()) {
        VALIDATE_SCHEMA(ordinal.getExplicit() >= nextOrdinal,
                        "fields were not ordered by ordinal");
        nextOrdinal = ordinal.getExplicit() + 1;
      }

      if (hasDiscriminantValue(field)) {
        VALIDATE_SCHEMA(field.getDiscriminantValue() < sawDiscriminantValue.size() &&
                        !sawDiscriminantValue[field.getDiscriminantValue()],
                        "invalid discriminantValue");
        sawDiscriminantValue[field.getDiscriminantValue()] = true;

        membersByDiscriminant[discriminantPos++] = index;
      } else {
        VALIDATE_SCHEMA(nonDiscriminantPos <= fields.size(),
                        "discriminantCount did not match fields");
        membersByDiscriminant[nonDiscriminantPos++] = index;
      }

      switch (field.which()) {
        case schema::Field::SLOT: {
          auto slot = field.getSlot();

          uint fieldBits = 0;
          bool fieldIsPointer = false;
          validate(slot.getType(), slot.getDefaultValue(), &fieldBits, &fieldIsPointer);
          VALIDATE_SCHEMA(fieldBits * (slot.getOffset() + 1) <= dataSizeInBits &&
                          fieldIsPointer * (slot.getOffset() + 1) <= pointerCount,
                          "field offset out-of-bounds",
                          slot.getOffset(), dataSizeInBits, pointerCount);

          break;
        }

        case schema::Field::GROUP:
          // Require that the group is a struct node.
          validateTypeId(field.getGroup().getTypeId(), schema::Node::STRUCT);
          break;
      }

      ++index;
    }

    // If the above code is correct, these should pass.
    KJ_ASSERT(discriminantPos == structNode.getDiscriminantCount());
    KJ_ASSERT(nonDiscriminantPos == fields.size());

    if (structNode.getIsGroup()) {
      VALIDATE_SCHEMA(scopeId != 0, "group node missing scopeId");

      // Require that the group's scope has at least the same size as the group, so that anyone
      // constructing an instance of the outer scope can safely read/write the group.
      loader.requireStructSize(scopeId, structNode.getDataWordCount(),
                               structNode.getPointerCount());

      // Require that the parent type is a struct.
      validateTypeId(scopeId, schema::Node::STRUCT);
    }
  }

  void validate(const schema::Node::Enum::Reader& enumNode) {
    auto enumerants = enumNode.getEnumerants();
    KJ_STACK_ARRAY(bool, sawCodeOrder, enumerants.size(), 32, 256);
    memset(sawCodeOrder.begin(), 0, sawCodeOrder.size() * sizeof(sawCodeOrder[0]));

    uint index = 0;
    for (auto enumerant: enumerants) {
      validateMemberName(enumerant.getName(), index++);

      VALIDATE_SCHEMA(enumerant.getCodeOrder() < enumerants.size() &&
                      !sawCodeOrder[enumerant.getCodeOrder()],
                      "invalid codeOrder", enumerant.getName());
      sawCodeOrder[enumerant.getCodeOrder()] = true;
    }
  }

  void validate(const schema::Node::Interface::Reader& interfaceNode) {
    for (auto extend: interfaceNode.getSuperclasses()) {
      validateTypeId(extend.getId(), schema::Node::INTERFACE);
      validate(extend.getBrand());
    }

    auto methods = interfaceNode.getMethods();
    KJ_STACK_ARRAY(bool, sawCodeOrder, methods.size(), 32, 256);
    memset(sawCodeOrder.begin(), 0, sawCodeOrder.size() * sizeof(sawCodeOrder[0]));

    uint index = 0;
    for (auto method: methods) {
      KJ_CONTEXT("validating method", method.getName());
      validateMemberName(method.getName(), index++);

      VALIDATE_SCHEMA(method.getCodeOrder() < methods.size() &&
                      !sawCodeOrder[method.getCodeOrder()],
                      "invalid codeOrder");
      sawCodeOrder[method.getCodeOrder()] = true;

      validateTypeId(method.getParamStructType(), schema::Node::STRUCT);
      validate(method.getParamBrand());
      validateTypeId(method.getResultStructType(), schema::Node::STRUCT);
      validate(method.getResultBrand());
    }
  }

  void validate(const schema::Node::Const::Reader& constNode) {
    uint dummy1;
    bool dummy2;
    validate(constNode.getType(), constNode.getValue(), &dummy1, &dummy2);
  }

  void validate(const schema::Node::Annotation::Reader& annotationNode) {
    validate(annotationNode.getType());
  }

  void validate(const schema::Type::Reader& type, const schema::Value::Reader& value,
                uint* dataSizeInBits, bool* isPointer) {
    validate(type);

    schema::Value::Which expectedValueType = schema::Value::VOID;
    bool hadCase = false;
    switch (type.which()) {
#define HANDLE_TYPE(name, bits, ptr) \
      case schema::Type::name: \
        expectedValueType = schema::Value::name; \
        *dataSizeInBits = bits; *isPointer = ptr; \
        hadCase = true; \
        break;
      HANDLE_TYPE(VOID, 0, false)
      HANDLE_TYPE(BOOL, 1, false)
      HANDLE_TYPE(INT8, 8, false)
      HANDLE_TYPE(INT16, 16, false)
      HANDLE_TYPE(INT32, 32, false)
      HANDLE_TYPE(INT64, 64, false)
      HANDLE_TYPE(UINT8, 8, false)
      HANDLE_TYPE(UINT16, 16, false)
      HANDLE_TYPE(UINT32, 32, false)
      HANDLE_TYPE(UINT64, 64, false)
      HANDLE_TYPE(FLOAT32, 32, false)
      HANDLE_TYPE(FLOAT64, 64, false)
      HANDLE_TYPE(TEXT, 0, true)
      HANDLE_TYPE(DATA, 0, true)
      HANDLE_TYPE(LIST, 0, true)
      HANDLE_TYPE(ENUM, 16, false)
      HANDLE_TYPE(STRUCT, 0, true)
      HANDLE_TYPE(INTERFACE, 0, true)
      HANDLE_TYPE(ANY_POINTER, 0, true)
#undef HANDLE_TYPE
    }

    if (hadCase) {
      VALIDATE_SCHEMA(value.which() == expectedValueType, "Value did not match type.",
                      (uint)value.which(), (uint)expectedValueType);
    }
  }

  void validate(const schema::Type::Reader& type) {
    switch (type.which()) {
      case schema::Type::VOID:
      case schema::Type::BOOL:
      case schema::Type::INT8:
      case schema::Type::INT16:
      case schema::Type::INT32:
      case schema::Type::INT64:
      case schema::Type::UINT8:
      case schema::Type::UINT16:
      case schema::Type::UINT32:
      case schema::Type::UINT64:
      case schema::Type::FLOAT32:
      case schema::Type::FLOAT64:
      case schema::Type::TEXT:
      case schema::Type::DATA:
      case schema::Type::ANY_POINTER:
        break;

      case schema::Type::STRUCT: {
        auto structType = type.getStruct();
        validateTypeId(structType.getTypeId(), schema::Node::STRUCT);
        validate(structType.getBrand());
        break;
      }
      case schema::Type::ENUM: {
        auto enumType = type.getEnum();
        validateTypeId(enumType.getTypeId(), schema::Node::ENUM);
        validate(enumType.getBrand());
        break;
      }
      case schema::Type::INTERFACE: {
        auto interfaceType = type.getInterface();
        validateTypeId(interfaceType.getTypeId(), schema::Node::INTERFACE);
        validate(interfaceType.getBrand());
        break;
      }

      case schema::Type::LIST:
        validate(type.getList().getElementType());
        break;
    }

    // We intentionally allow unknown types.
  }

  void validate(const schema::Brand::Reader& brand) {
    for (auto scope: brand.getScopes()) {
      switch (scope.which()) {
        case schema::Brand::Scope::BIND:
          for (auto binding: scope.getBind()) {
            switch (binding.which()) {
              case schema::Brand::Binding::UNBOUND:
                break;
              case schema::Brand::Binding::TYPE: {
                auto type = binding.getType();
                validate(type);
                bool isPointer = true;
                switch (type.which()) {
                  case schema::Type::VOID:
                  case schema::Type::BOOL:
                  case schema::Type::INT8:
                  case schema::Type::INT16:
                  case schema::Type::INT32:
                  case schema::Type::INT64:
                  case schema::Type::UINT8:
                  case schema::Type::UINT16:
                  case schema::Type::UINT32:
                  case schema::Type::UINT64:
                  case schema::Type::FLOAT32:
                  case schema::Type::FLOAT64:
                  case schema::Type::ENUM:
                    isPointer = false;
                    break;

                  case schema::Type::TEXT:
                  case schema::Type::DATA:
                  case schema::Type::ANY_POINTER:
                  case schema::Type::STRUCT:
                  case schema::Type::INTERFACE:
                  case schema::Type::LIST:
                    isPointer = true;
                    break;
                }
                VALIDATE_SCHEMA(isPointer,
                    "generic type parameter must be a pointer type", type);

                break;
              }
            }
          }
          break;
        case schema::Brand::Scope::INHERIT:
          break;
      }
    }
  }

  void validateTypeId(uint64_t id, schema::Node::Which expectedKind) {
    _::RawSchema* existing = loader.tryGet(id).schema;
    if (existing != nullptr) {
      auto node = readMessageUnchecked<schema::Node>(existing->encodedNode);
      VALIDATE_SCHEMA(node.which() == expectedKind,
          "expected a different kind of node for this ID",
          id, (uint)expectedKind, (uint)node.which(), node.getDisplayName());
      dependencies.upsert(id, existing, [](auto&,auto&&) { /* ignore dupe */ });
      return;
    }

    dependencies.upsert(id, loader.loadEmpty(
        id, kj::str("(unknown type used by ", nodeName, ")"), expectedKind, true),
        [](auto&,auto&&) { /* ignore dupe */ });
  }

#undef VALIDATE_SCHEMA
#undef FAIL_VALIDATE_SCHEMA
};

// =======================================================================================

class SchemaLoader::CompatibilityChecker {
public:
  CompatibilityChecker(SchemaLoader::Impl& loader): loader(loader) {}

  bool shouldReplace(const schema::Node::Reader& existingNode,
                     const schema::Node::Reader& replacement,
                     bool preferReplacementIfEquivalent) {
    this->existingNode = existingNode;
    this->replacementNode = replacement;

    KJ_CONTEXT("checking compatibility with previously-loaded node of the same id",
               existingNode.getDisplayName());

    KJ_DREQUIRE(existingNode.getId() == replacement.getId());

    nodeName = existingNode.getDisplayName();
    compatibility = EQUIVALENT;

    checkCompatibility(existingNode, replacement);

    // Prefer the newer schema.
    return preferReplacementIfEquivalent ? compatibility != OLDER : compatibility == NEWER;
  }

private:
  SchemaLoader::Impl& loader;
  Text::Reader nodeName;
  schema::Node::Reader existingNode;
  schema::Node::Reader replacementNode;

  enum Compatibility {
    EQUIVALENT,
    OLDER,
    NEWER,
    INCOMPATIBLE
  };
  Compatibility compatibility;

#define VALIDATE_SCHEMA(condition, ...) \
  KJ_REQUIRE(condition, ##__VA_ARGS__) { compatibility = INCOMPATIBLE; return; }
#define FAIL_VALIDATE_SCHEMA(...) \
  KJ_FAIL_REQUIRE(__VA_ARGS__) { compatibility = INCOMPATIBLE; return; }

  void replacementIsNewer() {
    switch (compatibility) {
      case EQUIVALENT:
        compatibility = NEWER;
        break;
      case OLDER:
        FAIL_VALIDATE_SCHEMA("Schema node contains some changes that are upgrades and some "
            "that are downgrades. All changes must be in the same direction for compatibility.");
        break;
      case NEWER:
        break;
      case INCOMPATIBLE:
        break;
    }
  }

  void replacementIsOlder() {
    switch (compatibility) {
      case EQUIVALENT:
        compatibility = OLDER;
        break;
      case OLDER:
        break;
      case NEWER:
        FAIL_VALIDATE_SCHEMA("Schema node contains some changes that are upgrades and some "
            "that are downgrades. All changes must be in the same direction for compatibility.");
        break;
      case INCOMPATIBLE:
        break;
    }
  }

  void checkCompatibility(const schema::Node::Reader& node,
                          const schema::Node::Reader& replacement) {
    // Returns whether `replacement` is equivalent, older than, newer than, or incompatible with
    // `node`. If exceptions are enabled, this will throw an exception on INCOMPATIBLE.

    VALIDATE_SCHEMA(node.which() == replacement.which(),
                    "kind of declaration changed");

    // No need to check compatibility of most of the non-body parts of the node:
    // - Arbitrary renaming and moving between scopes is allowed.
    // - Annotations are ignored for compatibility purposes.

    if (replacement.getParameters().size() > node.getParameters().size()) {
      replacementIsNewer();
    } else if (replacement.getParameters().size() < node.getParameters().size()) {
      replacementIsOlder();
    }

    switch (node.which()) {
      case schema::Node::FILE:
        verifyVoid(node.getFile());
        break;
      case schema::Node::STRUCT:
        checkCompatibility(node.getStruct(), replacement.getStruct(),
                           node.getScopeId(), replacement.getScopeId());
        break;
      case schema::Node::ENUM:
        checkCompatibility(node.getEnum(), replacement.getEnum());
        break;
      case schema::Node::INTERFACE:
        checkCompatibility(node.getInterface(), replacement.getInterface());
        break;
      case schema::Node::CONST:
        checkCompatibility(node.getConst(), replacement.getConst());
        break;
      case schema::Node::ANNOTATION:
        checkCompatibility(node.getAnnotation(), replacement.getAnnotation());
        break;
    }
  }

  void checkCompatibility(const schema::Node::Struct::Reader& structNode,
                          const schema::Node::Struct::Reader& replacement,
                          uint64_t scopeId, uint64_t replacementScopeId) {
    if (replacement.getDataWordCount() > structNode.getDataWordCount()) {
      replacementIsNewer();
    } else if (replacement.getDataWordCount() < structNode.getDataWordCount()) {
      replacementIsOlder();
    }
    if (replacement.getPointerCount() > structNode.getPointerCount()) {
      replacementIsNewer();
    } else if (replacement.getPointerCount() < structNode.getPointerCount()) {
      replacementIsOlder();
    }
    if (replacement.getDiscriminantCount() > structNode.getDiscriminantCount()) {
      replacementIsNewer();
    } else if (replacement.getDiscriminantCount() < structNode.getDiscriminantCount()) {
      replacementIsOlder();
    }

    if (replacement.getDiscriminantCount() > 0 && structNode.getDiscriminantCount() > 0) {
      VALIDATE_SCHEMA(replacement.getDiscriminantOffset() == structNode.getDiscriminantOffset(),
                      "union discriminant position changed");
    }

    // The shared members should occupy corresponding positions in the member lists, since the
    // lists are sorted by ordinal.
    auto fields = structNode.getFields();
    auto replacementFields = replacement.getFields();
    uint count = std::min(fields.size(), replacementFields.size());

    if (replacementFields.size() > fields.size()) {
      replacementIsNewer();
    } else if (replacementFields.size() < fields.size()) {
      replacementIsOlder();
    }

    for (uint i = 0; i < count; i++) {
      checkCompatibility(fields[i], replacementFields[i]);
    }

    // For the moment, we allow "upgrading" from non-group to group, mainly so that the
    // placeholders we generate for group parents (which in the absence of more info, we assume to
    // be non-groups) can be replaced with groups.
    //
    // TODO(cleanup): The placeholder approach is really breaking down. Maybe we need to maintain
    //   a list of expectations for nodes we haven't loaded yet.
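    //
    // Hence, below: a group may become a non-group only as a downgrade, a non-group may become
    // a group as an upgrade, and a group may never move to a different parent scope.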
    if (structNode.getIsGroup()) {
      if (replacement.getIsGroup()) {
        VALIDATE_SCHEMA(replacementScopeId == scopeId, "group node's scope changed");
      } else {
        replacementIsOlder();
      }
    } else {
      if (replacement.getIsGroup()) {
        replacementIsNewer();
      }
    }
  }

  void checkCompatibility(const schema::Field::Reader& field,
                          const schema::Field::Reader& replacement) {
    KJ_CONTEXT("comparing struct field", field.getName());

    // A field that is initially not in a union can be upgraded to be in one, as long as it has
    // discriminant 0.
    uint discriminant = hasDiscriminantValue(field) ? field.getDiscriminantValue() : 0;
    uint replacementDiscriminant =
        hasDiscriminantValue(replacement) ? replacement.getDiscriminantValue() : 0;
    VALIDATE_SCHEMA(discriminant == replacementDiscriminant, "Field discriminant changed.");

    switch (field.which()) {
      case schema::Field::SLOT: {
        auto slot = field.getSlot();

        switch (replacement.which()) {
          case schema::Field::SLOT: {
            auto replacementSlot = replacement.getSlot();

            checkCompatibility(slot.getType(), replacementSlot.getType(),
                               NO_UPGRADE_TO_STRUCT);
            checkDefaultCompatibility(slot.getDefaultValue(),
                                      replacementSlot.getDefaultValue());

            VALIDATE_SCHEMA(slot.getOffset() == replacementSlot.getOffset(),
                            "field position changed");
            break;
          }
          case schema::Field::GROUP:
            checkUpgradeToStruct(slot.getType(), replacement.getGroup().getTypeId(),
                                 existingNode, field);
            break;
        }

        break;
      }

      case schema::Field::GROUP:
        switch (replacement.which()) {
          case schema::Field::SLOT:
            checkUpgradeToStruct(replacement.getSlot().getType(), field.getGroup().getTypeId(),
                                 replacementNode, replacement);
            break;
          case schema::Field::GROUP:
            VALIDATE_SCHEMA(field.getGroup().getTypeId() == replacement.getGroup().getTypeId(),
                            "group id changed");
            break;
        }
        break;
    }
  }

  void checkCompatibility(const schema::Node::Enum::Reader& enumNode,
                          const schema::Node::Enum::Reader& replacement) {
    uint size = enumNode.getEnumerants().size();
    uint replacementSize = replacement.getEnumerants().size();
    if (replacementSize > size) {
      replacementIsNewer();
    } else if (replacementSize < size) {
      replacementIsOlder();
    }
  }

  void checkCompatibility(const schema::Node::Interface::Reader& interfaceNode,
                          const schema::Node::Interface::Reader& replacement) {
    {
      // Check superclasses.
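      // Both sorted ID lists are compared with a merge walk below: an ID that appears only in
      // the replacement counts as newer, while one that appears only in the original counts
      // as older.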

      kj::Vector<uint64_t> superclasses;
      kj::Vector<uint64_t> replacementSuperclasses;
      for (auto superclass: interfaceNode.getSuperclasses()) {
        superclasses.add(superclass.getId());
      }
      for (auto superclass: replacement.getSuperclasses()) {
        replacementSuperclasses.add(superclass.getId());
      }
      std::sort(superclasses.begin(), superclasses.end());
      std::sort(replacementSuperclasses.begin(), replacementSuperclasses.end());

      auto iter = superclasses.begin();
      auto replacementIter = replacementSuperclasses.begin();

      while (iter != superclasses.end() || replacementIter != replacementSuperclasses.end()) {
        if (iter == superclasses.end()) {
          replacementIsNewer();
          break;
        } else if (replacementIter == replacementSuperclasses.end()) {
          replacementIsOlder();
          break;
        } else if (*iter < *replacementIter) {
          replacementIsOlder();
          ++iter;
        } else if (*iter > *replacementIter) {
          replacementIsNewer();
          ++replacementIter;
        } else {
          ++iter;
          ++replacementIter;
        }
      }
    }

    auto methods = interfaceNode.getMethods();
    auto replacementMethods = replacement.getMethods();

    if (replacementMethods.size() > methods.size()) {
      replacementIsNewer();
    } else if (replacementMethods.size() < methods.size()) {
      replacementIsOlder();
    }

    uint count = std::min(methods.size(), replacementMethods.size());

    for (uint i = 0; i < count; i++) {
      checkCompatibility(methods[i], replacementMethods[i]);
    }
  }

  void checkCompatibility(const schema::Method::Reader& method,
                          const schema::Method::Reader& replacement) {
    KJ_CONTEXT("comparing method", method.getName());

    // TODO(someday): Allow named parameter list to be replaced by compatible struct type.
    VALIDATE_SCHEMA(method.getParamStructType() == replacement.getParamStructType(),
                    "Updated method has different parameters.");
    VALIDATE_SCHEMA(method.getResultStructType() == replacement.getResultStructType(),
                    "Updated method has different results.");
  }

  void checkCompatibility(const schema::Node::Const::Reader& constNode,
                          const schema::Node::Const::Reader& replacement) {
    // Who cares? These don't appear on the wire.
  }

  void checkCompatibility(const schema::Node::Annotation::Reader& annotationNode,
                          const schema::Node::Annotation::Reader& replacement) {
    // Who cares? These don't appear on the wire.
  }

  enum UpgradeToStructMode {
    ALLOW_UPGRADE_TO_STRUCT,
    NO_UPGRADE_TO_STRUCT
  };

  void checkCompatibility(const schema::Type::Reader& type,
                          const schema::Type::Reader& replacement,
                          UpgradeToStructMode upgradeToStructMode) {
    if (replacement.which() != type.which()) {
      // Check for allowed "upgrade" to Data or AnyPointer.
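      // (Text and lists of single-byte values may become Data; any pointer type may become
      // AnyPointer -- see canUpgradeToData() and canUpgradeToAnyPointer() below.)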
      if (replacement.isData() && canUpgradeToData(type)) {
        replacementIsNewer();
        return;
      } else if (type.isData() && canUpgradeToData(replacement)) {
        replacementIsOlder();
        return;
      } else if (replacement.isAnyPointer() && canUpgradeToAnyPointer(type)) {
        replacementIsNewer();
        return;
      } else if (type.isAnyPointer() && canUpgradeToAnyPointer(replacement)) {
        replacementIsOlder();
        return;
      }

      if (upgradeToStructMode == ALLOW_UPGRADE_TO_STRUCT) {
        if (type.isStruct()) {
          checkUpgradeToStruct(replacement, type.getStruct().getTypeId());
          return;
        } else if (replacement.isStruct()) {
          checkUpgradeToStruct(type, replacement.getStruct().getTypeId());
          return;
        }
      }

      FAIL_VALIDATE_SCHEMA("a type was changed");
    }

    switch (type.which()) {
      case schema::Type::VOID:
      case schema::Type::BOOL:
      case schema::Type::INT8:
      case schema::Type::INT16:
      case schema::Type::INT32:
      case schema::Type::INT64:
      case schema::Type::UINT8:
      case schema::Type::UINT16:
      case schema::Type::UINT32:
      case schema::Type::UINT64:
      case schema::Type::FLOAT32:
      case schema::Type::FLOAT64:
      case schema::Type::TEXT:
      case schema::Type::DATA:
      case schema::Type::ANY_POINTER:
        return;

      case schema::Type::LIST:
        checkCompatibility(type.getList().getElementType(), replacement.getList().getElementType(),
                           ALLOW_UPGRADE_TO_STRUCT);
        return;

      case schema::Type::ENUM:
        VALIDATE_SCHEMA(replacement.getEnum().getTypeId() == type.getEnum().getTypeId(),
                        "type changed enum type");
        return;

      case schema::Type::STRUCT:
        // TODO(someday): If the IDs don't match, we should compare the two structs for
        //   compatibility. This is tricky, though, because the new type's target may not yet be
        //   loaded. In that case we could take the old type, make a copy of it, assign the new
        //   ID to the copy, and load() that. That forces any struct type loaded for that ID to
        //   be compatible. However, that has another problem, which is that it could be that the
        //   whole reason the type was replaced was to fork that type, and so an incompatibility
        //   could be very much expected. This could be a rat hole...
        VALIDATE_SCHEMA(replacement.getStruct().getTypeId() == type.getStruct().getTypeId(),
                        "type changed to incompatible struct type");
        return;

      case schema::Type::INTERFACE:
        VALIDATE_SCHEMA(replacement.getInterface().getTypeId() == type.getInterface().getTypeId(),
                        "type changed to incompatible interface type");
        return;
    }

    // We assume unknown types (from newer versions of Cap'n Proto?) are equivalent.
  }

  void checkUpgradeToStruct(const schema::Type::Reader& type, uint64_t structTypeId,
                            kj::Maybe<schema::Node::Reader> matchSize = nullptr,
                            kj::Maybe<schema::Field::Reader> matchPosition = nullptr) {
    // We can't just look up the target struct and check it because it may not have been loaded
    // yet. Instead, we contrive a struct that looks like what we want and load() that, which
    // guarantees that any incompatibility will be caught either now or when the real version of
    // that struct is loaded.
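    //
    // The contrived node is tiny, so it is built in stack scratch space; MallocMessageBuilder
    // only falls back to heap allocation if the message outgrows the scratch segment.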

    word scratch[32];
    memset(scratch, 0, sizeof(scratch));
    MallocMessageBuilder builder(scratch);
    auto node = builder.initRoot<schema::Node>();
    node.setId(structTypeId);
    node.setDisplayName(kj::str("(unknown type used in ", nodeName, ")"));
    auto structNode = node.initStruct();

    switch (type.which()) {
      case schema::Type::VOID:
        structNode.setDataWordCount(0);
        structNode.setPointerCount(0);
        break;

      case schema::Type::BOOL:
        structNode.setDataWordCount(1);
        structNode.setPointerCount(0);
        break;

      case schema::Type::INT8:
      case schema::Type::UINT8:
        structNode.setDataWordCount(1);
        structNode.setPointerCount(0);
        break;

      case schema::Type::INT16:
      case schema::Type::UINT16:
      case schema::Type::ENUM:
        structNode.setDataWordCount(1);
        structNode.setPointerCount(0);
        break;

      case schema::Type::INT32:
      case schema::Type::UINT32:
      case schema::Type::FLOAT32:
        structNode.setDataWordCount(1);
        structNode.setPointerCount(0);
        break;

      case schema::Type::INT64:
      case schema::Type::UINT64:
      case schema::Type::FLOAT64:
        structNode.setDataWordCount(1);
        structNode.setPointerCount(0);
        break;

      case schema::Type::TEXT:
      case schema::Type::DATA:
      case schema::Type::LIST:
      case schema::Type::STRUCT:
      case schema::Type::INTERFACE:
      case schema::Type::ANY_POINTER:
        structNode.setDataWordCount(0);
        structNode.setPointerCount(1);
        break;
    }

    KJ_IF_MAYBE(s, matchSize) {
      auto match = s->getStruct();
      structNode.setDataWordCount(match.getDataWordCount());
      structNode.setPointerCount(match.getPointerCount());
    }

    auto field = structNode.initFields(1)[0];
    field.setName("member0");
    field.setCodeOrder(0);
    auto slot = field.initSlot();
    slot.setType(type);

    KJ_IF_MAYBE(p, matchPosition) {
      if (p->getOrdinal().isExplicit()) {
        field.getOrdinal().setExplicit(p->getOrdinal().getExplicit());
      } else {
        field.getOrdinal().setImplicit();
      }
      auto matchSlot = p->getSlot();
      slot.setOffset(matchSlot.getOffset());
      slot.setDefaultValue(matchSlot.getDefaultValue());
    } else {
      field.getOrdinal().setExplicit(0);
      slot.setOffset(0);

      schema::Value::Builder value = slot.initDefaultValue();
      switch (type.which()) {
        case schema::Type::VOID: value.setVoid(); break;
        case schema::Type::BOOL: value.setBool(false); break;
        case schema::Type::INT8: value.setInt8(0); break;
        case schema::Type::INT16: value.setInt16(0); break;
        case schema::Type::INT32: value.setInt32(0); break;
        case schema::Type::INT64: value.setInt64(0); break;
        case schema::Type::UINT8: value.setUint8(0); break;
        case schema::Type::UINT16: value.setUint16(0); break;
        case schema::Type::UINT32: value.setUint32(0); break;
        case schema::Type::UINT64: value.setUint64(0); break;
        case schema::Type::FLOAT32: value.setFloat32(0); break;
        case schema::Type::FLOAT64: value.setFloat64(0); break;
        case schema::Type::ENUM: value.setEnum(0); break;
        case schema::Type::TEXT: value.adoptText(Orphan<Text>()); break;
        case schema::Type::DATA: value.adoptData(Orphan<Data>()); break;
        case schema::Type::LIST: value.initList(); break;
        case schema::Type::STRUCT: value.initStruct(); break;
        case schema::Type::INTERFACE: value.setInterface(); break;
        case schema::Type::ANY_POINTER: value.initAnyPointer(); break;
      }
    }

    loader.load(node, true);
  }

  bool canUpgradeToData(const schema::Type::Reader& type) {
    if (type.isText()) {
      return true;
    } else if (type.isList()) {
      switch (type.getList().getElementType().which()) {
        case schema::Type::INT8:
        case schema::Type::UINT8:
          return true;
        default:
          return false;
      }
    } else {
      return false;
    }
  }

  bool canUpgradeToAnyPointer(const schema::Type::Reader& type) {
    switch (type.which()) {
      case schema::Type::VOID:
      case schema::Type::BOOL:
      case schema::Type::INT8:
      case schema::Type::INT16:
      case schema::Type::INT32:
      case schema::Type::INT64:
      case schema::Type::UINT8:
      case schema::Type::UINT16:
      case schema::Type::UINT32:
      case schema::Type::UINT64:
      case schema::Type::FLOAT32:
      case schema::Type::FLOAT64:
      case schema::Type::ENUM:
        return false;

      case schema::Type::TEXT:
      case schema::Type::DATA:
      case schema::Type::LIST:
      case schema::Type::STRUCT:
      case schema::Type::INTERFACE:
      case schema::Type::ANY_POINTER:
        return true;
    }

    // Be lenient with unknown types.
    return true;
  }

  void checkDefaultCompatibility(const schema::Value::Reader& value,
                                 const schema::Value::Reader& replacement) {
    // Note that we test default compatibility only after testing type compatibility, and default
    // values have already been validated as matching their types, so this should pass.
    KJ_ASSERT(value.which() == replacement.which()) {
      compatibility = INCOMPATIBLE;
      return;
    }

    switch (value.which()) {
#define HANDLE_TYPE(discrim, name) \
      case schema::Value::discrim: \
        VALIDATE_SCHEMA(value.get##name() == replacement.get##name(), "default value changed"); \
        break;
      HANDLE_TYPE(VOID, Void);
      HANDLE_TYPE(BOOL, Bool);
      HANDLE_TYPE(INT8, Int8);
      HANDLE_TYPE(INT16, Int16);
      HANDLE_TYPE(INT32, Int32);
      HANDLE_TYPE(INT64, Int64);
      HANDLE_TYPE(UINT8, Uint8);
      HANDLE_TYPE(UINT16, Uint16);
      HANDLE_TYPE(UINT32, Uint32);
      HANDLE_TYPE(UINT64, Uint64);
      HANDLE_TYPE(FLOAT32, Float32);
      HANDLE_TYPE(FLOAT64, Float64);
      HANDLE_TYPE(ENUM, Enum);
#undef HANDLE_TYPE

      case schema::Value::TEXT:
      case schema::Value::DATA:
      case schema::Value::LIST:
      case schema::Value::STRUCT:
      case schema::Value::INTERFACE:
      case schema::Value::ANY_POINTER:
        // It's not a big deal if default values for pointers change, and it would be difficult for
        // us to compare these defaults here, so just let it slide.
        break;
    }
  }
};

// =======================================================================================

_::RawSchema* SchemaLoader::Impl::load(const schema::Node::Reader& reader, bool isPlaceholder) {
  // Make a copy of the node which can be used unchecked.
  kj::ArrayPtr<word> validated = makeUncheckedNodeEnforcingSizeRequirements(reader);

  // Validate the copy.
  Validator validator(*this);
  auto validatedReader = readMessageUnchecked<schema::Node>(validated.begin());

  if (!validator.validate(validatedReader)) {
    // Not valid. Construct an empty schema of the same type and return that.
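    // (The validator has already reported the problem via its KJ_REQUIRE checks; the empty
    // stand-in keeps this ID resolvable as a dependency without exposing invalid data.)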
    return loadEmpty(validatedReader.getId(),
                     validatedReader.getDisplayName(),
                     validatedReader.which(),
                     false);
  }

  // Check if we already have a schema for this ID.
  _::RawSchema* schema;
  bool shouldReplace;
  bool shouldClearInitializer;
  KJ_IF_MAYBE(match, schemas.find(validatedReader.getId())) {
    // Yes, check if it is compatible and figure out which schema is newer.

    schema = *match;

    // If the existing schema is a placeholder, but we're upgrading it to a non-placeholder, we
    // need to clear the initializer later.
    shouldClearInitializer = schema->lazyInitializer != nullptr && !isPlaceholder;

    auto existing = readMessageUnchecked<schema::Node>(schema->encodedNode);
    CompatibilityChecker checker(*this);

    // Prefer to replace the existing schema if the existing schema is a placeholder. Otherwise,
    // prefer to keep the existing schema.
    shouldReplace = checker.shouldReplace(
        existing, validatedReader, schema->lazyInitializer != nullptr);
  } else {
    // Nope, allocate a new RawSchema.
    schema = &arena.allocate<_::RawSchema>();
    memset(&schema->defaultBrand, 0, sizeof(schema->defaultBrand));
    schema->id = validatedReader.getId();
    schema->canCastTo = nullptr;
    schema->defaultBrand.generic = schema;
    schema->lazyInitializer = isPlaceholder ? &initializer : nullptr;
    schema->defaultBrand.lazyInitializer = isPlaceholder ? &brandedInitializer : nullptr;
    shouldReplace = true;
    shouldClearInitializer = false;
    schemas.insert(validatedReader.getId(), schema);
  }

  if (shouldReplace) {
    // Initialize the RawSchema.
    schema->encodedNode = validated.begin();
    schema->encodedSize = validated.size();
    schema->dependencies = validator.makeDependencyArray(&schema->dependencyCount);
    schema->membersByName = validator.makeMemberInfoArray(&schema->memberCount);
    schema->membersByDiscriminant = validator.makeMembersByDiscriminantArray();

    // Even though this schema isn't itself branded, it may have dependencies that are. So, we
    // need to set up the "dependencies" map under defaultBrand.
    auto deps = makeBrandedDependencies(schema, kj::ArrayPtr<const _::RawBrandedSchema::Scope>());
    schema->defaultBrand.dependencies = deps.begin();
    schema->defaultBrand.dependencyCount = deps.size();
  }

  if (shouldClearInitializer) {
    // If this schema is not newly-allocated, it may already be in the wild, specifically in the
    // dependency list of other schemas. Once the initializer is null, it is live, so we must do
    // a release-store here.
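    // (Readers pair this with an acquire-load of `lazyInitializer` before first use of the
    // schema, so everything written above becomes visible before the schema appears live.)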
#if __GNUC__ || defined(__clang__)
    __atomic_store_n(&schema->lazyInitializer, nullptr, __ATOMIC_RELEASE);
    __atomic_store_n(&schema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE);
#elif _MSC_VER
    std::atomic_thread_fence(std::memory_order_release);
    *static_cast<_::RawSchema::Initializer const* volatile*>(&schema->lazyInitializer) = nullptr;
    *static_cast<_::RawBrandedSchema::Initializer const* volatile*>(
        &schema->defaultBrand.lazyInitializer) = nullptr;
#else
#error "Platform not supported"
#endif
  }

  return schema;
}

_::RawSchema* SchemaLoader::Impl::loadNative(const _::RawSchema* nativeSchema) {
  _::RawSchema* schema;
  bool shouldReplace;
  bool shouldClearInitializer;
  KJ_IF_MAYBE(match, schemas.find(nativeSchema->id)) {
    schema = *match;
    if (schema->canCastTo != nullptr) {
      // Already loaded natively, or we're currently in the process of loading natively and there
      // was a dependency cycle.
      KJ_REQUIRE(schema->canCastTo == nativeSchema,
          "two different compiled-in types have the same type ID",
          nativeSchema->id,
          readMessageUnchecked<schema::Node>(nativeSchema->encodedNode).getDisplayName(),
          readMessageUnchecked<schema::Node>(schema->canCastTo->encodedNode).getDisplayName());
      return schema;
    } else {
      auto existing = readMessageUnchecked<schema::Node>(schema->encodedNode);
      auto native = readMessageUnchecked<schema::Node>(nativeSchema->encodedNode);
      CompatibilityChecker checker(*this);
      shouldReplace = checker.shouldReplace(existing, native, true);
      shouldClearInitializer = schema->lazyInitializer != nullptr;
    }
  } else {
    schema = &arena.allocate<_::RawSchema>();
    memset(&schema->defaultBrand, 0, sizeof(schema->defaultBrand));
    schema->defaultBrand.generic = schema;
    schema->lazyInitializer = nullptr;
    schema->defaultBrand.lazyInitializer = nullptr;
    shouldReplace = true;
    shouldClearInitializer = false;  // already cleared above
    schemas.insert(nativeSchema->id, schema);
  }

  if (shouldReplace) {
    // Set the schema to a copy of the native schema, but make sure not to null out lazyInitializer
    // yet.
    _::RawSchema temp = *nativeSchema;
    temp.lazyInitializer = schema->lazyInitializer;
    *schema = temp;

    schema->defaultBrand.generic = schema;

    // Indicate that casting is safe. Note that it's important to set this before recursively
    // loading dependencies, so that cycles don't cause infinite loops!
    schema->canCastTo = nativeSchema;

    // We need to set the dependency list to point at other loader-owned RawSchemas.
    kj::ArrayPtr<const _::RawSchema*> dependencies =
        arena.allocateArray<const _::RawSchema*>(schema->dependencyCount);
    for (uint i = 0; i < nativeSchema->dependencyCount; i++) {
      dependencies[i] = loadNative(nativeSchema->dependencies[i]);
    }
    schema->dependencies = dependencies.begin();

    // Also need to re-do the branded dependencies.
    auto deps = makeBrandedDependencies(schema, kj::ArrayPtr<const _::RawBrandedSchema::Scope>());
    schema->defaultBrand.dependencies = deps.begin();
    schema->defaultBrand.dependencyCount = deps.size();

    // If there is a struct size requirement, we need to make sure that it is satisfied.
    KJ_IF_MAYBE(sizeReq, structSizeRequirements.find(nativeSchema->id)) {
      applyStructSizeRequirement(schema, sizeReq->dataWordCount,
                                 sizeReq->pointerCount);
    }
  } else {
    // The existing schema is newer.

    // Indicate that casting is safe. Note that it's important to set this before recursively
    // loading dependencies, so that cycles don't cause infinite loops!
    schema->canCastTo = nativeSchema;

    // Make sure the dependencies are loaded and compatible.
    for (uint i = 0; i < nativeSchema->dependencyCount; i++) {
      loadNative(nativeSchema->dependencies[i]);
    }
  }

  if (shouldClearInitializer) {
    // If this schema is not newly-allocated, it may already be in the wild, specifically in the
    // dependency list of other schemas. Once the initializer is null, it is live, so we must do
    // a release-store here.
#if __GNUC__ || defined(__clang__)
    __atomic_store_n(&schema->lazyInitializer, nullptr, __ATOMIC_RELEASE);
    __atomic_store_n(&schema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE);
#elif _MSC_VER
    std::atomic_thread_fence(std::memory_order_release);
    *static_cast<_::RawSchema::Initializer const* volatile*>(&schema->lazyInitializer) = nullptr;
    *static_cast<_::RawBrandedSchema::Initializer const* volatile*>(
        &schema->defaultBrand.lazyInitializer) = nullptr;
#else
#error "Platform not supported"
#endif
  }

  return schema;
}

_::RawSchema* SchemaLoader::Impl::loadEmpty(
    uint64_t id, kj::StringPtr name, schema::Node::Which kind, bool isPlaceholder) {
  word scratch[32];
  memset(scratch, 0, sizeof(scratch));
  MallocMessageBuilder builder(scratch);
  auto node = builder.initRoot<schema::Node>();
  node.setId(id);
  node.setDisplayName(name);
  switch (kind) {
    case schema::Node::STRUCT: node.initStruct(); break;
    case schema::Node::ENUM: node.initEnum(); break;
    case schema::Node::INTERFACE: node.initInterface(); break;

    case schema::Node::FILE:
    case schema::Node::CONST:
    case schema::Node::ANNOTATION:
      KJ_FAIL_REQUIRE("Not a type.");
      break;
  }

  return load(node, isPlaceholder);
}

const _::RawBrandedSchema* SchemaLoader::Impl::makeBranded(
    const _::RawSchema* schema, schema::Brand::Reader proto,
    kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> clientBrand) {
  kj::StringPtr scopeName =
      readMessageUnchecked<schema::Node>(schema->encodedNode).getDisplayName();

  auto srcScopes = proto.getScopes();

  KJ_STACK_ARRAY(_::RawBrandedSchema::Scope, dstScopes, srcScopes.size(), 16, 32);
  memset(dstScopes.begin(), 0, dstScopes.size() * sizeof(dstScopes[0]));

  uint dstScopeCount = 0;
  for (auto srcScope: srcScopes) {
    switch (srcScope.which()) {
      case schema::Brand::Scope::BIND: {
        auto srcBindings = srcScope.getBind();
        KJ_STACK_ARRAY(_::RawBrandedSchema::Binding, dstBindings, srcBindings.size(), 16, 32);
        memset(dstBindings.begin(), 0, dstBindings.size() * sizeof(dstBindings[0]));

        for (auto j: kj::indices(srcBindings)) {
          auto srcBinding = srcBindings[j];
          auto& dstBinding = dstBindings[j];

          memset(&dstBinding, 0, sizeof(dstBinding));
          dstBinding.which = schema::Type::ANY_POINTER;

          switch (srcBinding.which()) {
            case schema::Brand::Binding::UNBOUND:
              break;
            case schema::Brand::Binding::TYPE: {
              makeDep(dstBinding, srcBinding.getType(), scopeName, clientBrand);
              break;
            }
          }
        }

        auto& dstScope = dstScopes[dstScopeCount++];
        dstScope.typeId = srcScope.getScopeId();
        dstScope.bindingCount = dstBindings.size();
        dstScope.bindings = copyDeduped(dstBindings).begin();
        break;
      }
      case schema::Brand::Scope::INHERIT: {
        // Inherit the whole scope from the client -- or if the client doesn't have it, at least
        // include an empty dstScope in the list just to show that this scope was specified as
        // inherited, as opposed to being unspecified (which would be treated as all AnyPointer).
        auto& dstScope = dstScopes[dstScopeCount++];
        dstScope.typeId = srcScope.getScopeId();

        KJ_IF_MAYBE(b, clientBrand) {
          for (auto& clientScope: *b) {
            if (clientScope.typeId == dstScope.typeId) {
              // Overwrite the whole thing.
              dstScope = clientScope;
              break;
            }
          }
        } else {
          dstScope.isUnbound = true;
        }
        break;
      }
    }
  }

  dstScopes = dstScopes.slice(0, dstScopeCount);

  std::sort(dstScopes.begin(), dstScopes.end(),
      [](const _::RawBrandedSchema::Scope& a, const _::RawBrandedSchema::Scope& b) {
    return a.typeId < b.typeId;
  });

  return makeBranded(schema, copyDeduped(dstScopes));
}

const _::RawBrandedSchema* SchemaLoader::Impl::makeBranded(
    const _::RawSchema* schema, kj::ArrayPtr<const _::RawBrandedSchema::Scope> bindings) {
  if (bindings.size() == 0) {
    // `defaultBrand` is the version where all type parameters are bound to `AnyPointer`.
    return &schema->defaultBrand;
  }

  SchemaBindingsPair key { schema, bindings.begin() };
  KJ_IF_MAYBE(existing, brands.find(key)) {
    return *existing;
  } else {
    auto& brand = arena.allocate<_::RawBrandedSchema>();
    memset(&brand, 0, sizeof(brand));
    brands.insert(key, &brand);

    brand.generic = schema;
    brand.scopes = bindings.begin();
    brand.scopeCount = bindings.size();
    brand.lazyInitializer = &brandedInitializer;
    return &brand;
  }
}

kj::ArrayPtr<const _::RawBrandedSchema::Dependency>
SchemaLoader::Impl::makeBrandedDependencies(
    const _::RawSchema* schema,
    kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> bindings) {
  kj::StringPtr scopeName =
      readMessageUnchecked<schema::Node>(schema->encodedNode).getDisplayName();

  kj::Vector<_::RawBrandedSchema::Dependency> deps;

  schema::Node::Reader node = readMessageUnchecked<schema::Node>(schema->encodedNode);

#define ADD_ENTRY(kind, index, make) \
  if (const _::RawBrandedSchema* dep = make) { \
    auto& slot = deps.add(); \
    memset(&slot, 0, sizeof(slot)); \
    slot.location = _::RawBrandedSchema::makeDepLocation( \
        _::RawBrandedSchema::DepKind::kind, index); \
    slot.schema = dep; \
  }

  switch (node.which()) {
    case schema::Node::FILE:
    case schema::Node::ENUM:
    case schema::Node::ANNOTATION:
      break;

    case schema::Node::CONST:
      ADD_ENTRY(CONST_TYPE, 0, makeDepSchema(
          node.getConst().getType(), scopeName, bindings));
      break;

    case schema::Node::STRUCT: {
      auto fields = node.getStruct().getFields();
      for (auto i: kj::indices(fields)) {
        auto field = fields[i];
        switch (field.which()) {
          case schema::Field::SLOT:
            ADD_ENTRY(FIELD, i, makeDepSchema(
                field.getSlot().getType(), scopeName, bindings))

kj::ArrayPtr<const _::RawBrandedSchema::Dependency>
SchemaLoader::Impl::makeBrandedDependencies(
    const _::RawSchema* schema,
    kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> bindings) {
  kj::StringPtr scopeName =
      readMessageUnchecked<schema::Node>(schema->encodedNode).getDisplayName();

  kj::Vector<_::RawBrandedSchema::Dependency> deps;

  schema::Node::Reader node = readMessageUnchecked<schema::Node>(schema->encodedNode);

#define ADD_ENTRY(kind, index, make) \
  if (const _::RawBrandedSchema* dep = make) { \
    auto& slot = deps.add(); \
    memset(&slot, 0, sizeof(slot)); \
    slot.location = _::RawBrandedSchema::makeDepLocation( \
        _::RawBrandedSchema::DepKind::kind, index); \
    slot.schema = dep; \
  }

  switch (node.which()) {
    case schema::Node::FILE:
    case schema::Node::ENUM:
    case schema::Node::ANNOTATION:
      break;

    case schema::Node::CONST:
      ADD_ENTRY(CONST_TYPE, 0, makeDepSchema(
          node.getConst().getType(), scopeName, bindings));
      break;

    case schema::Node::STRUCT: {
      auto fields = node.getStruct().getFields();
      for (auto i: kj::indices(fields)) {
        auto field = fields[i];
        switch (field.which()) {
          case schema::Field::SLOT:
            ADD_ENTRY(FIELD, i, makeDepSchema(
                field.getSlot().getType(), scopeName, bindings))
            break;
          case schema::Field::GROUP: {
            const _::RawSchema* group = loadEmpty(
                field.getGroup().getTypeId(),
                "(unknown group type)", schema::Node::STRUCT, true);
            KJ_IF_MAYBE(b, bindings) {
              ADD_ENTRY(FIELD, i, makeBranded(group, *b));
            } else {
              ADD_ENTRY(FIELD, i, getUnbound(group));
            }
            break;
          }
        }
      }
      break;
    }

    case schema::Node::INTERFACE: {
      auto interface = node.getInterface();
      {
        auto superclasses = interface.getSuperclasses();
        for (auto i: kj::indices(superclasses)) {
          auto superclass = superclasses[i];
          ADD_ENTRY(SUPERCLASS, i, makeDepSchema(
              superclass.getId(), schema::Type::INTERFACE, schema::Node::INTERFACE,
              superclass.getBrand(), scopeName, bindings))
        }
      }
      {
        auto methods = interface.getMethods();
        for (auto i: kj::indices(methods)) {
          auto method = methods[i];
          ADD_ENTRY(METHOD_PARAMS, i, makeDepSchema(
              method.getParamStructType(), schema::Type::STRUCT, schema::Node::STRUCT,
              method.getParamBrand(), scopeName, bindings))
          ADD_ENTRY(METHOD_RESULTS, i, makeDepSchema(
              method.getResultStructType(), schema::Type::STRUCT, schema::Node::STRUCT,
              method.getResultBrand(), scopeName, bindings))
        }
      }
      break;
    }
  }

#undef ADD_ENTRY

  std::sort(deps.begin(), deps.end(),
      [](const _::RawBrandedSchema::Dependency& a, const _::RawBrandedSchema::Dependency& b) {
    return a.location < b.location;
  });

  return copyDeduped(deps.asPtr());
}

void SchemaLoader::Impl::makeDep(_::RawBrandedSchema::Binding& result,
    schema::Type::Reader type, kj::StringPtr scopeName,
    kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings) {
  switch (type.which()) {
    case schema::Type::VOID:
    case schema::Type::BOOL:
    case schema::Type::INT8:
    case schema::Type::INT16:
    case schema::Type::INT32:
    case schema::Type::INT64:
    case schema::Type::UINT8:
    case schema::Type::UINT16:
    case schema::Type::UINT32:
    case schema::Type::UINT64:
    case schema::Type::FLOAT32:
    case schema::Type::FLOAT64:
    case schema::Type::TEXT:
    case schema::Type::DATA:
      result.which = static_cast<uint8_t>(type.which());
      return;

    case schema::Type::STRUCT: {
      auto structType = type.getStruct();
      makeDep(result, structType.getTypeId(), schema::Type::STRUCT, schema::Node::STRUCT,
              structType.getBrand(), scopeName, brandBindings);
      return;
    }
    case schema::Type::ENUM: {
      auto enumType = type.getEnum();
      makeDep(result, enumType.getTypeId(), schema::Type::ENUM, schema::Node::ENUM,
              enumType.getBrand(), scopeName, brandBindings);
      return;
    }
    case schema::Type::INTERFACE: {
      auto interfaceType = type.getInterface();
      makeDep(result, interfaceType.getTypeId(), schema::Type::INTERFACE, schema::Node::INTERFACE,
              interfaceType.getBrand(), scopeName, brandBindings);
      return;
    }

    case schema::Type::LIST: {
      makeDep(result, type.getList().getElementType(), scopeName, brandBindings);
      ++result.listDepth;
      return;
    }

    case schema::Type::ANY_POINTER: {
      result.which = static_cast<uint8_t>(schema::Type::ANY_POINTER);
      auto anyPointer = type.getAnyPointer();
      switch (anyPointer.which()) {
        case schema::Type::AnyPointer::UNCONSTRAINED:
          return;
        case schema::Type::AnyPointer::PARAMETER: {
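          // This type expression names a brand parameter of an enclosing generic scope,
          // identified by (scopeId, parameterIndex). Look it up in the caller-supplied
          // bindings: a match substitutes the bound type, an out-of-range or missing
          // binding degrades to AnyPointer, and absent bindings leave the parameter
          // recorded as unbound.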
          auto param = anyPointer.getParameter();
          uint64_t id = param.getScopeId();
          uint16_t index = param.getParameterIndex();

          KJ_IF_MAYBE(b, brandBindings) {
            // TODO(perf): We could binary search here, but... bleh.
            for (auto& scope: *b) {
              if (scope.typeId == id) {
                if (scope.isUnbound) {
                  // Unbound brand parameter.
                  result.scopeId = id;
                  result.paramIndex = index;
                  return;
                } else if (index >= scope.bindingCount) {
                  // Binding index out-of-range. Treat as AnyPointer. This is important to allow
                  // new type parameters to be added to existing types without breaking dependent
                  // schemas.
                  return;
                } else {
                  result = scope.bindings[index];
                  return;
                }
              }
            }
            return;
          } else {
            // Unbound brand parameter.
            result.scopeId = id;
            result.paramIndex = index;
            return;
          }
        }
        case schema::Type::AnyPointer::IMPLICIT_METHOD_PARAMETER:
          result.isImplicitParameter = true;
          result.paramIndex = anyPointer.getImplicitMethodParameter().getParameterIndex();
          return;
      }
      KJ_UNREACHABLE;
    }
  }

  KJ_UNREACHABLE;
}

void SchemaLoader::Impl::makeDep(_::RawBrandedSchema::Binding& result,
    uint64_t typeId, schema::Type::Which whichType, schema::Node::Which expectedKind,
    schema::Brand::Reader brand, kj::StringPtr scopeName,
    kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings) {
  const _::RawSchema* schema;
  if (typeId == capnp::typeId<StreamResult>()) {
    // StreamResult is a very special type that is used to mark when a method is declared as
    // streaming ("foo @0 () -> stream;"). We like to auto-load it if we see it as someone's
    // dependency.
    schema = loadNative(&_::rawSchema<StreamResult>());
  } else {
    schema = loadEmpty(typeId,
        kj::str("(unknown type; seen as dependency of ", scopeName, ")"),
        expectedKind, true);
  }
  result.which = static_cast<uint8_t>(whichType);
  result.schema = makeBranded(schema, brand, brandBindings);
}

const _::RawBrandedSchema* SchemaLoader::Impl::makeDepSchema(
    schema::Type::Reader type, kj::StringPtr scopeName,
    kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings) {
  _::RawBrandedSchema::Binding binding;
  memset(&binding, 0, sizeof(binding));
  makeDep(binding, type, scopeName, brandBindings);
  return binding.schema;
}

const _::RawBrandedSchema* SchemaLoader::Impl::makeDepSchema(
    uint64_t typeId, schema::Type::Which whichType, schema::Node::Which expectedKind,
    schema::Brand::Reader brand, kj::StringPtr scopeName,
    kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings) {
  _::RawBrandedSchema::Binding binding;
  memset(&binding, 0, sizeof(binding));
  makeDep(binding, typeId, whichType, expectedKind, brand, scopeName, brandBindings);
  return binding.schema;
}
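
// copyDeduped() interns POD arrays in the loader's arena, keyed on their raw bytes.
// This is why the Binding/Scope/Dependency structs built above are memset to zero before
// being populated: padding bytes must be deterministic for the byte-wise comparison to
// recognize logically identical tables as duplicates, letting them share one allocation.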
template <typename T>
kj::ArrayPtr<const T> SchemaLoader::Impl::copyDeduped(kj::ArrayPtr<const T> values) {
  if (values.size() == 0) {
    return kj::arrayPtr(kj::implicitCast<const T*>(nullptr), 0);
  }

  auto bytes = values.asBytes();

  KJ_IF_MAYBE(dupe, dedupTable.find(bytes)) {
    return kj::arrayPtr(reinterpret_cast<const T*>(dupe->begin()), values.size());
  }

  // Need to make a new copy.
  auto copy = arena.allocateArray<T>(values.size());
  memcpy(copy.begin(), values.begin(), values.size() * sizeof(T));

  dedupTable.insert(copy.asBytes());

  return copy;
}

template <typename T>
kj::ArrayPtr<const T> SchemaLoader::Impl::copyDeduped(kj::ArrayPtr<T> values) {
  return copyDeduped(kj::ArrayPtr<const T>(values));
}

SchemaLoader::Impl::TryGetResult SchemaLoader::Impl::tryGet(uint64_t typeId) const {
  KJ_IF_MAYBE(schema, schemas.find(typeId)) {
    return {*schema, initializer.getCallback()};
  } else {
    return {nullptr, initializer.getCallback()};
  }
}

const _::RawBrandedSchema* SchemaLoader::Impl::getUnbound(const _::RawSchema* schema) {
  if (!readMessageUnchecked<schema::Node>(schema->encodedNode).getIsGeneric()) {
    // Not a generic type, so just return the default brand.
    return &schema->defaultBrand;
  }

  KJ_IF_MAYBE(existing, unboundBrands.find(schema)) {
    return *existing;
  } else {
    auto slot = &arena.allocate<_::RawBrandedSchema>();
    memset(slot, 0, sizeof(*slot));
    slot->generic = schema;
    auto deps = makeBrandedDependencies(schema, nullptr);
    slot->dependencies = deps.begin();
    slot->dependencyCount = deps.size();
    unboundBrands.insert(schema, slot);
    return slot;
  }
}

kj::Array<Schema> SchemaLoader::Impl::getAllLoaded() const {
  size_t count = 0;
  for (auto& schema: schemas) {
    if (schema.value->lazyInitializer == nullptr) ++count;
  }

  kj::Array<Schema> result = kj::heapArray<Schema>(count);
  size_t i = 0;
  for (auto& schema: schemas) {
    if (schema.value->lazyInitializer == nullptr) {
      result[i++] = Schema(&schema.value->defaultBrand);
    }
  }
  return result;
}

void SchemaLoader::Impl::requireStructSize(uint64_t id, uint dataWordCount, uint pointerCount) {
  structSizeRequirements.upsert(id, { uint16_t(dataWordCount), uint16_t(pointerCount) },
      [&](RequiredSize& existingValue, RequiredSize&& newValue) {
    existingValue.dataWordCount = kj::max(existingValue.dataWordCount, newValue.dataWordCount);
    existingValue.pointerCount = kj::max(existingValue.pointerCount, newValue.pointerCount);
  });

  KJ_IF_MAYBE(schema, schemas.find(id)) {
    applyStructSizeRequirement(*schema, dataWordCount, pointerCount);
  }
}

kj::ArrayPtr<word> SchemaLoader::Impl::makeUncheckedNode(schema::Node::Reader node) {
  size_t size = node.totalSize().wordCount + 1;
  kj::ArrayPtr<word> result = arena.allocateArray<word>(size);
  memset(result.begin(), 0, size * sizeof(word));
  copyToUnchecked(node, result);
  return result;
}

kj::ArrayPtr<word> SchemaLoader::Impl::makeUncheckedNodeEnforcingSizeRequirements(
    schema::Node::Reader node) {
  if (node.isStruct()) {
    KJ_IF_MAYBE(requirement, structSizeRequirements.find(node.getId())) {
      auto structNode = node.getStruct();
      if (structNode.getDataWordCount() < requirement->dataWordCount ||
          structNode.getPointerCount() < requirement->pointerCount) {
        return rewriteStructNodeWithSizes(node, requirement->dataWordCount,
                                          requirement->pointerCount);
      }
    }
  }

  return makeUncheckedNode(node);
}
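
// Illustrative scenario (hypothetical schema evolution): code may hold a requirement that
// struct Foo span, say, two data words, while a loaded Foo node declares only one.
// requireStructSize() records the larger layout, and rewriteStructNodeWithSizes() below
// copies the node and pads its declared sizes up to the requirement, so that anything
// compiled against the larger layout can safely use the loaded schema.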
kj::ArrayPtr<word> SchemaLoader::Impl::rewriteStructNodeWithSizes(
    schema::Node::Reader node, uint dataWordCount, uint pointerCount) {
  MallocMessageBuilder builder;
  builder.setRoot(node);

  auto root = builder.getRoot<schema::Node>();
  auto newStruct = root.getStruct();
  newStruct.setDataWordCount(kj::max(newStruct.getDataWordCount(), dataWordCount));
  newStruct.setPointerCount(kj::max(newStruct.getPointerCount(), pointerCount));

  return makeUncheckedNode(root);
}

void SchemaLoader::Impl::applyStructSizeRequirement(
    _::RawSchema* raw, uint dataWordCount, uint pointerCount) {
  auto node = readMessageUnchecked<schema::Node>(raw->encodedNode);

  auto structNode = node.getStruct();
  if (structNode.getDataWordCount() < dataWordCount ||
      structNode.getPointerCount() < pointerCount) {
    // Sizes need to be increased. Must rewrite.
    kj::ArrayPtr<word> words = rewriteStructNodeWithSizes(node, dataWordCount, pointerCount);

    // We don't need to re-validate the node because we know this change could not possibly have
    // invalidated it. Just remake the unchecked message.
    raw->encodedNode = words.begin();
    raw->encodedSize = words.size();
  }
}

void SchemaLoader::InitializerImpl::init(const _::RawSchema* schema) const {
  KJ_IF_MAYBE(c, callback) {
    c->load(loader, schema->id);
  }

  if (schema->lazyInitializer != nullptr) {
    // The callback declined to load a schema. We need to disable the initializer so that it
    // doesn't get invoked again later, as we can no longer modify this schema once it is in use.

    // Lock the loader for read to make sure no one is concurrently loading a replacement for
    // this schema node.
    auto lock = loader.impl.lockShared();

    // Get the mutable version of the schema.
    _::RawSchema* mutableSchema = lock->get()->tryGet(schema->id).schema;
    KJ_ASSERT(mutableSchema == schema,
              "A schema not belonging to this loader used its initializer.");

    // Disable the initializer.
#if __GNUC__ || defined(__clang__)
    __atomic_store_n(&mutableSchema->lazyInitializer, nullptr, __ATOMIC_RELEASE);
    __atomic_store_n(&mutableSchema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE);
#elif _MSC_VER
    std::atomic_thread_fence(std::memory_order_release);
    *static_cast<_::RawSchema::Initializer const* volatile*>(
        &mutableSchema->lazyInitializer) = nullptr;
    *static_cast<_::RawBrandedSchema::Initializer const* volatile*>(
        &mutableSchema->defaultBrand.lazyInitializer) = nullptr;
#else
#error "Platform not supported"
#endif
  }
}

void SchemaLoader::BrandedInitializerImpl::init(const _::RawBrandedSchema* schema) const {
  schema->generic->ensureInitialized();

  auto lock = loader.impl.lockExclusive();

  if (schema->lazyInitializer == nullptr) {
    // Never mind, someone beat us to it.
    return;
  }

  // Get the mutable version.
  _::RawBrandedSchema* mutableSchema = KJ_ASSERT_NONNULL(
      lock->get()->brands.find(SchemaBindingsPair { schema->generic, schema->scopes }));
  KJ_ASSERT(mutableSchema == schema);

  // Construct its dependency map.
  auto deps = lock->get()->makeBrandedDependencies(mutableSchema->generic,
      kj::arrayPtr(mutableSchema->scopes, mutableSchema->scopeCount));
  mutableSchema->dependencies = deps.begin();
  mutableSchema->dependencyCount = deps.size();

  // It's initialized now, so disable the initializer.
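  // The release store pairs with the acquire load of lazyInitializer performed in
  // ensureInitialized(): any thread that subsequently observes a null initializer is
  // guaranteed to also see the fully-constructed dependency table written above.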
#if __GNUC__ || defined(__clang__)
  __atomic_store_n(&mutableSchema->lazyInitializer, nullptr, __ATOMIC_RELEASE);
#elif _MSC_VER
  std::atomic_thread_fence(std::memory_order_release);
  *static_cast<_::RawBrandedSchema::Initializer const* volatile*>(
      &mutableSchema->lazyInitializer) = nullptr;
#else
#error "Platform not supported"
#endif
}

// =======================================================================================

SchemaLoader::SchemaLoader(): impl(kj::heap<Impl>(*this)) {}
SchemaLoader::SchemaLoader(const LazyLoadCallback& callback)
    : impl(kj::heap<Impl>(*this, callback)) {}
SchemaLoader::~SchemaLoader() noexcept(false) {}

Schema SchemaLoader::get(uint64_t id, schema::Brand::Reader brand, Schema scope) const {
  KJ_IF_MAYBE(result, tryGet(id, brand, scope)) {
    return *result;
  } else {
    KJ_FAIL_REQUIRE("no schema node loaded for id", kj::hex(id));
  }
}
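
// Minimal usage sketch (illustrative only; `someTypeId` and `nodeReader` are hypothetical
// placeholders):
//
//   SchemaLoader loader;
//   loader.load(nodeReader);              // insert a parsed schema::Node
//   Schema s = loader.get(someTypeId);    // throws if the id was never loaded
//   KJ_IF_MAYBE(m, loader.tryGet(someTypeId)) { /* non-throwing variant */ }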

kj::Maybe<Schema> SchemaLoader::tryGet(
    uint64_t id, schema::Brand::Reader brand, Schema scope) const {
  auto getResult = impl.lockShared()->get()->tryGet(id);
  if (getResult.schema == nullptr || getResult.schema->lazyInitializer != nullptr) {
    // This schema couldn't be found or has yet to be lazily loaded. If we have a lazy loader
    // callback, invoke it now to try to get it to load this schema.
    KJ_IF_MAYBE(c, getResult.callback) {
      c->load(*this, id);
    }
    getResult = impl.lockShared()->get()->tryGet(id);
  }
  if (getResult.schema != nullptr && getResult.schema->lazyInitializer == nullptr) {
    if (brand.getScopes().size() > 0) {
      auto brandedSchema = impl.lockExclusive()->get()->makeBranded(
          getResult.schema, brand,
          scope.raw->isUnbound()
              ? kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>>(nullptr)
              : kj::arrayPtr(scope.raw->scopes, scope.raw->scopeCount));
      brandedSchema->ensureInitialized();
      return Schema(brandedSchema);
    } else {
      return Schema(&getResult.schema->defaultBrand);
    }
  } else {
    return nullptr;
  }
}

Schema SchemaLoader::getUnbound(uint64_t id) const {
  auto schema = get(id);
  return Schema(impl.lockExclusive()->get()->getUnbound(schema.raw->generic));
}

Type SchemaLoader::getType(schema::Type::Reader proto, Schema scope) const {
  switch (proto.which()) {
    case schema::Type::VOID:
    case schema::Type::BOOL:
    case schema::Type::INT8:
    case schema::Type::INT16:
    case schema::Type::INT32:
    case schema::Type::INT64:
    case schema::Type::UINT8:
    case schema::Type::UINT16:
    case schema::Type::UINT32:
    case schema::Type::UINT64:
    case schema::Type::FLOAT32:
    case schema::Type::FLOAT64:
    case schema::Type::TEXT:
    case schema::Type::DATA:
      return proto.which();

    case schema::Type::STRUCT: {
      auto structType = proto.getStruct();
      return get(structType.getTypeId(), structType.getBrand(), scope).asStruct();
    }

    case schema::Type::ENUM: {
      auto enumType = proto.getEnum();
      return get(enumType.getTypeId(), enumType.getBrand(), scope).asEnum();
    }

    case schema::Type::INTERFACE: {
      auto interfaceType = proto.getInterface();
      return get(interfaceType.getTypeId(), interfaceType.getBrand(), scope)
          .asInterface();
    }

    case schema::Type::LIST:
      return ListSchema::of(getType(proto.getList().getElementType(), scope));

    case schema::Type::ANY_POINTER: {
      auto anyPointer = proto.getAnyPointer();
      switch (anyPointer.which()) {
        case schema::Type::AnyPointer::UNCONSTRAINED:
          return schema::Type::ANY_POINTER;
        case schema::Type::AnyPointer::PARAMETER: {
          auto param = anyPointer.getParameter();
          return scope.getBrandBinding(param.getScopeId(), param.getParameterIndex());
        }
        case schema::Type::AnyPointer::IMPLICIT_METHOD_PARAMETER:
          // We don't support binding implicit method params here.
          return schema::Type::ANY_POINTER;
      }

      KJ_UNREACHABLE;
    }
  }

  KJ_UNREACHABLE;
}

Schema SchemaLoader::load(const schema::Node::Reader& reader) {
  return Schema(&impl.lockExclusive()->get()->load(reader, false)->defaultBrand);
}

Schema SchemaLoader::loadOnce(const schema::Node::Reader& reader) const {
  auto locked = impl.lockExclusive();
  auto getResult = locked->get()->tryGet(reader.getId());
  if (getResult.schema == nullptr || getResult.schema->lazyInitializer != nullptr) {
    // Doesn't exist yet, or the existing schema is a placeholder and therefore has not yet been
    // seen publicly. Go ahead and load the incoming reader.
    return Schema(&locked->get()->load(reader, false)->defaultBrand);
  } else {
    return Schema(&getResult.schema->defaultBrand);
  }
}

kj::Array<Schema> SchemaLoader::getAllLoaded() const {
  return impl.lockShared()->get()->getAllLoaded();
}

void SchemaLoader::loadNative(const _::RawSchema* nativeSchema) {
  impl.lockExclusive()->get()->loadNative(nativeSchema);
}

} // namespace capnp