diff --git a/.agent/rules/solidity_zksync.md b/.agent/rules/solidity_zksync.md
new file mode 100644
index 0000000..642f108
--- /dev/null
+++ b/.agent/rules/solidity_zksync.md
@@ -0,0 +1,33 @@
+# Solidity & ZkSync Development Standards
+
+## Toolchain & Environment
+- **Primary Tool**: `forge` (ZkSync fork). Use for compilation, testing, and generic scripting.
+- **Secondary Tool**: `hardhat`. Use only when `forge` encounters compatibility issues (e.g., complex deployments, specific plugin needs).
+- **Network Target**: ZkSync Era (Layer 2).
+- **Solidity Version**: `^0.8.20` (or `0.8.24` if strictly supported by the zk-compiler).
+
+## Modern Solidity Best Practices
+- **Safety First**:
+ - **Checks-Effects-Interactions (CEI)** pattern must be strictly followed.
+ - When a contract requires an owner (e.g., admin-configurable parameters), prefer `Ownable2Step` over `Ownable`. Do **not** add ownership to contracts that don't need it — many contracts are fully permissionless by design.
+ - Prefer `ReentrancyGuard` for external calls where appropriate.
+- **Gas & Efficiency**:
+ - Use **Custom Errors** (`error MyError();`) instead of `require` strings.
+ - Use `mapping` over arrays for membership checks where possible.
+ - Minimize on-chain storage; use events for off-chain indexing.
+
+## Testing Standards
+- **Framework**: Foundry (Forge).
+- **Methodology**:
+ - **Unit Tests**: Comprehensive coverage for all functions.
+ - **Fuzz Testing**: Required for arithmetic and purely functional logic.
+ - **Invariant Testing**: Define invariants for stateful system properties.
+- **Naming Convention**:
+ - `test_Description`
+ - `testFuzz_Description`
+ - `test_RevertIf_Condition`
+
+## ZkSync Specifics
+- **System Contracts**: Be aware of ZkSync system contracts (e.g., `ContractDeployer`, `L2EthToken`) when interacting with low-level features.
+- **Gas Model**: Account for ZkSync's different gas metering if performing low-level optimization.
+- **Compiler Differences**: Be mindful of differences between `solc` and `zksolc` (e.g., `create2` address derivation).
diff --git a/.cspell.json b/.cspell.json
index c990957..541973b 100644
--- a/.cspell.json
+++ b/.cspell.json
@@ -60,6 +60,21 @@
"Frontends",
"testuser",
"testhandle",
- "douglasacost"
+ "douglasacost",
+ "IBEACON",
+ "AABBCCDD",
+ "SSTORE",
+ "Permissionless",
+ "Reentrancy",
+ "SFID",
+ "EXTCODECOPY",
+ "solady",
+ "SLOAD",
+ "Bitmask",
+ "mstore",
+ "MBOND",
+ "USCA",
+ "USNY",
+ "usca"
]
}
diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
new file mode 100644
index 0000000..7cdccfc
--- /dev/null
+++ b/.github/copilot-instructions.md
@@ -0,0 +1,33 @@
+# Solidity & ZkSync Development Standards
+
+## Toolchain & Environment
+- **Primary Tool**: `forge` (ZkSync fork). Use for compilation, testing, and generic scripting.
+- **Secondary Tool**: `hardhat`. Use only when `forge` encounters compatibility issues (e.g., complex deployments, specific plugin needs).
+- **Network Target**: ZkSync Era (Layer 2).
+- **Solidity Version**: `^0.8.20` (or `0.8.24` if strictly supported by the zk-compiler).
+
+## Modern Solidity Best Practices
+- **Safety First**:
+ - **Checks-Effects-Interactions (CEI)** pattern must be strictly followed.
+ - When a contract requires an owner (e.g., admin-configurable parameters), prefer `Ownable2Step` over `Ownable`. Do **not** add ownership to contracts that don't need it — many contracts are fully permissionless by design.
+ - Prefer `ReentrancyGuard` for external calls where appropriate.
+- **Gas & Efficiency**:
+ - Use **Custom Errors** (`error MyError();`) instead of `require` strings.
+ - Use `mapping` over arrays for membership checks where possible.
+ - Minimize on-chain storage; use events for off-chain indexing.
+
+## Testing Standards
+- **Framework**: Foundry (Forge).
+- **Methodology**:
+ - **Unit Tests**: Comprehensive coverage for all functions.
+ - **Fuzz Testing**: Required for arithmetic and purely functional logic.
+ - **Invariant Testing**: Define invariants for stateful system properties.
+- **Naming Convention**:
+ - `test_Description`
+ - `testFuzz_Description`
+ - `test_RevertIf_Condition`
+
+## ZkSync Specifics
+- **System Contracts**: Be aware of ZkSync system contracts (e.g., `ContractDeployer`, `L2EthToken`) when interacting with low-level features.
+- **Gas Model**: Account for ZkSync's different gas metering if performing low-level optimization.
+- **Compiler Differences**: Be mindful of differences between `solc` and `zksolc` (e.g., `create2` address derivation).
diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
index 64ac137..1ceaaf3 100644
--- a/.github/workflows/checks.yml
+++ b/.github/workflows/checks.yml
@@ -34,4 +34,4 @@ jobs:
run: yarn lint
- name: Run tests
- run: forge test --zksync
+ run: forge test
diff --git a/.gitmodules b/.gitmodules
index 9540dda..c6c1a45 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -10,3 +10,6 @@
[submodule "lib/era-contracts"]
path = lib/era-contracts
url = https://github.com/matter-labs/era-contracts
+[submodule "lib/solady"]
+ path = lib/solady
+ url = https://github.com/vectorized/solady
diff --git a/.vscode/settings.json b/.vscode/settings.json
index 4d04fd2..8ab6c21 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -13,5 +13,8 @@
"editor.formatOnSave": true,
"[solidity]": {
"editor.defaultFormatter": "JuanBlanco.solidity"
+ },
+ "chat.tools.terminal.autoApprove": {
+ "forge": true
}
}
diff --git a/foundry.lock b/foundry.lock
new file mode 100644
index 0000000..7a3effd
--- /dev/null
+++ b/foundry.lock
@@ -0,0 +1,20 @@
+{
+ "lib/zksync-storage-proofs": {
+ "rev": "4b20401ce44c1ec966a29d893694f65db885304b"
+ },
+ "lib/openzeppelin-contracts": {
+ "rev": "e4f70216d759d8e6a64144a9e1f7bbeed78e7079"
+ },
+ "lib/solady": {
+ "tag": {
+ "name": "v0.1.26",
+ "rev": "acd959aa4bd04720d640bf4e6a5c71037510cc4b"
+ }
+ },
+ "lib/forge-std": {
+ "rev": "1eea5bae12ae557d589f9f0f0edae2faa47cb262"
+ },
+ "lib/era-contracts": {
+ "rev": "84d5e3716f645909e8144c7d50af9dd6dd9ded62"
+ }
+}
diff --git a/lib/solady b/lib/solady
new file mode 160000
index 0000000..acd959a
--- /dev/null
+++ b/lib/solady
@@ -0,0 +1 @@
+Subproject commit acd959aa4bd04720d640bf4e6a5c71037510cc4b
diff --git a/logs/deploy_l1_bridge.log b/logs/deploy_l1_bridge.log
new file mode 100644
index 0000000..b3ac3d4
--- /dev/null
+++ b/logs/deploy_l1_bridge.log
@@ -0,0 +1,32 @@
+Compiling 1 files with Solc 0.8.26
+Solc 0.8.26 finished in 2.60s
+Compiler run successful!
+Script ran successfully.
+
+== Logs ==
+ Deployed L1Bridge at 0x2D02b651Ea9630351719c8c55210e042e940d69a
+ Granted MINTER_ROLE on NodlL1(0x6dd0E17ec6fE56c5f58a0Fe2Bb813B9b5cc25990) to bridge
+
+## Setting up 1 EVM.
+
+==========================
+
+Chain 1
+
+Estimated gas price: 0.222068762 gwei
+
+Estimated total gas used for script: 2685066
+
+Estimated amount required: 0.000596269282508292 ETH
+
+==========================
+
+
+==========================
+
+ONCHAIN EXECUTION COMPLETE & SUCCESSFUL.
+
+Transactions saved to: /Users/alex/Documents/rollup/broadcast/DeployL1Bridge.s.sol/1/run-latest.json
+
+Sensitive values saved to: /Users/alex/Documents/rollup/cache/DeployL1Bridge.s.sol/1/run-latest.json
+
diff --git a/logs/deploy_l1_nodl.log b/logs/deploy_l1_nodl.log
new file mode 100644
index 0000000..ae13ee1
--- /dev/null
+++ b/logs/deploy_l1_nodl.log
@@ -0,0 +1,29 @@
+No files changed, compilation skipped
+Script ran successfully.
+
+== Logs ==
+ Deployed L1Nodl at 0x6dd0E17ec6fE56c5f58a0Fe2Bb813B9b5cc25990
+
+## Setting up 1 EVM.
+
+==========================
+
+Chain 1
+
+Estimated gas price: 0.251645298 gwei
+
+Estimated total gas used for script: 4998146
+
+Estimated amount required: 0.001257759939617508 ETH
+
+==========================
+
+
+==========================
+
+ONCHAIN EXECUTION COMPLETE & SUCCESSFUL.
+
+Transactions saved to: /Users/alex/Documents/rollup/broadcast/DeployL1Nodl.s.sol/1/run-latest.json
+
+Sensitive values saved to: /Users/alex/Documents/rollup/cache/DeployL1Nodl.s.sol/1/run-latest.json
+
diff --git a/logs/deploy_l2_bridge.log b/logs/deploy_l2_bridge.log
new file mode 100644
index 0000000..5047416
--- /dev/null
+++ b/logs/deploy_l2_bridge.log
@@ -0,0 +1,98 @@
+Compiling 1 files with Solc 0.8.26
+Solc 0.8.26 finished in 1.70s
+Compiler run successful!
+
+Compiling 1 files with zksolc and solc 0.8.26
+zksolc and solc 0.8.26 finished in 4.05s
+Compiler run successful with warnings:
+Warning
+ZKsync Era comes with native account abstraction support, and therefore the initiator of a
+transaction might be different from the contract calling your code. It is highly recommended NOT
+to rely on tx.origin, but use msg.sender instead.
+Learn more about Account Abstraction at https://docs.zksync.io/build/developer-reference/account-abstraction/
+You may disable this warning with:
+ a. `suppressedWarnings = ["txorigin"]` in standard JSON.
+ b. `--suppress-warnings txorigin` in the CLI.
+ --> lib/era-contracts/l1-contracts/contracts/vendor/AddressAliasHelper.sol:56:42
|
+ 56 | _recipient = _prevMsgSender == tx.origin
+ | ^^^^^^^^^
+
+Warning
+EraVM does not use bytecode for contract deployment. Instead, it refers to contracts using their bytecode hashes.
+In order to deploy a contract, please use the `new` operator in Solidity instead of raw 'create'/'create2' in assembly.
+In Solidity v0.6 and older, it can be a false-positive warning if there is 'create(' or 'create2(' in comments within assembly.
+Learn more about CREATE/CREATE2 EraVM limitations at https://docs.zksync.io/zksync-protocol/differences/evm-instructions#create-create2
+You may disable this warning with:
+ 1. `suppressedWarnings = ["assemblycreate"]` in standard JSON.
+ 2. `--suppress-warnings assemblycreate` in the CLI.
+ --> lib/forge-std/src/StdCheats.sol:494:19
|
+ 494 | addr := create(0, add(bytecode, 0x20), mload(bytecode))
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Warning
+EraVM does not use bytecode for contract deployment. Instead, it refers to contracts using their bytecode hashes.
+In order to deploy a contract, please use the `new` operator in Solidity instead of raw 'create'/'create2' in assembly.
+In Solidity v0.6 and older, it can be a false-positive warning if there is 'create(' or 'create2(' in comments within assembly.
+Learn more about CREATE/CREATE2 EraVM limitations at https://docs.zksync.io/zksync-protocol/differences/evm-instructions#create-create2
+You may disable this warning with:
+ 1. `suppressedWarnings = ["assemblycreate"]` in standard JSON.
+ 2. `--suppress-warnings assemblycreate` in the CLI.
+ --> lib/forge-std/src/StdCheats.sol:504:19
|
+ 504 | addr := create(0, add(bytecode, 0x20), mload(bytecode))
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Warning
+EraVM does not use bytecode for contract deployment. Instead, it refers to contracts using their bytecode hashes.
+In order to deploy a contract, please use the `new` operator in Solidity instead of raw 'create'/'create2' in assembly.
+In Solidity v0.6 and older, it can be a false-positive warning if there is 'create(' or 'create2(' in comments within assembly.
+Learn more about CREATE/CREATE2 EraVM limitations at https://docs.zksync.io/zksync-protocol/differences/evm-instructions#create-create2
+You may disable this warning with:
+ 1. `suppressedWarnings = ["assemblycreate"]` in standard JSON.
+ 2. `--suppress-warnings assemblycreate` in the CLI.
+ --> lib/forge-std/src/StdCheats.sol:515:19
|
+ 515 | addr := create(val, add(bytecode, 0x20), mload(bytecode))
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Warning
+EraVM does not use bytecode for contract deployment. Instead, it refers to contracts using their bytecode hashes.
+In order to deploy a contract, please use the `new` operator in Solidity instead of raw 'create'/'create2' in assembly.
+In Solidity v0.6 and older, it can be a false-positive warning if there is 'create(' or 'create2(' in comments within assembly.
+Learn more about CREATE/CREATE2 EraVM limitations at https://docs.zksync.io/zksync-protocol/differences/evm-instructions#create-create2
+You may disable this warning with:
+ 1. `suppressedWarnings = ["assemblycreate"]` in standard JSON.
+ 2. `--suppress-warnings assemblycreate` in the CLI.
+ --> lib/forge-std/src/StdCheats.sol:525:19
|
+ 525 | addr := create(val, add(bytecode, 0x20), mload(bytecode))
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+[2m2025-10-15T00:35:52.095529Z[0m [31mERROR[0m [2mbackendhandler[0m[2m:[0m failed to get block [3merr[0m[2m=[0mfailed to get block; error sending request for url (https://mainnet.era.zksync.io/); operation timed out [3mnumber[0m[2m=[0m65260273
+[2m2025-10-15T00:35:52.096034Z[0m [31mERROR[0m [2msharedbackend[0m[2m:[0m Failed to send/recv `block_hash` [3merr[0m[2m=[0mfailed to get block hash for 65260273: failed to get block; error sending request for url (https://mainnet.era.zksync.io/); operation timed out [3mnumber[0m[2m=[0m65260273
+Script ran successfully.
+
+== Logs ==
+ Deployed L2Bridge at 0x2c1B65dA72d5Cf19b41dE6eDcCFB7DD83d1B529E
+ Granted MINTER_ROLE on NODL(0xBD4372e44c5eE654dd838304006E1f0f69983154) to bridge
+
+## Setting up 1 EVM.
+
+==========================
+
+Chain 324
+
+Estimated gas price: 0.090500001 gwei
+
+Estimated total gas used for script: 209410861
+
+Estimated amount required: 0.018951683129910861 ETH
+
+==========================
+
+
+==========================
+
+ONCHAIN EXECUTION COMPLETE & SUCCESSFUL.
+
+Transactions saved to: /Users/alex/Documents/rollup/broadcast/DeployL2Bridge.s.sol/324/run-latest.json
+
+Sensitive values saved to: /Users/alex/Documents/rollup/cache/DeployL2Bridge.s.sol/324/run-latest.json
+
diff --git a/remappings.txt b/remappings.txt
index 1e95077..53468b3 100644
--- a/remappings.txt
+++ b/remappings.txt
@@ -1 +1,2 @@
-@openzeppelin=lib/openzeppelin-contracts/
\ No newline at end of file
+@openzeppelin=lib/openzeppelin-contracts/
+solady/=lib/solady/src/
diff --git a/src/swarms/FleetIdentity.sol b/src/swarms/FleetIdentity.sol
new file mode 100644
index 0000000..24561e0
--- /dev/null
+++ b/src/swarms/FleetIdentity.sol
@@ -0,0 +1,1048 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+
+pragma solidity ^0.8.24;
+
+import {ERC721} from "@openzeppelin/contracts/token/ERC721/ERC721.sol";
+import {ERC721Enumerable} from "@openzeppelin/contracts/token/ERC721/extensions/ERC721Enumerable.sol";
+import {IERC20} from "@openzeppelin/contracts/token/ERC20/IERC20.sol";
+import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";
+import {ReentrancyGuard} from "@openzeppelin/contracts/utils/ReentrancyGuard.sol";
+
+/**
+ * @title FleetIdentity
+ * @notice ERC-721 with ERC721Enumerable representing ownership of a BLE fleet,
+ * secured by an ERC-20 bond organized into geometric tiers.
+ *
+ * @dev **Two-level geographic registration**
+ *
+ * Fleets register at exactly one level:
+ * - Country — regionKey = countryCode (ISO 3166-1 numeric, 1-999)
+ * - Admin Area — regionKey = (countryCode << 10) | adminCode (>= 1024)
+ *
+ * Each regionKey has its **own independent tier namespace** — tier indices
+ * start at 0 for every region. The first fleet in any region always pays
+ * the level-appropriate bond (LOCAL: BASE_BOND, COUNTRY: BASE_BOND * 8).
+ *
+ * **Economic Model**
+ *
+ * - Tier capacity: 4 members per tier (unified across levels)
+ * - Local bond: BASE_BOND * 2^tier
+ * - Country bond: BASE_BOND * COUNTRY_BOND_MULTIPLIER * 2^tier (8× local)
+ *
+ * Country fleets pay 8× more but appear in all admin-area bundles within
+ * their country. This economic difference provides locals a significant
+ * advantage: a local can reach tier 3 for the same cost a country player
+ * pays for tier 0. Bundle slots are filled by simple tier-descent priority:
+ * higher tier first, locals before country within each tier.
+ *
+ * EdgeBeaconScanner discovery uses 2-level fallback:
+ * 1. Admin area (highest priority)
+ * 2. Country (lower priority)
+ *
+ * On-chain indexes track which countries and admin areas have active fleets,
+ * enabling EdgeBeaconScanner enumeration without off-chain indexers.
+ *
+ * **TokenID Encoding**
+ *
+ * TokenID = (regionKey << 128) | uuid
+ * - Bits 0-127: UUID (bytes16 Proximity UUID)
+ * - Bits 128-159: Region key (32-bit country or admin-area code)
+ *
+ * This allows the same UUID to be registered in multiple regions,
+ * each with a distinct token. Region and UUID can be extracted:
+ * - uuid = bytes16(uint128(tokenId))
+ * - region = uint32(tokenId >> 128)
+ */
+contract FleetIdentity is ERC721Enumerable, ReentrancyGuard {
+ using SafeERC20 for IERC20;
+
+ // ──────────────────────────────────────────────
+ // Errors
+ // ──────────────────────────────────────────────
+ error InvalidUUID();
+ error NotTokenOwner();
+ error MaxTiersReached();
+ error TierFull();
+ error TargetTierNotHigher();
+ error TargetTierNotLower();
+ error TargetTierSameAsCurrent();
+ error InvalidCountryCode();
+ error InvalidAdminCode();
+ error AdminAreaRequired();
+ error UuidOwnerMismatch();
+ error UuidLevelMismatch();
+ error UuidAlreadyOwned();
+ error UuidNotOwned();
+ error NotUuidOwner();
+ error CannotUnregisterMultipleTokens();
+ error AlreadyRegistered();
+
+ // ──────────────────────────────────────────────
+ // Enums
+ // ──────────────────────────────────────────────
+
+ /// @notice Registration level for a UUID.
+ enum RegistrationLevel {
+ None, // 0 - not registered
+ Local, // 1 - admin area (local) level
+ Country, // 2 - country level
+ Owned // 3 - owned but not registered in any region
+ }
+
+ // ──────────────────────────────────────────────
+ // Constants & Immutables
+ // ──────────────────────────────────────────────
+
+ /// @notice Unified tier capacity for all levels.
+ uint256 public constant TIER_CAPACITY = 4;
+
+ /// @notice Bond multiplier for country-level registration (8× local).
+ uint256 public constant COUNTRY_BOND_MULTIPLIER = 8;
+
+
+
+ /// @notice Hard cap on tier count per region.
+ /// @dev Derived from anti-spam analysis: with a bond doubling per tier
+ /// and capacity 4, a spammer spending half the total token supply
+ /// against a BASE_BOND set 10 000× too low fills ~20 tiers.
+ /// 24 provides comfortable headroom.
+ uint256 public constant MAX_TIERS = 24;
+
+ /// @notice Maximum UUIDs returned by buildHighestBondedUuidBundle.
+ uint256 public constant MAX_BONDED_UUID_BUNDLE_SIZE = 20;
+
+ /// @notice ISO 3166-1 numeric upper bound for country codes.
+ uint16 internal constant MAX_COUNTRY_CODE = 999;
+
+ /// @notice Upper bound for admin-area codes within a country.
+ /// @dev Set to 255 to cover all real-world countries (UK has ~172, the highest).
+ /// Dense indices from ISO 3166-2 mappings range 0-254, stored as adminCode 1-255.
+ uint16 internal constant MAX_ADMIN_CODE = 255;
+
+ /// @dev Bundle level index: admin area (highest priority).
+ uint256 internal constant LEVEL_ADMIN = 0;
+ /// @dev Bundle level index: country.
+ uint256 internal constant LEVEL_COUNTRY = 1;
+
+ /// @dev Bit shift for packing countryCode into an admin-area region key.
+ uint256 private constant ADMIN_SHIFT = 10;
+ /// @dev Bitmask for extracting adminCode from an admin-area region key.
+ uint32 private constant ADMIN_CODE_MASK = 0x3FF;
+
+ /// @notice Region key for owned-only UUIDs (not registered in any region).
+ uint32 public constant OWNED_REGION_KEY = 0;
+
+ /// @notice The ERC-20 token used for bonds (immutable, e.g. NODL).
+ IERC20 public immutable BOND_TOKEN;
+
+ /// @notice Base bond for tier 0 in any region. Tier K requires BASE_BOND * 2^K.
+ uint256 public immutable BASE_BOND;
+
+ // ──────────────────────────────────────────────
+ // Region-namespaced tier data
+ // ──────────────────────────────────────────────
+
+ /// @notice regionKey -> number of tiers opened in that region.
+ mapping(uint32 => uint256) public regionTierCount;
+
+ /// @notice regionKey -> tierIndex -> list of token IDs.
+ mapping(uint32 => mapping(uint256 => uint256[])) internal _regionTierMembers;
+
+ /// @notice Token ID -> index within its tier's member array (for O(1) removal).
+ mapping(uint256 => uint256) internal _indexInTier;
+
+ // ──────────────────────────────────────────────
+ // Fleet data
+ // ──────────────────────────────────────────────
+
+ /// @notice Token ID -> tier index (within its region) the fleet belongs to.
+ mapping(uint256 => uint256) public fleetTier;
+
+ // ──────────────────────────────────────────────
+ // UUID ownership tracking
+ // ──────────────────────────────────────────────
+
+ /// @notice UUID -> address that first registered a token for this UUID.
+ /// All subsequent registrations for the same UUID must come from this address.
+ mapping(bytes16 => address) public uuidOwner;
+
+ /// @notice UUID -> count of active tokens for this UUID (across all regions).
+ /// When this reaches 0, uuidOwner is cleared.
+ mapping(bytes16 => uint256) public uuidTokenCount;
+
+ /// @notice UUID -> registration level.
+ /// All tokens for a UUID must be at the same level.
+ mapping(bytes16 => RegistrationLevel) public uuidLevel;
+
+ // ──────────────────────────────────────────────
+ // On-chain region indexes
+ // ──────────────────────────────────────────────
+
+ /// @dev Set of country codes with at least one active fleet.
+ uint16[] internal _activeCountries;
+ mapping(uint16 => uint256) internal _activeCountryIndex; // value = index+1 (0 = not present)
+
+ /// @dev Set of admin-area region keys with at least one active fleet.
+ uint32[] internal _activeAdminAreas;
+ mapping(uint32 => uint256) internal _activeAdminAreaIndex; // value = index+1 (0 = not present)
+
+ // ──────────────────────────────────────────────
+ // Events
+ // ──────────────────────────────────────────────
+
+ event FleetRegistered(
+ address indexed owner,
+ bytes16 indexed uuid,
+ uint256 indexed tokenId,
+ uint32 regionKey,
+ uint256 tierIndex,
+ uint256 bondAmount
+ );
+ event FleetPromoted(
+ uint256 indexed tokenId, uint256 indexed fromTier, uint256 indexed toTier, uint256 additionalBond
+ );
+ event FleetDemoted(uint256 indexed tokenId, uint256 indexed fromTier, uint256 indexed toTier, uint256 bondRefund);
+ event FleetBurned(
+ address indexed owner, uint256 indexed tokenId, uint32 indexed regionKey, uint256 tierIndex, uint256 bondRefund
+ );
+ event UuidClaimed(address indexed owner, bytes16 indexed uuid, uint256 tokenId, uint256 bond);
+ event UuidUnregistered(
+ address indexed owner, bytes16 indexed uuid, uint256 oldTokenId, uint256 newTokenId, uint256 refund
+ );
+ event UuidReleased(address indexed owner, bytes16 indexed uuid, uint256 refund);
+
+ // ──────────────────────────────────────────────
+ // Constructor
+ // ──────────────────────────────────────────────
+
+ /// @param _bondToken Address of the ERC-20 token used for bonds.
+ /// @param _baseBond Base bond for tier 0 in any region.
+ constructor(address _bondToken, uint256 _baseBond) ERC721("Swarm Fleet Identity", "SFID") {
+ BOND_TOKEN = IERC20(_bondToken);
+ BASE_BOND = _baseBond;
+ }
+
+ // ══════════════════════════════════════════════
+ // Registration: Country (explicit tier only — use countryInclusionHint)
+ // ══════════════════════════════════════════════
+
+ /// @notice Register a fleet under a country into a specific tier.
+ /// @dev No auto-assign: the cheapest-inclusion tier requires scanning all
+ /// admin areas in the country (unbounded), so callers must query
+ /// `countryInclusionHint(countryCode)` off-chain and supply the tier.
+ /// @param countryCode ISO 3166-1 numeric country code (1-999).
+ function registerFleetCountry(bytes16 uuid, uint16 countryCode, uint256 targetTier)
+ external
+ nonReentrant
+ returns (uint256 tokenId)
+ {
+ if (uuid == bytes16(0)) revert InvalidUUID();
+ if (countryCode == 0 || countryCode > MAX_COUNTRY_CODE) revert InvalidCountryCode();
+ uint32 regionKey = uint32(countryCode);
+ _validateExplicitTier(regionKey, targetTier);
+ tokenId = _register(uuid, regionKey, targetTier);
+ }
+
+ // ══════════════════════════════════════════════
+ // Registration: Admin Area (local)
+ // ══════════════════════════════════════════════
+
+ /// @notice Register a fleet under a country + admin area into a specific tier.
+ /// @dev Use `localInclusionHint(countryCode, adminCode)` to find the cheapest
+ /// tier that guarantees bundle inclusion.
+ function registerFleetLocal(bytes16 uuid, uint16 countryCode, uint16 adminCode, uint256 targetTier)
+ external
+ nonReentrant
+ returns (uint256 tokenId)
+ {
+ if (uuid == bytes16(0)) revert InvalidUUID();
+ if (countryCode == 0 || countryCode > MAX_COUNTRY_CODE) revert InvalidCountryCode();
+ if (adminCode == 0 || adminCode > MAX_ADMIN_CODE) revert InvalidAdminCode();
+ uint32 regionKey = _makeAdminRegion(countryCode, adminCode);
+ _validateExplicitTier(regionKey, targetTier);
+ tokenId = _register(uuid, regionKey, targetTier);
+ }
+
+ // ══════════════════════════════════════════════
+ // Promote / Demote (region-aware)
+ // ══════════════════════════════════════════════
+
+ /// @notice Promotes a fleet to the next tier within its region.
+ function promote(uint256 tokenId) external nonReentrant {
+ _promote(tokenId, fleetTier[tokenId] + 1);
+ }
+
+ /// @notice Moves a fleet to a different tier within its region.
+ /// If targetTier > current tier, promotes (pulls additional bond).
+ /// If targetTier < current tier, demotes (refunds bond difference).
+ function reassignTier(uint256 tokenId, uint256 targetTier) external nonReentrant {
+ uint256 currentTier = fleetTier[tokenId];
+ if (targetTier == currentTier) revert TargetTierSameAsCurrent();
+ if (targetTier > currentTier) {
+ _promote(tokenId, targetTier);
+ } else {
+ _demote(tokenId, targetTier);
+ }
+ }
+
+ // ══════════════════════════════════════════════
+ // Burn
+ // ══════════════════════════════════════════════
+
+ /// @notice Burns the fleet NFT and refunds the bond to the token owner.
+ /// Handles both registered fleets (regional) and owned-only UUIDs.
+ function burn(uint256 tokenId) external nonReentrant {
+ address tokenOwner = ownerOf(tokenId);
+ if (tokenOwner != msg.sender) revert NotTokenOwner();
+
+ uint32 region = uint32(tokenId >> 128);
+ bytes16 uuid = bytes16(uint128(tokenId));
+ uint256 refund;
+ uint256 tier;
+
+ if (region == OWNED_REGION_KEY) {
+ // Owned-only token: no tier structures, just BASE_BOND
+ refund = BASE_BOND;
+ tier = 0;
+ _burn(tokenId);
+ } else {
+ // Registered fleet: remove from tier structures
+ tier = fleetTier[tokenId];
+ refund = tierBond(tier, _isCountryRegion(region));
+
+ _removeFromTier(tokenId, region, tier);
+ delete fleetTier[tokenId];
+ delete _indexInTier[tokenId];
+ _burn(tokenId);
+
+ _trimTierCount(region);
+ _removeFromRegionIndex(region);
+ }
+
+ // Clean up UUID ownership tracking
+ uint256 newCount = uuidTokenCount[uuid] - 1;
+ if (newCount == 0) {
+ delete uuidOwner[uuid];
+ delete uuidTokenCount[uuid];
+ delete uuidLevel[uuid];
+ } else {
+ uuidTokenCount[uuid] = newCount;
+ }
+
+ // Interaction
+ if (refund > 0) {
+ BOND_TOKEN.safeTransfer(tokenOwner, refund);
+ }
+
+ emit FleetBurned(tokenOwner, tokenId, region, tier, refund);
+ }
+
+ // ══════════════════════════════════════════════
+ // UUID Ownership (Owned-Only Mode)
+ // ══════════════════════════════════════════════
+
+ /// @notice Claim ownership of a UUID without registering in any region.
+ /// Costs BASE_BOND. The UUID can later be registered via registerFleetLocal/Country.
+ /// @param uuid The Proximity UUID to claim.
+ /// @return tokenId The token ID for the owned-only UUID (region=0).
+ function claimUuid(bytes16 uuid) external nonReentrant returns (uint256 tokenId) {
+ if (uuid == bytes16(0)) revert InvalidUUID();
+ if (uuidOwner[uuid] != address(0)) revert UuidAlreadyOwned();
+
+ // Set ownership
+ uuidOwner[uuid] = msg.sender;
+ uuidLevel[uuid] = RegistrationLevel.Owned;
+ uuidTokenCount[uuid] = 1;
+
+ // Mint token with region=0
+ tokenId = uint256(uint128(uuid));
+ _mint(msg.sender, tokenId);
+
+ // Pull bond
+ BOND_TOKEN.safeTransferFrom(msg.sender, address(this), BASE_BOND);
+
+ emit UuidClaimed(msg.sender, uuid, tokenId, BASE_BOND);
+ }
+
+ /// @notice Move a registered fleet back to owned-only mode, receiving a partial refund.
+ /// Only works when this is the ONLY token for the UUID.
+ /// @param tokenId The fleet token to unregister.
+ /// @return newTokenId The new owned-only token ID (region=0).
+ function unregisterToOwned(uint256 tokenId) external nonReentrant returns (uint256 newTokenId) {
+ address tokenOwner = ownerOf(tokenId);
+ if (tokenOwner != msg.sender) revert NotTokenOwner();
+
+ bytes16 uuid = bytes16(uint128(tokenId));
+ uint32 region = uint32(tokenId >> 128);
+
+ // Must be a registered fleet, not already owned-only
+ if (region == OWNED_REGION_KEY) revert UuidNotOwned();
+
+ // Must be the only token for this UUID
+ if (uuidTokenCount[uuid] > 1) revert CannotUnregisterMultipleTokens();
+
+ uint256 tier = fleetTier[tokenId];
+ uint256 currentBond = tierBond(tier, _isCountryRegion(region));
+ uint256 refund = currentBond - BASE_BOND;
+
+ // === Effects: Remove from region ===
+ _removeFromTier(tokenId, region, tier);
+ delete fleetTier[tokenId];
+ delete _indexInTier[tokenId];
+ _burn(tokenId);
+ _trimTierCount(region);
+ _removeFromRegionIndex(region);
+
+ // Update level to Owned (resets level for future registration flexibility)
+ uuidLevel[uuid] = RegistrationLevel.Owned;
+
+ // Mint owned-only token
+ newTokenId = uint256(uint128(uuid));
+ _mint(msg.sender, newTokenId);
+
+ // Interaction: refund excess bond
+ if (refund > 0) {
+ BOND_TOKEN.safeTransfer(msg.sender, refund);
+ }
+
+ emit UuidUnregistered(msg.sender, uuid, tokenId, newTokenId, refund);
+ }
+
+ /// @notice Release an owned-only UUID, refunding the BASE_BOND.
+ /// After release, the UUID can be claimed by anyone.
+ /// @param uuid The UUID to release (must be in owned-only state).
+ function releaseUuid(bytes16 uuid) external nonReentrant {
+ if (uuidLevel[uuid] != RegistrationLevel.Owned) revert UuidNotOwned();
+ if (uuidOwner[uuid] != msg.sender) revert NotUuidOwner();
+
+ // Get the token ID for this owned-only UUID
+ uint256 tokenId = uint256(uint128(uuid));
+ address tokenOwner = ownerOf(tokenId);
+
+ // Burn the token
+ _burn(tokenId);
+
+ // Clear ownership
+ delete uuidOwner[uuid];
+ delete uuidTokenCount[uuid];
+ delete uuidLevel[uuid];
+
+ // Refund BASE_BOND to token owner (not necessarily uuidOwner due to transfers)
+ BOND_TOKEN.safeTransfer(tokenOwner, BASE_BOND);
+
+ emit UuidReleased(tokenOwner, uuid, BASE_BOND);
+ }
+
+ // ══════════════════════════════════════════════
+ // Views: Bond & tier helpers
+ // ══════════════════════════════════════════════
+
+ /// @notice Bond required for tier K.
+ /// Local (admin area): BASE_BOND * 2^K
+ /// Country: BASE_BOND * COUNTRY_BOND_MULTIPLIER * 2^K (8× local)
+ function tierBond(uint256 tier, bool isCountry) public view returns (uint256) {
+ uint256 base = BASE_BOND << tier;
+ return isCountry ? base * COUNTRY_BOND_MULTIPLIER : base;
+ }
+
+    /// @notice Returns the cheapest tier that guarantees a **local** fleet
+    ///         appears in `buildHighestBondedUuidBundle` for (countryCode, adminCode).
+    ///         Bounded: O(MAX_TIERS).
+    function localInclusionHint(uint16 countryCode, uint16 adminCode)
+        external
+        view
+        returns (uint256 inclusionTier, uint256 bond)
+    {
+        // Validate both location components before doing any work.
+        bool badCountry = countryCode == 0 || countryCode > MAX_COUNTRY_CODE;
+        if (badCountry) revert InvalidCountryCode();
+        bool badAdmin = adminCode == 0 || adminCode > MAX_ADMIN_CODE;
+        if (badAdmin) revert InvalidAdminCode();
+
+        inclusionTier = _findCheapestInclusionTier(countryCode, adminCode, LEVEL_ADMIN);
+        bond = tierBond(inclusionTier, false);
+    }
+
+    /// @notice Returns the cheapest tier that guarantees a **country** fleet
+    ///         appears in every `buildHighestBondedUuidBundle` query within
+    ///         the country (across all active admin areas).
+    /// @dev Unbounded view — iterates over all active admin areas in the
+    ///      country. Free off-chain; callers pass the result to
+    ///      `registerFleetCountry(uuid, cc, tier)`.
+    function countryInclusionHint(uint16 countryCode) external view returns (uint256 inclusionTier, uint256 bond) {
+        if (countryCode == 0 || countryCode > MAX_COUNTRY_CODE) revert InvalidCountryCode();
+
+        // Start with the country-only location (no admin area active).
+        inclusionTier = _findCheapestInclusionTier(countryCode, 0, LEVEL_COUNTRY);
+
+        // The fleet must win a slot in every active admin area of this country,
+        // so take the maximum required tier across all of them.
+        uint256 total = _activeAdminAreas.length;
+        for (uint256 i; i < total; ++i) {
+            uint32 regionKey = _activeAdminAreas[i];
+            if (_countryFromRegion(regionKey) != countryCode) continue;
+            uint256 required = _findCheapestInclusionTier(countryCode, _adminFromRegion(regionKey), LEVEL_COUNTRY);
+            if (required > inclusionTier) inclusionTier = required;
+        }
+        bond = tierBond(inclusionTier, true);
+    }
+
+    /// @notice Highest non-empty tier in a region, or 0 if the region is empty.
+    /// @dev Return value 0 is ambiguous between "empty region" and "top tier is 0";
+    ///      callers needing the distinction can also check `tierMemberCount`.
+    function highestActiveTier(uint32 regionKey) external view returns (uint256) {
+        uint256 tiers = regionTierCount[regionKey];
+        return tiers == 0 ? 0 : tiers - 1;
+    }
+
+    /// @notice Number of members in a specific tier of a region.
+    function tierMemberCount(uint32 regionKey, uint256 tier) external view returns (uint256) {
+        uint256[] storage members = _regionTierMembers[regionKey][tier];
+        return members.length;
+    }
+
+    /// @notice All token IDs in a specific tier of a region.
+    function getTierMembers(uint32 regionKey, uint256 tier) external view returns (uint256[] memory) {
+        uint256[] memory copyOut = _regionTierMembers[regionKey][tier];
+        return copyOut;
+    }
+
+    /// @notice All UUIDs in a specific tier of a region.
+    function getTierUuids(uint32 regionKey, uint256 tier) external view returns (bytes16[] memory uuids) {
+        uint256[] storage members = _regionTierMembers[regionKey][tier];
+        uint256 n = members.length;
+        uuids = new bytes16[](n);
+        for (uint256 i; i < n; ++i) {
+            uuids[i] = tokenUuid(members[i]);
+        }
+    }
+
+    /// @notice UUID for a token ID (the low 128 bits).
+    function tokenUuid(uint256 tokenId) public pure returns (bytes16) {
+        uint128 low = uint128(tokenId);
+        return bytes16(low);
+    }
+
+    /// @notice Region key encoded in a token ID (bits 128-159).
+    function tokenRegion(uint256 tokenId) public pure returns (uint32) {
+        uint256 high = tokenId >> 128;
+        return uint32(high);
+    }
+
+    /// @notice Computes the deterministic token ID for a uuid+region pair:
+    ///         region key in the upper bits, UUID in the lower 128 bits.
+    function computeTokenId(bytes16 uuid, uint32 regionKey) public pure returns (uint256) {
+        uint256 high = uint256(regionKey) << 128;
+        return high | uint256(uint128(uuid));
+    }
+
+    /// @notice Bond amount escrowed for a token. Returns 0 for nonexistent tokens.
+    function bonds(uint256 tokenId) external view returns (uint256) {
+        if (_ownerOf(tokenId) == address(0)) return 0; // never minted or burned
+
+        uint32 region = tokenRegion(tokenId);
+        // Owned-only tokens always carry exactly the base bond.
+        if (region == OWNED_REGION_KEY) return BASE_BOND;
+
+        return tierBond(fleetTier[tokenId], _isCountryRegion(region));
+    }
+
+    /// @notice True when the UUID is claimed (owned-only) but not yet registered.
+    function isOwnedOnly(bytes16 uuid) external view returns (bool) {
+        return uuidLevel[uuid] == RegistrationLevel.Owned;
+    }
+
+    // ══════════════════════════════════════════════
+    // Views: EdgeBeaconScanner discovery
+    // ══════════════════════════════════════════════
+
+    /// @notice Builds a priority-ordered bundle of up to MAX_BONDED_UUID_BUNDLE_SIZE
+    ///         UUIDs for an EdgeBeaconScanner, merging the highest-bonded tiers
+    ///         across admin-area and country levels.
+    ///
+    /// @dev **Priority Rules:**
+    ///      1. Higher bond tier always beats lower bond tier
+    ///      2. Within the same tier: local (admin area) beats country
+    ///      3. Within the same tier + level: earlier registration wins
+    ///
+    ///      **Economic Fairness:** Country fleets pay COUNTRY_BOND_MULTIPLIER×
+    ///      more than local fleets at the same tier (see `tierBond`), giving
+    ///      locals an economic advantage when competing for bundle slots.
+    ///
+    /// @param countryCode EdgeBeaconScanner country (must be > 0).
+    /// @param adminCode EdgeBeaconScanner admin area (must be > 0).
+    /// @return uuids The merged UUID bundle (up to MAX_BONDED_UUID_BUNDLE_SIZE).
+    /// @return count Actual number of UUIDs returned.
+    function buildHighestBondedUuidBundle(uint16 countryCode, uint16 adminCode)
+        external
+        view
+        returns (bytes16[] memory uuids, uint256 count)
+    {
+        if (adminCode == 0) revert AdminAreaRequired();
+        if (countryCode == 0) revert InvalidCountryCode();
+
+        uint32 localKey = _makeAdminRegion(countryCode, adminCode);
+        uint32 nationKey = uint32(countryCode);
+
+        uint256 localTiers = regionTierCount[localKey];
+        uint256 nationTiers = regionTierCount[nationKey];
+
+        // Highest populated tier index across both levels (0 when both are empty).
+        uint256 topTier = localTiers > nationTiers ? localTiers : nationTiers;
+        if (topTier > 0) topTier -= 1;
+
+        uuids = new bytes16[](MAX_BONDED_UUID_BUNDLE_SIZE);
+
+        // Descend tier by tier; within a tier, local members outrank country members.
+        uint256 tier = topTier + 1;
+        while (tier > 0 && count < MAX_BONDED_UUID_BUNDLE_SIZE) {
+            unchecked {
+                --tier;
+            }
+            count = _appendTierUuids(localKey, tier, localTiers, uuids, count);
+            count = _appendTierUuids(nationKey, tier, nationTiers, uuids, count);
+        }
+
+        // Shrink the in-memory array length to the number of entries written.
+        assembly {
+            mstore(uuids, count)
+        }
+    }
+
+    /// @dev Copies up to the remaining bundle capacity worth of UUIDs from one
+    ///      region tier into `uuids`, starting at position `count`.
+    /// @return The updated entry count.
+    function _appendTierUuids(
+        uint32 regionKey,
+        uint256 tier,
+        uint256 tierCount,
+        bytes16[] memory uuids,
+        uint256 count
+    ) internal view returns (uint256) {
+        // Nothing to copy when the region has no such tier.
+        if (tier >= tierCount) return count;
+
+        uint256[] storage members = _regionTierMembers[regionKey][tier];
+        uint256 available = members.length;
+        uint256 capacityLeft = MAX_BONDED_UUID_BUNDLE_SIZE - count;
+        uint256 take = available > capacityLeft ? capacityLeft : available;
+
+        for (uint256 i; i < take; ++i) {
+            uuids[count] = bytes16(uint128(members[i]));
+            unchecked {
+                ++count;
+            }
+        }
+        return count;
+    }
+
+    // ══════════════════════════════════════════════
+    // Views: Region indexes
+    // ══════════════════════════════════════════════
+
+    /// @notice Returns all country codes with at least one active fleet.
+    /// @dev Unbounded copy of the index array; intended for off-chain callers.
+    function getActiveCountries() external view returns (uint16[] memory) {
+        return _activeCountries;
+    }
+
+    /// @notice Returns all admin-area region keys with at least one active fleet.
+    /// @dev Unbounded copy of the index array; intended for off-chain callers.
+    function getActiveAdminAreas() external view returns (uint32[] memory) {
+        return _activeAdminAreas;
+    }
+
+    // ══════════════════════════════════════════════
+    // Region key helpers (pure)
+    // ══════════════════════════════════════════════
+
+    /// @notice Builds a country region key from a country code.
+    /// @dev A country key is simply the country code widened to uint32.
+    function countryRegionKey(uint16 countryCode) external pure returns (uint32) {
+        return uint32(countryCode);
+    }
+
+    /// @notice Builds an admin-area region key from country + admin codes.
+    /// @dev See `_makeAdminRegion` for the bit-packing layout.
+    function adminRegionKey(uint16 countryCode, uint16 adminCode) external pure returns (uint32) {
+        return _makeAdminRegion(countryCode, adminCode);
+    }
+
+    // ══════════════════════════════════════════════
+    // Internals
+    // ══════════════════════════════════════════════
+
+    // -- Region key encoding --
+
+    /// @dev Packs country + admin codes into one admin-area region key:
+    ///      countryCode occupies the bits above ADMIN_SHIFT, adminCode the bits below.
+    function _makeAdminRegion(uint16 countryCode, uint16 adminCode) internal pure returns (uint32) {
+        uint32 high = uint32(countryCode) << uint32(ADMIN_SHIFT);
+        return high | uint32(adminCode);
+    }
+
+    /// @dev Recovers the country code from an admin-area region key.
+    function _countryFromRegion(uint32 adminRegion) internal pure returns (uint16) {
+        uint32 shifted = adminRegion >> uint32(ADMIN_SHIFT);
+        return uint16(shifted);
+    }
+
+    /// @dev Recovers the admin code from an admin-area region key.
+    function _adminFromRegion(uint32 adminRegion) internal pure returns (uint16) {
+        uint32 masked = adminRegion & ADMIN_CODE_MASK;
+        return uint16(masked);
+    }
+
+    /// @dev True when the key denotes a country-level registration. Country keys
+    ///      live in (0, MAX_COUNTRY_CODE]; key 0 is the owned-only sentinel and
+    ///      larger values are admin-area keys.
+    function _isCountryRegion(uint32 regionKey) internal pure returns (bool) {
+        if (regionKey == 0) return false;
+        return regionKey <= MAX_COUNTRY_CODE;
+    }
+
+    /// @dev Shared registration logic. Handles both fresh registrations and
+    ///      Owned → Registered transitions.
+    ///
+    ///      Owned → Registered: the owned-only token is burned, a region token is
+    ///      minted, and only the bond difference (tier bond − BASE_BOND) is pulled.
+    ///      Fresh path: the first registration claims the UUID for msg.sender;
+    ///      later registrations must match both the owner and the level.
+    ///
+    ///      Ordering: all state changes (effects) happen before the bond
+    ///      `safeTransferFrom` (interaction). `_mint` (not `_safeMint`) is used,
+    ///      so no receiver callback runs mid-flow.
+    /// @param uuid   Fleet UUID being registered.
+    /// @param region Target region key (country or admin-area encoded).
+    /// @param tier   Bond tier for the new registration.
+    /// @return tokenId Deterministic token ID: (region << 128) | uuid.
+    function _register(bytes16 uuid, uint32 region, uint256 tier) internal returns (uint256 tokenId) {
+        RegistrationLevel existingLevel = uuidLevel[uuid];
+        address existingOwner = uuidOwner[uuid];
+        bool isCountry = _isCountryRegion(region);
+        RegistrationLevel targetLevel = isCountry ? RegistrationLevel.Country : RegistrationLevel.Local;
+
+        // Handle Owned → Registered transition
+        if (existingLevel == RegistrationLevel.Owned) {
+            if (existingOwner != msg.sender) revert UuidOwnerMismatch();
+
+            // Burn the owned-only token (its ID is the raw UUID; region bits zero)
+            uint256 ownedTokenId = uint256(uint128(uuid));
+            _burn(ownedTokenId);
+
+            // Update level
+            uuidLevel[uuid] = targetLevel;
+            // uuidTokenCount stays the same (1)
+
+            // Calculate bond: full tier bond minus BASE_BOND already paid
+            uint256 fullBond = tierBond(tier, isCountry);
+            uint256 incrementalBond = fullBond - BASE_BOND;
+
+            // Mint new token
+            tokenId = (uint256(region) << 128) | uint256(uint128(uuid));
+            fleetTier[tokenId] = tier;
+            _addToTier(tokenId, region, tier);
+            _addToRegionIndex(region);
+            _mint(msg.sender, tokenId);
+
+            // Pull incremental bond
+            if (incrementalBond > 0) {
+                BOND_TOKEN.safeTransferFrom(msg.sender, address(this), incrementalBond);
+            }
+
+            emit FleetRegistered(msg.sender, uuid, tokenId, region, tier, incrementalBond);
+            return tokenId;
+        }
+
+        // Standard registration (None or existing level)
+        if (existingOwner == address(0)) {
+            // First registration for this UUID
+            uuidOwner[uuid] = msg.sender;
+            uuidLevel[uuid] = targetLevel;
+        } else {
+            // A UUID may hold tokens in several regions, but only for the same
+            // owner and at the same registration level.
+            if (existingOwner != msg.sender) revert UuidOwnerMismatch();
+            if (existingLevel != targetLevel) revert UuidLevelMismatch();
+        }
+
+        uuidTokenCount[uuid]++;
+
+        uint256 bond = tierBond(tier, isCountry);
+        tokenId = (uint256(region) << 128) | uint256(uint128(uuid));
+
+        // Effects
+        fleetTier[tokenId] = tier;
+        _addToTier(tokenId, region, tier);
+        _addToRegionIndex(region);
+        _mint(msg.sender, tokenId);
+
+        // Interaction
+        if (bond > 0) {
+            BOND_TOKEN.safeTransferFrom(msg.sender, address(this), bond);
+        }
+
+        emit FleetRegistered(msg.sender, uuid, tokenId, region, tier, bond);
+    }
+
+    /// @dev Shared promotion logic: moves `tokenId` up to `targetTier` and
+    ///      pulls the bond difference from the owner.
+    function _promote(uint256 tokenId, uint256 targetTier) internal {
+        address tokenOwner = ownerOf(tokenId);
+        if (tokenOwner != msg.sender) revert NotTokenOwner();
+
+        uint32 region = tokenRegion(tokenId);
+        uint256 fromTier = fleetTier[tokenId];
+
+        // Checks: must move strictly upward, stay below MAX_TIERS, and fit
+        // within the destination tier's capacity.
+        if (targetTier <= fromTier) revert TargetTierNotHigher();
+        if (targetTier >= MAX_TIERS) revert MaxTiersReached();
+        if (_regionTierMembers[region][targetTier].length >= TIER_CAPACITY) revert TierFull();
+
+        bool isCountry = _isCountryRegion(region);
+        uint256 topUp = tierBond(targetTier, isCountry) - tierBond(fromTier, isCountry);
+
+        // Effects: relocate the token between tier member lists.
+        _removeFromTier(tokenId, region, fromTier);
+        fleetTier[tokenId] = targetTier;
+        _addToTier(tokenId, region, targetTier);
+
+        // Interaction: pull the incremental bond.
+        if (topUp > 0) {
+            BOND_TOKEN.safeTransferFrom(tokenOwner, address(this), topUp);
+        }
+
+        emit FleetPromoted(tokenId, fromTier, targetTier, topUp);
+    }
+
+    /// @dev Shared demotion logic: moves `tokenId` down to `targetTier` and
+    ///      refunds the bond difference to the owner.
+    function _demote(uint256 tokenId, uint256 targetTier) internal {
+        address tokenOwner = ownerOf(tokenId);
+        if (tokenOwner != msg.sender) revert NotTokenOwner();
+
+        uint32 region = tokenRegion(tokenId);
+        uint256 fromTier = fleetTier[tokenId];
+
+        // Checks: must move strictly downward into a tier with spare capacity.
+        if (targetTier >= fromTier) revert TargetTierNotLower();
+        if (_regionTierMembers[region][targetTier].length >= TIER_CAPACITY) revert TierFull();
+
+        bool isCountry = _isCountryRegion(region);
+        uint256 refund = tierBond(fromTier, isCountry) - tierBond(targetTier, isCountry);
+
+        // Effects: relocate the token, then shrink the region's tier count if
+        // the old top tier is now empty.
+        _removeFromTier(tokenId, region, fromTier);
+        fleetTier[tokenId] = targetTier;
+        _addToTier(tokenId, region, targetTier);
+
+        _trimTierCount(region);
+
+        // Interaction: return the excess bond.
+        if (refund > 0) {
+            BOND_TOKEN.safeTransfer(tokenOwner, refund);
+        }
+
+        emit FleetDemoted(tokenId, fromTier, targetTier, refund);
+    }
+
+    /// @dev Reverts unless `targetTier` is a legal, non-full tier in `region`
+    ///      (pure validation, no state changes).
+    function _validateExplicitTier(uint32 region, uint256 targetTier) internal view {
+        if (targetTier >= MAX_TIERS) revert MaxTiersReached();
+        uint256 occupancy = _regionTierMembers[region][targetTier].length;
+        if (occupancy >= TIER_CAPACITY) revert TierFull();
+    }
+
+    // -- Bundle-level helpers (shared by buildHighestBondedUuidBundle & inclusion hints) --
+
+    /// @dev Resolves the two bundle levels from a location.
+    ///      Level 0 = admin area (highest priority), Level 1 = country.
+    ///      A level is marked active only when its code components are non-zero.
+    function _resolveBundleLevels(uint16 countryCode, uint16 adminCode)
+        internal
+        pure
+        returns (uint32[2] memory keys, bool[2] memory active)
+    {
+        if (countryCode > 0) {
+            keys[LEVEL_COUNTRY] = uint32(countryCode);
+            active[LEVEL_COUNTRY] = true;
+            if (adminCode > 0) {
+                keys[LEVEL_ADMIN] = _makeAdminRegion(countryCode, adminCode);
+                active[LEVEL_ADMIN] = true;
+            }
+        }
+    }
+
+    /// @dev Highest populated tier index across both bundle levels (0 when all empty).
+    function _findMaxTierIndex(uint32[2] memory keys, bool[2] memory active)
+        internal
+        view
+        returns (uint256 maxTierIndex)
+    {
+        for (uint256 lvl; lvl < 2; ++lvl) {
+            if (!active[lvl]) continue;
+            uint256 tiers = regionTierCount[keys[lvl]];
+            if (tiers == 0) continue;
+            uint256 top = tiers - 1;
+            if (top > maxTierIndex) maxTierIndex = top;
+        }
+    }
+
+    // -- Inclusion-tier logic --
+
+    /// @dev Simulates `buildHighestBondedUuidBundle(countryCode, adminCode)` and
+    ///      returns the cheapest tier at `candidateLevel` that guarantees bundle
+    ///      inclusion for a NEW fleet. Bounded: O(MAX_TIERS). Works for admin or
+    ///      country level.
+    ///
+    ///      A new fleet registered at tier T is included iff:
+    ///      (a) tier T has spare capacity, and
+    ///      (b) after all higher-priority entries are counted — tiers strictly
+    ///          above T, higher-priority levels at T, and the EXISTING members
+    ///          of the candidate tier itself (earlier registration wins within
+    ///          a tier) — at least one bundle slot remains.
+    function _findCheapestInclusionTier(uint16 countryCode, uint16 adminCode, uint256 candidateLevel)
+        internal
+        view
+        returns (uint256)
+    {
+        (uint32[2] memory keys, bool[2] memory active) = _resolveBundleLevels(countryCode, adminCode);
+        uint256 maxTierIndex = _findMaxTierIndex(keys, active);
+
+        uint32 candidateRegion = keys[candidateLevel];
+        uint256 cap = TIER_CAPACITY;
+
+        // Find cheapest tier T at candidateRegion.
+        uint256 candidateTierCount = regionTierCount[candidateRegion];
+
+        for (uint256 T = 0; T < MAX_TIERS; ++T) {
+            // (a) Room in the tier?
+            uint256 members = (T < candidateTierCount) ? _regionTierMembers[candidateRegion][T].length : 0;
+            if (members >= cap) continue;
+
+            // (b) Compute how many slots are consumed BEFORE reaching the new
+            //     fleet's position within (candidateRegion, T).
+            uint256 countBefore;
+
+            if (T > maxTierIndex) {
+                // Tier above current max: joining here makes T the new top tier,
+                // so the bundle starts at T. countBefore = 0 (and members == 0,
+                // since T is above every populated tier).
+                countBefore = 0;
+            } else {
+                // Count from tiers strictly above T.
+                countBefore = _getCountFromTiersAbove(keys, active, maxTierIndex, T);
+
+                // Add higher-priority levels at tier T itself (only the admin
+                // area precedes a country candidate within the same tier).
+                if (candidateLevel == LEVEL_COUNTRY && active[LEVEL_ADMIN]) {
+                    if (countBefore < MAX_BONDED_UUID_BUNDLE_SIZE) {
+                        uint256 ltc = regionTierCount[keys[LEVEL_ADMIN]];
+                        if (ltc > 0 && T < ltc) {
+                            uint256 m = _regionTierMembers[keys[LEVEL_ADMIN]][T].length;
+                            if (m > 0) {
+                                uint256 room = MAX_BONDED_UUID_BUNDLE_SIZE - countBefore;
+                                countBefore += (m < room ? m : room);
+                            }
+                        }
+                    }
+                }
+            }
+
+            // The new fleet is appended AFTER the candidate tier's existing
+            // members, so it occupies position countBefore + members.
+            // Fix: the previous check compared only countBefore against the
+            // bundle size, ignoring `members`, which over-promised inclusion
+            // whenever the candidate tier was already partially filled.
+            if (countBefore + members < MAX_BONDED_UUID_BUNDLE_SIZE) {
+                return T;
+            }
+        }
+
+        revert MaxTiersReached();
+    }
+
+    /// @dev Sum of bundle entries contributed by tiers STRICTLY above `T`
+    ///      (descending from `maxTierIndex`), capped at MAX_BONDED_UUID_BUNDLE_SIZE.
+    ///      Mirrors the bundle merge order: within a tier, locals first, then country.
+    function _getCountFromTiersAbove(uint32[2] memory keys, bool[2] memory active, uint256 maxTierIndex, uint256 T)
+        internal
+        view
+        returns (uint256 count)
+    {
+        // Walk tiers maxTierIndex, maxTierIndex-1, …, T+1. The decrement only
+        // runs while tier > T >= 0, so the unsigned loop cannot underflow.
+        for (uint256 tier = maxTierIndex; tier > T; --tier) {
+            if (count >= MAX_BONDED_UUID_BUNDLE_SIZE) break;
+
+            // lvl 0 = admin (local), lvl 1 = country
+            for (uint256 lvl = 0; lvl < 2; ++lvl) {
+                if (!active[lvl]) continue;
+                if (count >= MAX_BONDED_UUID_BUNDLE_SIZE) break;
+
+                uint256 tc = regionTierCount[keys[lvl]];
+                if (tier >= tc) continue; // also covers tc == 0
+
+                uint256 m = _regionTierMembers[keys[lvl]][tier].length;
+                if (m == 0) continue;
+
+                uint256 room = MAX_BONDED_UUID_BUNDLE_SIZE - count;
+                count += m < room ? m : room;
+            }
+        }
+    }
+
+    /// @dev Appends `tokenId` to the region's tier member list, remembers its
+    ///      position for O(1) swap-and-pop removal, and raises regionTierCount
+    ///      when a previously unused tier becomes populated.
+    function _addToTier(uint256 tokenId, uint32 region, uint256 tier) internal {
+        uint256[] storage members = _regionTierMembers[region][tier];
+        members.push(tokenId);
+        _indexInTier[tokenId] = members.length - 1;
+
+        // Opening a tier at or above the current count extends the count.
+        if (tier >= regionTierCount[region]) {
+            regionTierCount[region] = tier + 1;
+        }
+    }
+
+    /// @dev Swap-and-pop removal from a region's tier member array.
+    ///      Also clears the removed token's `_indexInTier` entry for a storage
+    ///      gas refund and to avoid leaving stale index data behind.
+    function _removeFromTier(uint256 tokenId, uint32 region, uint256 tier) internal {
+        uint256[] storage members = _regionTierMembers[region][tier];
+        uint256 idx = _indexInTier[tokenId];
+        uint256 lastIdx = members.length - 1;
+
+        if (idx != lastIdx) {
+            // Move the tail member into the vacated slot and update its index.
+            uint256 lastTokenId = members[lastIdx];
+            members[idx] = lastTokenId;
+            _indexInTier[lastTokenId] = idx;
+        }
+        members.pop();
+        // Fix: previously the removed token's stale index stayed in storage.
+        // It is always re-set by _addToTier before any later read, but deleting
+        // it here earns a gas refund and keeps the mapping consistent.
+        delete _indexInTier[tokenId];
+    }
+
+    /// @dev Shrinks regionTierCount until the top tier is non-empty (or zero).
+    function _trimTierCount(uint32 region) internal {
+        uint256 tiers = regionTierCount[region];
+        while (tiers > 0) {
+            if (_regionTierMembers[region][tiers - 1].length != 0) break;
+            unchecked {
+                --tiers;
+            }
+        }
+        regionTierCount[region] = tiers;
+    }
+
+    // -- Region index maintenance --
+
+    /// @dev Registers `region` in the matching active-region index set,
+    ///      idempotently (no-op when already tracked). Stored indices are
+    ///      1-based so that 0 means "absent".
+    function _addToRegionIndex(uint32 region) internal {
+        if (_isCountryRegion(region)) {
+            uint16 cc = uint16(region);
+            if (_activeCountryIndex[cc] != 0) return; // already tracked
+            _activeCountries.push(cc);
+            _activeCountryIndex[cc] = _activeCountries.length; // 1-indexed
+        } else {
+            if (_activeAdminAreaIndex[region] != 0) return; // already tracked
+            _activeAdminAreas.push(region);
+            _activeAdminAreaIndex[region] = _activeAdminAreas.length; // 1-indexed
+        }
+    }
+
+    /// @dev Drops `region` from its active-region index set once the region
+    ///      holds no fleets at all. No-op while any tier is still populated.
+    function _removeFromRegionIndex(uint32 region) internal {
+        if (regionTierCount[region] > 0) return; // region still hosts fleets
+
+        if (_isCountryRegion(region)) {
+            uint16 cc = uint16(region);
+            uint256 oneBased = _activeCountryIndex[cc];
+            if (oneBased == 0) return; // not tracked
+
+            uint256 last = _activeCountries.length - 1;
+            uint256 at = oneBased - 1;
+            if (at != last) {
+                // Swap-and-pop: relocate the tail entry into the vacated slot.
+                uint16 moved = _activeCountries[last];
+                _activeCountries[at] = moved;
+                _activeCountryIndex[moved] = oneBased;
+            }
+            _activeCountries.pop();
+            delete _activeCountryIndex[cc];
+        } else {
+            uint256 oneBased = _activeAdminAreaIndex[region];
+            if (oneBased == 0) return; // not tracked
+
+            uint256 last = _activeAdminAreas.length - 1;
+            uint256 at = oneBased - 1;
+            if (at != last) {
+                // Swap-and-pop: relocate the tail entry into the vacated slot.
+                uint32 moved = _activeAdminAreas[last];
+                _activeAdminAreas[at] = moved;
+                _activeAdminAreaIndex[moved] = oneBased;
+            }
+            _activeAdminAreas.pop();
+            delete _activeAdminAreaIndex[region];
+        }
+    }
+
+    // ──────────────────────────────────────────────
+    // Overrides required by ERC721Enumerable
+    // ──────────────────────────────────────────────
+
+    /// @dev Hooks every mint/transfer/burn. For owned-only tokens (region key ==
+    ///      OWNED_REGION_KEY), keeps `uuidOwner` in sync with the NFT holder on
+    ///      wallet-to-wallet transfers so owned-only UUIDs stay tradable on
+    ///      marketplaces. Mints and burns (from or to == address(0)) are skipped:
+    ///      the registration/release flows manage `uuidOwner` themselves.
+    function _update(address to, uint256 tokenId, address auth) internal override(ERC721Enumerable) returns (address) {
+        address from = super._update(to, tokenId, auth);
+
+        uint32 region = uint32(tokenId >> 128);
+        bool isWalletTransfer = from != address(0) && to != address(0);
+        if (region == OWNED_REGION_KEY && isWalletTransfer) {
+            uuidOwner[bytes16(uint128(tokenId))] = to;
+        }
+
+        return from;
+    }
+
+    /// @dev Required override; delegates to ERC721Enumerable's balance accounting.
+    function _increaseBalance(address account, uint128 value) internal override(ERC721Enumerable) {
+        super._increaseBalance(account, value);
+    }
+
+    /// @dev Required override; reports ERC-165 interface support via ERC721Enumerable.
+    function supportsInterface(bytes4 interfaceId) public view override(ERC721Enumerable) returns (bool) {
+        return super.supportsInterface(interfaceId);
+    }
+}
diff --git a/src/swarms/ServiceProvider.sol b/src/swarms/ServiceProvider.sol
new file mode 100644
index 0000000..e4a777b
--- /dev/null
+++ b/src/swarms/ServiceProvider.sol
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+
+pragma solidity ^0.8.24;
+
+import {ERC721} from "@openzeppelin/contracts/token/ERC721/ERC721.sol";
+
+/**
+ * @title ServiceProvider
+ * @notice Permissionless ERC-721 representing ownership of a service endpoint URL.
+ * @dev TokenID = keccak256(url), guaranteeing one owner per URL.
+ */
+contract ServiceProvider is ERC721 {
+    error EmptyURL();
+    error NotTokenOwner();
+
+    /// @notice TokenID -> registered provider URL.
+    mapping(uint256 => string) public providerUrls;
+
+    event ProviderRegistered(address indexed owner, string url, uint256 indexed tokenId);
+    event ProviderBurned(address indexed owner, uint256 indexed tokenId);
+
+    constructor() ERC721("Swarm Service Provider", "SSV") {}
+
+    /// @notice Mints a new provider NFT for the given URL.
+    /// @dev The token ID is the keccak256 hash of the URL bytes, so registering
+    ///      an already-taken URL reverts inside `_mint` (token already exists).
+    /// @param url The backend service URL (must be non-empty and unique).
+    /// @return tokenId The deterministic token ID derived from `url`.
+    function registerProvider(string calldata url) external returns (uint256 tokenId) {
+        if (bytes(url).length == 0) revert EmptyURL();
+
+        tokenId = uint256(keccak256(bytes(url)));
+        providerUrls[tokenId] = url;
+        _mint(msg.sender, tokenId);
+
+        emit ProviderRegistered(msg.sender, url, tokenId);
+    }
+
+    /// @notice Burns the provider NFT, freeing its URL for re-registration.
+    /// @dev Only the current token owner may burn (approvals are not honored).
+    /// @param tokenId The provider token ID to burn.
+    function burn(uint256 tokenId) external {
+        if (ownerOf(tokenId) != msg.sender) revert NotTokenOwner();
+
+        delete providerUrls[tokenId];
+        _burn(tokenId);
+
+        emit ProviderBurned(msg.sender, tokenId);
+    }
+}
diff --git a/src/swarms/SwarmRegistryL1.sol b/src/swarms/SwarmRegistryL1.sol
new file mode 100644
index 0000000..70da9db
--- /dev/null
+++ b/src/swarms/SwarmRegistryL1.sol
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+
+pragma solidity ^0.8.24;
+
+// NOTE: SSTORE2 is not compatible with ZkSync Era due to EXTCODECOPY limitation.
+// For ZkSync deployment, consider using chunked storage or calldata alternatives.
+import {SSTORE2} from "solady/utils/SSTORE2.sol";
+import {ReentrancyGuard} from "@openzeppelin/contracts/utils/ReentrancyGuard.sol";
+import {FleetIdentity} from "./FleetIdentity.sol";
+import {ServiceProvider} from "./ServiceProvider.sol";
+
+/**
+ * @title SwarmRegistryL1
+ * @notice Permissionless BLE swarm registry optimized for Ethereum L1 (uses SSTORE2 for filter storage).
+ * @dev Not compatible with ZkSync Era — use SwarmRegistryUniversal instead.
+ */
+contract SwarmRegistryL1 is ReentrancyGuard {
+    error InvalidFingerprintSize();
+    error InvalidFilterSize();
+    error NotFleetOwner();
+    error ProviderDoesNotExist();
+    error NotProviderOwner();
+    error SwarmNotFound();
+    error InvalidSwarmData();
+    error SwarmAlreadyExists();
+    error SwarmNotOrphaned();
+    error SwarmOrphaned();
+
+    enum SwarmStatus {
+        REGISTERED,
+        ACCEPTED,
+        REJECTED
+    }
+
+    // Internal Schema version for Tag ID construction
+    enum TagType {
+        IBEACON_PAYLOAD_ONLY, // 0x00: proxUUID || major || minor
+        IBEACON_INCLUDES_MAC, // 0x01: proxUUID || major || minor || MAC (Normalized)
+        VENDOR_ID, // 0x02: companyID || hash(vendorBytes)
+        GENERIC // 0x03
+
+    }
+
+    struct Swarm {
+        uint256 fleetId; // The Fleet UUID (as uint)
+        uint256 providerId; // The Service Provider TokenID
+        address filterPointer; // SSTORE2 pointer; address(0) doubles as "swarm does not exist"
+        uint8 fingerprintSize;
+        TagType tagType;
+        SwarmStatus status;
+    }
+
+    uint8 public constant MAX_FINGERPRINT_SIZE = 16;
+
+    /// @notice Maximum filter blob size in bytes (mirrors SwarmRegistryUniversal.MAX_FILTER_SIZE).
+    uint32 public constant MAX_FILTER_SIZE = 24576;
+
+    FleetIdentity public immutable FLEET_CONTRACT;
+
+    ServiceProvider public immutable PROVIDER_CONTRACT;
+
+    // SwarmID -> Swarm
+    mapping(uint256 => Swarm) public swarms;
+
+    // FleetID -> List of SwarmIDs
+    mapping(uint256 => uint256[]) public fleetSwarms;
+
+    // SwarmID -> index in fleetSwarms[fleetId] (for O(1) removal)
+    mapping(uint256 => uint256) public swarmIndexInFleet;
+
+    event SwarmRegistered(uint256 indexed swarmId, uint256 indexed fleetId, uint256 indexed providerId, address owner);
+    event SwarmStatusChanged(uint256 indexed swarmId, SwarmStatus status);
+    event SwarmFilterUpdated(uint256 indexed swarmId, address indexed owner, uint32 filterSize);
+    event SwarmProviderUpdated(uint256 indexed swarmId, uint256 indexed oldProvider, uint256 indexed newProvider);
+    event SwarmDeleted(uint256 indexed swarmId, uint256 indexed fleetId, address indexed owner);
+    event SwarmPurged(uint256 indexed swarmId, uint256 indexed fleetId, address indexed purgedBy);
+
+    /// @notice Derives a deterministic swarm ID. Callable off-chain to predict IDs before registration.
+    /// @return swarmId keccak256(fleetId, providerId, filterData)
+    function computeSwarmId(uint256 fleetId, uint256 providerId, bytes calldata filterData)
+        public
+        pure
+        returns (uint256)
+    {
+        return uint256(keccak256(abi.encode(fleetId, providerId, filterData)));
+    }
+
+    constructor(address _fleetContract, address _providerContract) {
+        if (_fleetContract == address(0) || _providerContract == address(0)) {
+            revert InvalidSwarmData();
+        }
+        FLEET_CONTRACT = FleetIdentity(_fleetContract);
+        PROVIDER_CONTRACT = ServiceProvider(_providerContract);
+    }
+
+    /// @notice Registers a new swarm. Caller must own the fleet NFT.
+    /// @param fleetId Fleet token ID.
+    /// @param providerId Service provider token ID.
+    /// @param filterData XOR filter blob (1–MAX_FILTER_SIZE bytes).
+    /// @param fingerprintSize Fingerprint width in bits (1–16).
+    /// @param tagType Tag identity schema.
+    /// @return swarmId Deterministic ID for this swarm.
+    function registerSwarm(
+        uint256 fleetId,
+        uint256 providerId,
+        bytes calldata filterData,
+        uint8 fingerprintSize,
+        TagType tagType
+    ) external nonReentrant returns (uint256 swarmId) {
+        if (fingerprintSize == 0 || fingerprintSize > MAX_FINGERPRINT_SIZE) {
+            revert InvalidFingerprintSize();
+        }
+        if (filterData.length == 0 || filterData.length > MAX_FILTER_SIZE) {
+            revert InvalidFilterSize();
+        }
+
+        if (FLEET_CONTRACT.ownerOf(fleetId) != msg.sender) {
+            revert NotFleetOwner();
+        }
+        // Fix: OZ v5 `ownerOf` reverts for nonexistent tokens instead of returning
+        // address(0), so the previous `ownerOf(...) == address(0)` check was dead
+        // code and surfaced OZ's ERC721NonexistentToken error instead of ours.
+        _requireProviderExists(providerId);
+
+        swarmId = computeSwarmId(fleetId, providerId, filterData);
+
+        if (swarms[swarmId].filterPointer != address(0)) {
+            revert SwarmAlreadyExists();
+        }
+
+        Swarm storage s = swarms[swarmId];
+        s.fleetId = fleetId;
+        s.providerId = providerId;
+        s.fingerprintSize = fingerprintSize;
+        s.tagType = tagType;
+        s.status = SwarmStatus.REGISTERED;
+
+        fleetSwarms[fleetId].push(swarmId);
+        swarmIndexInFleet[swarmId] = fleetSwarms[fleetId].length - 1;
+
+        // filterPointer is written last; it is the existence sentinel.
+        s.filterPointer = SSTORE2.write(filterData);
+
+        emit SwarmRegistered(swarmId, fleetId, providerId, msg.sender);
+    }
+
+    /// @notice Approves a swarm. Caller must own the provider NFT.
+    /// @param swarmId The swarm to accept.
+    function acceptSwarm(uint256 swarmId) external {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterPointer == address(0)) revert SwarmNotFound();
+
+        (bool fleetValid, bool providerValid) = isSwarmValid(swarmId);
+        if (!fleetValid || !providerValid) revert SwarmOrphaned();
+
+        if (PROVIDER_CONTRACT.ownerOf(s.providerId) != msg.sender) {
+            revert NotProviderOwner();
+        }
+        s.status = SwarmStatus.ACCEPTED;
+        emit SwarmStatusChanged(swarmId, SwarmStatus.ACCEPTED);
+    }
+
+    /// @notice Rejects a swarm. Caller must own the provider NFT.
+    /// @param swarmId The swarm to reject.
+    function rejectSwarm(uint256 swarmId) external {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterPointer == address(0)) revert SwarmNotFound();
+
+        (bool fleetValid, bool providerValid) = isSwarmValid(swarmId);
+        if (!fleetValid || !providerValid) revert SwarmOrphaned();
+
+        if (PROVIDER_CONTRACT.ownerOf(s.providerId) != msg.sender) {
+            revert NotProviderOwner();
+        }
+        s.status = SwarmStatus.REJECTED;
+        emit SwarmStatusChanged(swarmId, SwarmStatus.REJECTED);
+    }
+
+    /// @notice Replaces the XOR filter. Resets status to REGISTERED. Caller must own the fleet NFT.
+    /// @dev The swarm ID is NOT recomputed; it remains keyed to the original
+    ///      registration data. The old SSTORE2 pointer is simply abandoned.
+    /// @param swarmId The swarm to update.
+    /// @param newFilterData Replacement filter blob.
+    function updateSwarmFilter(uint256 swarmId, bytes calldata newFilterData) external nonReentrant {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterPointer == address(0)) {
+            revert SwarmNotFound();
+        }
+        if (FLEET_CONTRACT.ownerOf(s.fleetId) != msg.sender) {
+            revert NotFleetOwner();
+        }
+        if (newFilterData.length == 0 || newFilterData.length > MAX_FILTER_SIZE) {
+            revert InvalidFilterSize();
+        }
+
+        // Any filter change invalidates a prior provider decision.
+        s.status = SwarmStatus.REGISTERED;
+
+        s.filterPointer = SSTORE2.write(newFilterData);
+
+        emit SwarmFilterUpdated(swarmId, msg.sender, uint32(newFilterData.length));
+    }
+
+    /// @notice Reassigns the service provider. Resets status to REGISTERED. Caller must own the fleet NFT.
+    /// @param swarmId The swarm to update.
+    /// @param newProviderId New provider token ID.
+    function updateSwarmProvider(uint256 swarmId, uint256 newProviderId) external {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterPointer == address(0)) {
+            revert SwarmNotFound();
+        }
+        if (FLEET_CONTRACT.ownerOf(s.fleetId) != msg.sender) {
+            revert NotFleetOwner();
+        }
+        // Fix: see registerSwarm — an `ownerOf(...) == address(0)` comparison is
+        // dead code under OZ v5; validate existence via try/catch instead.
+        _requireProviderExists(newProviderId);
+
+        uint256 oldProvider = s.providerId;
+
+        s.providerId = newProviderId;
+
+        // The new provider must re-approve the swarm.
+        s.status = SwarmStatus.REGISTERED;
+
+        emit SwarmProviderUpdated(swarmId, oldProvider, newProviderId);
+    }
+
+    /// @notice Permanently deletes a swarm. Caller must own the fleet NFT.
+    /// @param swarmId The swarm to delete.
+    function deleteSwarm(uint256 swarmId) external {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterPointer == address(0)) {
+            revert SwarmNotFound();
+        }
+        if (FLEET_CONTRACT.ownerOf(s.fleetId) != msg.sender) {
+            revert NotFleetOwner();
+        }
+
+        uint256 fleetId = s.fleetId;
+
+        _removeFromFleetSwarms(fleetId, swarmId);
+
+        delete swarms[swarmId];
+
+        emit SwarmDeleted(swarmId, fleetId, msg.sender);
+    }
+
+    /// @notice Returns whether the swarm's fleet and provider NFTs still exist (i.e. have not been burned).
+    /// @dev Uses try/catch because OZ v5 `ownerOf` reverts for nonexistent tokens.
+    /// @param swarmId The swarm to check.
+    /// @return fleetValid True if the fleet NFT exists.
+    /// @return providerValid True if the provider NFT exists.
+    function isSwarmValid(uint256 swarmId) public view returns (bool fleetValid, bool providerValid) {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterPointer == address(0)) revert SwarmNotFound();
+
+        try FLEET_CONTRACT.ownerOf(s.fleetId) returns (address) {
+            fleetValid = true;
+        } catch {
+            fleetValid = false;
+        }
+
+        try PROVIDER_CONTRACT.ownerOf(s.providerId) returns (address) {
+            providerValid = true;
+        } catch {
+            providerValid = false;
+        }
+    }
+
+    /// @dev Reverts with `ProviderDoesNotExist` when the provider NFT has been
+    ///      burned or never minted. Uses try/catch because OZ v5 `ownerOf`
+    ///      reverts (rather than returning address(0)) for nonexistent tokens.
+    function _requireProviderExists(uint256 providerId) internal view {
+        try PROVIDER_CONTRACT.ownerOf(providerId) returns (address) {
+            // Token exists; nothing to do.
+        } catch {
+            revert ProviderDoesNotExist();
+        }
+    }
+
+    /// @notice Permissionless-ly removes a swarm whose fleet or provider NFT has been burned.
+    /// @param swarmId The orphaned swarm to purge.
+    function purgeOrphanedSwarm(uint256 swarmId) external {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterPointer == address(0)) revert SwarmNotFound();
+
+        (bool fleetValid, bool providerValid) = isSwarmValid(swarmId);
+        if (fleetValid && providerValid) revert SwarmNotOrphaned();
+
+        uint256 fleetId = s.fleetId;
+
+        _removeFromFleetSwarms(fleetId, swarmId);
+
+        delete swarms[swarmId];
+
+        emit SwarmPurged(swarmId, fleetId, msg.sender);
+    }
+
+    /// @notice Tests tag membership against the swarm's XOR filter.
+    /// @param swarmId The swarm to query.
+    /// @param tagHash keccak256 of the tag identity bytes (caller must pre-normalize per tagType).
+    /// @return isValid True if the tag passes the XOR filter check.
+    function checkMembership(uint256 swarmId, bytes32 tagHash) external view returns (bool isValid) {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterPointer == address(0)) {
+            revert SwarmNotFound();
+        }
+
+        // Reject queries against orphaned swarms
+        (bool fleetValid, bool providerValid) = isSwarmValid(swarmId);
+        if (!fleetValid || !providerValid) revert SwarmOrphaned();
+
+        uint256 dataLen;
+        address pointer = s.filterPointer;
+        assembly {
+            dataLen := extcodesize(pointer)
+        }
+
+        // SSTORE2 adds 1 byte overhead (0x00), So actual data length = codeSize - 1.
+        if (dataLen > 0) {
+            unchecked {
+                --dataLen;
+            }
+        }
+
+        // 2. Calculate M (number of slots)
+        uint256 m = (dataLen * 8) / s.fingerprintSize;
+        if (m == 0) return false;
+
+        bytes32 h = tagHash;
+
+        // Derive three slot indices from disjoint 32-bit windows of the hash.
+        uint32 h1 = uint32(uint256(h)) % uint32(m);
+        uint32 h2 = uint32(uint256(h) >> 32) % uint32(m);
+        uint32 h3 = uint32(uint256(h) >> 64) % uint32(m);
+
+        uint256 fpMask = (1 << s.fingerprintSize) - 1;
+        uint256 expectedFp = (uint256(h) >> 96) & fpMask;
+
+        uint256 f1 = _readFingerprint(pointer, h1, s.fingerprintSize);
+        uint256 f2 = _readFingerprint(pointer, h2, s.fingerprintSize);
+        uint256 f3 = _readFingerprint(pointer, h3, s.fingerprintSize);
+
+        return (f1 ^ f2 ^ f3) == expectedFp;
+    }
+
+    /**
+     * @dev O(1) removal of a swarm from its fleet's swarm list using index tracking.
+     */
+    function _removeFromFleetSwarms(uint256 fleetId, uint256 swarmId) internal {
+        uint256[] storage arr = fleetSwarms[fleetId];
+        uint256 index = swarmIndexInFleet[swarmId];
+        uint256 lastId = arr[arr.length - 1];
+
+        arr[index] = lastId;
+        swarmIndexInFleet[lastId] = index;
+        arr.pop();
+        delete swarmIndexInFleet[swarmId];
+    }
+
+    /**
+     * @dev Reads a packed fingerprint of arbitrary bit size from SSTORE2 blob.
+     * @param pointer The contract address storing data.
+     * @param index The slot index.
+     * @param bits The bit size of the fingerprint.
+     */
+    function _readFingerprint(address pointer, uint256 index, uint8 bits) internal view returns (uint256) {
+        uint256 bitOffset = index * bits;
+        uint256 startByte = bitOffset / 8;
+        uint256 endByte = (bitOffset + bits - 1) / 8;
+
+        // Read raw bytes. SSTORE2 uses 0-based index relative to data.
+        bytes memory chunk = SSTORE2.read(pointer, startByte, endByte + 1);
+
+        // Convert chunk to uint256
+        uint256 raw;
+        for (uint256 i = 0; i < chunk.length;) {
+            raw = (raw << 8) | uint8(chunk[i]);
+            unchecked {
+                ++i;
+            }
+        }
+
+        // Drop the trailing bits that belong to the next slot, then mask.
+        uint256 totalBitsRead = chunk.length * 8;
+        uint256 localStart = bitOffset % 8;
+        uint256 shiftRight = totalBitsRead - (localStart + bits);
+
+        return (raw >> shiftRight) & ((1 << bits) - 1);
+    }
+}
diff --git a/src/swarms/SwarmRegistryUniversal.sol b/src/swarms/SwarmRegistryUniversal.sol
new file mode 100644
index 0000000..446312c
--- /dev/null
+++ b/src/swarms/SwarmRegistryUniversal.sol
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+
+pragma solidity ^0.8.24;
+
+import {ReentrancyGuard} from "@openzeppelin/contracts/utils/ReentrancyGuard.sol";
+import {FleetIdentity} from "./FleetIdentity.sol";
+import {ServiceProvider} from "./ServiceProvider.sol";
+
+/**
+ * @title SwarmRegistryUniversal
+ * @notice Permissionless BLE swarm registry compatible with all EVM chains (including ZkSync Era).
+ * @dev Uses native `bytes` storage for cross-chain compatibility.
+ */
+contract SwarmRegistryUniversal is ReentrancyGuard {
+ error InvalidFingerprintSize();
+ error InvalidFilterSize();
+ error NotFleetOwner();
+ error ProviderDoesNotExist();
+ error NotProviderOwner();
+ error SwarmNotFound();
+ error InvalidSwarmData();
+ error FilterTooLarge();
+ error SwarmAlreadyExists();
+ error SwarmNotOrphaned();
+ error SwarmOrphaned();
+
+ enum SwarmStatus {
+ REGISTERED,
+ ACCEPTED,
+ REJECTED
+ }
+
+ enum TagType {
+ IBEACON_PAYLOAD_ONLY, // 0x00: proxUUID || major || minor
+ IBEACON_INCLUDES_MAC, // 0x01: proxUUID || major || minor || MAC (Normalized)
+ VENDOR_ID, // 0x02: companyID || hash(vendorBytes)
+ GENERIC // 0x03
+
+ }
+
+ struct Swarm {
+ uint256 fleetId;
+ uint256 providerId;
+ uint32 filterLength; // Length of filter in bytes (max ~4GB, practically limited)
+ uint8 fingerprintSize;
+ TagType tagType;
+ SwarmStatus status;
+ }
+
+ uint8 public constant MAX_FINGERPRINT_SIZE = 16;
+
+ /// @notice Maximum filter size per swarm (24KB - fits in ~15M gas on cold write)
+ uint32 public constant MAX_FILTER_SIZE = 24576;
+
+ FleetIdentity public immutable FLEET_CONTRACT;
+
+ ServiceProvider public immutable PROVIDER_CONTRACT;
+
+ /// @notice SwarmID -> Swarm metadata
+ mapping(uint256 => Swarm) public swarms;
+
+ /// @notice SwarmID -> XOR filter data (stored as bytes)
+ mapping(uint256 => bytes) internal filterData;
+
+ /// @notice FleetID -> List of SwarmIDs
+ mapping(uint256 => uint256[]) public fleetSwarms;
+
+ /// @notice SwarmID -> index in fleetSwarms[fleetId] (for O(1) removal)
+ mapping(uint256 => uint256) public swarmIndexInFleet;
+
+ event SwarmRegistered(
+ uint256 indexed swarmId, uint256 indexed fleetId, uint256 indexed providerId, address owner, uint32 filterSize
+ );
+
+ event SwarmStatusChanged(uint256 indexed swarmId, SwarmStatus status);
+ event SwarmFilterUpdated(uint256 indexed swarmId, address indexed owner, uint32 filterSize);
+ event SwarmProviderUpdated(uint256 indexed swarmId, uint256 indexed oldProvider, uint256 indexed newProvider);
+ event SwarmDeleted(uint256 indexed swarmId, uint256 indexed fleetId, address indexed owner);
+ event SwarmPurged(uint256 indexed swarmId, uint256 indexed fleetId, address indexed purgedBy);
+
+    /// @notice Derives the deterministic swarm ID for a (fleet, provider, filter) triple.
+    /// @dev Pure, so it can be evaluated off-chain via eth_call to predict IDs before registration.
+    /// @return swarmId keccak256(fleetId, providerId, filter) as uint256.
+    function computeSwarmId(uint256 fleetId, uint256 providerId, bytes calldata filter) public pure returns (uint256) {
+        bytes32 digest = keccak256(abi.encode(fleetId, providerId, filter));
+        return uint256(digest);
+    }
+
+    /// @param _fleetContract Address of the FleetIdentity ERC-721 registry.
+    /// @param _providerContract Address of the ServiceProvider ERC-721 registry.
+    constructor(address _fleetContract, address _providerContract) {
+        // Both identity registries are mandatory; reject zero addresses up front.
+        if (_fleetContract == address(0)) revert InvalidSwarmData();
+        if (_providerContract == address(0)) revert InvalidSwarmData();
+        FLEET_CONTRACT = FleetIdentity(_fleetContract);
+        PROVIDER_CONTRACT = ServiceProvider(_providerContract);
+    }
+
+    /// @notice Registers a new swarm. Caller must own the fleet NFT.
+    /// @param fleetId Fleet token ID.
+    /// @param providerId Service provider token ID.
+    /// @param filter XOR filter blob (1–24,576 bytes).
+    /// @param fingerprintSize Fingerprint width in bits (1–16).
+    /// @param tagType Tag identity schema.
+    /// @return swarmId Deterministic ID for this swarm (see computeSwarmId).
+    function registerSwarm(
+        uint256 fleetId,
+        uint256 providerId,
+        bytes calldata filter,
+        uint8 fingerprintSize,
+        TagType tagType
+    ) external nonReentrant returns (uint256 swarmId) {
+        // ---- Checks ----
+        if (fingerprintSize == 0 || fingerprintSize > MAX_FINGERPRINT_SIZE) {
+            revert InvalidFingerprintSize();
+        }
+        if (filter.length == 0) {
+            revert InvalidFilterSize();
+        }
+        if (filter.length > MAX_FILTER_SIZE) {
+            revert FilterTooLarge();
+        }
+
+        if (FLEET_CONTRACT.ownerOf(fleetId) != msg.sender) {
+            revert NotFleetOwner();
+        }
+        // A standard (OpenZeppelin) ERC-721 ownerOf REVERTS for a nonexistent token
+        // instead of returning address(0), so a plain `== address(0)` comparison can
+        // never fire. Use try/catch (as isSwarmValid already does) so the registry's
+        // own ProviderDoesNotExist error is surfaced; keep the zero check to also
+        // cover non-reverting ERC-721 variants.
+        try PROVIDER_CONTRACT.ownerOf(providerId) returns (address providerOwner) {
+            if (providerOwner == address(0)) revert ProviderDoesNotExist();
+        } catch {
+            revert ProviderDoesNotExist();
+        }
+
+        swarmId = computeSwarmId(fleetId, providerId, filter);
+
+        // filterLength is never 0 for a live swarm, so it doubles as an existence flag.
+        if (swarms[swarmId].filterLength != 0) {
+            revert SwarmAlreadyExists();
+        }
+
+        // ---- Effects ----
+        Swarm storage s = swarms[swarmId];
+        s.fleetId = fleetId;
+        s.providerId = providerId;
+        s.filterLength = uint32(filter.length);
+        s.fingerprintSize = fingerprintSize;
+        s.tagType = tagType;
+        s.status = SwarmStatus.REGISTERED;
+
+        filterData[swarmId] = filter;
+
+        // Track the swarm's position for O(1) swap-and-pop removal later.
+        fleetSwarms[fleetId].push(swarmId);
+        swarmIndexInFleet[swarmId] = fleetSwarms[fleetId].length - 1;
+
+        emit SwarmRegistered(swarmId, fleetId, providerId, msg.sender, uint32(filter.length));
+    }
+
+    /// @notice Approves a swarm. Caller must own the provider NFT.
+    /// @param swarmId The swarm to accept.
+    function acceptSwarm(uint256 swarmId) external {
+        _setProviderDecision(swarmId, SwarmStatus.ACCEPTED);
+    }
+
+    /// @notice Rejects a swarm. Caller must own the provider NFT.
+    /// @param swarmId The swarm to reject.
+    function rejectSwarm(uint256 swarmId) external {
+        _setProviderDecision(swarmId, SwarmStatus.REJECTED);
+    }
+
+    /// @dev Shared accept/reject path: the swarm must exist, must not be orphaned,
+    ///      and the caller must own the referenced provider NFT.
+    /// @param swarmId The swarm being decided on.
+    /// @param newStatus The status the provider is assigning (ACCEPTED or REJECTED).
+    function _setProviderDecision(uint256 swarmId, SwarmStatus newStatus) internal {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterLength == 0) revert SwarmNotFound();
+
+        // No decisions on swarms whose fleet or provider NFT was burned.
+        (bool fleetValid, bool providerValid) = isSwarmValid(swarmId);
+        if (!fleetValid || !providerValid) revert SwarmOrphaned();
+
+        if (PROVIDER_CONTRACT.ownerOf(s.providerId) != msg.sender) {
+            revert NotProviderOwner();
+        }
+        s.status = newStatus;
+        emit SwarmStatusChanged(swarmId, newStatus);
+    }
+
+    /// @notice Replaces the XOR filter. Resets status to REGISTERED so the provider
+    ///         must re-approve. Caller must own the fleet NFT.
+    /// @dev NOTE(review): the swarm keeps its original ID even though IDs are derived
+    ///      from the filter bytes, so after an update the stored filter no longer
+    ///      matches computeSwarmId — confirm this is intended.
+    /// @param swarmId The swarm to update.
+    /// @param newFilterData Replacement filter blob.
+    function updateSwarmFilter(uint256 swarmId, bytes calldata newFilterData) external nonReentrant {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterLength == 0) revert SwarmNotFound();
+        if (FLEET_CONTRACT.ownerOf(s.fleetId) != msg.sender) revert NotFleetOwner();
+
+        uint256 newLength = newFilterData.length;
+        if (newLength == 0) revert InvalidFilterSize();
+        if (newLength > MAX_FILTER_SIZE) revert FilterTooLarge();
+
+        // Effects: store the new blob and force a fresh provider review.
+        s.filterLength = uint32(newLength);
+        s.status = SwarmStatus.REGISTERED;
+        filterData[swarmId] = newFilterData;
+
+        emit SwarmFilterUpdated(swarmId, msg.sender, uint32(newLength));
+    }
+
+    /// @notice Reassigns the service provider. Resets status to REGISTERED so the new
+    ///         provider must approve. Caller must own the fleet NFT.
+    /// @dev NOTE(review): the swarm keeps its original ID even though IDs are derived
+    ///      from the provider, so after an update the stored swarm no longer matches
+    ///      computeSwarmId — confirm this is intended.
+    /// @param swarmId The swarm to update.
+    /// @param newProviderId New provider token ID.
+    function updateSwarmProvider(uint256 swarmId, uint256 newProviderId) external {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterLength == 0) {
+            revert SwarmNotFound();
+        }
+        if (FLEET_CONTRACT.ownerOf(s.fleetId) != msg.sender) {
+            revert NotFleetOwner();
+        }
+        // A standard (OpenZeppelin) ERC-721 ownerOf REVERTS for a nonexistent token
+        // instead of returning address(0), so a plain `== address(0)` comparison can
+        // never fire. Use try/catch (as isSwarmValid already does) so
+        // ProviderDoesNotExist is surfaced; keep the zero check to also cover
+        // non-reverting ERC-721 variants.
+        try PROVIDER_CONTRACT.ownerOf(newProviderId) returns (address providerOwner) {
+            if (providerOwner == address(0)) revert ProviderDoesNotExist();
+        } catch {
+            revert ProviderDoesNotExist();
+        }
+
+        uint256 oldProvider = s.providerId;
+
+        // Effects — update provider and reset status for re-approval.
+        s.providerId = newProviderId;
+        s.status = SwarmStatus.REGISTERED;
+
+        emit SwarmProviderUpdated(swarmId, oldProvider, newProviderId);
+    }
+
+    /// @notice Permanently deletes a swarm and reclaims its storage. Caller must own
+    ///         the fleet NFT.
+    /// @param swarmId The swarm to delete.
+    function deleteSwarm(uint256 swarmId) external {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterLength == 0) revert SwarmNotFound();
+
+        uint256 fleetId = s.fleetId;
+        if (FLEET_CONTRACT.ownerOf(fleetId) != msg.sender) revert NotFleetOwner();
+
+        // Unlink from the fleet's swarm list, then clear all swarm state.
+        _removeFromFleetSwarms(fleetId, swarmId);
+        delete swarms[swarmId];
+        delete filterData[swarmId];
+
+        emit SwarmDeleted(swarmId, fleetId, msg.sender);
+    }
+
+    /// @notice Reports whether the swarm's fleet and provider NFTs still exist
+    ///         (i.e. have not been burned).
+    /// @param swarmId The swarm to check.
+    /// @return fleetValid True if the fleet NFT exists.
+    /// @return providerValid True if the provider NFT exists.
+    function isSwarmValid(uint256 swarmId) public view returns (bool fleetValid, bool providerValid) {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterLength == 0) revert SwarmNotFound();
+
+        // ownerOf reverts for burned/nonexistent tokens; a successful call means the
+        // token is live. Named returns default to false, so the catch arms are empty.
+        try FLEET_CONTRACT.ownerOf(s.fleetId) returns (address) {
+            fleetValid = true;
+        } catch {} // fleet NFT burned or never minted
+
+        try PROVIDER_CONTRACT.ownerOf(s.providerId) returns (address) {
+            providerValid = true;
+        } catch {} // provider NFT burned or never minted
+    }
+
+    /// @notice Permissionlessly removes a swarm whose fleet or provider NFT has been
+    ///         burned, reclaiming its storage.
+    /// @param swarmId The orphaned swarm to purge.
+    function purgeOrphanedSwarm(uint256 swarmId) external {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterLength == 0) revert SwarmNotFound();
+
+        // Only swarms with at least one burned NFT may be purged.
+        (bool fleetValid, bool providerValid) = isSwarmValid(swarmId);
+        if (fleetValid && providerValid) revert SwarmNotOrphaned();
+
+        uint256 fleetId = s.fleetId;
+
+        // Unlink from the fleet's swarm list, then clear all swarm state.
+        _removeFromFleetSwarms(fleetId, swarmId);
+        delete swarms[swarmId];
+        delete filterData[swarmId];
+
+        emit SwarmPurged(swarmId, fleetId, msg.sender);
+    }
+
+    /// @notice Tests tag membership against the swarm's XOR filter (3-hash XOR probe).
+    /// @dev Can return false positives (inherent to XOR filters), assuming the
+    ///      off-chain builder used the same index/fingerprint derivation shown below.
+    /// @param swarmId The swarm to query.
+    /// @param tagHash keccak256 of the tag identity bytes (caller must pre-normalize per tagType).
+    /// @return isValid True if the tag passes the XOR filter check.
+    function checkMembership(uint256 swarmId, bytes32 tagHash) external view returns (bool isValid) {
+        Swarm storage s = swarms[swarmId];
+        if (s.filterLength == 0) {
+            revert SwarmNotFound();
+        }
+
+        // Reject queries against orphaned swarms (fleet or provider NFT burned)
+        (bool fleetValid, bool providerValid) = isSwarmValid(swarmId);
+        if (!fleetValid || !providerValid) revert SwarmOrphaned();
+
+        bytes storage filter = filterData[swarmId];
+        uint256 dataLen = s.filterLength;
+
+        // Calculate M (number of fingerprint slots in the filter)
+        uint256 m = (dataLen * 8) / s.fingerprintSize;
+        if (m == 0) return false;
+
+        // Derive 3 slot indices from disjoint 32-bit windows of the tag hash.
+        // m <= MAX_FILTER_SIZE * 8 = 196608 (fingerprintSize >= 1), so uint32(m)
+        // never truncates.
+        uint32 h1 = uint32(uint256(tagHash)) % uint32(m);
+        uint32 h2 = uint32(uint256(tagHash) >> 32) % uint32(m);
+        uint32 h3 = uint32(uint256(tagHash) >> 64) % uint32(m);
+
+        // Expected fingerprint: the fingerprintSize bits directly above the index windows.
+        uint256 fpMask = (1 << s.fingerprintSize) - 1;
+        uint256 expectedFp = (uint256(tagHash) >> 96) & fpMask;
+
+        // Membership holds iff the XOR of the three probed fingerprints equals it.
+        uint256 f1 = _readFingerprint(filter, h1, s.fingerprintSize);
+        uint256 f2 = _readFingerprint(filter, h2, s.fingerprintSize);
+        uint256 f3 = _readFingerprint(filter, h3, s.fingerprintSize);
+
+        return (f1 ^ f2 ^ f3) == expectedFp;
+    }
+
+    /// @notice Returns the raw XOR filter bytes for a swarm.
+    /// @param swarmId The swarm to query.
+    /// @return filter The XOR filter blob.
+    function getFilterData(uint256 swarmId) external view returns (bytes memory filter) {
+        // filterLength doubles as an existence flag (never 0 for a live swarm).
+        if (swarms[swarmId].filterLength == 0) revert SwarmNotFound();
+        filter = filterData[swarmId];
+    }
+
+    /**
+     * @dev Removes `swarmId` from `fleetSwarms[fleetId]` in O(1) using swap-and-pop,
+     *      keeping `swarmIndexInFleet` consistent for the element moved into the gap.
+     */
+    function _removeFromFleetSwarms(uint256 fleetId, uint256 swarmId) internal {
+        uint256[] storage list = fleetSwarms[fleetId];
+        uint256 removedIndex = swarmIndexInFleet[swarmId];
+        uint256 movedId = list[list.length - 1];
+
+        // Overwrite the vacated slot with the tail element, then shrink the array.
+        // When removing the tail itself these writes are harmless no-ops.
+        list[removedIndex] = movedId;
+        swarmIndexInFleet[movedId] = removedIndex;
+        list.pop();
+        delete swarmIndexInFleet[swarmId];
+    }
+
+    /**
+     * @dev Reads one fingerprint, packed at an arbitrary bit offset, out of the
+     *      filter bytes held in storage.
+     * @param filter The filter bytes in storage.
+     * @param index The fingerprint slot index (0-based).
+     * @param bits The fingerprint size in bits (1-16, enforced at registration).
+     * @return The `bits`-wide fingerprint value, right-aligned in a uint256.
+     */
+    function _readFingerprint(bytes storage filter, uint256 index, uint8 bits) internal view returns (uint256) {
+        // Absolute bit position of the fingerprint inside the blob.
+        uint256 bitOffset = index * bits;
+        uint256 startByte = bitOffset / 8;
+        // Last byte touched (bits <= 16, so a fingerprint spans at most 3 bytes).
+        uint256 endByte = (bitOffset + bits - 1) / 8;
+
+        // Big-endian fold of the touched bytes into a single word.
+        uint256 raw;
+        for (uint256 i = startByte; i <= endByte;) {
+            raw = (raw << 8) | uint8(filter[i]);
+            unchecked {
+                ++i;
+            }
+        }
+
+        // Shift off the trailing bits that belong to the next slot, then mask away
+        // the leading bits that belong to the previous one.
+        uint256 totalBitsRead = (endByte - startByte + 1) * 8;
+        uint256 localStart = bitOffset % 8;
+        uint256 shiftRight = totalBitsRead - (localStart + bits);
+
+        return (raw >> shiftRight) & ((1 << bits) - 1);
+    }
+}
diff --git a/src/swarms/doc/README.md b/src/swarms/doc/README.md
new file mode 100644
index 0000000..f1ccf63
--- /dev/null
+++ b/src/swarms/doc/README.md
@@ -0,0 +1,49 @@
+# ISO 3166-2 Admin Area Mappings
+
+The [iso3166-2/](iso3166-2/) directory contains standardized mappings from ISO 3166-2 subdivision codes to dense numeric indices for use with the FleetIdentity contract.
+
+## File Naming Convention
+
+Each file in [iso3166-2/](iso3166-2/) is named `{ISO_3166-1_numeric}-{Country_Name}.md`
+
+Example: [840-United_States.md](iso3166-2/840-United_States.md) for the United States (ISO 3166-1 numeric: 840)
+
+## Table Format
+
+Each country file contains a mapping table with three columns:
+
+| Dense Index | ISO 3166-2 Code | Name |
+| ----------- | --------------- | --------------------- |
+| 0 | XX | Full subdivision name |
+
+- **Dense Index**: Sequential integers from 0 to n-1 (where n = number of subdivisions)
+- **ISO 3166-2 Code**: The subdivision code (1-3 alphanumeric characters, without country prefix)
+- **Name**: Full official name of the subdivision
+
+## Usage with FleetIdentity Contract
+
+The FleetIdentity contract uses:
+
+- **Country Code**: ISO 3166-1 numeric (1-999)
+- **Admin Code**: Dense index + 1 (1-4095)
+ - Add 1 to the dense index when calling contract functions
+ - Dense index 0 → adminCode 1
+ - Dense index 4094 → adminCode 4095
+
+## Example
+
+For California, USA:
+
+- Country: United States (ISO 3166-1 numeric: 840)
+- ISO 3166-2: US-CA
+- Dense Index: 4 (from table)
+- Contract adminCode: 5 (dense index + 1)
+- Region Key: `(840 << 12) | 5 = 3440645`
+
+## Coverage
+
+This directory aims to provide mappings for all countries with defined ISO 3166-2 subdivisions. Countries without official subdivisions may be omitted.
+
+## Data Sources
+
+Mappings are based on ISO 3166-2 standard as maintained by ISO and various national statistical agencies.
diff --git a/src/swarms/doc/assistant-guide.md b/src/swarms/doc/assistant-guide.md
new file mode 100644
index 0000000..ffa3cb4
--- /dev/null
+++ b/src/swarms/doc/assistant-guide.md
@@ -0,0 +1,209 @@
+# Swarm System Architecture & Implementation Guide
+
+> **Context for AI Agents**: This document outlines the architecture, constraints, and operational logic of the Swarm Smart Contract system. Use this context when modifying contracts, writing SDKs, or debugging verifiers.
+
+## 1. System Overview
+
+The Swarm System is a privacy-preserving registry for **BLE (Bluetooth Low Energy)** tag swarms. It allows Fleet Owners to manage large sets of tags (~10k-20k) and link them to Service Providers (Backend URLs) without revealing the individual identity of every tag on-chain.
+
+Two registry variants exist for different deployment targets:
+
+- **`SwarmRegistryL1`** — Ethereum L1, uses SSTORE2 (contract bytecode) for gas-efficient filter storage. Not compatible with ZkSync Era.
+- **`SwarmRegistryUniversal`** — All EVM chains including ZkSync Era, uses native `bytes` storage.
+
+### Core Components
+
+| Contract | Role | Key Identity | Token |
+| :--------------------------- | :---------------------------------- | :--------------------------------------- | :---- |
+| **`FleetIdentity`** | Fleet Registry (ERC-721 Enumerable) | `uint256(uint128(uuid))` | SFID |
+| **`ServiceProvider`** | Service Registry (ERC-721) | `keccak256(url)` | SSV |
+| **`SwarmRegistryL1`** | Swarm Registry (L1) | `keccak256(fleetId, providerId, filter)` | — |
+| **`SwarmRegistryUniversal`** | Swarm Registry (Universal) | `keccak256(fleetId, providerId, filter)` | — |
+
+All contracts are **permissionless** — access control is enforced through NFT ownership rather than admin roles. `FleetIdentity` additionally requires an ERC-20 bond (e.g. NODL) to register a fleet, acting as an anti-spam / anti-abuse mechanism.
+
+Both NFT contracts support **burning** — the token owner can call `burn(tokenId)` to destroy their NFT. Burning a `FleetIdentity` token refunds the full bond to the owner. Burning either NFT makes any swarms referencing that token _orphaned_.
+
+---
+
+## 2. Operational Workflows
+
+### A. Provider & Fleet Setup (One-Time)
+
+1. **Service Provider**: Calls `ServiceProvider.registerProvider("https://cms.example.com")`. Receives `providerTokenId` (= `keccak256(url)`).
+2. **Fleet Owner**:
+ 1. Approves the bond token: `NODL.approve(fleetIdentityAddress, bondAmount)`.
+ 2. Calls `FleetIdentity.registerFleet(0xUUID..., bondAmount)`. Receives `fleetId` (= `uint256(uint128(uuid))`). The `bondAmount` must be ≥ `MIN_BOND` (set at deploy).
+ 3. _(Optional)_ Calls `FleetIdentity.increaseBond(fleetId, additionalAmount)` to top-up later. Anyone can top-up any fleet's bond.
+
+### B. Swarm Registration (Per Batch of Tags)
+
+A Fleet Owner groups tags into a "Swarm" (chunk of ~10k-20k tags) and registers them.
+
+1. **Construct `TagID`s**: Generate the unique ID for every tag in the swarm (see "Tag Schemas" below).
+2. **Build XOR Filter**: Create a binary XOR filter (Peeling Algorithm) containing the hashes of all `TagID`s.
+3. **(Optional) Predict Swarm ID**: Call `computeSwarmId(fleetId, providerId, filterData)` off-chain to obtain the deterministic ID before submitting the transaction.
+4. **Register**:
+ ```solidity
+ swarmRegistry.registerSwarm(
+ fleetId,
+ providerId,
+ filterData,
+ 16, // Fingerprint size in bits (1–16)
+ TagType.IBEACON_INCLUDES_MAC // or PAYLOAD_ONLY, VENDOR_ID, GENERIC
+ );
+ // Returns the deterministic swarmId
+ ```
+
+### C. Swarm Approval Flow
+
+After registration a swarm starts in `REGISTERED` status and requires provider approval:
+
+1. **Provider approves**: `swarmRegistry.acceptSwarm(swarmId)` → status becomes `ACCEPTED`.
+2. **Provider rejects**: `swarmRegistry.rejectSwarm(swarmId)` → status becomes `REJECTED`.
+
+Only the owner of the provider NFT (`providerId`) can accept or reject.
+
+### D. Swarm Updates
+
+The fleet owner can modify a swarm at any time. Both operations reset status to `REGISTERED`, requiring fresh provider approval:
+
+- **Replace the XOR filter**: `swarmRegistry.updateSwarmFilter(swarmId, newFilterData)`
+- **Change service provider**: `swarmRegistry.updateSwarmProvider(swarmId, newProviderId)`
+
+### E. Swarm Deletion
+
+The fleet owner can permanently remove a swarm:
+
+```solidity
+swarmRegistry.deleteSwarm(swarmId);
+```
+
+### F. Orphan Detection & Cleanup
+
+When a fleet or provider NFT is burned, swarms referencing it become _orphaned_:
+
+- **Check validity**: `swarmRegistry.isSwarmValid(swarmId)` returns `(fleetValid, providerValid)`.
+- **Purge**: Anyone can call `swarmRegistry.purgeOrphanedSwarm(swarmId)` to remove stale state. Clearing the storage generates a gas refund (capped at one-fifth of the transaction's gas used since EIP-3529), which partially offsets the purge cost.
+- **Guards**: `acceptSwarm`, `rejectSwarm`, and `checkMembership` all revert with `SwarmOrphaned()` if the swarm's NFTs have been burned.
+
+---
+
+## 3. Off-Chain Logic: Filter & Tag Construction
+
+### Tag Schemas (`TagType`)
+
+The system supports different ways of constructing the unique `TagID` based on the hardware capabilities.
+
+**Enum: `TagType`**
+
+- **`0x00`: IBEACON_PAYLOAD_ONLY**
+ - **Format**: `UUID (16b) || Major (2b) || Minor (2b)`
+ - **Use Case**: When Major/Minor pairs are globally unique (standard iBeacon).
+- **`0x01`: IBEACON_INCLUDES_MAC**
+ - **Format**: `UUID (16b) || Major (2b) || Minor (2b) || MAC (6b)`
+ - **Use Case**: Anti-spoofing logic or Shared Major/Minor fleets.
+ - **CRITICAL: MAC Normalization Rule**:
+ - If MAC is **Public/Static** (Address Type bits `00`): Use the **Real MAC Address**.
+ - If MAC is **Random/Private** (Address Type bits `01` or `11`): Replace with `FF:FF:FF:FF:FF:FF`.
+ - _Why?_ To support rotating privacy MACs while still validating "It's a privacy tag".
+- **`0x02`: VENDOR_ID**
+ - **Format**: `companyID || hash(vendorBytes)`
+ - **Use Case**: Non-iBeacon BLE devices identified by Bluetooth SIG company ID.
+- **`0x03`: GENERIC**
+ - **Use Case**: Catch-all for custom tag identity schemes.
+
+### Filter Construction (The Math)
+
+To verify membership on-chain, the contract uses **3-hash XOR logic**.
+
+1. **Input**: `h = keccak256(TagID)` (where TagID is constructed via schema above).
+2. **Indices** (M = number of fingerprint slots = `filterLength * 8 / fingerprintSize`):
+ - `h1 = uint32(h) % M`
+ - `h2 = uint32(h >> 32) % M`
+ - `h3 = uint32(h >> 64) % M`
+3. **Fingerprint**: `fp = (h >> 96) & ((1 << fingerprintSize) - 1)`
+4. **Verification**: `Filter[h1] ^ Filter[h2] ^ Filter[h3] == fp`
+
+### Swarm ID Derivation
+
+Swarm IDs are **deterministic** — derived from the swarm's core identity:
+
+```
+swarmId = uint256(keccak256(abi.encode(fleetId, providerId, filterData)))
+```
+
+This means the same (fleet, provider, filter) triple always produces the same ID, and duplicate registrations revert with `SwarmAlreadyExists()`. The `computeSwarmId` function is `public pure`, so it can be called off-chain at zero cost via `eth_call`.
+
+---
+
+## 4. Client Discovery Flow (The "EdgeBeaconScanner" Perspective)
+
+A client (mobile phone or gateway) scans a BLE beacon and wants to find its owner and backend service.
+
+### Step 1: Scan & Detect
+
+- EdgeBeaconScanner detects iBeacon: `UUID: E2C5...`, `Major: 1`, `Minor: 50`, `MAC: AA:BB...`.
+
+### Step 2: Identify Fleet
+
+- EdgeBeaconScanner checks `FleetIdentity` contract.
+- Calls `ownerOf(uint256(uint128(uuid)))` — reverts if the fleet does not exist.
+- _(Optional)_ Reads `bonds(tokenId)` to assess fleet credibility.
+- **Result**: "This beacon belongs to Fleet #42".
+
+### Step 3: Find Swarms
+
+- EdgeBeaconScanner reads `swarmRegistry.fleetSwarms(42, index)` for each index (array of swarm IDs for that fleet).
+- **Result**: List of `SwarmID`s: `[101, 102, 105]`.
+
+### Step 4: Membership Check (Find the specific Swarm)
+
+For each SwarmID in the list:
+
+1. **Check Schema**: Get `swarms[101].tagType`.
+2. **Construct Candidate TagHash**:
+ - If `IBEACON_INCLUDES_MAC`: Check MAC byte. If Random, use `FF...FF`.
+ - Buffer = `UUID + Major + Minor + (NormalizedMAC)`.
+ - `hash = keccak256(Buffer)`.
+3. **Verify**:
+ - Call `swarmRegistry.checkMembership(101, hash)`.
+ - Reverts with `SwarmOrphaned()` if the fleet or provider NFT has been burned.
+4. **Result**:
+ - If `true`: **Found it!** This tag is in Swarm 101.
+ - If `false`: Try next swarm.
+
+### Step 5: Service Discovery
+
+Once Membership is confirmed (e.g., in Swarm 101):
+
+1. Get `swarms[101].providerId` (e.g., Provider #99).
+2. Call `ServiceProvider.providerUrls(99)`.
+3. **Result**: `"https://api.acme-tracking.com"`.
+4. **Check Status**: `swarms[101].status`.
+ - If `ACCEPTED` (1): Safe to connect.
+ - If `REGISTERED` (0): Provider has not yet approved — use with caution.
+ - If `REJECTED` (2): Do not connect.
+
+---
+
+## 5. Storage & Deletion Notes
+
+### SwarmRegistryL1 (SSTORE2)
+
+- Filter data is stored as **immutable contract bytecode** via SSTORE2.
+- On `deleteSwarm` / `purgeOrphanedSwarm`, the struct is cleared but the deployed bytecode **cannot be erased** (accepted trade-off of the SSTORE2 pattern).
+
+### SwarmRegistryUniversal (native bytes)
+
+- Filter data is stored in a `mapping(uint256 => bytes)`.
+- On `deleteSwarm` / `purgeOrphanedSwarm`, both the struct and the filter bytes are fully deleted (`delete filterData[swarmId]`), reclaiming storage.
+- Exposes `getFilterData(swarmId)` for off-chain filter retrieval.
+
+### Deletion Performance
+
+Both registries use an **O(1) swap-and-pop** strategy for removing swarms from the `fleetSwarms` array, tracked via the `swarmIndexInFleet` mapping.
+
+---
+
+**Note**: This architecture ensures that an EdgeBeaconScanner can go from **Raw Signal** → **Verified Service URL** entirely on-chain (data-wise), without a centralized indexer, while privacy of the 10,000 other tags in the swarm is preserved.
diff --git a/src/swarms/doc/graph-architecture.md b/src/swarms/doc/graph-architecture.md
new file mode 100644
index 0000000..8bf78f4
--- /dev/null
+++ b/src/swarms/doc/graph-architecture.md
@@ -0,0 +1,104 @@
+# Swarm System — Contract Architecture
+
+```mermaid
+graph TB
+ subgraph NFTs["Identity Layer (ERC-721)"]
+ FI["FleetIdentity
SFID
tokenId = uint128(uuid)"]
+ SP["ServiceProvider
SSV
tokenId = keccak256(url)"]
+ end
+
+ subgraph Registries["Registry Layer"]
+ REG["SwarmRegistry
L1 variant: SSTORE2 filter storage
Universal variant: native bytes storage"]
+ end
+
+ subgraph Actors
+ FO(("Fleet
Owner"))
+ PRV(("Service
Provider"))
+ ANY(("Anyone
(EdgeBeaconScanner / Purger)"))
+ end
+
+ FO -- "registerFleet(uuid, bondAmount)" --> FI
+ FO -- "registerSwarm / update / delete" --> REG
+ PRV -- "registerProvider(url)" --> SP
+ PRV -- "acceptSwarm / rejectSwarm" --> REG
+ ANY -- "checkMembership / purgeOrphanedSwarm" --> REG
+
+ REG -. "ownerOf(fleetId)" .-> FI
+ REG -. "ownerOf(providerId)" .-> SP
+
+ style FI fill:#4a9eff,color:#fff
+ style SP fill:#4a9eff,color:#fff
+ style REG fill:#ff9f43,color:#fff
+ style FO fill:#2ecc71,color:#fff
+ style PRV fill:#2ecc71,color:#fff
+ style ANY fill:#95a5a6,color:#fff
+```
+
+## Swarm Data Model
+
+```mermaid
+classDiagram
+ class FleetIdentity {
+ +IERC20 BOND_TOKEN (immutable)
+ +uint256 MIN_BOND (immutable)
+ +mapping bonds
+ +registerFleet(uuid, bondAmount) tokenId
+ +increaseBond(tokenId, amount)
+ +burn(tokenId)
+ +tokenUuid(tokenId) bytes16
+ +totalSupply() uint256
+ +tokenByIndex(index) uint256
+ +tokenOfOwnerByIndex(owner, index) uint256
+ }
+
+ class ServiceProvider {
+ +mapping providerUrls
+ +registerProvider(url) tokenId
+ +burn(tokenId)
+ }
+
+ class SwarmRegistry {
+ +mapping swarms
+ +mapping fleetSwarms
+ +mapping swarmIndexInFleet
+ +computeSwarmId(fleetId, providerId, filter) swarmId
+ +registerSwarm(fleetId, providerId, filter, fpSize, tagType) swarmId
+ +acceptSwarm(swarmId)
+ +rejectSwarm(swarmId)
+ +updateSwarmFilter(swarmId, newFilter)
+ +updateSwarmProvider(swarmId, newProviderId)
+ +deleteSwarm(swarmId)
+ +isSwarmValid(swarmId) fleetValid, providerValid
+ +purgeOrphanedSwarm(swarmId)
+ +checkMembership(swarmId, tagHash) bool
+ }
+
+    class Swarm {
+        uint256 fleetId
+        uint256 providerId
+        uint32 filterLength
+        uint8 fingerprintSize
+        TagType tagType
+        SwarmStatus status
+    }
+
+ class SwarmStatus {
+        <<enumeration>>
+ REGISTERED
+ ACCEPTED
+ REJECTED
+ }
+
+ class TagType {
+        <<enumeration>>
+ IBEACON_PAYLOAD_ONLY
+ IBEACON_INCLUDES_MAC
+ VENDOR_ID
+ GENERIC
+ }
+
+ SwarmRegistry --> FleetIdentity : validates ownership
+ SwarmRegistry --> ServiceProvider : validates ownership
+ SwarmRegistry *-- Swarm : stores
+ Swarm --> SwarmStatus
+ Swarm --> TagType
+```
diff --git a/src/swarms/doc/iso3166-2/036-Australia.md b/src/swarms/doc/iso3166-2/036-Australia.md
new file mode 100644
index 0000000..dba2257
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/036-Australia.md
@@ -0,0 +1,18 @@
+# Australia (036)
+
+ISO 3166-1 numeric: **036**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | ACT | Australian Capital Territory |
+| 1 | NSW | New South Wales |
+| 2 | NT | Northern Territory |
+| 3 | QLD | Queensland |
+| 4 | SA | South Australia |
+| 5 | TAS | Tasmania |
+| 6 | VIC | Victoria |
+| 7 | WA | Western Australia |
+
+**Total subdivisions:** 8
diff --git a/src/swarms/doc/iso3166-2/076-Brazil.md b/src/swarms/doc/iso3166-2/076-Brazil.md
new file mode 100644
index 0000000..8ab8d0f
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/076-Brazil.md
@@ -0,0 +1,37 @@
+# Brazil (076)
+
+ISO 3166-1 numeric: **076**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | AC | Acre |
+| 1 | AL | Alagoas |
+| 2 | AP | Amapá |
+| 3 | AM | Amazonas |
+| 4 | BA | Bahia |
+| 5 | CE | Ceará |
+| 6 | DF | Federal District |
+| 7 | ES | Espírito Santo |
+| 8 | GO | Goiás |
+| 9 | MA | Maranhão |
+| 10 | MT | Mato Grosso |
+| 11 | MS | Mato Grosso do Sul |
+| 12 | MG | Minas Gerais |
+| 13 | PA | Pará |
+| 14 | PB | Paraíba |
+| 15 | PR | Paraná |
+| 16 | PE | Pernambuco |
+| 17 | PI | Piauí |
+| 18 | RJ | Rio de Janeiro |
+| 19 | RN | Rio Grande do Norte |
+| 20 | RS | Rio Grande do Sul |
+| 21 | RO | Rondônia |
+| 22 | RR | Roraima |
+| 23 | SC | Santa Catarina |
+| 24 | SP | São Paulo |
+| 25 | SE | Sergipe |
+| 26 | TO | Tocantins |
+
+**Total subdivisions:** 27
diff --git a/src/swarms/doc/iso3166-2/124-Canada.md b/src/swarms/doc/iso3166-2/124-Canada.md
new file mode 100644
index 0000000..e8cadc2
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/124-Canada.md
@@ -0,0 +1,23 @@
+# Canada (124)
+
+ISO 3166-1 numeric: **124**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | AB | Alberta |
+| 1 | BC | British Columbia |
+| 2 | MB | Manitoba |
+| 3 | NB | New Brunswick |
+| 4 | NL | Newfoundland and Labrador |
+| 5 | NT | Northwest Territories |
+| 6 | NS | Nova Scotia |
+| 7 | NU | Nunavut |
+| 8 | ON | Ontario |
+| 9 | PE | Prince Edward Island |
+| 10 | QC | Quebec |
+| 11 | SK | Saskatchewan |
+| 12 | YT | Yukon |
+
+**Total subdivisions:** 13
diff --git a/src/swarms/doc/iso3166-2/156-China.md b/src/swarms/doc/iso3166-2/156-China.md
new file mode 100644
index 0000000..fc19630
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/156-China.md
@@ -0,0 +1,44 @@
+# China (156)
+
+ISO 3166-1 numeric: **156**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | AH | Anhui |
+| 1 | BJ | Beijing |
+| 2 | CQ | Chongqing |
+| 3 | FJ | Fujian |
+| 4 | GS | Gansu |
+| 5 | GD | Guangdong |
+| 6 | GX | Guangxi |
+| 7 | GZ | Guizhou |
+| 8 | HI | Hainan |
+| 9 | HE | Hebei |
+| 10 | HL | Heilongjiang |
+| 11 | HA | Henan |
+| 12 | HB | Hubei |
+| 13 | HN | Hunan |
+| 14 | JS | Jiangsu |
+| 15 | JX | Jiangxi |
+| 16 | JL | Jilin |
+| 17 | LN | Liaoning |
+| 18 | NM | Inner Mongolia |
+| 19 | NX | Ningxia |
+| 20 | QH | Qinghai |
+| 21 | SN | Shaanxi |
+| 22 | SD | Shandong |
+| 23 | SH | Shanghai |
+| 24 | SX | Shanxi |
+| 25 | SC | Sichuan |
+| 26 | TJ | Tianjin |
+| 27 | XJ | Xinjiang |
+| 28 | XZ | Tibet |
+| 29 | YN | Yunnan |
+| 30 | ZJ | Zhejiang |
+| 31 | HK | Hong Kong |
+| 32 | MO | Macao |
+| 33 | TW | Taiwan |
+
+**Total subdivisions:** 34
diff --git a/src/swarms/doc/iso3166-2/250-France.md b/src/swarms/doc/iso3166-2/250-France.md
new file mode 100644
index 0000000..7ba6907
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/250-France.md
@@ -0,0 +1,28 @@
+# France (250)
+
+ISO 3166-1 numeric: **250**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | ARA | Auvergne-Rhône-Alpes |
+| 1 | BFC | Bourgogne-Franche-Comté |
+| 2 | BRE | Brittany |
+| 3 | CVL | Centre-Val de Loire |
+| 4 | COR | Corsica |
+| 5 | GES | Grand Est |
+| 6 | HDF | Hauts-de-France |
+| 7 | IDF | Île-de-France |
+| 8 | NOR | Normandy |
+| 9 | NAQ | Nouvelle-Aquitaine |
+| 10 | OCC | Occitanie |
+| 11 | PDL | Pays de la Loire |
+| 12 | PAC | Provence-Alpes-Côte d'Azur |
+| 13 | GP | Guadeloupe |
+| 14 | MQ | Martinique |
+| 15 | GF | French Guiana |
+| 16 | RE | Réunion |
+| 17 | YT | Mayotte |
+
+**Total subdivisions:** 18
diff --git a/src/swarms/doc/iso3166-2/276-Germany.md b/src/swarms/doc/iso3166-2/276-Germany.md
new file mode 100644
index 0000000..30fb23e
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/276-Germany.md
@@ -0,0 +1,26 @@
+# Germany (276)
+
+ISO 3166-1 numeric: **276**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | BW | Baden-Württemberg |
+| 1 | BY | Bavaria |
+| 2 | BE | Berlin |
+| 3 | BB | Brandenburg |
+| 4 | HB | Bremen |
+| 5 | HH | Hamburg |
+| 6 | HE | Hesse |
+| 7 | MV | Mecklenburg-Vorpommern |
+| 8 | NI | Lower Saxony |
+| 9 | NW | North Rhine-Westphalia |
+| 10 | RP | Rhineland-Palatinate |
+| 11 | SL | Saarland |
+| 12 | SN | Saxony |
+| 13 | ST | Saxony-Anhalt |
+| 14 | SH | Schleswig-Holstein |
+| 15 | TH | Thuringia |
+
+**Total subdivisions:** 16
diff --git a/src/swarms/doc/iso3166-2/356-India.md b/src/swarms/doc/iso3166-2/356-India.md
new file mode 100644
index 0000000..1dc0595
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/356-India.md
@@ -0,0 +1,46 @@
+# India (356)
+
+ISO 3166-1 numeric: **356**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | AN | Andaman and Nicobar Islands |
+| 1 | AP | Andhra Pradesh |
+| 2 | AR | Arunachal Pradesh |
+| 3 | AS | Assam |
+| 4 | BR | Bihar |
+| 5 | CH | Chandigarh |
+| 6 | CT | Chhattisgarh |
+| 7 | DH | Dadra and Nagar Haveli and Daman and Diu |
+| 8 | DL | Delhi |
+| 9 | GA | Goa |
+| 10 | GJ | Gujarat |
+| 11 | HR | Haryana |
+| 12 | HP | Himachal Pradesh |
+| 13 | JK | Jammu and Kashmir |
+| 14 | JH | Jharkhand |
+| 15 | KA | Karnataka |
+| 16 | KL | Kerala |
+| 17 | LA | Ladakh |
+| 18 | LD | Lakshadweep |
+| 19 | MP | Madhya Pradesh |
+| 20 | MH | Maharashtra |
+| 21 | MN | Manipur |
+| 22 | ML | Meghalaya |
+| 23 | MZ | Mizoram |
+| 24 | NL | Nagaland |
+| 25 | OR | Odisha |
+| 26 | PY | Puducherry |
+| 27 | PB | Punjab |
+| 28 | RJ | Rajasthan |
+| 29 | SK | Sikkim |
+| 30 | TN | Tamil Nadu |
+| 31 | TG | Telangana |
+| 32 | TR | Tripura |
+| 33 | UP | Uttar Pradesh |
+| 34 | UT | Uttarakhand |
+| 35 | WB | West Bengal |
+
+**Total subdivisions:** 36
diff --git a/src/swarms/doc/iso3166-2/380-Italy.md b/src/swarms/doc/iso3166-2/380-Italy.md
new file mode 100644
index 0000000..741969a
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/380-Italy.md
@@ -0,0 +1,30 @@
+# Italy (380)
+
+ISO 3166-1 numeric: **380**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | 65 | Abruzzo |
+| 1 | 77 | Basilicata |
+| 2 | 78 | Calabria |
+| 3 | 72 | Campania |
+| 4 | 45 | Emilia-Romagna |
+| 5 | 36 | Friuli-Venezia Giulia |
+| 6 | 62 | Lazio |
+| 7 | 42 | Liguria |
+| 8 | 25 | Lombardy |
+| 9 | 57 | Marche |
+| 10 | 67 | Molise |
+| 11 | 21 | Piedmont |
+| 12 | 75 | Apulia |
+| 13 | 88 | Sardinia |
+| 14 | 82 | Sicily |
+| 15 | 52 | Tuscany |
+| 16 | 32 | Trentino-South Tyrol |
+| 17 | 55 | Umbria |
+| 18 | 23 | Aosta Valley |
+| 19 | 34 | Veneto |
+
+**Total subdivisions:** 20
diff --git a/src/swarms/doc/iso3166-2/392-Japan.md b/src/swarms/doc/iso3166-2/392-Japan.md
new file mode 100644
index 0000000..d8779e4
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/392-Japan.md
@@ -0,0 +1,57 @@
+# Japan (392)
+
+ISO 3166-1 numeric: **392**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | 01 | Hokkaido |
+| 1 | 02 | Aomori |
+| 2 | 03 | Iwate |
+| 3 | 04 | Miyagi |
+| 4 | 05 | Akita |
+| 5 | 06 | Yamagata |
+| 6 | 07 | Fukushima |
+| 7 | 08 | Ibaraki |
+| 8 | 09 | Tochigi |
+| 9 | 10 | Gunma |
+| 10 | 11 | Saitama |
+| 11 | 12 | Chiba |
+| 12 | 13 | Tokyo |
+| 13 | 14 | Kanagawa |
+| 14 | 15 | Niigata |
+| 15 | 16 | Toyama |
+| 16 | 17 | Ishikawa |
+| 17 | 18 | Fukui |
+| 18 | 19 | Yamanashi |
+| 19 | 20 | Nagano |
+| 20 | 21 | Gifu |
+| 21 | 22 | Shizuoka |
+| 22 | 23 | Aichi |
+| 23 | 24 | Mie |
+| 24 | 25 | Shiga |
+| 25 | 26 | Kyoto |
+| 26 | 27 | Osaka |
+| 27 | 28 | Hyogo |
+| 28 | 29 | Nara |
+| 29 | 30 | Wakayama |
+| 30 | 31 | Tottori |
+| 31 | 32 | Shimane |
+| 32 | 33 | Okayama |
+| 33 | 34 | Hiroshima |
+| 34 | 35 | Yamaguchi |
+| 35 | 36 | Tokushima |
+| 36 | 37 | Kagawa |
+| 37 | 38 | Ehime |
+| 38 | 39 | Kochi |
+| 39 | 40 | Fukuoka |
+| 40 | 41 | Saga |
+| 41 | 42 | Nagasaki |
+| 42 | 43 | Kumamoto |
+| 43 | 44 | Oita |
+| 44 | 45 | Miyazaki |
+| 45 | 46 | Kagoshima |
+| 46 | 47 | Okinawa |
+
+**Total subdivisions:** 47
diff --git a/src/swarms/doc/iso3166-2/410-South_Korea.md b/src/swarms/doc/iso3166-2/410-South_Korea.md
new file mode 100644
index 0000000..33d4cec
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/410-South_Korea.md
@@ -0,0 +1,27 @@
+# South Korea (410)
+
+ISO 3166-1 numeric: **410**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | 11 | Seoul |
+| 1 | 26 | Busan |
+| 2 | 27 | Daegu |
+| 3 | 28 | Incheon |
+| 4 | 29 | Gwangju |
+| 5 | 30 | Daejeon |
+| 6 | 31 | Ulsan |
+| 7 | 41 | Gyeonggi |
+| 8 | 42 | Gangwon |
+| 9 | 43 | North Chungcheong |
+| 10 | 44 | South Chungcheong |
+| 11 | 45 | North Jeolla |
+| 12 | 46 | South Jeolla |
+| 13 | 47 | North Gyeongsang |
+| 14 | 48 | South Gyeongsang |
+| 15 | 49 | Jeju |
+| 16 | 50 | Sejong |
+
+**Total subdivisions:** 17
diff --git a/src/swarms/doc/iso3166-2/484-Mexico.md b/src/swarms/doc/iso3166-2/484-Mexico.md
new file mode 100644
index 0000000..c6dc84d
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/484-Mexico.md
@@ -0,0 +1,42 @@
+# Mexico (484)
+
+ISO 3166-1 numeric: **484**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | AGU | Aguascalientes |
+| 1 | BCN | Baja California |
+| 2 | BCS | Baja California Sur |
+| 3 | CAM | Campeche |
+| 4 | CHP | Chiapas |
+| 5 | CHH | Chihuahua |
+| 6 | CMX | Mexico City |
+| 7 | COA | Coahuila |
+| 8 | COL | Colima |
+| 9 | DUR | Durango |
+| 10 | GUA | Guanajuato |
+| 11 | GRO | Guerrero |
+| 12 | HID | Hidalgo |
+| 13 | JAL | Jalisco |
+| 14 | MEX | State of Mexico |
+| 15 | MIC | Michoacán |
+| 16 | MOR | Morelos |
+| 17 | NAY | Nayarit |
+| 18 | NLE | Nuevo León |
+| 19 | OAX | Oaxaca |
+| 20 | PUE | Puebla |
+| 21 | QUE | Querétaro |
+| 22 | ROO | Quintana Roo |
+| 23 | SLP | San Luis Potosí |
+| 24 | SIN | Sinaloa |
+| 25 | SON | Sonora |
+| 26 | TAB | Tabasco |
+| 27 | TAM | Tamaulipas |
+| 28 | TLA | Tlaxcala |
+| 29 | VER | Veracruz |
+| 30 | YUC | Yucatán |
+| 31 | ZAC | Zacatecas |
+
+**Total subdivisions:** 32
diff --git a/src/swarms/doc/iso3166-2/566-Nigeria.md b/src/swarms/doc/iso3166-2/566-Nigeria.md
new file mode 100644
index 0000000..5c16c42
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/566-Nigeria.md
@@ -0,0 +1,47 @@
+# Nigeria (566)
+
+ISO 3166-1 numeric: **566**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | AB | Abia |
+| 1 | FC | Abuja Federal Capital Territory |
+| 2 | AD | Adamawa |
+| 3 | AK | Akwa Ibom |
+| 4 | AN | Anambra |
+| 5 | BA | Bauchi |
+| 6 | BY | Bayelsa |
+| 7 | BE | Benue |
+| 8 | BO | Borno |
+| 9 | CR | Cross River |
+| 10 | DE | Delta |
+| 11 | EB | Ebonyi |
+| 12 | ED | Edo |
+| 13 | EK | Ekiti |
+| 14 | EN | Enugu |
+| 15 | GO | Gombe |
+| 16 | IM | Imo |
+| 17 | JI | Jigawa |
+| 18 | KD | Kaduna |
+| 19 | KN | Kano |
+| 20 | KT | Katsina |
+| 21 | KE | Kebbi |
+| 22 | KO | Kogi |
+| 23 | KW | Kwara |
+| 24 | LA | Lagos |
+| 25 | NA | Nasarawa |
+| 26 | NI | Niger |
+| 27 | OG | Ogun |
+| 28 | ON | Ondo |
+| 29 | OS | Osun |
+| 30 | OY | Oyo |
+| 31 | PL | Plateau |
+| 32 | RI | Rivers |
+| 33 | SO | Sokoto |
+| 34 | TA | Taraba |
+| 35 | YO | Yobe |
+| 36 | ZA | Zamfara |
+
+**Total subdivisions:** 37
diff --git a/src/swarms/doc/iso3166-2/643-Russia.md b/src/swarms/doc/iso3166-2/643-Russia.md
new file mode 100644
index 0000000..b0ee930
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/643-Russia.md
@@ -0,0 +1,93 @@
+# Russia (643)
+
+ISO 3166-1 numeric: **643**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | AD | Adygea, Republic of |
+| 1 | AL | Altai Republic |
+| 2 | ALT | Altai Krai |
+| 3 | AMU | Amur Oblast |
+| 4 | ARK | Arkhangelsk Oblast |
+| 5 | AST | Astrakhan Oblast |
+| 6 | BA | Bashkortostan, Republic of |
+| 7 | BEL | Belgorod Oblast |
+| 8 | BRY | Bryansk Oblast |
+| 9 | BU | Buryatia, Republic of |
+| 10 | CE | Chechen Republic |
+| 11 | CHE | Chelyabinsk Oblast |
+| 12 | CHU | Chukotka Autonomous Okrug |
+| 13 | CU | Chuvash Republic |
+| 14 | DA | Dagestan, Republic of |
+| 15 | IN | Ingushetia, Republic of |
+| 16 | IRK | Irkutsk Oblast |
+| 17 | IVA | Ivanovo Oblast |
+| 18 | KB | Kabardino-Balkar Republic |
+| 19 | KGD | Kaliningrad Oblast |
+| 20 | KL | Kalmykia, Republic of |
+| 21 | KLU | Kaluga Oblast |
+| 22 | KAM | Kamchatka Krai |
+| 23 | KC | Karachay-Cherkess Republic |
+| 24 | KR | Karelia, Republic of |
+| 25 | KEM | Kemerovo Oblast |
+| 26 | KHA | Khabarovsk Krai |
+| 27 | KK | Khakassia, Republic of |
+| 28 | KHM | Khanty-Mansi Autonomous Okrug |
+| 29 | KIR | Kirov Oblast |
+| 30 | KO | Komi Republic |
+| 31 | KOS | Kostroma Oblast |
+| 32 | KDA | Krasnodar Krai |
+| 33 | KYA | Krasnoyarsk Krai |
+| 34 | KGN | Kurgan Oblast |
+| 35 | KRS | Kursk Oblast |
+| 36 | LEN | Leningrad Oblast |
+| 37 | LIP | Lipetsk Oblast |
+| 38 | MAG | Magadan Oblast |
+| 39 | ME | Mari El Republic |
+| 40 | MO | Mordovia, Republic of |
+| 41 | MOS | Moscow Oblast |
+| 42 | MOW | Moscow |
+| 43 | MUR | Murmansk Oblast |
+| 44 | NEN | Nenets Autonomous Okrug |
+| 45 | NIZ | Nizhny Novgorod Oblast |
+| 46 | NGR | Novgorod Oblast |
+| 47 | NVS | Novosibirsk Oblast |
+| 48 | OMS | Omsk Oblast |
+| 49 | ORE | Orenburg Oblast |
+| 50 | ORL | Oryol Oblast |
+| 51 | PNZ | Penza Oblast |
+| 52 | PER | Perm Krai |
+| 53 | PRI | Primorsky Krai |
+| 54 | PSK | Pskov Oblast |
+| 55 | ROS | Rostov Oblast |
+| 56 | RYA | Ryazan Oblast |
+| 57 | SA | Sakha (Yakutia), Republic of |
+| 58 | SAK | Sakhalin Oblast |
+| 59 | SAM | Samara Oblast |
+| 60 | SPE | Saint Petersburg |
+| 61 | SAR | Saratov Oblast |
+| 62 | SE | North Ossetia-Alania, Republic of |
+| 63 | SMO | Smolensk Oblast |
+| 64 | STA | Stavropol Krai |
+| 65 | SVE | Sverdlovsk Oblast |
+| 66 | TAM | Tambov Oblast |
+| 67 | TA | Tatarstan, Republic of |
+| 68 | TOM | Tomsk Oblast |
+| 69 | TUL | Tula Oblast |
+| 70 | TVE | Tver Oblast |
+| 71 | TY | Tuva Republic |
+| 72 | TYU | Tyumen Oblast |
+| 73 | UD | Udmurt Republic |
+| 74 | ULY | Ulyanovsk Oblast |
+| 75 | VLA | Vladimir Oblast |
+| 76 | VGG | Volgograd Oblast |
+| 77 | VLG | Vologda Oblast |
+| 78 | VOR | Voronezh Oblast |
+| 79 | YAN | Yamalo-Nenets Autonomous Okrug |
+| 80 | YAR | Yaroslavl Oblast |
+| 81 | YEV | Jewish Autonomous Oblast |
+| 82 | ZAB | Zabaykalsky Krai |
+
+**Total subdivisions:** 83
diff --git a/src/swarms/doc/iso3166-2/710-South_Africa.md b/src/swarms/doc/iso3166-2/710-South_Africa.md
new file mode 100644
index 0000000..67d256d
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/710-South_Africa.md
@@ -0,0 +1,19 @@
+# South Africa (710)
+
+ISO 3166-1 numeric: **710**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | EC | Eastern Cape |
+| 1 | FS | Free State |
+| 2 | GT | Gauteng |
+| 3 | NL | KwaZulu-Natal |
+| 4 | LP | Limpopo |
+| 5 | MP | Mpumalanga |
+| 6 | NW | North West |
+| 7 | NC | Northern Cape |
+| 8 | WC | Western Cape |
+
+**Total subdivisions:** 9
diff --git a/src/swarms/doc/iso3166-2/724-Spain.md b/src/swarms/doc/iso3166-2/724-Spain.md
new file mode 100644
index 0000000..22f90ce
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/724-Spain.md
@@ -0,0 +1,29 @@
+# Spain (724)
+
+ISO 3166-1 numeric: **724**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | AN | Andalusia |
+| 1 | AR | Aragon |
+| 2 | AS | Asturias, Principality of |
+| 3 | CN | Canary Islands |
+| 4 | CB | Cantabria |
+| 5 | CL | Castile and León |
+| 6 | CM | Castilla-La Mancha |
+| 7 | CT | Catalonia |
+| 8 | CE | Ceuta |
+| 9 | EX | Extremadura |
+| 10 | GA | Galicia |
+| 11 | IB | Balearic Islands |
+| 12 | RI | La Rioja |
+| 13 | MD | Community of Madrid |
+| 14 | ML | Melilla |
+| 15 | MC | Murcia, Region of |
+| 16 | NC | Navarre, Chartered Community of |
+| 17 | PV | Basque Country |
+| 18 | VC | Valencian Community |
+
+**Total subdivisions:** 19
diff --git a/src/swarms/doc/iso3166-2/756-Switzerland.md b/src/swarms/doc/iso3166-2/756-Switzerland.md
new file mode 100644
index 0000000..3a3cde6
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/756-Switzerland.md
@@ -0,0 +1,36 @@
+# Switzerland (756)
+
+ISO 3166-1 numeric: **756**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | AG | Aargau |
+| 1 | AI | Appenzell Innerrhoden |
+| 2 | AR | Appenzell Ausserrhoden |
+| 3 | BE | Bern |
+| 4 | BL | Basel-Landschaft |
+| 5 | BS | Basel-Stadt |
+| 6 | FR | Fribourg |
+| 7 | GE | Geneva |
+| 8 | GL | Glarus |
+| 9 | GR | Graubünden |
+| 10 | JU | Jura |
+| 11 | LU | Lucerne |
+| 12 | NE | Neuchâtel |
+| 13 | NW | Nidwalden |
+| 14 | OW | Obwalden |
+| 15 | SG | St. Gallen |
+| 16 | SH | Schaffhausen |
+| 17 | SO | Solothurn |
+| 18 | SZ | Schwyz |
+| 19 | TG | Thurgau |
+| 20 | TI | Ticino |
+| 21 | UR | Uri |
+| 22 | VD | Vaud |
+| 23 | VS | Valais |
+| 24 | ZG | Zug |
+| 25 | ZH | Zurich |
+
+**Total subdivisions:** 26
diff --git a/src/swarms/doc/iso3166-2/826-United_Kingdom.md b/src/swarms/doc/iso3166-2/826-United_Kingdom.md
new file mode 100644
index 0000000..ed7226c
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/826-United_Kingdom.md
@@ -0,0 +1,182 @@
+# United Kingdom (826)
+
+ISO 3166-1 numeric: **826**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | ENG | England |
+| 1 | NIR | Northern Ireland |
+| 2 | SCT | Scotland |
+| 3 | WLS | Wales |
+| 4 | BKM | Buckinghamshire |
+| 5 | CAM | Cambridgeshire |
+| 6 | CMA | Cumbria |
+| 7 | DBY | Derbyshire |
+| 8 | DEV | Devon |
+| 9 | DOR | Dorset |
+| 10 | ESX | East Sussex |
+| 11 | ESS | Essex |
+| 12 | GLS | Gloucestershire |
+| 13 | HAM | Hampshire |
+| 14 | HRT | Hertfordshire |
+| 15 | KEN | Kent |
+| 16 | LAN | Lancashire |
+| 17 | LEC | Leicestershire |
+| 18 | LIN | Lincolnshire |
+| 19 | NFK | Norfolk |
+| 20 | NYK | North Yorkshire |
+| 21 | NTH | Northamptonshire |
+| 22 | NTT | Nottinghamshire |
+| 23 | OXF | Oxfordshire |
+| 24 | SOM | Somerset |
+| 25 | STS | Staffordshire |
+| 26 | SFK | Suffolk |
+| 27 | SRY | Surrey |
+| 28 | WAR | Warwickshire |
+| 29 | WSX | West Sussex |
+| 30 | WOR | Worcestershire |
+| 31 | LND | London, City of |
+| 32 | BDG | Barking and Dagenham |
+| 33 | BNE | Barnet |
+| 34 | BEX | Bexley |
+| 35 | BEN | Brent |
+| 36 | BRY | Bromley |
+| 37 | CMD | Camden |
+| 38 | CRY | Croydon |
+| 39 | EAL | Ealing |
+| 40 | ENF | Enfield |
+| 41 | GRE | Greenwich |
+| 42 | HCK | Hackney |
+| 43 | HMF | Hammersmith and Fulham |
+| 44 | HRY | Haringey |
+| 45 | HRW | Harrow |
+| 46 | HAV | Havering |
+| 47 | HIL | Hillingdon |
+| 48 | HNS | Hounslow |
+| 49 | ISL | Islington |
+| 50 | KEC | Kensington and Chelsea |
+| 51 | KTT | Kingston upon Thames |
+| 52 | LBH | Lambeth |
+| 53 | LEW | Lewisham |
+| 54 | MRT | Merton |
+| 55 | NWM | Newham |
+| 56 | RDB | Redbridge |
+| 57 | RIC | Richmond upon Thames |
+| 58 | SWK | Southwark |
+| 59 | STN | Sutton |
+| 60 | TWH | Tower Hamlets |
+| 61 | WFT | Waltham Forest |
+| 62 | WND | Wandsworth |
+| 63 | WSM | Westminster |
+| 64 | BNS | Barnsley |
+| 65 | BIR | Birmingham |
+| 66 | BOL | Bolton |
+| 67 | BRD | Bradford |
+| 68 | BRI | Brighton and Hove |
+| 69 | BST | Bristol, City of |
+| 70 | CAL | Calderdale |
+| 71 | COV | Coventry |
+| 72 | DER | Derby |
+| 73 | DUD | Dudley |
+| 74 | GAT | Gateshead |
+| 75 | KIR | Kirklees |
+| 76 | KWL | Knowsley |
+| 77 | LDS | Leeds |
+| 78 | LCE | Leicester |
+| 79 | LIV | Liverpool |
+| 80 | MAN | Manchester |
+| 81 | NET | Newcastle upon Tyne |
+| 82 | NTY | North Tyneside |
+| 83 | OLD | Oldham |
+| 84 | PTE | Peterborough |
+| 85 | PLY | Plymouth |
+| 86 | RCH | Rochdale |
+| 87 | ROT | Rotherham |
+| 88 | SLF | Salford |
+| 89 | SAW | Sandwell |
+| 90 | SFT | Sefton |
+| 91 | SHF | Sheffield |
+| 92 | SOL | Solihull |
+| 93 | STY | South Tyneside |
+| 94 | SHN | Southampton |
+| 95 | SGC | South Gloucestershire |
+| 96 | STH | Southend-on-Sea |
+| 97 | SKP | Stockport |
+| 98 | STE | Stoke-on-Trent |
+| 99 | SND | Sunderland |
+| 100 | TAM | Tameside |
+| 101 | TRF | Trafford |
+| 102 | WKF | Wakefield |
+| 103 | WLL | Walsall |
+| 104 | WGN | Wigan |
+| 105 | WRL | Wirral |
+| 106 | WLV | Wolverhampton |
+| 107 | ABE | Aberdeen City |
+| 108 | ABD | Aberdeenshire |
+| 109 | ANS | Angus |
+| 110 | AGB | Argyll and Bute |
+| 111 | CLK | Clackmannanshire |
+| 112 | DGY | Dumfries and Galloway |
+| 113 | DND | Dundee City |
+| 114 | EAY | East Ayrshire |
+| 115 | EDU | East Dunbartonshire |
+| 116 | ELN | East Lothian |
+| 117 | ERW | East Renfrewshire |
+| 118 | EDH | Edinburgh, City of |
+| 119 | ELS | Eilean Siar |
+| 120 | FAL | Falkirk |
+| 121 | FIF | Fife |
+| 122 | GLG | Glasgow City |
+| 123 | HLD | Highland |
+| 124 | IVC | Inverclyde |
+| 125 | MLN | Midlothian |
+| 126 | MRY | Moray |
+| 127 | NAY | North Ayrshire |
+| 128 | NLK | North Lanarkshire |
+| 129 | ORK | Orkney Islands |
+| 130 | PKN | Perth and Kinross |
+| 131 | RFW | Renfrewshire |
+| 132 | SCB | Scottish Borders |
+| 133 | ZET | Shetland Islands |
+| 134 | SAY | South Ayrshire |
+| 135 | SLK | South Lanarkshire |
+| 136 | STG | Stirling |
+| 137 | WDU | West Dunbartonshire |
+| 138 | WLN | West Lothian |
+| 139 | BGW | Blaenau Gwent |
+| 140 | BGE | Bridgend |
+| 141 | CAY | Caerphilly |
+| 142 | CRF | Cardiff |
+| 143 | CMN | Carmarthenshire |
+| 144 | CGN | Ceredigion |
+| 145 | CWY | Conwy |
+| 146 | DEN | Denbighshire |
+| 147 | FLN | Flintshire |
+| 148 | GWN | Gwynedd |
+| 149 | AGY | Isle of Anglesey |
+| 150 | MTY | Merthyr Tydfil |
+| 151 | MON | Monmouthshire |
+| 152 | NTL | Neath Port Talbot |
+| 153 | NWP | Newport |
+| 154 | PEM | Pembrokeshire |
+| 155 | POW | Powys |
+| 156 | RCT | Rhondda Cynon Taf |
+| 157 | SWA | Swansea |
+| 158 | TOF | Torfaen |
+| 159 | VGL | Vale of Glamorgan |
+| 160 | WRX | Wrexham |
+| 161 | ANT | Antrim and Newtownabbey |
+| 162 | ARD | Ards and North Down |
+| 163 | ABC | Armagh City, Banbridge and Craigavon |
+| 164 | BFS | Belfast |
+| 165 | CCG | Causeway Coast and Glens |
+| 166 | DRS | Derry City and Strabane |
+| 167 | FMO | Fermanagh and Omagh |
+| 168 | LBC | Lisburn and Castlereagh |
+| 169 | MEA | Mid and East Antrim |
+| 170 | MUL | Mid Ulster |
+| 171 | NMD | Newry, Mourne and Down |
+
+**Total subdivisions:** 172
diff --git a/src/swarms/doc/iso3166-2/840-United_States.md b/src/swarms/doc/iso3166-2/840-United_States.md
new file mode 100644
index 0000000..7d0e497
--- /dev/null
+++ b/src/swarms/doc/iso3166-2/840-United_States.md
@@ -0,0 +1,67 @@
+# United States (840)
+
+ISO 3166-1 numeric: **840**
+
+## Admin Area Mappings
+
+| Dense Index | ISO 3166-2 | Name |
+|-------------|------------|------|
+| 0 | AL | Alabama |
+| 1 | AK | Alaska |
+| 2 | AZ | Arizona |
+| 3 | AR | Arkansas |
+| 4 | CA | California |
+| 5 | CO | Colorado |
+| 6 | CT | Connecticut |
+| 7 | DE | Delaware |
+| 8 | FL | Florida |
+| 9 | GA | Georgia |
+| 10 | HI | Hawaii |
+| 11 | ID | Idaho |
+| 12 | IL | Illinois |
+| 13 | IN | Indiana |
+| 14 | IA | Iowa |
+| 15 | KS | Kansas |
+| 16 | KY | Kentucky |
+| 17 | LA | Louisiana |
+| 18 | ME | Maine |
+| 19 | MD | Maryland |
+| 20 | MA | Massachusetts |
+| 21 | MI | Michigan |
+| 22 | MN | Minnesota |
+| 23 | MS | Mississippi |
+| 24 | MO | Missouri |
+| 25 | MT | Montana |
+| 26 | NE | Nebraska |
+| 27 | NV | Nevada |
+| 28 | NH | New Hampshire |
+| 29 | NJ | New Jersey |
+| 30 | NM | New Mexico |
+| 31 | NY | New York |
+| 32 | NC | North Carolina |
+| 33 | ND | North Dakota |
+| 34 | OH | Ohio |
+| 35 | OK | Oklahoma |
+| 36 | OR | Oregon |
+| 37 | PA | Pennsylvania |
+| 38 | RI | Rhode Island |
+| 39 | SC | South Carolina |
+| 40 | SD | South Dakota |
+| 41 | TN | Tennessee |
+| 42 | TX | Texas |
+| 43 | UT | Utah |
+| 44 | VT | Vermont |
+| 45 | VA | Virginia |
+| 46 | WA | Washington |
+| 47 | WV | West Virginia |
+| 48 | WI | Wisconsin |
+| 49 | WY | Wyoming |
+| 50 | DC | District of Columbia |
+| 51 | AS | American Samoa |
+| 52 | GU | Guam |
+| 53 | MP | Northern Mariana Islands |
+| 54 | PR | Puerto Rico |
+| 55 | UM | United States Minor Outlying Islands |
+| 56 | VI | Virgin Islands, U.S. |
+
+**Total subdivisions:** 57
diff --git a/src/swarms/doc/sequence-discovery.md b/src/swarms/doc/sequence-discovery.md
new file mode 100644
index 0000000..ea8cf32
--- /dev/null
+++ b/src/swarms/doc/sequence-discovery.md
@@ -0,0 +1,76 @@
+# Client Discovery Sequence
+
+## Full Discovery Flow: BLE Signal → Service URL
+
+```mermaid
+sequenceDiagram
+ actor EBS as EdgeBeaconScanner (Client)
+ participant FI as FleetIdentity
+ participant SR as SwarmRegistry
+ participant SP as ServiceProvider
+
+    Note over EBS: Detects iBeacon:<br/>UUID, Major, Minor, MAC
+
+ rect rgb(240, 248, 255)
+ Note right of EBS: Step 1 — Identify fleet
+ EBS ->>+ FI: ownerOf(uint128(uuid))
+ FI -->>- EBS: fleet owner address (fleet exists ✓)
+ end
+
+ rect rgb(255, 248, 240)
+ Note right of EBS: Step 2 — Enumerate swarms
+ EBS ->>+ SR: fleetSwarms(fleetId, 0)
+ SR -->>- EBS: swarmId_0
+ EBS ->>+ SR: fleetSwarms(fleetId, 1)
+ SR -->>- EBS: swarmId_1
+ Note over EBS: ... iterate until revert (end of array)
+ end
+
+ rect rgb(240, 255, 240)
+ Note right of EBS: Step 3 — Find matching swarm
+ Note over EBS: Read swarms[swarmId_0].tagType
+        Note over EBS: Construct tagId per schema:<br/>UUID || Major || Minor [|| MAC]
+ Note over EBS: tagHash = keccak256(tagId)
+ EBS ->>+ SR: checkMembership(swarmId_0, tagHash)
+ SR -->>- EBS: false (not in this swarm)
+
+ EBS ->>+ SR: checkMembership(swarmId_1, tagHash)
+ SR -->>- EBS: true ✓ (tag found!)
+ end
+
+ rect rgb(248, 240, 255)
+ Note right of EBS: Step 4 — Resolve service URL
+ EBS ->>+ SR: swarms(swarmId_1)
+ SR -->>- EBS: { providerId, status: ACCEPTED, ... }
+ EBS ->>+ SP: providerUrls(providerId)
+ SP -->>- EBS: "https://api.acme-tracking.com"
+ end
+
+ Note over EBS: Connect to service URL ✓
+```
+
+## Tag Hash Construction by TagType
+
+```mermaid
+flowchart TD
+ A[Read swarm.tagType] --> B{TagType?}
+
+    B -->|IBEACON_PAYLOAD_ONLY| C["tagId = UUID ∥ Major ∥ Minor<br/>(20 bytes)"]
+ B -->|IBEACON_INCLUDES_MAC| D{MAC type?}
+ B -->|VENDOR_ID| E["tagId = companyID ∥ hash(vendorBytes)"]
+ B -->|GENERIC| F["tagId = custom scheme"]
+
+    D -->|Public/Static| G["tagId = UUID ∥ Major ∥ Minor ∥ realMAC<br/>(26 bytes)"]
+    D -->|Random/Private| H["tagId = UUID ∥ Major ∥ Minor ∥ FF:FF:FF:FF:FF:FF<br/>(26 bytes)"]
+
+ C --> I["tagHash = keccak256(tagId)"]
+ G --> I
+ H --> I
+ E --> I
+ F --> I
+
+ I --> J["checkMembership(swarmId, tagHash)"]
+
+ style I fill:#4a9eff,color:#fff
+ style J fill:#2ecc71,color:#fff
+```
diff --git a/src/swarms/doc/sequence-lifecycle.md b/src/swarms/doc/sequence-lifecycle.md
new file mode 100644
index 0000000..cdcedfa
--- /dev/null
+++ b/src/swarms/doc/sequence-lifecycle.md
@@ -0,0 +1,112 @@
+# Swarm Lifecycle: Updates, Deletion & Orphan Cleanup
+
+## Swarm Status State Machine
+
+```mermaid
+stateDiagram-v2
+ [*] --> REGISTERED : registerSwarm()
+
+    REGISTERED --> ACCEPTED : acceptSwarm()<br/>(provider owner)
+    REGISTERED --> REJECTED : rejectSwarm()<br/>(provider owner)
+
+    ACCEPTED --> REGISTERED : updateSwarmFilter()<br/>updateSwarmProvider()<br/>(fleet owner)
+    REJECTED --> REGISTERED : updateSwarmFilter()<br/>updateSwarmProvider()<br/>(fleet owner)
+
+ REGISTERED --> [*] : deleteSwarm() / purge
+ ACCEPTED --> [*] : deleteSwarm() / purge
+ REJECTED --> [*] : deleteSwarm() / purge
+```
+
+## Update Flow (Fleet Owner)
+
+```mermaid
+sequenceDiagram
+ actor FO as Fleet Owner
+ participant SR as SwarmRegistry
+ participant FI as FleetIdentity
+
+ rect rgb(255, 248, 240)
+ Note right of FO: Update XOR filter
+ FO ->>+ SR: updateSwarmFilter(swarmId, newFilter)
+ SR ->>+ FI: ownerOf(fleetId)
+ FI -->>- SR: msg.sender ✓
+ Note over SR: Write new filter data
+ Note over SR: status → REGISTERED
+ SR -->>- FO: ✓ (requires provider re-approval)
+ end
+
+ rect rgb(240, 248, 255)
+ Note right of FO: Update service provider
+ FO ->>+ SR: updateSwarmProvider(swarmId, newProviderId)
+ SR ->>+ FI: ownerOf(fleetId)
+ FI -->>- SR: msg.sender ✓
+ Note over SR: providerId → newProviderId
+ Note over SR: status → REGISTERED
+ SR -->>- FO: ✓ (requires new provider approval)
+ end
+```
+
+## Deletion (Fleet Owner)
+
+```mermaid
+sequenceDiagram
+ actor FO as Fleet Owner
+ participant SR as SwarmRegistry
+ participant FI as FleetIdentity
+
+ FO ->>+ SR: deleteSwarm(swarmId)
+ SR ->>+ FI: ownerOf(fleetId)
+ FI -->>- SR: msg.sender ✓
+ Note over SR: Remove from fleetSwarms[] (O(1) swap-and-pop)
+ Note over SR: delete swarms[swarmId]
+ Note over SR: delete filterData[swarmId] (Universal only)
+ SR -->>- FO: ✓ SwarmDeleted event
+```
+
+## Orphan Detection & Permissionless Cleanup
+
+```mermaid
+sequenceDiagram
+ actor Owner as NFT Owner
+ actor Purger as Anyone
+ participant NFT as FleetIdentity / ServiceProvider
+ participant SR as SwarmRegistry
+
+ rect rgb(255, 240, 240)
+ Note right of Owner: NFT owner burns their token
+ Owner ->>+ NFT: burn(tokenId)
+        Note over NFT: If FleetIdentity: refunds full bond<br/>to token owner via BOND_TOKEN.safeTransfer
+ NFT -->>- Owner: ✓ token destroyed + bond refunded
+        Note over SR: Swarms referencing this token<br/>are now orphaned (lazy invalidation)
+ end
+
+ rect rgb(255, 248, 240)
+ Note right of Purger: Anyone checks validity
+ Purger ->>+ SR: isSwarmValid(swarmId)
+ SR ->>+ NFT: ownerOf(fleetId)
+ NFT -->>- SR: ❌ reverts (burned)
+ SR -->>- Purger: (false, true) — fleet invalid
+ end
+
+ rect rgb(240, 255, 240)
+ Note right of Purger: Anyone purges the orphan
+ Purger ->>+ SR: purgeOrphanedSwarm(swarmId)
+ Note over SR: Confirms at least one NFT is burned
+ Note over SR: Remove from fleetSwarms[] (O(1))
+ Note over SR: delete swarms[swarmId]
+ Note over SR: Gas refund → Purger
+ SR -->>- Purger: ✓ SwarmPurged event
+ end
+```
+
+## Orphan Guards (Automatic Rejection)
+
+```mermaid
+flowchart LR
+    A[acceptSwarm /<br/>rejectSwarm /<br/>checkMembership] --> B{isSwarmValid?}
+ B -->|Both NFTs exist| C[Proceed normally]
+ B -->|Fleet or Provider burned| D["❌ revert SwarmOrphaned()"]
+
+ style D fill:#e74c3c,color:#fff
+ style C fill:#2ecc71,color:#fff
+```
diff --git a/src/swarms/doc/sequence-registration.md b/src/swarms/doc/sequence-registration.md
new file mode 100644
index 0000000..37c4483
--- /dev/null
+++ b/src/swarms/doc/sequence-registration.md
@@ -0,0 +1,79 @@
+# Swarm Registration & Approval Sequence
+
+## One-Time Setup
+
+```mermaid
+sequenceDiagram
+ actor FO as Fleet Owner
+ actor PRV as Service Provider
+ participant FI as FleetIdentity
+ participant SP as ServiceProvider
+
+ Note over FO, SP: One-time setup (independent, any order)
+
+ Note over FO: Approve bond token first:
+ Note over FO: NODL.approve(FleetIdentity, bondAmount)
+
+ FO ->>+ FI: registerFleet(uuid, bondAmount)
+ Note over FI: Requires bondAmount ≥ MIN_BOND
+ Note over FI: Locks bondAmount of BOND_TOKEN
+ FI -->>- FO: fleetId = uint128(uuid)
+
+ PRV ->>+ SP: registerProvider(url)
+ SP -->>- PRV: providerId = keccak256(url)
+```
+
+## Swarm Registration & Approval
+
+```mermaid
+sequenceDiagram
+ actor FO as Fleet Owner
+ actor PRV as Provider Owner
+ participant SR as SwarmRegistry
+ participant FI as FleetIdentity
+ participant SP as ServiceProvider
+
+    Note over FO: Build XOR filter off-chain<br/>from tag set (Peeling Algorithm)
+
+ rect rgb(240, 248, 255)
+ Note right of FO: Registration (fleet owner)
+ FO ->>+ SR: registerSwarm(fleetId, providerId, filter, fpSize, tagType)
+ SR ->>+ FI: ownerOf(fleetId)
+ FI -->>- SR: msg.sender ✓
+ SR ->>+ SP: ownerOf(providerId)
+ SP -->>- SR: address ✓ (exists)
+ Note over SR: swarmId = keccak256(fleetId, providerId, filter)
+ Note over SR: status = REGISTERED
+ SR -->>- FO: swarmId
+ end
+
+ rect rgb(240, 255, 240)
+ Note right of PRV: Approval (provider owner)
+ alt Provider approves
+ PRV ->>+ SR: acceptSwarm(swarmId)
+ SR ->>+ SP: ownerOf(providerId)
+ SP -->>- SR: msg.sender ✓
+ Note over SR: status = ACCEPTED
+ SR -->>- PRV: ✓
+ else Provider rejects
+ PRV ->>+ SR: rejectSwarm(swarmId)
+ SR ->>+ SP: ownerOf(providerId)
+ SP -->>- SR: msg.sender ✓
+ Note over SR: status = REJECTED
+ SR -->>- PRV: ✓
+ end
+ end
+```
+
+## Duplicate Prevention
+
+```mermaid
+sequenceDiagram
+ actor FO as Fleet Owner
+ participant SR as SwarmRegistry
+
+ FO ->>+ SR: registerSwarm(fleetId, providerId, sameFilter, ...)
+ Note over SR: swarmId = keccak256(fleetId, providerId, sameFilter)
+ Note over SR: swarms[swarmId] already exists
+ SR -->>- FO: ❌ revert SwarmAlreadyExists()
+```
diff --git a/test/FleetIdentity.t.sol b/test/FleetIdentity.t.sol
new file mode 100644
index 0000000..60c98c8
--- /dev/null
+++ b/test/FleetIdentity.t.sol
@@ -0,0 +1,2999 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.24;
+
+import "forge-std/Test.sol";
+import "../src/swarms/FleetIdentity.sol";
+import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol";
+
+/// @dev Minimal ERC-20 mock with public mint for testing.
+contract MockERC20 is ERC20 {
+    constructor() ERC20("Mock Bond Token", "MBOND") {}
+
+    /// @dev Unrestricted mint — test-only; anyone may call.
+    function mint(address to, uint256 amount) external {
+        _mint(to, amount);
+    }
+}
+
+/// @dev ERC-20 that returns false on transfer instead of reverting.
+///      Lets tests verify that the contract under test treats a `false`
+///      return from transfer/transferFrom as a failure (SafeERC20-style
+///      handling) rather than silently accepting it.
+contract BadERC20 is ERC20 {
+    // When true, transfer/transferFrom return false without moving tokens.
+    bool public shouldFail;
+
+    constructor() ERC20("Bad Token", "BAD") {}
+
+    /// @dev Unrestricted mint — test-only.
+    function mint(address to, uint256 amount) external {
+        _mint(to, amount);
+    }
+
+    /// @dev Toggle failure mode for subsequent transfers.
+    function setFail(bool _fail) external {
+        shouldFail = _fail;
+    }
+
+    /// @dev Returns false (no revert, no state change) while shouldFail is set.
+    function transfer(address to, uint256 amount) public override returns (bool) {
+        if (shouldFail) return false;
+        return super.transfer(to, amount);
+    }
+
+    /// @dev Returns false (no revert, no state change) while shouldFail is set.
+    function transferFrom(address from, address to, uint256 amount) public override returns (bool) {
+        if (shouldFail) return false;
+        return super.transferFrom(from, to, amount);
+    }
+}
+
+contract FleetIdentityTest is Test {
+    // Contract under test and its bond token.
+    FleetIdentity fleet;
+    MockERC20 bondToken;
+
+    // Test actors.
+    address alice = address(0xA);
+    address bob = address(0xB);
+    address carol = address(0xC);
+
+    // Deterministic fleet UUIDs derived from readable labels.
+    bytes16 constant UUID_1 = bytes16(keccak256("fleet-alpha"));
+    bytes16 constant UUID_2 = bytes16(keccak256("fleet-bravo"));
+    bytes16 constant UUID_3 = bytes16(keccak256("fleet-charlie"));
+
+    // Base bond for tier 0 (local level, 1x multiplier).
+    uint256 constant BASE_BOND = 100 ether;
+
+    // Country codes (ISO 3166-1 numeric: 840=US, 276=DE, 250=FR, 392=JP)
+    // and arbitrary admin-area codes used throughout the tests.
+    uint16 constant US = 840;
+    uint16 constant DE = 276;
+    uint16 constant FR = 250;
+    uint16 constant JP = 392;
+    uint16 constant ADMIN_CA = 1;
+    uint16 constant ADMIN_NY = 2;
+
+    // Local re-declarations of FleetIdentity's events so vm.expectEmit can
+    // match emissions from the contract under test.
+    event FleetRegistered(
+        address indexed owner,
+        bytes16 indexed uuid,
+        uint256 indexed tokenId,
+        uint32 regionKey,
+        uint256 tierIndex,
+        uint256 bondAmount
+    );
+    event FleetPromoted(
+        uint256 indexed tokenId, uint256 indexed fromTier, uint256 indexed toTier, uint256 additionalBond
+    );
+    event FleetDemoted(uint256 indexed tokenId, uint256 indexed fromTier, uint256 indexed toTier, uint256 bondRefund);
+    event FleetBurned(
+        address indexed owner, uint256 indexed tokenId, uint32 indexed regionKey, uint256 tierIndex, uint256 bondRefund
+    );
+
+    /// @dev Deploys the bond token and FleetIdentity, funds all three actors,
+    ///      and grants blanket allowances so tests never hit allowance reverts.
+    function setUp() public {
+        bondToken = new MockERC20();
+        fleet = new FleetIdentity(address(bondToken), BASE_BOND);
+
+        // Mint enough for all 24 tiers (tier 23 bond = BASE_BOND * 2^23 ≈ 838M ether)
+        // Total for 8 members across 24 tiers ≈ 13.4 billion ether
+        bondToken.mint(alice, 100_000_000_000_000 ether);
+        bondToken.mint(bob, 100_000_000_000_000 ether);
+        bondToken.mint(carol, 100_000_000_000_000 ether);
+
+        // Max allowances: bond pulls in registration/promotion never fail on
+        // allowance, only on the behavior actually being tested.
+        vm.prank(alice);
+        bondToken.approve(address(fleet), type(uint256).max);
+        vm.prank(bob);
+        bondToken.approve(address(fleet), type(uint256).max);
+        vm.prank(carol);
+        bondToken.approve(address(fleet), type(uint256).max);
+    }
+
+ // --- Helpers ---
+
+    /// @dev Compute tokenId from (uuid, region) using new encoding:
+    ///      region occupies the high 128 bits, the uuid the low 128 bits,
+    ///      so ids are unique per (uuid, region) and the uuid round-trips.
+    function _tokenId(bytes16 uuid, uint32 region) internal pure returns (uint256) {
+        return (uint256(region) << 128) | uint256(uint128(uuid));
+    }
+
+    /// @dev Given a UUID from buildBundle, find tokenId by checking local first, then country.
+    ///      ERC-721 ownerOf reverts for nonexistent tokens, so the try/catch
+    ///      acts as an existence probe: if the local-region id has no owner,
+    ///      fall back to the country-level region key.
+    function _findTokenId(bytes16 uuid, uint16 cc, uint16 admin) internal view returns (uint256) {
+        uint32 localRegion = (uint32(cc) << 10) | uint32(admin);
+        uint256 localTokenId = _tokenId(uuid, localRegion);
+        // Check if local token exists by trying to get its owner
+        try fleet.ownerOf(localTokenId) returns (address) {
+            return localTokenId;
+        } catch {
+            uint32 countryRegion = uint32(cc);
+            return _tokenId(uuid, countryRegion);
+        }
+    }
+
+    /// @dev Deterministic test UUID derived from a numeric seed.
+    function _uuid(uint256 i) internal pure returns (bytes16) {
+        return bytes16(keccak256(abi.encodePacked("fleet-", i)));
+    }
+
+    /// @dev Country-level region key: just the country code, zero-extended.
+    function _regionUS() internal pure returns (uint32) {
+        return uint32(US);
+    }
+
+    function _regionDE() internal pure returns (uint32) {
+        return uint32(DE);
+    }
+
+    /// @dev Admin-level region key: `cc << 10 | admin`.
+    function _regionUSCA() internal pure returns (uint32) {
+        return (uint32(US) << 10) | uint32(ADMIN_CA);
+    }
+
+    function _regionUSNY() internal pure returns (uint32) {
+        return (uint32(US) << 10) | uint32(ADMIN_NY);
+    }
+
+    /// @dev Generic admin-level region key for arbitrary (cc, admin) pairs.
+    function _makeAdminRegion(uint16 cc, uint16 admin) internal pure returns (uint32) {
+        return (uint32(cc) << 10) | uint32(admin);
+    }
+
+    /// @dev Register `count` country-level fleets for `owner` under `cc`,
+    ///      packing fleets into consecutive tiers as each fills
+    ///      (tier = i / capacity). Reads TIER_CAPACITY from the contract
+    ///      instead of hard-coding 4, so the helper tracks the source of truth.
+    function _registerNCountry(address owner, uint16 cc, uint256 count, uint256 startSeed)
+        internal
+        returns (uint256[] memory ids)
+    {
+        uint256 capacity = fleet.TIER_CAPACITY();
+        ids = new uint256[](count);
+        for (uint256 i = 0; i < count; i++) {
+            vm.prank(owner);
+            ids[i] = fleet.registerFleetCountry(_uuid(startSeed + i), cc, i / capacity);
+        }
+    }
+
+    /// @dev Register `count` country-level fleets for `owner` under `cc`,
+    ///      all at the same explicit `tier`.
+    function _registerNCountryAt(address owner, uint16 cc, uint256 count, uint256 startSeed, uint256 tier)
+        internal
+        returns (uint256[] memory ids)
+    {
+        ids = new uint256[](count);
+        for (uint256 i = 0; i < count; i++) {
+            vm.prank(owner);
+            ids[i] = fleet.registerFleetCountry(_uuid(startSeed + i), cc, tier);
+        }
+    }
+
+    /// @dev Register `count` local (admin-area) fleets for `owner` under
+    ///      (`cc`, `admin`), packing fleets into consecutive tiers as each
+    ///      fills (tier = i / capacity). Reads TIER_CAPACITY from the
+    ///      contract instead of hard-coding 4.
+    function _registerNLocal(address owner, uint16 cc, uint16 admin, uint256 count, uint256 startSeed)
+        internal
+        returns (uint256[] memory ids)
+    {
+        uint256 capacity = fleet.TIER_CAPACITY();
+        ids = new uint256[](count);
+        for (uint256 i = 0; i < count; i++) {
+            vm.prank(owner);
+            ids[i] = fleet.registerFleetLocal(_uuid(startSeed + i), cc, admin, i / capacity);
+        }
+    }
+
+    /// @dev Register `count` local (admin-area) fleets for `owner` under
+    ///      (`cc`, `admin`), all at the same explicit `tier`.
+    function _registerNLocalAt(address owner, uint16 cc, uint16 admin, uint256 count, uint256 startSeed, uint256 tier)
+        internal
+        returns (uint256[] memory ids)
+    {
+        ids = new uint256[](count);
+        for (uint256 i = 0; i < count; i++) {
+            vm.prank(owner);
+            ids[i] = fleet.registerFleetLocal(_uuid(startSeed + i), cc, admin, tier);
+        }
+    }
+
+ // --- Constructor ---
+
+ function test_constructor_setsImmutables() public view {
+ assertEq(address(fleet.BOND_TOKEN()), address(bondToken));
+ assertEq(fleet.BASE_BOND(), BASE_BOND);
+ assertEq(fleet.name(), "Swarm Fleet Identity");
+ assertEq(fleet.symbol(), "SFID");
+ }
+
+ function test_constructor_constants() public view {
+ assertEq(fleet.TIER_CAPACITY(), 4);
+ assertEq(fleet.MAX_TIERS(), 24);
+ assertEq(fleet.MAX_BONDED_UUID_BUNDLE_SIZE(), 20);
+ assertEq(fleet.COUNTRY_BOND_MULTIPLIER(), 8);
+ }
+
+ // --- tierBond ---
+
+ function test_tierBond_local_tier0() public view {
+ // Local regions get 1× multiplier
+ assertEq(fleet.tierBond(0, false), BASE_BOND);
+ }
+
+ function test_tierBond_country_tier0() public view {
+ // Country regions get 8× multiplier
+ assertEq(fleet.tierBond(0, true), BASE_BOND * 8);
+ }
+
+ function test_tierBond_local_tier1() public view {
+ assertEq(fleet.tierBond(1, false), BASE_BOND * 2);
+ }
+
+ function test_tierBond_country_tier1() public view {
+ assertEq(fleet.tierBond(1, true), BASE_BOND * 8 * 2);
+ }
+
+    /// @dev Bonds must double per tier for both local (1x) and country (8x)
+    ///      levels. Checks the *entire* tier range rather than only tiers
+    ///      1-5, so the boundary tier (MAX_TIERS - 1 = 23) is covered too.
+    function test_tierBond_geometricProgression() public view {
+        uint256 maxTiers = fleet.MAX_TIERS();
+        for (uint256 i = 1; i < maxTiers; i++) {
+            assertEq(fleet.tierBond(i, false), fleet.tierBond(i - 1, false) * 2);
+            assertEq(fleet.tierBond(i, true), fleet.tierBond(i - 1, true) * 2);
+        }
+    }
+
+ // --- registerFleetCountry ---
+
+ function test_registerFleetCountry_auto_setsRegionAndTier() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 0);
+
+ assertEq(fleet.tokenRegion(tokenId), _regionUS());
+ assertEq(fleet.fleetTier(tokenId), 0);
+ assertEq(fleet.bonds(tokenId), BASE_BOND * 8); // Country gets 8× multiplier
+ assertEq(fleet.regionTierCount(_regionUS()), 1);
+ }
+
+ function test_registerFleetCountry_explicit_tier() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 3);
+
+ assertEq(fleet.fleetTier(tokenId), 3);
+ assertEq(fleet.bonds(tokenId), fleet.tierBond(3, true));
+ assertEq(fleet.regionTierCount(_regionUS()), 4);
+ }
+
+ function test_RevertIf_registerFleetCountry_invalidCode_zero() public {
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.InvalidCountryCode.selector);
+ fleet.registerFleetCountry(UUID_1, 0, 0);
+ }
+
+ function test_RevertIf_registerFleetCountry_invalidCode_over999() public {
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.InvalidCountryCode.selector);
+ fleet.registerFleetCountry(UUID_1, 1000, 0);
+ }
+
+ // --- registerFleetLocal ---
+
+ function test_registerFleetLocal_setsRegionAndTier() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ assertEq(fleet.tokenRegion(tokenId), _regionUSCA());
+ assertEq(fleet.fleetTier(tokenId), 0);
+ assertEq(fleet.bonds(tokenId), BASE_BOND);
+ }
+
+ function test_registerFleetLocal_explicit_tier() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2);
+
+ assertEq(fleet.fleetTier(tokenId), 2);
+ assertEq(fleet.bonds(tokenId), fleet.tierBond(2, false));
+ }
+
+ function test_RevertIf_registerFleetLocal_invalidCountry() public {
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.InvalidCountryCode.selector);
+ fleet.registerFleetLocal(UUID_1, 0, ADMIN_CA, 0);
+ }
+
+ function test_RevertIf_registerFleetLocal_invalidAdmin_zero() public {
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.InvalidAdminCode.selector);
+ fleet.registerFleetLocal(UUID_1, US, 0, 0);
+ }
+
+ function test_RevertIf_registerFleetLocal_invalidAdmin_over4095() public {
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.InvalidAdminCode.selector);
+ fleet.registerFleetLocal(UUID_1, US, 4096, 0);
+ }
+
+ // --- Per-region independent tier indexing (KEY REQUIREMENT) ---
+
+ function test_perRegionTiers_firstFleetInEachLevelPaysBondWithMultiplier() public {
+ // Country level pays 8× multiplier
+ vm.prank(alice);
+ uint256 c1 = fleet.registerFleetCountry(UUID_1, US, 0);
+ // Local level pays 1× multiplier
+ vm.prank(alice);
+ uint256 l1 = fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0);
+
+ assertEq(fleet.fleetTier(c1), 0);
+ assertEq(fleet.fleetTier(l1), 0);
+
+ assertEq(fleet.bonds(c1), BASE_BOND * 8); // Country gets 8× multiplier
+ assertEq(fleet.bonds(l1), BASE_BOND); // Local gets 1× multiplier
+ }
+
+ function test_perRegionTiers_fillOneRegionDoesNotAffectOthers() public {
+ // Fill US country tier 0 with 4 fleets
+ _registerNCountryAt(alice, US, 4, 0, 0);
+ assertEq(fleet.regionTierCount(_regionUS()), 1);
+ assertEq(fleet.tierMemberCount(_regionUS(), 0), 4);
+
+ // Next US country fleet goes to tier 1
+ vm.prank(bob);
+ uint256 us21 = fleet.registerFleetCountry(_uuid(100), US, 1);
+ assertEq(fleet.fleetTier(us21), 1);
+ assertEq(fleet.bonds(us21), BASE_BOND * 8 * 2); // Country tier 1: 8× * 2^1
+
+ // DE country is independent - can still join tier 0
+ vm.prank(bob);
+ uint256 de1 = fleet.registerFleetCountry(_uuid(200), DE, 0);
+ assertEq(fleet.fleetTier(de1), 0);
+ assertEq(fleet.bonds(de1), BASE_BOND * 8);
+ assertEq(fleet.regionTierCount(_regionDE()), 1);
+
+ // US local is independent - can still join tier 0
+ vm.prank(bob);
+ uint256 usca1 = fleet.registerFleetLocal(_uuid(300), US, ADMIN_CA, 0);
+ assertEq(fleet.fleetTier(usca1), 0);
+ assertEq(fleet.bonds(usca1), BASE_BOND);
+ }
+
+ function test_perRegionTiers_twoCountriesIndependent() public {
+ // Register 4 US country fleets at tier 0
+ _registerNCountryAt(alice, US, 4, 0, 0);
+ assertEq(fleet.tierMemberCount(_regionUS(), 0), 4);
+
+ // Next US country fleet explicitly goes to tier 1
+ vm.prank(bob);
+ uint256 us21 = fleet.registerFleetCountry(_uuid(500), US, 1);
+ assertEq(fleet.fleetTier(us21), 1);
+ assertEq(fleet.bonds(us21), BASE_BOND * 8 * 2); // Country tier 1: 8× * 2^1
+
+ // DE country is independent - can still join tier 0
+ vm.prank(bob);
+ uint256 de1 = fleet.registerFleetCountry(_uuid(600), DE, 0);
+ assertEq(fleet.fleetTier(de1), 0);
+ assertEq(fleet.bonds(de1), BASE_BOND * 8); // Country tier 0: 8× * 2^0
+ }
+
+ function test_perRegionTiers_twoAdminAreasIndependent() public {
+ // Register 4 local fleets at tier 0 in US/CA
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 0, 0);
+ assertEq(fleet.tierMemberCount(_regionUSCA(), 0), 4);
+
+ // NY is independent - can still join tier 0
+ vm.prank(bob);
+ uint256 ny1 = fleet.registerFleetLocal(_uuid(500), US, ADMIN_NY, 0);
+ assertEq(fleet.fleetTier(ny1), 0);
+ assertEq(fleet.bonds(ny1), BASE_BOND);
+ }
+
+ // --- Local inclusion hint tier logic ---
+
+ function test_localInclusionHint_emptyRegionReturnsTier0() public {
+ // No fleets anywhere — localInclusionHint returns tier 0.
+ (uint256 inclusionTier,) = fleet.localInclusionHint(US, ADMIN_CA);
+ assertEq(inclusionTier, 0);
+
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, inclusionTier);
+ assertEq(fleet.fleetTier(tokenId), 0);
+ assertEq(fleet.regionTierCount(_regionUSCA()), 1);
+ }
+
+ function test_localInclusionHint_returnsCheapestInclusionTier() public {
+ // Fill admin-area tier 0 (4 members) so tier 0 is full.
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 0, 0);
+
+ // localInclusionHint should return tier 1 (cheapest tier with capacity).
+ (uint256 inclusionTier,) = fleet.localInclusionHint(US, ADMIN_CA);
+ assertEq(inclusionTier, 1);
+
+ vm.prank(bob);
+ uint256 tokenId = fleet.registerFleetLocal(_uuid(100), US, ADMIN_CA, inclusionTier);
+ assertEq(fleet.fleetTier(tokenId), 1);
+ assertEq(fleet.regionTierCount(_regionUSCA()), 2);
+ }
+
+ // --- promote ---
+
+ function test_promote_next_movesToNextTierInRegion() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 0);
+
+ vm.prank(alice);
+ fleet.promote(tokenId);
+
+ assertEq(fleet.fleetTier(tokenId), 1);
+ assertEq(fleet.bonds(tokenId), fleet.tierBond(1, true));
+ }
+
+ function test_promote_next_pullsBondDifference() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ uint256 balBefore = bondToken.balanceOf(alice);
+ uint256 diff = fleet.tierBond(1, false) - fleet.tierBond(0, false);
+
+ vm.prank(alice);
+ fleet.promote(tokenId);
+
+ assertEq(bondToken.balanceOf(alice), balBefore - diff);
+ }
+
+ function test_reassignTier_promotesWhenTargetHigher() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(alice);
+ fleet.reassignTier(tokenId, 3);
+
+ assertEq(fleet.fleetTier(tokenId), 3);
+ assertEq(fleet.bonds(tokenId), fleet.tierBond(3, false));
+ assertEq(fleet.regionTierCount(_regionUSCA()), 4);
+ }
+
+ function test_promote_emitsEvent() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ uint256 diff = fleet.tierBond(1, false) - fleet.tierBond(0, false);
+
+ vm.expectEmit(true, true, true, true);
+ emit FleetPromoted(tokenId, 0, 1, diff);
+
+ vm.prank(alice);
+ fleet.promote(tokenId);
+ }
+
+ function test_RevertIf_promote_notOwner() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.NotTokenOwner.selector);
+ fleet.promote(tokenId);
+ }
+
+ function test_RevertIf_reassignTier_targetSameAsCurrent() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2);
+
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.TargetTierSameAsCurrent.selector);
+ fleet.reassignTier(tokenId, 2);
+ }
+
+ function test_RevertIf_promote_targetTierFull() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ // Fill tier 1 with 4 members
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(bob);
+ fleet.registerFleetLocal(_uuid(50 + i), US, ADMIN_CA, 1);
+ }
+
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.TierFull.selector);
+ fleet.promote(tokenId);
+ }
+
+ function test_RevertIf_reassignTier_exceedsMaxTiers() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.MaxTiersReached.selector);
+ fleet.reassignTier(tokenId, 50);
+ }
+
+ // --- reassignTier (demote direction) ---
+
+ function test_reassignTier_demotesWhenTargetLower() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetCountry(UUID_1, DE, 3);
+
+ vm.prank(alice);
+ fleet.reassignTier(tokenId, 1);
+
+ assertEq(fleet.fleetTier(tokenId), 1);
+ assertEq(fleet.bonds(tokenId), fleet.tierBond(1, true));
+ }
+
+ function test_reassignTier_demoteRefundsBondDifference() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 3);
+
+ uint256 balBefore = bondToken.balanceOf(alice);
+ uint256 refund = fleet.tierBond(3, false) - fleet.tierBond(1, false);
+
+ vm.prank(alice);
+ fleet.reassignTier(tokenId, 1);
+
+ assertEq(bondToken.balanceOf(alice), balBefore + refund);
+ }
+
+ function test_reassignTier_demoteEmitsEvent() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 3);
+ uint256 refund = fleet.tierBond(3, false) - fleet.tierBond(1, false);
+
+ vm.expectEmit(true, true, true, true);
+ emit FleetDemoted(tokenId, 3, 1, refund);
+
+ vm.prank(alice);
+ fleet.reassignTier(tokenId, 1);
+ }
+
+ function test_reassignTier_demoteTrimsTierCountWhenTopEmpties() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 3);
+ assertEq(fleet.regionTierCount(_regionUSCA()), 4);
+
+ vm.prank(alice);
+ fleet.reassignTier(tokenId, 0);
+ assertEq(fleet.regionTierCount(_regionUSCA()), 1);
+ }
+
+ function test_RevertIf_reassignTier_demoteNotOwner() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2);
+
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.NotTokenOwner.selector);
+ fleet.reassignTier(tokenId, 0);
+ }
+
+ function test_RevertIf_reassignTier_demoteTargetTierFull() public {
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 0, 0);
+
+ vm.prank(bob);
+ uint256 tokenId = fleet.registerFleetLocal(_uuid(100), US, ADMIN_CA, 2);
+
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.TierFull.selector);
+ fleet.reassignTier(tokenId, 0);
+ }
+
+ function test_RevertIf_reassignTier_promoteNotOwner() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.NotTokenOwner.selector);
+ fleet.reassignTier(tokenId, 3);
+ }
+
+ // --- burn ---
+
+ function test_burn_refundsTierBond() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ uint256 balBefore = bondToken.balanceOf(alice);
+
+ vm.prank(alice);
+ fleet.burn(tokenId);
+
+ assertEq(bondToken.balanceOf(alice), balBefore + BASE_BOND);
+ assertEq(bondToken.balanceOf(address(fleet)), 0);
+ assertEq(fleet.bonds(tokenId), 0);
+ }
+
+ function test_burn_emitsEvent() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.expectEmit(true, true, true, true);
+ emit FleetBurned(alice, tokenId, _regionUSCA(), 0, BASE_BOND);
+
+ vm.prank(alice);
+ fleet.burn(tokenId);
+ }
+
+ function test_burn_trimsTierCount() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 3);
+ assertEq(fleet.regionTierCount(_regionUS()), 4);
+
+ vm.prank(alice);
+ fleet.burn(tokenId);
+ assertEq(fleet.regionTierCount(_regionUS()), 0);
+ }
+
+ function test_burn_allowsReregistration_sameRegion() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(alice);
+ fleet.burn(tokenId);
+
+ // Same UUID can be re-registered in same region, same tokenId
+ vm.prank(bob);
+ uint256 newId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ assertEq(newId, tokenId);
+ assertEq(fleet.tokenRegion(newId), _regionUSCA());
+ }
+
+ function test_multiRegion_sameUuidCanRegisterInDifferentRegions() public {
+ // Same UUID can be registered in multiple regions simultaneously (by SAME owner, SAME level)
+ vm.prank(alice);
+ uint256 localId1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(alice);
+ uint256 localId2 = fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0);
+
+ // Different tokenIds for different regions
+ assertTrue(localId1 != localId2, "Different regions should have different tokenIds");
+
+ // Both have same UUID but different regions
+ assertEq(fleet.tokenUuid(localId1), UUID_1);
+ assertEq(fleet.tokenUuid(localId2), UUID_1);
+ assertEq(fleet.tokenRegion(localId1), _regionUSCA());
+ assertEq(fleet.tokenRegion(localId2), _makeAdminRegion(DE, ADMIN_CA));
+
+ // Both owned by alice
+ assertEq(fleet.ownerOf(localId1), alice);
+ assertEq(fleet.ownerOf(localId2), alice);
+ }
+
+ function test_RevertIf_burn_notOwner() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.NotTokenOwner.selector);
+ fleet.burn(tokenId);
+ }
+
+ // --- localInclusionHint ---
+
+ function test_localInclusionHint_emptyRegion() public view {
+ (uint256 tier, uint256 bond) = fleet.localInclusionHint(US, ADMIN_CA);
+ assertEq(tier, 0);
+ assertEq(bond, BASE_BOND);
+ }
+
+ function test_localInclusionHint_afterFillingAdminTier0() public {
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 0, 0);
+
+ // Admin tier 0 full → cheapest inclusion is tier 1.
+ (uint256 tier, uint256 bond) = fleet.localInclusionHint(US, ADMIN_CA);
+ assertEq(tier, 1);
+ assertEq(bond, BASE_BOND * 2);
+ }
+
+ // --- highestActiveTier ---
+
+ function test_highestActiveTier_noFleets() public view {
+ assertEq(fleet.highestActiveTier(_regionUS()), 0);
+ assertEq(fleet.highestActiveTier(_regionUSCA()), 0);
+ }
+
+ function test_highestActiveTier_afterRegistrations() public {
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 3);
+ assertEq(fleet.highestActiveTier(_regionUS()), 3);
+
+ // Different region still at 0
+ assertEq(fleet.highestActiveTier(_regionDE()), 0);
+ }
+
+ // --- EdgeBeaconScanner helpers ---
+
+ function test_tierMemberCount_perRegion() public {
+ _registerNLocalAt(alice, US, ADMIN_CA, 3, 0, 0);
+ _registerNCountryAt(bob, US, 4, 100, 0);
+
+ assertEq(fleet.tierMemberCount(_regionUSCA(), 0), 3);
+ assertEq(fleet.tierMemberCount(_regionUS(), 0), 4);
+ }
+
+ function test_getTierMembers_perRegion() public {
+ vm.prank(alice);
+ uint256 usId = fleet.registerFleetCountry(UUID_1, US, 0);
+
+ vm.prank(bob);
+ uint256 uscaId = fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0);
+
+ uint256[] memory usMembers = fleet.getTierMembers(_regionUS(), 0);
+ assertEq(usMembers.length, 1);
+ assertEq(usMembers[0], usId);
+
+ uint256[] memory uscaMembers = fleet.getTierMembers(_regionUSCA(), 0);
+ assertEq(uscaMembers.length, 1);
+ assertEq(uscaMembers[0], uscaId);
+ }
+
+ function test_getTierUuids_perRegion() public {
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+
+ vm.prank(bob);
+ fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0);
+
+ bytes16[] memory usUUIDs = fleet.getTierUuids(_regionUS(), 0);
+ assertEq(usUUIDs.length, 1);
+ assertEq(usUUIDs[0], UUID_1);
+
+ bytes16[] memory uscaUUIDs = fleet.getTierUuids(_regionUSCA(), 0);
+ assertEq(uscaUUIDs.length, 1);
+ assertEq(uscaUUIDs[0], UUID_2);
+ }
+
+ // --- Region indexes ---
+
+ function test_activeCountries_addedOnRegistration() public {
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+ vm.prank(bob);
+ fleet.registerFleetCountry(UUID_2, DE, 0);
+
+ uint16[] memory countries = fleet.getActiveCountries();
+ assertEq(countries.length, 2);
+ }
+
+ function test_activeCountries_removedWhenAllBurned() public {
+ vm.prank(alice);
+ uint256 id1 = fleet.registerFleetCountry(UUID_1, US, 0);
+
+ uint16[] memory before_ = fleet.getActiveCountries();
+ assertEq(before_.length, 1);
+
+ vm.prank(alice);
+ fleet.burn(id1);
+
+ uint16[] memory after_ = fleet.getActiveCountries();
+ assertEq(after_.length, 0);
+ }
+
+ function test_activeCountries_notDuplicated() public {
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+ vm.prank(bob);
+ fleet.registerFleetCountry(UUID_2, US, 0);
+
+ uint16[] memory countries = fleet.getActiveCountries();
+ assertEq(countries.length, 1);
+ assertEq(countries[0], US);
+ }
+
+ function test_activeAdminAreas_trackedCorrectly() public {
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ vm.prank(bob);
+ fleet.registerFleetLocal(UUID_2, US, ADMIN_NY, 0);
+
+ uint32[] memory areas = fleet.getActiveAdminAreas();
+ assertEq(areas.length, 2);
+ }
+
+ function test_activeAdminAreas_removedWhenAllBurned() public {
+ vm.prank(alice);
+ uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ assertEq(fleet.getActiveAdminAreas().length, 1);
+
+ vm.prank(alice);
+ fleet.burn(id1);
+
+ assertEq(fleet.getActiveAdminAreas().length, 0);
+ }
+
+ // --- Region key helpers ---
+
+ function test_countryRegionKey() public view {
+ assertEq(fleet.countryRegionKey(US), uint32(US));
+ assertEq(fleet.countryRegionKey(DE), uint32(DE));
+ }
+
+ function test_adminRegionKey() public view {
+ assertEq(fleet.adminRegionKey(US, ADMIN_CA), (uint32(US) << 10) | uint32(ADMIN_CA));
+ }
+
+    /// @dev Country keys occupy [1, 999]; the smallest admin key is
+    ///      (1 << 10) | 1 = 1025, so the two key spaces cannot collide.
+    ///      NOTE(review): admin codes >= 1024 would spill into the country
+    ///      bits of `cc << 10 | admin`, yet the revert test above implies
+    ///      admins up to 4095 are accepted — confirm aliasing is impossible
+    ///      or intended in the contract.
+    function test_regionKeyNoOverlap_countryVsAdmin() public pure {
+        uint32 maxCountry = 999;
+        uint32 minAdmin = (uint32(1) << 10) | uint32(1);
+        assertTrue(minAdmin > maxCountry);
+    }
+
+ // --- tokenUuid / bonds ---
+
+ function test_tokenUuid_roundTrip() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ assertEq(fleet.tokenUuid(tokenId), UUID_1);
+ }
+
+ function test_bonds_returnsTierBond() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ assertEq(fleet.bonds(tokenId), BASE_BOND);
+ }
+
+ function test_bonds_zeroForNonexistentToken() public view {
+ assertEq(fleet.bonds(99999), 0);
+ }
+
+ // --- ERC721Enumerable ---
+
+ function test_enumerable_totalSupply() public {
+ assertEq(fleet.totalSupply(), 0);
+
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+ assertEq(fleet.totalSupply(), 1);
+
+ vm.prank(bob);
+ fleet.registerFleetCountry(UUID_2, DE, 0);
+ assertEq(fleet.totalSupply(), 2);
+
+ vm.prank(carol);
+ fleet.registerFleetLocal(UUID_3, US, ADMIN_CA, 0);
+ assertEq(fleet.totalSupply(), 3);
+ }
+
+ function test_enumerable_supportsInterface() public view {
+ assertTrue(fleet.supportsInterface(0x780e9d63));
+ assertTrue(fleet.supportsInterface(0x80ac58cd));
+ assertTrue(fleet.supportsInterface(0x01ffc9a7));
+ }
+
+ // --- Bond accounting ---
+
+ function test_bondAccounting_acrossRegions() public {
+ vm.prank(alice);
+ uint256 c1 = fleet.registerFleetCountry(UUID_1, US, 0);
+ vm.prank(bob);
+ uint256 c2 = fleet.registerFleetCountry(UUID_2, DE, 0);
+ vm.prank(carol);
+ uint256 l1 = fleet.registerFleetLocal(UUID_3, US, ADMIN_CA, 0);
+
+ // c1 and c2 are country (8× multiplier), l1 is local (1× multiplier)
+ assertEq(bondToken.balanceOf(address(fleet)), BASE_BOND * 8 + BASE_BOND * 8 + BASE_BOND);
+
+ vm.prank(bob);
+ fleet.burn(c2);
+ assertEq(bondToken.balanceOf(address(fleet)), BASE_BOND * 8 + BASE_BOND);
+
+ vm.prank(alice);
+ fleet.burn(c1);
+ vm.prank(carol);
+ fleet.burn(l1);
+ assertEq(bondToken.balanceOf(address(fleet)), 0);
+ }
+
+ function test_bondAccounting_reassignTierRoundTrip() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ uint256 balStart = bondToken.balanceOf(alice);
+
+ vm.prank(alice);
+ fleet.reassignTier(tokenId, 3);
+
+ vm.prank(alice);
+ fleet.reassignTier(tokenId, 0);
+
+ assertEq(bondToken.balanceOf(alice), balStart);
+ assertEq(fleet.bonds(tokenId), BASE_BOND);
+ }
+
+ // --- ERC-20 edge case ---
+
+ function test_RevertIf_bondToken_transferFromReturnsFalse() public {
+ BadERC20 badToken = new BadERC20();
+ FleetIdentity f = new FleetIdentity(address(badToken), BASE_BOND);
+
+ badToken.mint(alice, 1_000 ether);
+ vm.prank(alice);
+ badToken.approve(address(f), type(uint256).max);
+
+ badToken.setFail(true);
+
+ vm.prank(alice);
+ vm.expectRevert();
+ f.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ }
+
+ // --- Transfer preserves region and tier ---
+
+ function test_transfer_regionAndTierStayWithToken() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 2);
+
+ vm.prank(alice);
+ fleet.transferFrom(alice, bob, tokenId);
+
+ assertEq(fleet.tokenRegion(tokenId), _regionUS());
+ assertEq(fleet.fleetTier(tokenId), 2);
+ assertEq(fleet.bonds(tokenId), fleet.tierBond(2, true));
+
+ uint256 bobBefore = bondToken.balanceOf(bob);
+ vm.prank(bob);
+ fleet.burn(tokenId);
+ assertEq(bondToken.balanceOf(bob), bobBefore + fleet.tierBond(2, true));
+ }
+
+ // --- Tier lifecycle ---
+
+ function test_tierLifecycle_fillBurnBackfillPerRegion() public {
+ // Register 4 US country fleets at tier 0 (fills capacity)
+ uint256[] memory usIds = _registerNCountryAt(alice, US, 4, 0, 0);
+ assertEq(fleet.tierMemberCount(_regionUS(), 0), 4);
+
+ // Next country fleet goes to tier 1
+ vm.prank(bob);
+ uint256 us5 = fleet.registerFleetCountry(_uuid(100), US, 1);
+ assertEq(fleet.fleetTier(us5), 1);
+
+ // Burn from tier 0 — now tier 0 has 3, tier 1 has 1.
+ vm.prank(alice);
+ fleet.burn(usIds[3]);
+
+ // Explicitly register into tier 1.
+ vm.prank(carol);
+ uint256 backfill = fleet.registerFleetCountry(_uuid(200), US, 1);
+ assertEq(fleet.fleetTier(backfill), 1);
+ assertEq(fleet.tierMemberCount(_regionUS(), 1), 2);
+ }
+
+ // --- Edge cases ---
+
+ function test_zeroBaseBond_allowsRegistration() public {
+ FleetIdentity f = new FleetIdentity(address(bondToken), 0);
+ vm.prank(alice);
+ bondToken.approve(address(f), type(uint256).max);
+
+ vm.prank(alice);
+ uint256 tokenId = f.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ assertEq(f.bonds(tokenId), 0);
+
+ vm.prank(alice);
+ f.burn(tokenId);
+ }
+
+ // --- Fuzz Tests ---
+
+ function testFuzz_registerFleetCountry_validCountryCodes(uint16 cc) public {
+ cc = uint16(bound(cc, 1, 999));
+
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetCountry(UUID_1, cc, 0);
+
+ assertEq(fleet.tokenRegion(tokenId), uint32(cc));
+ assertEq(fleet.fleetTier(tokenId), 0);
+ assertEq(fleet.bonds(tokenId), BASE_BOND * 8); // Country gets 8× multiplier
+ }
+
+    /// @dev Fuzz: any valid (cc, admin) pair registers at tier 0 with the
+    ///      expected region key and base bond.
+    function testFuzz_registerFleetLocal_validCodes(uint16 cc, uint16 admin) public {
+        cc = uint16(bound(cc, 1, 999));
+        // Cover the full 10-bit admin field of `cc << 10 | admin` (the prior
+        // bound of 255 left most of the space unfuzzed). Admins >= 1024 are
+        // excluded here because they would alias into the country bits and
+        // make `expectedRegion` ambiguous.
+        admin = uint16(bound(admin, 1, 1023));
+
+        vm.prank(alice);
+        uint256 tokenId = fleet.registerFleetLocal(UUID_1, cc, admin, 0);
+
+        uint32 expectedRegion = (uint32(cc) << 10) | uint32(admin);
+        assertEq(fleet.tokenRegion(tokenId), expectedRegion);
+        assertEq(fleet.fleetTier(tokenId), 0);
+        assertEq(fleet.bonds(tokenId), BASE_BOND);
+    }
+
+ function testFuzz_promote_onlyOwner(address caller) public {
+ vm.assume(caller != alice);
+ vm.assume(caller != address(0));
+
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(caller);
+ vm.expectRevert(FleetIdentity.NotTokenOwner.selector);
+ fleet.promote(tokenId);
+ }
+
+ function testFuzz_burn_onlyOwner(address caller) public {
+ vm.assume(caller != alice);
+ vm.assume(caller != address(0));
+
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(caller);
+ vm.expectRevert(FleetIdentity.NotTokenOwner.selector);
+ fleet.burn(tokenId);
+ }
+
+ // ══════════════════════════════════════════════
+ // UUID Ownership Enforcement Tests
+ // ══════════════════════════════════════════════
+
+ // First registration of a UUID records the registrant as uuidOwner and starts the token count.
+ function test_uuidOwner_setOnFirstRegistration() public {
+ assertEq(fleet.uuidOwner(UUID_1), address(0), "No owner before registration");
+ assertEq(fleet.uuidTokenCount(UUID_1), 0, "No tokens before registration");
+
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ assertEq(fleet.uuidOwner(UUID_1), alice, "Alice is UUID owner after registration");
+ assertEq(fleet.uuidTokenCount(UUID_1), 1, "Token count is 1 after registration");
+ }
+
+ // The uuidOwner may register the same UUID in multiple regions at the same level.
+ function test_uuidOwner_sameOwnerCanRegisterMultipleRegions() public {
+ // Alice registers UUID_1 in first region (same level across all)
+ vm.prank(alice);
+ uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ // Alice can register same UUID in second region (same level)
+ vm.prank(alice);
+ uint256 id2 = fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0);
+
+ // And a third region (same level)
+ vm.prank(alice);
+ uint256 id3 = fleet.registerFleetLocal(UUID_1, FR, ADMIN_CA, 0);
+
+ assertEq(fleet.uuidOwner(UUID_1), alice, "Alice is still UUID owner");
+ assertEq(fleet.uuidTokenCount(UUID_1), 3, "Token count is 3");
+ assertEq(fleet.ownerOf(id1), alice);
+ assertEq(fleet.ownerOf(id2), alice);
+ assertEq(fleet.ownerOf(id3), alice);
+ }
+
+ function test_RevertIf_differentOwnerRegistersSameUuid_local() public {
+ // Alice registers UUID_1 first
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ // Bob tries to register same UUID in different region → revert
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.UuidOwnerMismatch.selector);
+ fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0);
+ }
+
+ function test_RevertIf_differentOwnerRegistersSameUuid_country() public {
+ // Alice registers UUID_1 first
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+
+ // Bob tries to register same UUID in different country → revert
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.UuidOwnerMismatch.selector);
+ fleet.registerFleetCountry(UUID_1, DE, 0);
+ }
+
+ // Owner mismatch is checked even when the second registration targets a different level.
+ function test_RevertIf_differentOwnerRegistersSameUuid_crossLevel() public {
+ // Alice registers UUID_1 at country level
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+
+ // Bob tries to register same UUID at local level → revert
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.UuidOwnerMismatch.selector);
+ fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0);
+ }
+
+ function test_uuidOwner_clearedWhenAllTokensBurned() public {
+ // Alice registers UUID_1 in one region
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ assertEq(fleet.uuidOwner(UUID_1), alice);
+ assertEq(fleet.uuidTokenCount(UUID_1), 1);
+
+ // Burn the token
+ vm.prank(alice);
+ fleet.burn(tokenId);
+
+ // UUID owner should be cleared
+ assertEq(fleet.uuidOwner(UUID_1), address(0), "UUID owner cleared after all tokens burned");
+ assertEq(fleet.uuidTokenCount(UUID_1), 0, "Token count is 0 after all burned");
+ }
+
+ function test_uuidOwner_notClearedWhileTokensRemain() public {
+ // Alice registers UUID_1 in two regions (same level)
+ vm.prank(alice);
+ uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(alice);
+ uint256 id2 = fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0);
+
+ assertEq(fleet.uuidTokenCount(UUID_1), 2);
+
+ // Burn first token
+ vm.prank(alice);
+ fleet.burn(id1);
+
+ // UUID owner should still be alice (one token remains)
+ assertEq(fleet.uuidOwner(UUID_1), alice, "UUID owner still alice with remaining token");
+ assertEq(fleet.uuidTokenCount(UUID_1), 1, "Token count decremented to 1");
+
+ // Burn second token
+ vm.prank(alice);
+ fleet.burn(id2);
+
+ // Now UUID owner should be cleared
+ assertEq(fleet.uuidOwner(UUID_1), address(0), "UUID owner cleared after all burned");
+ assertEq(fleet.uuidTokenCount(UUID_1), 0);
+ }
+
+ function test_uuidOwner_differentUuidsHaveDifferentOwners() public {
+ // Alice registers UUID_1
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ // Bob registers UUID_2 (different UUID, no conflict)
+ vm.prank(bob);
+ fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0);
+
+ assertEq(fleet.uuidOwner(UUID_1), alice);
+ assertEq(fleet.uuidOwner(UUID_2), bob);
+ }
+
+ function test_uuidOwner_canReRegisterAfterBurningAll() public {
+ // Alice registers and burns UUID_1
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ vm.prank(alice);
+ fleet.burn(tokenId);
+
+ // Bob can now register the same UUID (uuid owner was cleared)
+ vm.prank(bob);
+ uint256 newTokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ assertEq(fleet.uuidOwner(UUID_1), bob, "Bob is now UUID owner");
+ assertEq(fleet.uuidTokenCount(UUID_1), 1);
+ assertEq(fleet.ownerOf(newTokenId), bob);
+ }
+
+ // For region-registered tokens, ERC-721 transfer moves the token but NOT UUID ownership.
+ function test_uuidOwner_transferDoesNotChangeUuidOwner() public {
+ // Alice registers UUID_1
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ assertEq(fleet.uuidOwner(UUID_1), alice);
+
+ // Alice transfers to Bob
+ vm.prank(alice);
+ fleet.transferFrom(alice, bob, tokenId);
+
+ // Token owner changed but UUID owner did not
+ assertEq(fleet.ownerOf(tokenId), bob);
+ assertEq(fleet.uuidOwner(UUID_1), alice, "UUID owner still alice after transfer");
+ }
+
+ function test_RevertIf_transferRecipientTriesToRegisterSameUuid() public {
+ // Alice registers UUID_1
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ // Alice transfers to Bob
+ vm.prank(alice);
+ fleet.transferFrom(alice, bob, tokenId);
+
+ // Bob now owns tokenId, but cannot register NEW tokens for UUID_1
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.UuidOwnerMismatch.selector);
+ fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0);
+ }
+
+ function test_uuidOwner_originalOwnerCanStillRegisterAfterTransfer() public {
+ // Alice registers UUID_1 in one region
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ // Alice transfers to Bob
+ vm.prank(alice);
+ fleet.transferFrom(alice, bob, tokenId);
+
+ // Alice can still register UUID_1 in new regions (she's still uuidOwner, same level)
+ vm.prank(alice);
+ uint256 newTokenId = fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0);
+
+ assertEq(fleet.ownerOf(newTokenId), alice);
+ assertEq(fleet.uuidTokenCount(UUID_1), 2);
+ }
+
+ // NOTE(review): if the fuzzer picks (cc2, admin2) == (cc1, admin1) the second local
+ // registration targets the exact same region; this test assumes the owner check fires
+ // before any duplicate-region check — confirm ordering in FleetIdentity.
+ function testFuzz_uuidOwner_enforcedAcrossAllRegions(uint16 cc1, uint16 cc2, uint16 admin1, uint16 admin2) public {
+ cc1 = uint16(bound(cc1, 1, 999));
+ cc2 = uint16(bound(cc2, 1, 999));
+ admin1 = uint16(bound(admin1, 1, 255));
+ admin2 = uint16(bound(admin2, 1, 255));
+
+ // Alice registers first
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, cc1, admin1, 0);
+
+ // Bob cannot register same UUID anywhere
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.UuidOwnerMismatch.selector);
+ fleet.registerFleetLocal(UUID_1, cc2, admin2, 0);
+
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.UuidOwnerMismatch.selector);
+ fleet.registerFleetCountry(UUID_1, cc2, 0);
+ }
+
+ // Fuzz: token count equals the number of distinct country registrations for the UUID.
+ function testFuzz_uuidOwner_multiRegionTokenCount(uint8 regionCount) public {
+ regionCount = uint8(bound(regionCount, 1, 10));
+
+ for (uint8 i = 0; i < regionCount; i++) {
+ uint16 cc = uint16(1 + i);
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, cc, 0);
+ }
+
+ assertEq(fleet.uuidTokenCount(UUID_1), regionCount);
+ assertEq(fleet.uuidOwner(UUID_1), alice);
+ }
+
+ // Fuzz: burning a strict subset of tokens never clears UUID ownership.
+ function testFuzz_uuidOwner_partialBurnPreservesOwnership(uint8 burnCount) public {
+ uint8 totalTokens = 5;
+ burnCount = uint8(bound(burnCount, 1, totalTokens - 1));
+
+ // Register tokens
+ uint256[] memory tokenIds = new uint256[](totalTokens);
+ for (uint8 i = 0; i < totalTokens; i++) {
+ uint16 cc = uint16(1 + i);
+ vm.prank(alice);
+ tokenIds[i] = fleet.registerFleetCountry(UUID_1, cc, 0);
+ }
+
+ assertEq(fleet.uuidTokenCount(UUID_1), totalTokens);
+
+ // Burn some tokens
+ for (uint8 i = 0; i < burnCount; i++) {
+ vm.prank(alice);
+ fleet.burn(tokenIds[i]);
+ }
+
+ // Owner still alice, count decreased
+ assertEq(fleet.uuidOwner(UUID_1), alice);
+ assertEq(fleet.uuidTokenCount(UUID_1), totalTokens - burnCount);
+ }
+
+ // ══════════════════════════════════════════════
+ // UUID Level Enforcement Tests
+ // ══════════════════════════════════════════════
+
+ // Level enum (as observed in these tests): 0 = None, 1 = Local, 2 = Country, 3 = Owned.
+ function test_uuidLevel_setOnFirstRegistration_local() public {
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 0, "No level before registration");
+
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 1, "Level is 1 (local) after local registration");
+ }
+
+ function test_uuidLevel_setOnFirstRegistration_country() public {
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 0, "No level before registration");
+
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 2, "Level is 2 (country) after country registration");
+ }
+
+ function test_RevertIf_crossLevelRegistration_localThenCountry() public {
+ // Alice registers UUID_1 at local level
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ // Alice tries to register same UUID at country level → revert
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.UuidLevelMismatch.selector);
+ fleet.registerFleetCountry(UUID_1, DE, 0);
+ }
+
+ function test_RevertIf_crossLevelRegistration_countryThenLocal() public {
+ // Alice registers UUID_1 at country level
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+
+ // Alice tries to register same UUID at local level → revert
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.UuidLevelMismatch.selector);
+ fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0);
+ }
+
+ function test_uuidLevel_clearedOnLastTokenBurn() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 1);
+
+ vm.prank(alice);
+ fleet.burn(tokenId);
+
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 0, "Level cleared after all tokens burned");
+ }
+
+ function test_uuidLevel_notClearedWhileTokensRemain() public {
+ vm.prank(alice);
+ uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0);
+
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 1);
+
+ vm.prank(alice);
+ fleet.burn(id1);
+
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 1, "Level preserved while tokens remain");
+ }
+
+ // Burning the last token resets the level, so the UUID may re-enter at a different level.
+ function test_uuidLevel_canChangeLevelAfterBurningAll() public {
+ // Register as local
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 1);
+
+ // Burn
+ vm.prank(alice);
+ fleet.burn(tokenId);
+
+ // Now can register as country
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 2);
+ }
+
+ // ══════════════════════════════════════════════
+ // Owned-Only Mode Tests
+ // ══════════════════════════════════════════════
+
+ // claimUuid mints an owned-only (region-less) token for BASE_BOND and reserves the UUID.
+ function test_claimUuid_basic() public {
+ uint256 aliceBalanceBefore = bondToken.balanceOf(alice);
+
+ vm.prank(alice);
+ uint256 tokenId = fleet.claimUuid(UUID_1);
+
+ // Token minted
+ assertEq(fleet.ownerOf(tokenId), alice);
+ assertEq(fleet.tokenUuid(tokenId), UUID_1);
+ assertEq(fleet.tokenRegion(tokenId), 0); // OWNED_REGION_KEY
+
+ // UUID ownership set
+ assertEq(fleet.uuidOwner(UUID_1), alice);
+ assertEq(fleet.uuidTokenCount(UUID_1), 1);
+ assertTrue(fleet.isOwnedOnly(UUID_1));
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 3); // Owned
+
+ // Bond pulled
+ assertEq(aliceBalanceBefore - bondToken.balanceOf(alice), BASE_BOND);
+
+ // bonds() returns BASE_BOND for owned-only
+ assertEq(fleet.bonds(tokenId), BASE_BOND);
+ }
+
+ function test_RevertIf_claimUuid_alreadyOwned() public {
+ vm.prank(alice);
+ fleet.claimUuid(UUID_1);
+
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.UuidAlreadyOwned.selector);
+ fleet.claimUuid(UUID_1);
+ }
+
+ // A region-registered UUID cannot also be claimed as owned-only.
+ function test_RevertIf_claimUuid_alreadyRegistered() public {
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.UuidAlreadyOwned.selector);
+ fleet.claimUuid(UUID_1);
+ }
+
+ function test_RevertIf_claimUuid_invalidUuid() public {
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.InvalidUUID.selector);
+ fleet.claimUuid(bytes16(0));
+ }
+
+ // Registering an owned-only UUID into a region burns the owned token, mints a
+ // regional one, and charges only the bond difference.
+ function test_registerFromOwned_local() public {
+ // First claim
+ vm.prank(alice);
+ uint256 ownedTokenId = fleet.claimUuid(UUID_1);
+
+ uint256 aliceBalanceBefore = bondToken.balanceOf(alice);
+
+ // Register from owned state
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ // Old owned token burned
+ vm.expectRevert();
+ fleet.ownerOf(ownedTokenId);
+
+ // New token exists
+ assertEq(fleet.ownerOf(tokenId), alice);
+ assertEq(fleet.tokenRegion(tokenId), _regionUSCA());
+ assertEq(fleet.fleetTier(tokenId), 0);
+
+ // UUID state updated
+ assertEq(fleet.uuidOwner(UUID_1), alice);
+ assertEq(fleet.uuidTokenCount(UUID_1), 1); // still 1
+ assertFalse(fleet.isOwnedOnly(UUID_1));
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 1); // Local
+
+ // Only incremental bond pulled (tier 0 local = BASE_BOND, already paid BASE_BOND)
+ assertEq(aliceBalanceBefore - bondToken.balanceOf(alice), 0);
+ }
+
+ function test_registerFromOwned_country() public {
+ vm.prank(alice);
+ fleet.claimUuid(UUID_1);
+
+ uint256 aliceBalanceBefore = bondToken.balanceOf(alice);
+
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 0);
+
+ assertEq(fleet.ownerOf(tokenId), alice);
+ assertEq(fleet.tokenRegion(tokenId), uint32(US));
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 2); // Country
+
+ // Incremental bond: country tier 0 = 8*BASE_BOND, already paid BASE_BOND, so 7*BASE_BOND
+ assertEq(aliceBalanceBefore - bondToken.balanceOf(alice), 7 * BASE_BOND);
+ }
+
+ function test_registerFromOwned_higherTier() public {
+ vm.prank(alice);
+ fleet.claimUuid(UUID_1);
+
+ uint256 aliceBalanceBefore = bondToken.balanceOf(alice);
+
+ // Register at tier 2 local (4*BASE_BOND)
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2);
+
+ // Incremental: 4*BASE_BOND - BASE_BOND = 3*BASE_BOND
+ assertEq(aliceBalanceBefore - bondToken.balanceOf(alice), 3 * BASE_BOND);
+ }
+
+ // unregisterToOwned is the inverse of registerFromOwned: swap the regional token
+ // for an owned-only one and refund any bond above BASE_BOND.
+ function test_unregisterToOwned_basic() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ uint256 aliceBalanceBefore = bondToken.balanceOf(alice);
+
+ vm.prank(alice);
+ uint256 ownedTokenId = fleet.unregisterToOwned(tokenId);
+
+ // Old token burned
+ vm.expectRevert();
+ fleet.ownerOf(tokenId);
+
+ // New owned-only token exists
+ assertEq(fleet.ownerOf(ownedTokenId), alice);
+ assertEq(fleet.tokenRegion(ownedTokenId), 0);
+
+ // UUID state updated
+ assertTrue(fleet.isOwnedOnly(UUID_1));
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 3); // Owned
+
+ // No refund for tier 0 local (BASE_BOND - BASE_BOND = 0)
+ assertEq(bondToken.balanceOf(alice) - aliceBalanceBefore, 0);
+ }
+
+ function test_unregisterToOwned_withRefund() public {
+ // Register at tier 2 local (4*BASE_BOND)
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 2);
+
+ uint256 aliceBalanceBefore = bondToken.balanceOf(alice);
+
+ vm.prank(alice);
+ fleet.unregisterToOwned(tokenId);
+
+ // Refund: 4*BASE_BOND - BASE_BOND = 3*BASE_BOND
+ assertEq(bondToken.balanceOf(alice) - aliceBalanceBefore, 3 * BASE_BOND);
+ }
+
+ function test_unregisterToOwned_fromCountry() public {
+ // Register country tier 0 (8*BASE_BOND)
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetCountry(UUID_1, US, 0);
+
+ uint256 aliceBalanceBefore = bondToken.balanceOf(alice);
+
+ vm.prank(alice);
+ fleet.unregisterToOwned(tokenId);
+
+ // Refund: 8*BASE_BOND - BASE_BOND = 7*BASE_BOND
+ assertEq(bondToken.balanceOf(alice) - aliceBalanceBefore, 7 * BASE_BOND);
+
+ // Level reset to Owned
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 3);
+ }
+
+ // Unregistering is only allowed when the UUID has exactly one outstanding token.
+ function test_RevertIf_unregisterToOwned_multipleTokens() public {
+ vm.prank(alice);
+ uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, DE, ADMIN_CA, 0);
+
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.CannotUnregisterMultipleTokens.selector);
+ fleet.unregisterToOwned(id1);
+ }
+
+ function test_RevertIf_unregisterToOwned_alreadyOwned() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.claimUuid(UUID_1);
+
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.UuidNotOwned.selector);
+ fleet.unregisterToOwned(tokenId);
+ }
+
+ // releaseUuid burns the owned-only token, clears all UUID state, and refunds BASE_BOND.
+ function test_releaseUuid_basic() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.claimUuid(UUID_1);
+
+ uint256 aliceBalanceBefore = bondToken.balanceOf(alice);
+
+ vm.prank(alice);
+ fleet.releaseUuid(UUID_1);
+
+ // Token burned
+ vm.expectRevert();
+ fleet.ownerOf(tokenId);
+
+ // UUID cleared
+ assertEq(fleet.uuidOwner(UUID_1), address(0));
+ assertEq(fleet.uuidTokenCount(UUID_1), 0);
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 0); // None
+
+ // Refund received
+ assertEq(bondToken.balanceOf(alice) - aliceBalanceBefore, BASE_BOND);
+ }
+
+ // Unlike regional tokens, transferring an owned-only token moves uuidOwner too,
+ // so release rights (and the refund) follow the token.
+ function test_releaseUuid_afterTransfer() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.claimUuid(UUID_1);
+
+ // Transfer to bob
+ vm.prank(alice);
+ fleet.transferFrom(alice, bob, tokenId);
+
+ // uuidOwner should have updated
+ assertEq(fleet.uuidOwner(UUID_1), bob);
+
+ // Alice cannot release
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.NotUuidOwner.selector);
+ fleet.releaseUuid(UUID_1);
+
+ // Bob can release
+ uint256 bobBalanceBefore = bondToken.balanceOf(bob);
+ vm.prank(bob);
+ fleet.releaseUuid(UUID_1);
+ assertEq(bondToken.balanceOf(bob) - bobBalanceBefore, BASE_BOND);
+ }
+
+ function test_RevertIf_releaseUuid_notOwned() public {
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ vm.prank(alice);
+ vm.expectRevert(FleetIdentity.UuidNotOwned.selector);
+ fleet.releaseUuid(UUID_1);
+ }
+
+ function test_ownedOnly_transfer_updatesUuidOwner() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.claimUuid(UUID_1);
+
+ assertEq(fleet.uuidOwner(UUID_1), alice);
+
+ vm.prank(alice);
+ fleet.transferFrom(alice, bob, tokenId);
+
+ // uuidOwner updated on transfer for owned-only tokens
+ assertEq(fleet.uuidOwner(UUID_1), bob);
+ assertEq(fleet.ownerOf(tokenId), bob);
+ }
+
+ // Owned-only tokens are excluded from region bundles until they register a region.
+ function test_ownedOnly_notInBundle() public {
+ // Claim some UUIDs as owned-only
+ vm.prank(alice);
+ fleet.claimUuid(UUID_1);
+ vm.prank(alice);
+ fleet.claimUuid(UUID_2);
+
+ // Bundle should be empty
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 0);
+
+ // Now register one
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ // Bundle should contain only the registered one
+ (uuids, count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 1);
+ assertEq(uuids[0], UUID_1);
+ }
+
+ // burn() on an owned-only token behaves like releaseUuid: clears state, refunds BASE_BOND.
+ function test_burn_ownedOnly() public {
+ vm.prank(alice);
+ uint256 tokenId = fleet.claimUuid(UUID_1);
+
+ uint256 aliceBalanceBefore = bondToken.balanceOf(alice);
+
+ vm.prank(alice);
+ fleet.burn(tokenId);
+
+ // Token burned
+ vm.expectRevert();
+ fleet.ownerOf(tokenId);
+
+ // UUID cleared
+ assertEq(fleet.uuidOwner(UUID_1), address(0));
+
+ // Refund received
+ assertEq(bondToken.balanceOf(alice) - aliceBalanceBefore, BASE_BOND);
+ }
+
+ function test_ownedOnly_canReRegisterAfterRelease() public {
+ vm.prank(alice);
+ fleet.claimUuid(UUID_1);
+
+ vm.prank(alice);
+ fleet.releaseUuid(UUID_1);
+
+ // Bob can now claim or register
+ vm.prank(bob);
+ uint256 tokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ assertEq(fleet.ownerOf(tokenId), bob);
+ assertEq(fleet.uuidOwner(UUID_1), bob);
+ }
+
+ function test_migration_viaUnregisterAndReregister() public {
+ // This test shows the new migration pattern using unregisterToOwned
+
+ // Register local in US
+ vm.prank(alice);
+ uint256 oldTokenId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ uint256 aliceBalanceAfterRegister = bondToken.balanceOf(alice);
+
+ // Unregister to owned (no refund at tier 0 local)
+ vm.prank(alice);
+ fleet.unregisterToOwned(oldTokenId);
+
+ // Re-register in DE as country (pays 8*BASE_BOND - BASE_BOND = 7*BASE_BOND)
+ vm.prank(alice);
+ uint256 newTokenId = fleet.registerFleetCountry(UUID_1, DE, 0);
+
+ assertEq(fleet.ownerOf(newTokenId), alice);
+ assertEq(fleet.tokenRegion(newTokenId), uint32(DE));
+ assertEq(uint8(fleet.uuidLevel(UUID_1)), 2); // Country
+
+ // Net bond change: 7*BASE_BOND additional
+ assertEq(aliceBalanceAfterRegister - bondToken.balanceOf(alice), 7 * BASE_BOND);
+ }
+
+ // Fuzz: tierBond follows a geometric schedule — BASE_BOND doubled once per tier,
+ // with country regions paying an 8× multiplier on top of the local schedule.
+ function testFuzz_tierBond_geometric(uint256 tier) public view {
+ tier = bound(tier, 0, 10);
+ // BASE_BOND * 2^tier, expressed as a left shift (tier ≤ 10, no overflow).
+ uint256 expectedLocal = BASE_BOND << tier;
+ // Local regions get 1× multiplier
+ assertEq(fleet.tierBond(tier, false), expectedLocal);
+ // Country regions get 8× multiplier
+ assertEq(fleet.tierBond(tier, true), expectedLocal * 8);
+ }
+
+ // Fuzz: tier occupancy is tracked per region — filling one country never raises
+ // the starting tier of another.
+ function testFuzz_perRegionTiers_newRegionAlwaysStartsAtTier0(uint16 cc) public {
+ cc = uint16(bound(cc, 1, 999));
+ vm.assume(cc != US); // Skip US since we fill it below
+
+ // Fill one country with 8 fleets
+ _registerNCountry(alice, US, 8, 0);
+ assertEq(fleet.regionTierCount(_regionUS()), 2);
+
+ // New country should start at tier 0 regardless of other regions
+ vm.prank(bob);
+ uint256 tokenId = fleet.registerFleetCountry(_uuid(999), cc, 0);
+ assertEq(fleet.fleetTier(tokenId), 0);
+ assertEq(fleet.bonds(tokenId), BASE_BOND * 8); // Country gets 8× multiplier
+ }
+
+ // Fuzz: registering at the next sequential tier always succeeds; tiers fill
+ // 4-at-a-time (TIER_CAPACITY = 4) within a single region.
+ function testFuzz_tierAssignment_autoFillsSequentiallyPerRegion(uint8 count) public {
+ count = uint8(bound(count, 1, 40));
+
+ for (uint256 i = 0; i < count; i++) {
+ uint256 expectedTier = i / 4; // TIER_CAPACITY = 4
+ vm.prank(alice);
+ uint256 tokenId = fleet.registerFleetLocal(_uuid(i + 300), US, ADMIN_CA, expectedTier);
+
+ assertEq(fleet.fleetTier(tokenId), expectedTier);
+ }
+
+ uint256 expectedTiers = (uint256(count) + 3) / 4; // TIER_CAPACITY = 4
+ assertEq(fleet.regionTierCount(_regionUSCA()), expectedTiers);
+ }
+
+ // --- Invariants ---
+
+ // Invariant: the contract's bondToken balance equals the sum of all live token bonds.
+ function test_invariant_contractBalanceEqualsSumOfBonds() public {
+ vm.prank(alice);
+ uint256 id1 = fleet.registerFleetCountry(UUID_1, US, 0);
+ vm.prank(bob);
+ uint256 id2 = fleet.registerFleetCountry(UUID_2, DE, 0);
+ vm.prank(carol);
+ uint256 id3 = fleet.registerFleetLocal(UUID_3, US, ADMIN_CA, 0);
+
+ uint256 sumBonds = fleet.bonds(id1) + fleet.bonds(id2) + fleet.bonds(id3);
+ assertEq(bondToken.balanceOf(address(fleet)), sumBonds);
+
+ vm.prank(alice);
+ fleet.burn(id1);
+
+ assertEq(bondToken.balanceOf(address(fleet)), fleet.bonds(id2) + fleet.bonds(id3));
+ }
+
+ // Same invariant holds through tier reassignments (up then down) and full burn-down.
+ function test_invariant_contractBalanceAfterReassignTierBurn() public {
+ vm.prank(alice);
+ uint256 id1 = fleet.registerFleetCountry(UUID_1, US, 0);
+ vm.prank(bob);
+ uint256 id2 = fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0);
+ vm.prank(carol);
+ uint256 id3 = fleet.registerFleetLocal(UUID_3, DE, ADMIN_NY, 0);
+
+ // Move id1 up to tier 3 (pulls extra bond) ...
+ vm.prank(alice);
+ fleet.reassignTier(id1, 3);
+
+ // ... then back down to tier 1 (refunds the difference).
+ vm.prank(alice);
+ fleet.reassignTier(id1, 1);
+
+ uint256 expected = fleet.bonds(id1) + fleet.bonds(id2) + fleet.bonds(id3);
+ assertEq(bondToken.balanceOf(address(fleet)), expected);
+
+ vm.prank(alice);
+ fleet.burn(id1);
+ vm.prank(bob);
+ fleet.burn(id2);
+ vm.prank(carol);
+ fleet.burn(id3);
+
+ // All bonds returned → contract drained to zero.
+ assertEq(bondToken.balanceOf(address(fleet)), 0);
+ }
+
+ // --- countryInclusionHint ---
+
+ // countryInclusionHint returns (cheapest tier, bond at that tier) at which a new
+ // country-level fleet would be included in bundles for that country.
+ function test_countryInclusionHint_emptyReturnsZero() public view {
+ (uint256 tier, uint256 bond) = fleet.countryInclusionHint(US);
+ assertEq(tier, 0);
+ assertEq(bond, BASE_BOND * 8); // Country pays 8× multiplier
+ }
+
+ function test_countryInclusionHint_onlyCountryFleets() public {
+ _registerNCountryAt(alice, US, 4, 1000, 0); // fills tier 0 (TIER_CAPACITY=4)
+ vm.prank(bob);
+ fleet.registerFleetCountry(_uuid(9000), US, 1); // tier 1
+
+ // Tier 0 is full → cheapest inclusion = tier 1.
+ (uint256 tier, uint256 bond) = fleet.countryInclusionHint(US);
+ assertEq(tier, 1);
+ assertEq(bond, BASE_BOND * 8 * 2); // Country pays 8× multiplier, tier 1 = 2× base
+ }
+
+ function test_countryInclusionHint_adminAreaCreatesPressure() public {
+ // Country US: tier 0 with 1 member
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(1000), US, 0);
+
+ // US-CA: push to tier 3 (1 member at tier 3)
+ vm.prank(bob);
+ fleet.registerFleetLocal(_uuid(2000), US, ADMIN_CA, 3);
+
+ // Country fleet needs to be included in bundle(US, ADMIN_CA).
+ // Simulation: cursor 3→0. At cursor 3: admin=1 (fits). At cursor 0: admin=0, country=1+1=2 (fits).
+ // Country tier 0 with 2 members: 2 <= 20-1 = 19. Fits.
+ // So cheapest = 0 (tier 0 has room: 1/4).
+ (uint256 tier,) = fleet.countryInclusionHint(US);
+ assertEq(tier, 0);
+ }
+
+ function test_countryInclusionHint_multipleAdminAreas_takesMax() public {
+ // US-CA: fill admin tier 0 (4) + fill country tier 0 (4) = 8
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 0, 0);
+ _registerNCountryAt(alice, US, 4, 100, 0);
+ // US-NY: light (3 admin)
+ _registerNLocal(alice, US, ADMIN_NY, 3, 200);
+
+ // Country tier 0 has 4/4 members → tier 0 is full.
+ // Even though the bundle has room, the tier capacity is exhausted.
+ // So cheapest inclusion tier for a country fleet = 1.
+ (uint256 tier,) = fleet.countryInclusionHint(US);
+ assertEq(tier, 1);
+ }
+
+ function test_countryInclusionHint_ignoresOtherCountries() public {
+ // DE admin area at tier 5 — should NOT affect US hint
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(1000), DE, 1, 5);
+
+ // US-CA at tier 1
+ vm.prank(bob);
+ fleet.registerFleetLocal(_uuid(2000), US, ADMIN_CA, 1);
+
+ (uint256 usTier,) = fleet.countryInclusionHint(US);
+ // US country fleet needs inclusion in bundle(US, ADMIN_CA).
+ // Admin has 1 at tier 1. Country at tier 0: +1=1, fits.
+ assertEq(usTier, 0);
+ }
+
+ // The hint is computed from live state: burning the pressure-creating fleet resets it.
+ function test_countryInclusionHint_afterBurn_updates() public {
+ vm.prank(alice);
+ uint256 id = fleet.registerFleetLocal(_uuid(1000), US, ADMIN_CA, 3);
+
+ vm.prank(alice);
+ fleet.burn(id);
+
+ (uint256 after_,) = fleet.countryInclusionHint(US);
+ assertEq(after_, 0);
+ }
+
+ // End-to-end: registering at the hinted tier actually lands the fleet in the bundle
+ // with exactly the hinted bond.
+ function test_countryInclusionHint_registrantCanActOnHint() public {
+ // Fill up to create pressure
+ _registerNLocal(alice, US, ADMIN_CA, 8, 0);
+ _registerNCountry(alice, US, 8, 100);
+
+ (uint256 inclusionTier, uint256 hintBond) = fleet.countryInclusionHint(US);
+
+ // Bob registers at country level at the hinted tier
+ vm.prank(bob);
+ fleet.registerFleetCountry(_uuid(2000), US, inclusionTier);
+
+ uint256 tokenId = _tokenId(_uuid(2000), _regionUS());
+ assertEq(fleet.fleetTier(tokenId), inclusionTier);
+ assertEq(fleet.bonds(tokenId), hintBond);
+
+ // Bundle for US-CA includes Bob's fleet
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertGt(count, 0);
+ bool foundCountry;
+ for (uint256 i = 0; i < count; i++) {
+ if (uuids[i] == _uuid(2000)) foundCountry = true;
+ }
+ assertTrue(foundCountry, "Country fleet should appear in bundle");
+ }
+
+ // --- buildHighestBondedUuidBundle (shared-cursor fair-stop) ---
+
+ // ── Empty / Single-level basics ──
+
+ function test_buildBundle_emptyReturnsZero() public view {
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 0);
+ }
+
+ // Bundles are always admin-area scoped; adminCode 0 is rejected.
+ function test_RevertIf_buildBundle_adminCodeZero() public {
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+
+ vm.expectRevert(FleetIdentity.AdminAreaRequired.selector);
+ fleet.buildHighestBondedUuidBundle(US, 0);
+ }
+
+ // A country-level fleet appears in every admin-area bundle of its country.
+ function test_buildBundle_singleCountry() public {
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 1);
+ assertEq(uuids[0], UUID_1);
+ }
+
+ function test_buildBundle_singleLocal() public {
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 1);
+ assertEq(uuids[0], UUID_1);
+ }
+
+ // ── Same cursor, both levels at tier 0 ──
+
+ function test_buildBundle_bothLevelsTied_levelPriorityOrder() public {
+ // Both at tier 0 → shared cursor 0 → level priority: local, country
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0);
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 2);
+ assertEq(uuids[0], UUID_2); // local first
+ assertEq(uuids[1], UUID_1); // country second
+ }
+
+ function test_buildBundle_2LevelsTier0_fullCapacity() public {
+ // 4 local + 4 country at tier 0 = 8
+ // Bundle fits all since max is 20
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0);
+ _registerNCountryAt(alice, US, 4, 2000, 0);
+
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 8);
+ }
+
+ function test_buildBundle_2LevelsTier0_partialFill() public {
+ // 3 local + 2 country = 5
+ _registerNLocalAt(alice, US, ADMIN_CA, 3, 1000, 0);
+ _registerNCountryAt(alice, US, 2, 2000, 0);
+
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 5);
+ }
+
+ // ── Bond priority: higher tier index = higher bond = comes first ──
+
+ function test_buildBundle_higherBondFirst() public {
+ // Country: promote to tier 2 (bond=8*4*BASE)
+ vm.prank(alice);
+ uint256 usId = fleet.registerFleetCountry(UUID_1, US, 0);
+ vm.prank(alice);
+ fleet.reassignTier(usId, 2);
+ // Local: tier 0 (bond=BASE)
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 2);
+ assertEq(uuids[0], UUID_1); // highest bond first (country tier 2)
+ assertEq(uuids[1], UUID_2); // local tier 0
+ }
+
+ function test_buildBundle_multiTierDescendingBond() public {
+ // Local tier 2 (bond=4*BASE)
+ vm.prank(alice);
+ uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ vm.prank(alice);
+ fleet.reassignTier(id1, 2);
+
+ // Country tier 1 (bond=8*2*BASE)
+ vm.prank(alice);
+ uint256 id2 = fleet.registerFleetCountry(UUID_2, US, 0);
+ vm.prank(alice);
+ fleet.reassignTier(id2, 1);
+
+ // Local tier 0 (bond=BASE)
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_3, US, ADMIN_CA, 0);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 3);
+ assertEq(uuids[0], UUID_1); // local tier 2: cursor 2 is visited first
+ // Ordering follows the shared tier cursor (descending tier index), NOT absolute
+ // bond: country tier 1 bonds 16*BASE > local tier 2's 4*BASE, yet it comes second
+ // because cursor 1 is processed after cursor 2.
+ assertEq(uuids[1], UUID_2); // country tier 1: bond=16*BASE (but added after local at cursor)
+ }
+
+ function test_buildBundle_multiTierMultiLevel_correctOrder() public {
+ // Admin: tier 0 (4 members) + tier 1 (1 member)
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 8000, 0);
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(8100), US, ADMIN_CA, 1);
+
+ // Country: promote to tier 1 (bond=8*2*BASE)
+ vm.prank(alice);
+ uint256 countryId = fleet.registerFleetCountry(_uuid(8200), US, 0);
+ vm.prank(alice);
+ fleet.reassignTier(countryId, 1);
+
+ // Country: promote to tier 2 (bond=8*4*BASE)
+ vm.prank(alice);
+ uint256 country2Id = fleet.registerFleetCountry(_uuid(8300), US, 0);
+ vm.prank(alice);
+ fleet.reassignTier(country2Id, 2);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=2: country(1)→include. Count=1.
+ // Cursor=1: local(1)+country(1)→include. Count=3.
+ // Cursor=0: local(4)→include. Count=7.
+ assertEq(count, 7);
+ assertEq(uuids[0], fleet.tokenUuid(country2Id)); // tier 2 first
+ }
+
+ // ── All-or-nothing ──
+
+ function test_buildBundle_allOrNothing_tierSkippedWhenDoesNotFit() public {
+ // NOTE(review): despite the name, the trace below shows everything fits within
+ // the 20-slot bundle (15 of 15 members included) — no tier is actually skipped
+ // here; consider renaming or tightening the setup so a tier genuinely can't fit.
+ // Admin tier 1: 4 members
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(5100 + i), US, ADMIN_CA, 1);
+ }
+ // Country tier 1: 4 members
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(6100 + i), US, 1);
+ }
+
+ // Tier 0: local(4), country(3)
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 5000, 0);
+ _registerNCountryAt(alice, US, 3, 6000, 0);
+
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=1: local(4)+country(4)=8. Count=8, room=12.
+ // Cursor=0: local(4)≤12→include[count=12,room=8]. country(3)≤8→include[count=15,room=5].
+ assertEq(count, 15);
+ }
+
+ function test_buildBundle_allOrNothing_noPartialCollection() public {
+ // Both levels have 4 members at tier 1 and nothing at tier 0; all 8 fit in the
+ // 20-slot bundle, so the full collections are included with no partial skipping.
+ // Local tier 1: 4 members
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(2000 + i), US, ADMIN_CA, 1);
+ }
+ // Country tier 1: 4 members
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(3000 + i), US, 1);
+ }
+
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=1: local(4)+country(4)=8. Count=8.
+ // Cursor=0: all empty at tier 0. Done.
+ assertEq(count, 8);
+ }
+
+ /// @notice Country members fill the slots left after locals are placed; all four
+ /// country UUIDs must appear in the final bundle.
+ function test_buildBundle_partialInclusion_fillsRemainingSlots() public {
+ // With partial inclusion: bundle fills remaining slots.
+ // Country tier 0: 4 members
+ _registerNCountryAt(alice, US, 4, 0, 0);
+
+ // Local: 4 at tier 0 + 4 at tier 1 (TIER_CAPACITY = 4)
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 5000, 0);
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(5100 + i), US, ADMIN_CA, 1);
+ }
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=1: local(4)=4. Count=4, room=16.
+ // Cursor=0: local(4)≤16→include 4[count=8,room=12]. country(4)≤12→include 4.
+ // Final count=12.
+ assertEq(count, 12);
+
+ // Verify country UUIDs ARE in the result
+ uint256 countryCount;
+ for (uint256 i = 0; i < count; i++) {
+ uint256 tokenId = _findTokenId(uuids[i], US, ADMIN_CA);
+ uint32 region = fleet.tokenRegion(tokenId);
+ if (region == _regionUS()) countryCount++;
+ }
+ assertEq(countryCount, 4, "4 country members included");
+ }
+
+ // ── Partial inclusion (replaces all-or-nothing + fair-stop) ──
+
+ /// @notice All 14 registered members fit, so the bundle contains every one of them.
+ function test_buildBundle_partialInclusion_fillsBundleCompletely() public {
+ // With partial inclusion, we fill the bundle completely by including
+ // as many members as fit, in array order.
+
+ // Consume 6 slots at tier 1.
+ for (uint256 i = 0; i < 3; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 1);
+ }
+ for (uint256 i = 0; i < 3; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(2000 + i), US, 1);
+ }
+
+ // Tier 0: full capacities (TIER_CAPACITY = 4).
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 3000, 0);
+ _registerNCountryAt(alice, US, 4, 4000, 0);
+
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=1: local(3)+country(3)=6. Count=6, room=14.
+ // Cursor=0: local(4)≤14→include 4[count=10,room=10].
+ // country(4)≤10→include 4[count=14,room=6].
+ assertEq(count, 14);
+ }
+
+ /// @notice Three local tiers plus tier-0 country all fit within the 20-slot bundle.
+ function test_buildBundle_partialFill_localAndCountry() public {
+ // Two local tiers consume 8 slots, leaving 12 for cursor=0.
+ // At cursor=0: local(4) fits. country(4) included.
+
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 1);
+ }
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(2000 + i), US, ADMIN_CA, 2);
+ }
+
+ // Tier 0: 4 local + 4 country (TIER_CAPACITY = 4)
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 3000, 0);
+ _registerNCountryAt(alice, US, 4, 4000, 0);
+
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=2: local(4)→include. Count=4.
+ // Cursor=1: local(4)→include. Count=8, room=12.
+ // Cursor=0: local(4)≤12→include[count=12,room=8]. country(4)≤8→include[count=16,room=4].
+ assertEq(count, 16);
+ }
+
+ /// @notice Both levels get their members in when there is room; verifies the first
+ /// local tier-0 UUID is present and all 4 country tier-0 members are counted.
+ function test_buildBundle_partialInclusion_allLevelsPartiallyIncluded() public {
+ // With partial inclusion, both levels get included partially if needed.
+
+ // Consume 8 slots at tier 1.
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 1);
+ }
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(2000 + i), US, 1);
+ }
+
+ // Tier 0: local=4, country=4 (TIER_CAPACITY = 4)
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 3000, 0);
+ _registerNCountryAt(alice, US, 4, 4000, 0);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=1: local(4)+country(4)=8. Count=8, room=12.
+ // Cursor=0: local(4)≤12→include 4[count=12,room=8].
+ // country(4)≤8→include 4[count=16].
+ assertEq(count, 16);
+
+ // Verify local tier 0 is present
+ bool foundLocal = false;
+ for (uint256 i = 0; i < count; i++) {
+ if (uuids[i] == _uuid(3000)) foundLocal = true;
+ }
+ assertTrue(foundLocal, "local tier 0 should be included");
+
+ // Count how many country tier 0 members are included
+ uint256 countryT0Count;
+ for (uint256 i = 0; i < count; i++) {
+ uint256 tokenId = _findTokenId(uuids[i], US, ADMIN_CA);
+ if (fleet.tokenRegion(tokenId) == _regionUS() && fleet.fleetTier(tokenId) == 0) countryT0Count++;
+ }
+ assertEq(countryT0Count, 4, "4 country tier 0 members included");
+ }
+
+ /// @notice NOTE(review): the setup totals exactly 20 (4 at tier 2 + 8 at tier 1 + 8 at
+ /// tier 0), so this asserts the 20-slot cap rather than the name's "no descent" claim;
+ /// to exercise non-descent, tier 0 would need surplus members that fail to fit.
+ function test_buildBundle_doesNotDescendAfterBundleFull() public {
+ // Intent: once the bundle is full at a higher cursor, lower tiers are skipped.
+
+ // Tiers 1-2: local(4)+country(4) at tier 1, local(4) at tier 2 = 12 members.
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 1);
+ }
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(2000 + i), US, 1);
+ }
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(3000 + i), US, ADMIN_CA, 2);
+ }
+
+ // Tier 0: extras that might not all fit
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 4000, 0);
+ _registerNCountryAt(alice, US, 4, 5000, 0);
+
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=2: local(4)→4. Cursor=1: local(4)+country(4)→12. Cursor=0: local(4)+country(4)→20.
+ assertEq(count, 20);
+ }
+
+ /// @notice With room to spare after the high tiers, single tier-0 extras at both
+ /// levels are still included (count = 13).
+ function test_buildBundle_partialInclusion_fillsAtHighTier() public {
+ // With TIER_CAPACITY = 4:
+ // Cursor=2: local(3)→include. Count=3.
+ // Cursor=1: local(4)+country(4)=8→include. Count=11, room=9.
+ // Cursor=0: local(1)≤9→include[count=12,room=8]. country(1)≤8→include[count=13,room=7].
+
+ for (uint256 i = 0; i < 3; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 2);
+ }
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(2000 + i), US, ADMIN_CA, 1);
+ }
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(3000 + i), US, 1);
+ }
+
+ // Tier 0 extras (would be included with more room):
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(5000), US, ADMIN_CA, 0);
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(5001), US, 0);
+
+ // Only the count is asserted; the unused `uuids` binding was dropped to
+ // silence the unused-local-variable warning.
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=2: local(3)→include. Count=3, room=17.
+ // Cursor=1: local(4)+country(4)→include. Count=11, room=9.
+ // Cursor=0: local(1)≤9→include[count=12,room=8]. country(1)≤8→include[count=13,room=7].
+ assertEq(count, 13);
+ }
+
+ /// @notice At each cursor the local (higher-priority) level is placed before country;
+ /// verifies local tier 0 achieves full inclusion (4 of 4).
+ function test_buildBundle_partialInclusion_higherPriorityFirst() public {
+ // Partial inclusion fills higher-priority levels first at each tier.
+ // Local gets slots before country.
+
+ // Local tier 1: 4, Country tier 1: 4
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 1);
+ }
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(2000 + i), US, 1);
+ }
+
+ // Tier 0: local=4, country=4 (TIER_CAPACITY = 4)
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 3000, 0);
+ _registerNCountryAt(alice, US, 4, 4000, 0);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=1: local(4)+country(4)=8. Count=8, room=12.
+ // Cursor=0: local(4)≤12→include 4[count=12,room=8]. country(4)≤8→include 4[count=16].
+ assertEq(count, 16);
+
+ // Verify local tier 0 full inclusion (4 of 4)
+ uint256 localT0Count;
+ for (uint256 i = 0; i < count; i++) {
+ uint256 tokenId = _findTokenId(uuids[i], US, ADMIN_CA);
+ if (fleet.tokenRegion(tokenId) == _regionUSCA() && fleet.fleetTier(tokenId) == 0) localT0Count++;
+ }
+ assertEq(localT0Count, 4, "4 local tier 0 included");
+ }
+
+ // ── Tie-breaker: local before country at same cursor ──
+
+ /// @notice Same-cursor tie-break: locals are tried before country members; the
+ /// final split is 12 local / 8 country across tiers 0-2.
+ function test_buildBundle_tieBreaker_localBeforeCountry() public {
+ // Room=8 after higher tiers. Local tier 0 (4) tried before country tier 0 (4).
+ // Local fits (4), then country (4).
+
+ // Eat 12 room at tier 1 and 2.
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(1000 + i), US, ADMIN_CA, 1);
+ }
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(2000 + i), US, 1);
+ }
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(3000 + i), US, ADMIN_CA, 2);
+ }
+
+ // Tier 0: local=4, country=4 (TIER_CAPACITY = 4)
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 4000, 0);
+ _registerNCountryAt(alice, US, 4, 5000, 0);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=2: local(4)→include. Count=4, room=16.
+ // Cursor=1: local(4)+country(4)=8→include. Count=12, room=8.
+ // Cursor=0: local(4)≤8→include[count=16,room=4]. country(4)≤4→include 4[count=20,room=0].
+ assertEq(count, 20);
+
+ // Verify: local(12) + country(8)
+ uint256 localCount;
+ uint256 countryCount;
+ for (uint256 i = 0; i < count; i++) {
+ uint256 tokenId = _findTokenId(uuids[i], US, ADMIN_CA);
+ uint32 region = fleet.tokenRegion(tokenId);
+ if (region == _regionUS()) countryCount++;
+ else if (region == _regionUSCA()) localCount++;
+ }
+ assertEq(localCount, 12); // tier 0 (4) + tier 1 (4) + tier 2 (4)
+ assertEq(countryCount, 8); // tier 1 (4) + tier 0 (4)
+ }
+
+ // ── Empty tiers and gaps ──
+
+ /// @notice An empty middle tier (tier 1) is skipped without affecting ordering:
+ /// tier-2 local first, tier-0 country second.
+ function test_buildBundle_emptyTiersSkippedCleanly() public {
+ // Register at tier 0 then promote to tier 2, leaving tier 1 empty.
+ vm.prank(alice);
+ uint256 id = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ vm.prank(alice);
+ fleet.reassignTier(id, 2);
+
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_2, US, 0);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=2: local(1)→include. Count=1.
+ // Cursor=1: all empty. No skip. Descend.
+ // Cursor=0: country(1)→include. Count=2.
+ assertEq(count, 2);
+ assertEq(uuids[0], UUID_1);
+ assertEq(uuids[1], UUID_2);
+ }
+
+ function test_buildBundle_multipleEmptyTiersInMiddle() public {
+ // A local fleet promoted from tier 0 to tier 5 plus a country fleet at tier 0
+ // leave tiers 1-4 completely vacant; the builder must walk straight through the
+ // gap and still return both members.
+ vm.prank(alice);
+ uint256 promotedId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ vm.prank(alice);
+ fleet.reassignTier(promotedId, 5);
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_2, US, 0);
+
+ (, uint256 size) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(size, 2);
+ }
+
+ function test_buildBundle_emptyTiersInMiddle_countryToo() public {
+ // Country-level gap: members sit at tiers 0 and 2 while tier 1 is vacant.
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_2, US, 2);
+
+ (bytes16[] memory bundle, uint256 size) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(size, 2);
+ assertEq(bundle[0], UUID_2); // tier 2 carries the higher bond, so it leads
+ assertEq(bundle[1], UUID_1);
+ }
+
+ // ── Local isolation ──
+
+ function test_buildBundle_multipleAdminAreas_isolated() public {
+ // Fleets registered under different admin areas must never leak into each
+ // other's bundles; with no country fleets, each area sees only its own 4 locals.
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0);
+ _registerNLocalAt(alice, US, ADMIN_NY, 4, 2000, 0);
+
+ (, uint256 caSize) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(caSize, 4);
+ (, uint256 nySize) = fleet.buildHighestBondedUuidBundle(US, ADMIN_NY);
+ assertEq(nySize, 4);
+ }
+
+ // ── Single level, multiple tiers ──
+
+ /// @notice Country-only population across three tiers: all 12 are included and the
+ /// tier-2 (highest-bond) members lead the bundle in tier-array order.
+ function test_buildBundle_singleLevelMultipleTiers() public {
+ // Only country, multiple tiers. Country fleets fill all available slots.
+ _registerNCountryAt(alice, US, 4, 1000, 0); // tier 0: 4 members
+ _registerNCountryAt(alice, US, 4, 2000, 1); // tier 1: 4 members
+ _registerNCountryAt(alice, US, 4, 3000, 2); // tier 2: 4 members
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 12); // all country fleets included
+ // Verify order: tier 2 first (highest bond)
+ uint256[] memory t2 = fleet.getTierMembers(_regionUS(), 2);
+ for (uint256 i = 0; i < 4; i++) {
+ assertEq(uuids[i], bytes16(uint128(t2[i])));
+ }
+ }
+
+ function test_buildBundle_singleLevelOnlyLocal() public {
+ // Only local fleets exist; the bundle is exactly those four members.
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0);
+ (, uint256 bundleSize) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(bundleSize, 4);
+ }
+
+ function test_buildBundle_onlyCountry() public {
+ // No locals at all; two country tiers (TIER_CAPACITY = 4 forces the split).
+ _registerNCountryAt(alice, US, 4, 1000, 0);
+ _registerNCountryAt(alice, US, 4, 1100, 1);
+
+ (bytes16[] memory bundle, uint256 size) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(size, 8);
+ assertEq(bundle[0], _uuid(1100)); // the higher-bonded tier 1 leads the bundle
+ }
+
+ /// @notice With room available, country fleets occupy slots alongside locals; the
+ /// four tier-2 country fleets lead the bundle.
+ function test_buildBundle_countryFillsSlots() public {
+ // Test that country fleets fill bundle slots when room is available.
+ //
+ // Setup: 2 local fleets + 12 country fleets across 3 tiers
+ // Expected: All 14 should be included since bundle has room
+ _registerNLocalAt(alice, US, ADMIN_CA, 2, 1000, 0);
+ _registerNCountryAt(alice, US, 4, 2000, 0); // tier 0: 4 country
+ _registerNCountryAt(alice, US, 4, 3000, 1); // tier 1: 4 country
+ _registerNCountryAt(alice, US, 4, 4000, 2); // tier 2: 4 country
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+
+ // All 14 should be included: 2 local + 12 country
+ assertEq(count, 14);
+
+ // Verify order: tier 2 country (highest bond) → tier 1 country → tier 0 local/country
+ // First 4 should be tier 2 country fleets
+ for (uint256 i = 0; i < 4; i++) {
+ assertEq(uuids[i], _uuid(4000 + i));
+ }
+ }
+
+ /// @notice 8 locals + 12 country members saturate the bundle exactly at its 20-slot cap.
+ function test_buildBundle_localsPriorityWithinTier() public {
+ // When locals and country compete at same tier, locals are included first.
+ //
+ // Setup: 8 local fleets + 12 country fleets
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0);
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1100, 1);
+ _registerNCountryAt(alice, US, 4, 2000, 0);
+ _registerNCountryAt(alice, US, 4, 3000, 1);
+ _registerNCountryAt(alice, US, 4, 4000, 2);
+
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+
+ // Total: 8 local + 12 country = 20 (bundle max)
+ assertEq(count, 20);
+ }
+
+ // ── Shared cursor: different max tier indices per level ──
+
+ /// @notice The cursor is shared across levels even when their max tiers differ:
+ /// local tier 3 leads, country tier 1 next, local tier 0 last.
+ function test_buildBundle_sharedCursor_levelsAtDifferentMaxTiers() public {
+ // Local at tier 3, Country at tier 1.
+ vm.prank(alice);
+ uint256 id1 = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ vm.prank(alice);
+ fleet.reassignTier(id1, 3);
+ vm.prank(alice);
+ uint256 id2 = fleet.registerFleetCountry(UUID_2, US, 0);
+ vm.prank(alice);
+ fleet.reassignTier(id2, 1);
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_3, US, ADMIN_CA, 0);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 3);
+ assertEq(uuids[0], UUID_1); // tier 3
+ assertEq(uuids[1], UUID_2); // tier 1
+ assertEq(uuids[2], UUID_3); // tier 0
+ }
+
+ function test_buildBundle_sharedCursor_sameTierIndex_differentBondByRegion() public view {
+ // Local bonds double per tier starting at BASE_BOND; country bonds carry an
+ // additional 8x regional multiplier at every tier.
+ assertEq(fleet.tierBond(0, false), BASE_BOND);
+ assertEq(fleet.tierBond(1, false), BASE_BOND * 2);
+ assertEq(fleet.tierBond(0, true), BASE_BOND * 8);
+ assertEq(fleet.tierBond(1, true), BASE_BOND * 2 * 8);
+ }
+
+ // ── Lifecycle ──
+
+ function test_buildBundle_afterBurn_reflects() public {
+ // Burning a member must immediately remove it from subsequent bundles.
+ vm.prank(alice);
+ uint256 burnedId = fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+ vm.prank(bob);
+ fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0);
+ vm.prank(carol);
+ fleet.registerFleetLocal(UUID_3, US, ADMIN_CA, 0);
+
+ (, uint256 sizeBefore) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(sizeBefore, 3);
+
+ vm.prank(alice);
+ fleet.burn(burnedId);
+
+ (, uint256 sizeAfter) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(sizeAfter, 2);
+ }
+
+ function test_buildBundle_exhaustsBothLevels() public {
+ // One country fleet plus one local fleet: both must appear in the bundle.
+ vm.prank(alice);
+ fleet.registerFleetCountry(UUID_1, US, 0);
+ vm.prank(alice);
+ fleet.registerFleetLocal(UUID_2, US, ADMIN_CA, 0);
+
+ (bytes16[] memory bundle, uint256 size) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(size, 2);
+ // Bit 0 marks UUID_1 seen, bit 1 marks UUID_2 seen.
+ uint256 seen;
+ for (uint256 k = 0; k < size; k++) {
+ if (bundle[k] == UUID_1) seen |= 1;
+ if (bundle[k] == UUID_2) seen |= 2;
+ }
+ assertEq(seen, 3, "both UUIDs must be present");
+ }
+
+ /// @notice Full lifecycle: registrations, two promotions, then a burn — the bundle
+ /// count tracks each state transition (6 before the burn, 5 after).
+ function test_buildBundle_lifecycle_promotionsAndBurns() public {
+ vm.prank(alice);
+ uint256 l1 = fleet.registerFleetLocal(_uuid(100), US, ADMIN_CA, 0);
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(101), US, ADMIN_CA, 0);
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(102), US, ADMIN_CA, 0);
+
+ vm.prank(alice);
+ uint256 c1 = fleet.registerFleetCountry(_uuid(200), US, 0);
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(201), US, 0);
+
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(300), US, ADMIN_CA, 0);
+
+ vm.prank(alice);
+ fleet.reassignTier(l1, 3);
+ vm.prank(alice);
+ fleet.reassignTier(c1, 1);
+
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Cursor=3: local(1)→include. Count=1.
+ // Cursor=2: empty. Descend.
+ // Cursor=1: country(1)→include. Count=2.
+ // Cursor=0: local(3)+country(1)=4→include. Count=6.
+ assertEq(count, 6);
+
+ vm.prank(alice);
+ fleet.burn(l1);
+
+ (, count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 5);
+ }
+
+ // ── Cap enforcement ──
+
+ function test_buildBundle_capsAt20() public {
+ // 12 locals + 8 country fleets saturate the bundle at its hard cap of 20.
+ // Locals: three tiers of TIER_CAPACITY (4) each.
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 0, 0);
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 100, 1);
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 200, 2);
+ // Country (registered by bob): two tiers of 4.
+ _registerNCountryAt(bob, US, 4, 1000, 0);
+ _registerNCountryAt(bob, US, 4, 1100, 1);
+
+ (, uint256 bundleSize) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(bundleSize, 20);
+ }
+
+ function test_buildBundle_exactlyFillsToCapacity() public {
+ // 12 locals + 8 country fleets land exactly on the 20-slot bundle, spread
+ // across tiers because TIER_CAPACITY = 4.
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0);
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1100, 1);
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1200, 2);
+ _registerNCountryAt(alice, US, 4, 2000, 0);
+ _registerNCountryAt(alice, US, 4, 2100, 1);
+
+ (, uint256 bundleSize) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(bundleSize, 20);
+ }
+
+ /// @notice 21 registered members against a 20-slot bundle: the last country tier is
+ /// only partially included (3 of 4) and the bundle caps at exactly 20.
+ function test_buildBundle_twentyOneMembers_partialInclusion() public {
+ // 21 total: local 12 + country 8 + 1 extra country at tier 2.
+ // With partial inclusion, bundle fills to 20.
+ // TIER_CAPACITY = 4, so spread across tiers.
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0);
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1100, 1);
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1200, 2);
+ _registerNCountryAt(alice, US, 4, 2000, 0);
+ _registerNCountryAt(alice, US, 4, 2100, 1);
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(3000), US, 2);
+
+ // Cursor=2: local(4)+country(1)=5. Count=5, room=15.
+ // Cursor=1: local(4)+country(4)=8. Count=13, room=7.
+ // Cursor=0: local(4)≤7→include 4[count=17,room=3].
+ // country(4)>3→include 3 of 4[count=20,room=0].
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 20); // caps at max bundle size
+ }
+
+ // ── Integrity ──
+
+ function test_buildBundle_noDuplicateUUIDs() public {
+ // Every UUID in the bundle must be unique — pairwise comparison over the result.
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0);
+ _registerNCountryAt(bob, US, 4, 2000, 0);
+
+ (bytes16[] memory bundle, uint256 size) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ for (uint256 a = 1; a < size; a++) {
+ for (uint256 b = 0; b < a; b++) {
+ assertTrue(bundle[a] != bundle[b], "Duplicate UUID found");
+ }
+ }
+ }
+
+ /// @notice Every returned UUID must resolve to a token with a live (non-zero) owner.
+ function test_buildBundle_noNonExistentUUIDs() public {
+ _registerNLocalAt(alice, US, ADMIN_CA, 3, 1000, 0);
+ _registerNCountryAt(bob, US, 2, 2000, 0);
+ vm.prank(carol);
+ fleet.registerFleetLocal(UUID_1, US, ADMIN_CA, 0);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertEq(count, 6); // 3 local + 2 country + 1 local from carol
+ for (uint256 i = 0; i < count; i++) {
+ uint256 tokenId = _findTokenId(uuids[i], US, ADMIN_CA);
+ assertTrue(fleet.ownerOf(tokenId) != address(0));
+ }
+ }
+
+ /// @notice Region integrity: the bundle contains exactly the 4 local and 3 country
+ /// fleets registered, attributed to the right region keys.
+ function test_buildBundle_allReturnedAreFromCorrectRegions() public {
+ // Verify returned UUIDs are from local or country regions.
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 1000, 0);
+ _registerNCountryAt(alice, US, 3, 2000, 0);
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+
+ uint256 localFound;
+ uint256 countryFound;
+ for (uint256 i = 0; i < count; i++) {
+ uint256 tid = _findTokenId(uuids[i], US, ADMIN_CA);
+ uint32 region = fleet.tokenRegion(tid);
+ if (region == _regionUSCA()) localFound++;
+ else if (region == _regionUS()) countryFound++;
+ }
+ assertEq(localFound, 4, "local count");
+ assertEq(countryFound, 3, "country count");
+ }
+
+ // ── Fuzz ──
+
+ /// @notice Fuzz invariant: whatever the population (up to 15 per level, spread over
+ /// tiers 0-3 via i/4), the bundle never exceeds its 20-slot cap.
+ function testFuzz_buildBundle_neverExceeds20(uint8 cCount, uint8 lCount) public {
+ cCount = uint8(bound(cCount, 0, 15));
+ lCount = uint8(bound(lCount, 0, 15));
+
+ for (uint256 i = 0; i < cCount; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(31_000 + i), US, i / 4);
+ }
+ for (uint256 i = 0; i < lCount; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(32_000 + i), US, ADMIN_CA, i / 4);
+ }
+
+ (, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ assertLe(count, 20);
+ }
+
+ /// @notice Fuzz invariant: no UUID appears twice in any bundle, for arbitrary
+ /// populations across both levels.
+ function testFuzz_buildBundle_noDuplicates(uint8 cCount, uint8 lCount) public {
+ cCount = uint8(bound(cCount, 0, 12));
+ lCount = uint8(bound(lCount, 0, 12));
+
+ for (uint256 i = 0; i < cCount; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(41_000 + i), US, i / 4);
+ }
+ for (uint256 i = 0; i < lCount; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(42_000 + i), US, ADMIN_CA, i / 4);
+ }
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // Pairwise uniqueness check; count ≤ 20 keeps this cheap.
+ for (uint256 i = 0; i < count; i++) {
+ for (uint256 j = i + 1; j < count; j++) {
+ assertTrue(uuids[i] != uuids[j], "Fuzz: duplicate UUID");
+ }
+ }
+ }
+
+ /// @notice Fuzz invariant: every UUID the builder returns resolves to an existing
+ /// token with a non-zero owner.
+ function testFuzz_buildBundle_allReturnedUUIDsExist(uint8 cCount, uint8 lCount) public {
+ cCount = uint8(bound(cCount, 0, 12));
+ lCount = uint8(bound(lCount, 0, 12));
+
+ for (uint256 i = 0; i < cCount; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(51_000 + i), US, i / 4);
+ }
+ for (uint256 i = 0; i < lCount; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(52_000 + i), US, ADMIN_CA, i / 4);
+ }
+
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ for (uint256 i = 0; i < count; i++) {
+ uint256 tokenId = _findTokenId(uuids[i], US, ADMIN_CA);
+ assertTrue(fleet.ownerOf(tokenId) != address(0), "Fuzz: UUID does not exist");
+ }
+ }
+
+ /// @notice Fuzz invariant: within each (region, tier) group, the included members are
+ /// a prefix of that tier's member array (registration order) — never a hole-y subset.
+ /// NOTE(review): the outer i-loop re-runs the identical group check for every element
+ /// of the same group (O(n³) external calls); deduplicating by (region, tier) would
+ /// speed up fuzz runs without changing coverage.
+ function testFuzz_buildBundle_partialInclusionInvariant(uint8 cCount, uint8 lCount) public {
+ cCount = uint8(bound(cCount, 0, 12));
+ lCount = uint8(bound(lCount, 0, 12));
+
+ for (uint256 i = 0; i < cCount; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(61_000 + i), US, i / 4);
+ }
+ for (uint256 i = 0; i < lCount; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(62_000 + i), US, ADMIN_CA, i / 4);
+ }
+
+ (bytes16[] memory uuids2, uint256 count2) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+
+ // With partial inclusion: for each (region, tier) group in the bundle,
+ // the included members should be a PREFIX of the full tier (registration order).
+ // We verify this by checking that included members are the first N in the tier's array.
+ for (uint256 i = 0; i < count2; i++) {
+ uint256 tid = _findTokenId(uuids2[i], US, ADMIN_CA);
+ uint32 region = fleet.tokenRegion(tid);
+ uint256 tier = fleet.fleetTier(tid);
+
+ // Count how many from this (region, tier) are in the bundle
+ uint256 inBundle;
+ for (uint256 j = 0; j < count2; j++) {
+ uint256 tjd = _findTokenId(uuids2[j], US, ADMIN_CA);
+ if (fleet.tokenRegion(tjd) == region && fleet.fleetTier(tjd) == tier) {
+ inBundle++;
+ }
+ }
+
+ // Get the full tier members
+ uint256[] memory tierMembers = fleet.getTierMembers(region, tier);
+
+ // The included count should be <= total tier members
+ assertLe(inBundle, tierMembers.length, "Fuzz: more included than exist");
+
+ // Verify the included members are exactly the first `inBundle` members of the tier
+ // (prefix property for partial inclusion)
+ uint256 found;
+ for (uint256 m = 0; m < inBundle && m < tierMembers.length; m++) {
+ bytes16 expectedUuid = bytes16(uint128(tierMembers[m]));
+ for (uint256 j = 0; j < count2; j++) {
+ if (uuids2[j] == expectedUuid) {
+ found++;
+ break;
+ }
+ }
+ }
+ assertEq(found, inBundle, "Fuzz: included members not a prefix of tier");
+ }
+ }
+
+ // ══════════════════════════════════════════════════════════════════════════════════
+ // Edge Cases: _findCheapestInclusionTier & MaxTiersReached
+ // ══════════════════════════════════════════════════════════════════════════════════
+
+ /// @notice When all 24 tiers of a region are full, localInclusionHint should revert
+ /// with MaxTiersReached — there is no tier left to recommend.
+ function test_RevertIf_localInclusionHint_allTiersFull() public {
+ // Fill all 24 tiers of US/ADMIN_CA (4 members each = 96 fleets)
+ // TIER_CAPACITY = 4, MAX_TIERS = 24
+ for (uint256 tier = 0; tier < 24; tier++) {
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(tier * 100 + i), US, ADMIN_CA, tier);
+ }
+ }
+
+ // Verify all tiers are full
+ for (uint256 tier = 0; tier < 24; tier++) {
+ assertEq(fleet.tierMemberCount(fleet.adminRegionKey(US, ADMIN_CA), tier), 4);
+ }
+
+ // localInclusionHint should revert
+ vm.expectRevert(FleetIdentity.MaxTiersReached.selector);
+ fleet.localInclusionHint(US, ADMIN_CA);
+ }
+
+ /// @notice When all tiers are full, registering at any tier should revert with TierFull
+ /// (the tier-level capacity check fires before any hint logic is involved).
+ function test_RevertIf_registerFleetLocal_allTiersFull() public {
+ // Fill all 24 tiers (TIER_CAPACITY = 4)
+ for (uint256 tier = 0; tier < 24; tier++) {
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(tier * 100 + i), US, ADMIN_CA, tier);
+ }
+ }
+
+ // Registration at tier 0 (or any full tier) should revert with TierFull
+ vm.prank(bob);
+ vm.expectRevert(FleetIdentity.TierFull.selector);
+ fleet.registerFleetLocal(_uuid(99999), US, ADMIN_CA, 0);
+ }
+
+ /// @notice countryInclusionHint reverts with MaxTiersReached when all tiers in the
+ /// country region are full — mirrors the local-level case above.
+ function test_RevertIf_countryInclusionHint_allTiersFull() public {
+ // Fill all 24 tiers of country US (4 members each, TIER_CAPACITY = 4)
+ for (uint256 tier = 0; tier < 24; tier++) {
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetCountry(_uuid(tier * 100 + i), US, tier);
+ }
+ }
+
+ vm.expectRevert(FleetIdentity.MaxTiersReached.selector);
+ fleet.countryInclusionHint(US);
+ }
+
+ /// @notice Proves cheapest inclusion tier can be ABOVE maxTierIndex when bundle is
+ /// constrained by higher-priority levels at existing tiers.
+ ///
+ /// Scenario:
+ /// - Fill admin tiers 0, 1, 2 with 4 members each (full)
+ /// - Country US has 4 fleets at tier 2 (maxTierIndex)
+ /// - Admin tier 0-2 are FULL (4 members each), so a new fleet cannot join any.
+ /// - Cheapest inclusion should be tier 3 (above maxTierIndex=2).
+ /// Also verifies a follow-up registration at the hinted tier succeeds and lands
+ /// first in the resulting bundle.
+ function test_cheapestInclusionTier_aboveMaxTierIndex() public {
+ // Fill admin tiers 0, 1, 2 with 4 members each (TIER_CAPACITY = 4)
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 4000, 0);
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 5000, 1);
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 6000, 2);
+ // Country at tier 2 (sets maxTierIndex across regions)
+ _registerNCountryAt(alice, US, 4, 7000, 2);
+
+ // Verify tier 2 is maxTierIndex
+ assertEq(fleet.regionTierCount(fleet.countryRegionKey(US)), 3);
+ assertEq(fleet.regionTierCount(fleet.adminRegionKey(US, ADMIN_CA)), 3);
+
+ // All admin tiers 0-2 are full (4 members each = TIER_CAPACITY)
+ assertEq(fleet.tierMemberCount(fleet.adminRegionKey(US, ADMIN_CA), 0), 4);
+ assertEq(fleet.tierMemberCount(fleet.adminRegionKey(US, ADMIN_CA), 1), 4);
+ assertEq(fleet.tierMemberCount(fleet.adminRegionKey(US, ADMIN_CA), 2), 4);
+
+ // At tiers 0-2: all tiers are full (4 members = cap), cannot join.
+ // At tier 3: above maxTierIndex, countBefore = 0, has room.
+ (uint256 inclusionTier, uint256 bond) = fleet.localInclusionHint(US, ADMIN_CA);
+ assertEq(inclusionTier, 3, "Should recommend tier 3 (above maxTierIndex=2)");
+ assertEq(bond, BASE_BOND * 8); // tier 3 bond = BASE_BOND * 2^3
+
+ // Verify registration at tier 3 works
+ vm.prank(bob);
+ uint256 tokenId = fleet.registerFleetLocal(_uuid(9999), US, ADMIN_CA, 3);
+ assertEq(fleet.fleetTier(tokenId), 3);
+
+ // Confirm new fleet appears in bundle at the TOP (first position)
+ (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+ // tier 3 (1) + tier 2 admin (4) + tier 2 country (4) + tier 1 admin (4) + tier 0 admin (4) = 17
+ // But capped at 12 local + 8 country = 20 max. We have 13 local + 4 country = 17.
+ assertEq(count, 17, "tier 3 (1) + tier 2 admin (4) + country (4) + tier 1 admin (4) + tier 0 admin (4) = 17");
+ assertEq(uuids[0], _uuid(9999), "Tier 3 fleet should be first in bundle");
+ }
+
+ /// @notice Edge case: bundle is full from tier maxTierIndex, and all tiers 0..maxTierIndex
+ /// at the candidate region are also full. The cheapest tier is above maxTierIndex.
+ /// Minimal variant: maxTierIndex=0 with admin tier 0 full → hint must say tier 1.
+ function test_cheapestInclusionTier_aboveMaxTierIndex_candidateTiersFull() public {
+ // Country tier 0 has 4 fleets
+ _registerNCountryAt(alice, US, 4, 1000, 0);
+
+ // Admin tier 0 has 4 fleets (full)
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 2000, 0);
+
+ // Verify admin tier 0 is full
+ assertEq(fleet.tierMemberCount(fleet.adminRegionKey(US, ADMIN_CA), 0), 4);
+
+ // Admin tier 0 is full (4 members = TIER_CAPACITY), so candidate must go elsewhere.
+ // Cheapest inclusion tier should be 1 (above maxTierIndex=0).
+ (uint256 inclusionTier,) = fleet.localInclusionHint(US, ADMIN_CA);
+ assertEq(inclusionTier, 1, "Should recommend tier 1 since tier 0 is full");
+ }
+
+ /// @notice When going above maxTierIndex would require tier >= MAX_TIERS, revert.
+ /// With every tier 0..23 full, the only candidate would be tier 24, which is out of
+ /// range (valid tiers are 0-23), so the hint reverts with MaxTiersReached.
+ /// NOTE(review): setup and assertion duplicate
+ /// test_RevertIf_localInclusionHint_allTiersFull minus the tierMemberCount
+ /// verification loop — consider consolidating the two tests.
+ function test_RevertIf_cheapestInclusionTier_exceedsMaxTiers() public {
+ // Fill all 24 tiers of admin area US/CA with 4 members each (TIER_CAPACITY = 4)
+ for (uint256 tier = 0; tier < 24; tier++) {
+ for (uint256 i = 0; i < 4; i++) {
+ vm.prank(alice);
+ fleet.registerFleetLocal(_uuid(tier * 100 + i), US, ADMIN_CA, tier);
+ }
+ }
+
+ // Now all admin tiers 0-23 are full. A new admin fleet must go to tier 24,
+ // which exceeds MAX_TIERS=24 (valid tiers are 0-23).
+ vm.expectRevert(FleetIdentity.MaxTiersReached.selector);
+ fleet.localInclusionHint(US, ADMIN_CA);
+ }
+
+ /// @notice Verify that when bundle is full due to higher-tier members preventing
+ /// lower-tier inclusion, the hint correctly identifies the cheapest viable tier
+ /// (here: tier 6, one above the populated range).
+ function test_cheapestInclusionTier_bundleFullFromHigherTiers() public {
+ // Create a scenario where:
+ // - Admin tiers 0-5 are all full (4 each = TIER_CAPACITY)
+ // - Country tier 5 has 4 members
+ // Total at tier 5: 4 country + 4 admin = 8
+ // All admin tiers 0-5 are full, so must go to tier 6.
+
+ // Fill admin tiers 0-5 with 4 members each
+ for (uint256 tier = 0; tier <= 5; tier++) {
+ _registerNLocalAt(alice, US, ADMIN_CA, 4, 10000 + tier * 100, tier);
+ }
+ // Country at tier 5
+ _registerNCountryAt(alice, US, 4, 11000, 5);
+
+ // maxTierIndex = 5
+ // All admin tiers 0-5 are full (4 = capacity). Cannot join any.
+ // At tier 6: above maxTierIndex, countBefore = 0. Has room.
+ (uint256 inclusionTier,) = fleet.localInclusionHint(US, ADMIN_CA);
+ assertEq(inclusionTier, 6, "Must go above maxTierIndex=5 to tier 6");
+ }
+
+    /// @notice A fleet registered above the current maxTierIndex still lands
+    ///         in the bundle — and, being the highest tier, lands first.
+    function test_buildBundle_includesFleetAboveMaxTierIndex() public {
+        // Baseline population: 4 country fleets at tier 0 (maxTierIndex = 0).
+        _registerNCountryAt(alice, US, 4, 20000, 0);
+
+        // Bob jumps straight to tier 2, above every existing fleet.
+        vm.prank(bob);
+        uint256 adminToken = fleet.registerFleetLocal(_uuid(21000), US, ADMIN_CA, 2);
+
+        (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+
+        // 1 admin fleet (tier 2) + 4 country fleets (tier 0).
+        assertEq(count, 5, "Admin tier 2 (1) + Country tier 0 (4) = 5");
+        // The highest tier is emitted first.
+        assertEq(_tokenId(uuids[0], _regionUSCA()), adminToken, "Admin tier 2 fleet should be first");
+    }
+
+ // ══════════════════════════════════════════════════════════════════════════════════
+ // Demonstration: Partial inclusion prevents total tier displacement
+ // ══════════════════════════════════════════════════════════════════════════════════
+
+    /// @notice DEMONSTRATES that partial inclusion prevents the scenario where a single
+    /// fleet registration could push an entire tier out of the bundle.
+    ///
+    /// Scenario (2-level system: country + local):
+    /// BEFORE:  admin tier 0 (4) + country tier 0 (4) => bundle of 8.
+    /// AFTER a single admin tier-1 registration (above previous maxTierIndex):
+    ///   - Tier 1: admin(1)               -> count = 1
+    ///   - Tier 0: admin(4) + country(4)  -> count = 9
+    /// With partial inclusion all 9 fit: no original fleet is displaced.
+    ///
+    /// @dev Cleanup vs. previous revision: the unused `localIds` binding is gone
+    /// and the two post-registration scans over the AFTER bundle are merged into
+    /// a single pass; all assertions are unchanged.
+    function test_DEMO_partialInclusionPreventsFullDisplacement() public {
+        // === BEFORE STATE ===
+        uint32 countryRegion = fleet.countryRegionKey(US);
+
+        // Fill with admin(4) + country(4) = 8
+        _registerNLocalAt(alice, US, ADMIN_CA, 4, 30000, 0); // Admin tier 0: 4
+        uint256[] memory countryIds = _registerNCountryAt(alice, US, 4, 31000, 0); // Country tier 0: 4
+
+        // Verify BEFORE: all 8 members in bundle
+        (bytes16[] memory uuidsBefore, uint256 countBefore) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+        assertEq(countBefore, 8, "BEFORE: All 8 members should be in bundle");
+
+        // Verify all 4 country fleets are included BEFORE
+        uint256 countryCountBefore;
+        for (uint256 i = 0; i < countBefore; i++) {
+            uint256 tokenId = _findTokenId(uuidsBefore[i], US, ADMIN_CA);
+            if (fleet.tokenRegion(tokenId) == countryRegion) countryCountBefore++;
+        }
+        assertEq(countryCountBefore, 4, "BEFORE: All 4 country fleets in bundle");
+
+        // === SINGLE REGISTRATION ===
+        // Bob registers just ONE fleet at admin tier 1
+        vm.prank(bob);
+        fleet.registerFleetLocal(_uuid(99999), US, ADMIN_CA, 1);
+
+        // === AFTER STATE ===
+        (bytes16[] memory uuidsAfter, uint256 countAfter) = fleet.buildHighestBondedUuidBundle(US, ADMIN_CA);
+
+        // Bundle now has 9 members (tier 1: 1 + tier 0: 4+4)
+        assertEq(countAfter, 9, "AFTER: Bundle should have 9 members");
+
+        // One pass: count country fleets AND record which specific ones appear.
+        uint256 countryCountAfter;
+        bool[] memory countryIncluded = new bool[](4);
+        for (uint256 i = 0; i < countAfter; i++) {
+            uint256 tokenId = _findTokenId(uuidsAfter[i], US, ADMIN_CA);
+            if (fleet.tokenRegion(tokenId) == countryRegion) countryCountAfter++;
+            for (uint256 c = 0; c < 4; c++) {
+                if (tokenId == countryIds[c]) countryIncluded[c] = true;
+            }
+        }
+        assertEq(countryCountAfter, 4, "AFTER: All 4 country fleets still in bundle");
+        assertTrue(countryIncluded[0], "First country fleet included");
+        assertTrue(countryIncluded[1], "Second country fleet included");
+        assertTrue(countryIncluded[2], "Third country fleet included");
+        assertTrue(countryIncluded[3], "Fourth country fleet included");
+
+        // === IMPROVEMENT SUMMARY ===
+        emit log_string("=== PARTIAL INCLUSION FIX DEMONSTRATED ===");
+        emit log_string("A single tier-1 registration does not displace any country fleets");
+        emit log_named_uint("Country fleets displaced", 0);
+        emit log_named_uint("Country fleets still included", 4);
+    }
+}
diff --git a/test/FleetIdentityFairness.t.sol b/test/FleetIdentityFairness.t.sol
new file mode 100644
index 0000000..f1923c5
--- /dev/null
+++ b/test/FleetIdentityFairness.t.sol
@@ -0,0 +1,562 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.24;
+
+import "forge-std/Test.sol";
+import "../src/swarms/FleetIdentity.sol";
+import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol";
+
+/// @dev Minimal ERC-20 mock with public mint for testing.
+contract MockERC20Fairness is ERC20 {
+    constructor() ERC20("Mock Bond Token", "MBOND") {}
+
+    /// @notice Unrestricted mint — test-only funding helper; never deploy outside tests.
+    /// @param to Recipient of the newly minted tokens.
+    /// @param amount Amount to mint (ERC20 default 18 decimals).
+    function mint(address to, uint256 amount) external {
+        _mint(to, amount);
+    }
+}
+
+/**
+ * @title FleetIdentityFairness Tests
+ * @notice Economic fairness analysis for FleetIdentity bundle allocation.
+ *
+ * @dev **Fairness Philosophy - Economic Advantage Model**
+ *
+ * The FleetIdentity contract uses a simple tier-descent algorithm:
+ * - Iterate from highest tier to lowest
+ * - At each tier: include local fleets first, then country fleets
+ * - Stop when bundle is full (20 slots)
+ *
+ * **Economic Fairness via COUNTRY_BOND_MULTIPLIER (8×)**
+ *
+ * Country fleets pay 8× more than local fleets at the same tier:
+ * - Local tier 0: BASE_BOND * 1 = 100 NODL
+ * - Country tier 0: BASE_BOND * 8 = 800 NODL
+ * - Local tier 3: BASE_BOND * 8 = 800 NODL (same cost!)
+ *
+ * This means a local player can reach tier 3 for the same cost a country player
+ * pays for tier 0. The 8× multiplier provides significant economic advantage to locals:
+ *
+ * | Tier | Local Bond | Country Bond | Country Overpay vs Local Same Tier |
+ * |------|------------|--------------|-----------------------------------|
+ * | 0 | 100 NODL | 800 NODL | 8× |
+ * | 1 | 200 NODL | 1600 NODL | 8× |
+ * | 2 | 400 NODL | 3200 NODL | 8× |
+ * | 3 | 800 NODL | 6400 NODL | 8× |
+ *
+ * **Priority Rules**
+ *
+ * 1. Higher tier always wins (regardless of level)
+ * 2. Within same tier: local beats country
+ * 3. Within same tier + level: earlier registration wins
+ *
+ * **Whale Attack Analysis**
+ *
+ * A country whale trying to dominate must pay significantly more:
+ * - To fill 8 country slots at tier 3: 8 × 6400 NODL = 51,200 NODL
+ * - 12 locals could counter at tier 3 for: 12 × 800 NODL = 9,600 NODL
+ * - Whale pays 5.3× more to compete at the same tier level
+ */
+contract FleetIdentityFairnessTest is Test {
+ MockERC20Fairness bondToken;
+
+ // Test addresses representing different market participants
+ address[] localPlayers;
+ address[] countryPlayers;
+ address whale;
+
+ uint256 constant BASE_BOND = 100 ether;
+ uint256 constant NUM_LOCAL_PLAYERS = 20;
+ uint256 constant NUM_COUNTRY_PLAYERS = 10;
+
+ // Test country and admin areas
+ uint16 constant COUNTRY_US = 840;
+ uint16[] adminAreas;
+ uint256 constant NUM_ADMIN_AREAS = 5;
+
+    /// @dev Creates the bond token, the whale, both player cohorts, and the
+    ///      admin-area list, funding every actor generously up front.
+    function setUp() public {
+        bondToken = new MockERC20Fairness();
+
+        uint256 funding = 1_000_000_000_000 ether;
+
+        // Whale plus two cohorts of deterministic player addresses, each
+        // funded as soon as it is created.
+        whale = address(0xABCDEF);
+        bondToken.mint(whale, funding);
+
+        for (uint256 i = 0; i < NUM_LOCAL_PLAYERS; i++) {
+            address p = address(uint160(0x1000 + i));
+            localPlayers.push(p);
+            bondToken.mint(p, funding);
+        }
+        for (uint256 i = 0; i < NUM_COUNTRY_PLAYERS; i++) {
+            address p = address(uint160(0x2000 + i));
+            countryPlayers.push(p);
+            bondToken.mint(p, funding);
+        }
+
+        // Admin areas 1..NUM_ADMIN_AREAS.
+        for (uint16 a = 1; a <= NUM_ADMIN_AREAS; a++) {
+            adminAreas.push(a);
+        }
+    }
+
+ // ══════════════════════════════════════════════════════════════════════════════════
+ // Helper Functions
+ // ══════════════════════════════════════════════════════════════════════════════════
+
+    /// @dev Deploys a fresh FleetIdentity and pre-approves it to pull bonds
+    ///      from the whale and from every local and country player.
+    function _deployFleet() internal returns (FleetIdentity) {
+        FleetIdentity fleet = new FleetIdentity(address(bondToken), BASE_BOND);
+
+        vm.prank(whale);
+        bondToken.approve(address(fleet), type(uint256).max);
+
+        // Single pass over both cohorts: locals first, then country players.
+        uint256 nLocals = localPlayers.length;
+        uint256 total = nLocals + countryPlayers.length;
+        for (uint256 i = 0; i < total; i++) {
+            address actor = i < nLocals ? localPlayers[i] : countryPlayers[i - nLocals];
+            vm.prank(actor);
+            bondToken.approve(address(fleet), type(uint256).max);
+        }
+
+        return fleet;
+    }
+
+    /// @dev Deterministic pseudo-UUID derived from a numeric seed (top 16
+    ///      bytes of the keccak digest).
+    function _uuid(uint256 seed) internal pure returns (bytes16) {
+        bytes32 digest = keccak256(abi.encodePacked("fleet-fairness-", seed));
+        return bytes16(digest);
+    }
+
+    /// @dev Packs a country code and an admin area into a 32-bit region key:
+    ///      country in the high bits, admin area in the low 10 bits.
+    ///      Currently unused by this test suite; kept as a reference for the
+    ///      key encoding. NOTE(review): assumes the admin area fits in 10 bits
+    ///      and that this matches FleetIdentity's internal layout — confirm
+    ///      against the contract before relying on it.
+    function _makeAdminRegion(uint16 cc, uint16 admin) internal pure returns (uint32) {
+        return (uint32(cc) << 10) | uint32(admin);
+    }
+
+    /// @dev Count how many slots in a bundle are from country vs local registrations.
+    ///
+    /// Classification heuristic: a uuid counts as "country" when a token exists
+    /// for it under the bare country region key, otherwise it counts as "local".
+    /// NOTE(review): a uuid registered in BOTH regions would be counted as
+    /// country only — fine here because every test uses disjoint seed ranges
+    /// per region, but verify before reusing this helper elsewhere.
+    function _countBundleComposition(FleetIdentity fleet, uint16 cc, uint16 admin)
+        internal
+        view
+        returns (uint256 localCount, uint256 countryCount)
+    {
+        (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(cc, admin);
+        // Country fleets live under the plain country-code region key.
+        uint32 countryRegion = uint32(cc);
+
+        for (uint256 i = 0; i < count; i++) {
+            // ownerOf reverts for nonexistent tokens, so a successful call
+            // means this uuid was minted in the country region.
+            uint256 countryTokenId = fleet.computeTokenId(uuids[i], countryRegion);
+            try fleet.ownerOf(countryTokenId) returns (address) {
+                countryCount++;
+            } catch {
+                localCount++;
+            }
+        }
+    }
+
+ // ══════════════════════════════════════════════════════════════════════════════════
+ // Scenario Tests: Priority & Economic Behavior
+ // ══════════════════════════════════════════════════════════════════════════════════
+
+    /**
+     * @notice Scenario A: Local-Heavy Market.
+     *         16 locals spread over tiers 0-3 plus 4 tier-0 country fleets —
+     *         every registrant should fit in the 20-slot bundle.
+     */
+    function test_scenarioA_localHeavyMarket() public {
+        FleetIdentity fleet = _deployFleet();
+        uint16 targetAdmin = adminAreas[0];
+
+        // Locals fill tiers 0-3 at TIER_CAPACITY (4 per tier).
+        for (uint256 t = 0; t < 4; t++) {
+            for (uint256 s = 0; s < 4; s++) {
+                uint256 idx = t * 4 + s;
+                vm.prank(localPlayers[idx % NUM_LOCAL_PLAYERS]);
+                fleet.registerFleetLocal(_uuid(1000 + idx), COUNTRY_US, targetAdmin, t);
+            }
+        }
+
+        // A handful of country fleets join at tier 0.
+        for (uint256 c = 0; c < 4; c++) {
+            vm.prank(countryPlayers[c]);
+            fleet.registerFleetCountry(_uuid(2000 + c), COUNTRY_US, 0);
+        }
+
+        (uint256 localCount, uint256 countryCount) = _countBundleComposition(fleet, COUNTRY_US, targetAdmin);
+        (, uint256 totalCount) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin);
+
+        emit log_string("=== Scenario A: Local-Heavy Market ===");
+        emit log_named_uint("Total bundle size", totalCount);
+        emit log_named_uint("Local slots used", localCount);
+        emit log_named_uint("Country slots used", countryCount);
+
+        // Tier descent admits all 16 locals, then the 4 country fleets.
+        assertEq(localCount, 16, "All 16 locals should be included");
+        assertEq(countryCount, 4, "All 4 country should fill remaining slots");
+        assertEq(totalCount, 20, "Bundle should be full");
+    }
+
+    /**
+     * @notice Scenario B: Country-Heavy Market.
+     *         Country fleets sit at tiers 1-3, above the tier-0 locals, so
+     *         tier descent puts all of them ahead of the locals.
+     */
+    function test_scenarioB_countryHighTierDominance() public {
+        FleetIdentity fleet = _deployFleet();
+        uint16 targetAdmin = adminAreas[0];
+
+        // Tier-0 locals.
+        for (uint256 l = 0; l < 4; l++) {
+            vm.prank(localPlayers[l]);
+            fleet.registerFleetLocal(_uuid(1000 + l), COUNTRY_US, targetAdmin, 0);
+        }
+
+        // Country fleets at tiers 1-3, four per tier (TIER_CAPACITY).
+        for (uint256 t = 1; t <= 3; t++) {
+            for (uint256 s = 0; s < 4; s++) {
+                uint256 idx = (t - 1) * 4 + s;
+                vm.prank(countryPlayers[idx % NUM_COUNTRY_PLAYERS]);
+                fleet.registerFleetCountry(_uuid(2000 + idx), COUNTRY_US, t);
+            }
+        }
+
+        (uint256 localCount, uint256 countryCount) = _countBundleComposition(fleet, COUNTRY_US, targetAdmin);
+        (, uint256 totalCount) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin);
+
+        emit log_string("=== Scenario B: Country High-Tier Dominance ===");
+        emit log_named_uint("Total bundle size", totalCount);
+        emit log_named_uint("Local slots used", localCount);
+        emit log_named_uint("Country slots used", countryCount);
+
+        assertEq(countryCount, 12, "All 12 country (higher tiers) included first");
+        assertEq(localCount, 4, "Tier-0 locals fill remaining slots");
+        assertEq(totalCount, 16, "Total should equal all registered fleets");
+    }
+
+    /**
+     * @notice Scenario C: Same-Tier Competition.
+     *         With locals and country fleets at the same tier, locals are
+     *         ordered ahead of country within that tier.
+     */
+    function test_scenarioC_sameTierLocalPriority() public {
+        FleetIdentity fleet = _deployFleet();
+        uint16 targetAdmin = adminAreas[0];
+
+        // Four locals and four country fleets, all at tier 0.
+        for (uint256 i = 0; i < 4; i++) {
+            vm.prank(localPlayers[i]);
+            fleet.registerFleetLocal(_uuid(1000 + i), COUNTRY_US, targetAdmin, 0);
+        }
+        for (uint256 i = 0; i < 4; i++) {
+            vm.prank(countryPlayers[i]);
+            fleet.registerFleetCountry(_uuid(2000 + i), COUNTRY_US, 0);
+        }
+
+        (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin);
+
+        emit log_string("=== Scenario C: Same-Tier Local Priority ===");
+        emit log_named_uint("Total bundle size", count);
+
+        // Single pass over the bundle: slots 0-3 are locals, 4-7 are country.
+        for (uint256 slot = 0; slot < 8; slot++) {
+            if (slot < 4) {
+                assertEq(uuids[slot], _uuid(1000 + slot), "Locals should come first");
+            } else {
+                assertEq(uuids[slot], _uuid(2000 + (slot - 4)), "Country should follow locals");
+            }
+        }
+    }
+
+    /**
+     * @notice Scenario D: Country Whale at High Tier.
+     *         A whale CAN take priority — but only by paying for tiers above
+     *         every local, and even then the locals still fit in the bundle.
+     */
+    function test_scenarioD_countryWhaleHighTier() public {
+        FleetIdentity fleet = _deployFleet();
+        uint16 targetAdmin = adminAreas[0];
+
+        // Locals occupy tiers 0-2 (four per tier).
+        for (uint256 l = 0; l < 12; l++) {
+            vm.prank(localPlayers[l]);
+            fleet.registerFleetLocal(_uuid(1000 + l), COUNTRY_US, targetAdmin, l / 4);
+        }
+
+        // The whale buys 8 country slots at tiers 3-4, above every local.
+        for (uint256 w = 0; w < 8; w++) {
+            vm.prank(whale);
+            fleet.registerFleetCountry(_uuid(3000 + w), COUNTRY_US, 3 + (w / 4));
+        }
+
+        (, uint256 count) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin);
+        (uint256 localCount, uint256 countryCount) = _countBundleComposition(fleet, COUNTRY_US, targetAdmin);
+
+        emit log_string("=== Scenario D: Country Whale at High Tier ===");
+        emit log_named_uint("Total bundle size", count);
+        emit log_named_uint("Local slots", localCount);
+        emit log_named_uint("Country slots", countryCount);
+
+        // 8 whale fleets + 12 locals = exactly one full 20-slot bundle.
+        assertEq(countryCount, 8, "Whale's 8 high-tier country fleets included");
+        assertEq(localCount, 12, "All 12 locals at lower tiers included");
+        assertEq(count, 20, "Bundle full");
+    }
+
+    /**
+     * @notice Scenario E: Locals Counter Whale by Matching Tier.
+     *         At an equal tier locals rank first AND pay 8x less, so matching
+     *         a whale's tier is always economically viable for locals.
+     */
+    function test_scenarioE_localsCounterWhale() public {
+        FleetIdentity fleet = _deployFleet();
+        uint16 targetAdmin = adminAreas[0];
+
+        // Whale: 4 country fleets at tier 3 (4 x 6,400 = 25,600 NODL).
+        for (uint256 w = 0; w < 4; w++) {
+            vm.prank(whale);
+            fleet.registerFleetCountry(_uuid(3000 + w), COUNTRY_US, 3);
+        }
+
+        // Locals match the tier at an eighth of the price (4 x 800 = 3,200 NODL).
+        for (uint256 l = 0; l < 4; l++) {
+            vm.prank(localPlayers[l]);
+            fleet.registerFleetLocal(_uuid(1000 + l), COUNTRY_US, targetAdmin, 3);
+        }
+
+        (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin);
+
+        emit log_string("=== Scenario E: Locals Counter Whale ===");
+        emit log_named_uint("Total bundle size", count);
+
+        // Same tier => locals first, whale's country fleets after.
+        for (uint256 slot = 0; slot < 8; slot++) {
+            if (slot < 4) {
+                assertEq(uuids[slot], _uuid(1000 + slot), "Locals come first at same tier");
+            } else {
+                assertEq(uuids[slot], _uuid(3000 + (slot - 4)), "Country follows at same tier");
+            }
+        }
+
+        // Quantify the whale's overpayment at the shared tier.
+        uint256 whaleCost = 4 * fleet.tierBond(3, true);
+        uint256 localCost = 4 * fleet.tierBond(3, false);
+
+        emit log_named_uint("Whale total cost (ether)", whaleCost / 1 ether);
+        emit log_named_uint("Locals total cost (ether)", localCost / 1 ether);
+        emit log_named_uint("Whale overpay factor", whaleCost / localCost);
+
+        assertEq(whaleCost / localCost, 8, "Whale pays 8x more for same tier");
+    }
+
+ // ══════════════════════════════════════════════════════════════════════════════════
+ // Economic Metrics & Analysis
+ // ══════════════════════════════════════════════════════════════════════════════════
+
+    /**
+     * @notice The COUNTRY_BOND_MULTIPLIER constant is 8, and the country bond
+     *         is exactly 8x the local bond at each checked tier (0-5).
+     */
+    function test_economicAdvantage_8xMultiplier() public {
+        FleetIdentity fleet = _deployFleet();
+
+        assertEq(fleet.COUNTRY_BOND_MULTIPLIER(), 8, "Multiplier should be 8");
+
+        uint256 tier = 0;
+        while (tier < 6) {
+            assertEq(
+                fleet.tierBond(tier, true),
+                fleet.tierBond(tier, false) * 8,
+                "Country should pay 8x at every tier"
+            );
+            tier++;
+        }
+    }
+
+    /**
+     * @notice A local fleet at tier N+3 bonds exactly what a country fleet
+     *         bonds at tier N (2^3 doubling == the 8x country multiplier).
+     */
+    function test_economicAdvantage_localTierEquivalence() public {
+        FleetIdentity fleet = _deployFleet();
+
+        uint256 local3 = fleet.tierBond(3, false);
+        uint256 local4 = fleet.tierBond(4, false);
+        uint256 local5 = fleet.tierBond(5, false);
+
+        assertEq(local3, fleet.tierBond(0, true), "Local tier 3 should equal country tier 0");
+        assertEq(local4, fleet.tierBond(1, true), "Local tier 4 should equal country tier 1");
+        assertEq(local5, fleet.tierBond(2, true), "Local tier 5 should equal country tier 2");
+
+        emit log_string("=== Local Tier Equivalence ===");
+        emit log_string("Local tier N+3 costs the same as Country tier N");
+        emit log_string("This gives locals a 3-tier economic advantage");
+    }
+
+    /**
+     * @notice A single country registration covers every admin area, so it
+     *         beats per-area local registrations once 8+ areas are needed.
+     */
+    function test_economicAdvantage_multiRegionEfficiency() public {
+        FleetIdentity fleet = _deployFleet();
+
+        // One nationwide bond vs. one bond per admin area.
+        uint256 nationwideBond = fleet.tierBond(0, true);
+        uint256 perAreaBond = fleet.tierBond(0, false);
+        uint256 breakEvenAreas = nationwideBond / perAreaBond;
+
+        emit log_string("=== Multi-Region Efficiency Analysis ===");
+        emit log_named_uint("Country tier-0 bond (ether)", nationwideBond / 1 ether);
+        emit log_named_uint("Local tier-0 bond per area (ether)", perAreaBond / 1 ether);
+        emit log_named_uint("Break-even admin areas", breakEvenAreas);
+
+        assertEq(breakEvenAreas, 8, "Country efficient for 8+ admin areas");
+    }
+
+    /**
+     * @notice Bond escalation analysis showing geometric growth.
+     * @dev Fix: the original logged the table header but never any rows, and
+     *      the per-tier bonds it computed were unused at tier 0. Each tier's
+     *      local/country bond is now logged, and the 2x-per-tier progression
+     *      is still asserted for every tier above 0.
+     */
+    function test_bondEscalationAnalysis() public {
+        FleetIdentity fleet = _deployFleet();
+
+        emit log_string("");
+        emit log_string("=== BOND ESCALATION ANALYSIS ===");
+        emit log_string("");
+        emit log_string("Tier | Local Bond (ether) | Country Bond (ether)");
+        emit log_string("-----+--------------------+---------------------");
+
+        for (uint256 tier = 0; tier <= 6; tier++) {
+            uint256 localBond = fleet.tierBond(tier, false);
+            uint256 countryBond = fleet.tierBond(tier, true);
+
+            // Emit the row the header promises.
+            emit log_named_uint(
+                string.concat("tier ", vm.toString(tier), " local bond (ether)"), localBond / 1 ether
+            );
+            emit log_named_uint(
+                string.concat("tier ", vm.toString(tier), " country bond (ether)"), countryBond / 1 ether
+            );
+
+            // Verify geometric progression (2× per tier)
+            if (tier > 0) {
+                assertEq(localBond, fleet.tierBond(tier - 1, false) * 2, "Local should double each tier");
+                assertEq(countryBond, fleet.tierBond(tier - 1, true) * 2, "Country should double each tier");
+            }
+        }
+    }
+
+ // ══════════════════════════════════════════════════════════════════════════════════
+ // Invariant Tests
+ // ══════════════════════════════════════════════════════════════════════════════════
+
+    /**
+     * @notice CRITICAL: Core economic invariants that must ALWAYS hold.
+     */
+    function test_invariant_coreGuarantees() public {
+        FleetIdentity fleet = _deployFleet();
+
+        // Constants the whole fairness model is built on.
+        assertEq(fleet.COUNTRY_BOND_MULTIPLIER(), 8, "INVARIANT: Country multiplier must be 8");
+        assertEq(fleet.TIER_CAPACITY(), 4, "INVARIANT: Tier capacity must be 4");
+        assertEq(fleet.MAX_BONDED_UUID_BUNDLE_SIZE(), 20, "INVARIANT: Bundle size must be 20");
+
+        // Geometric escalation: each tier's local bond doubles the previous.
+        uint256 previous = fleet.tierBond(0, false);
+        for (uint256 t = 1; t <= 5; t++) {
+            uint256 current = fleet.tierBond(t, false);
+            assertEq(current, previous * 2, "INVARIANT: Bond must double per tier");
+            previous = current;
+        }
+
+        emit log_string("[PASS] All core invariants verified");
+    }
+
+    /**
+     * @notice Bundle ordering always follows tier descent: a higher tier wins
+     *         regardless of whether it is a country or a local registration.
+     */
+    function test_invariant_tierDescentPriority() public {
+        FleetIdentity fleet = _deployFleet();
+        uint16 targetAdmin = adminAreas[0];
+
+        // Locals at tier 1, country at tier 2 — country is higher this time.
+        for (uint256 i = 0; i < 4; i++) {
+            vm.prank(localPlayers[i]);
+            fleet.registerFleetLocal(_uuid(1000 + i), COUNTRY_US, targetAdmin, 1);
+        }
+        for (uint256 i = 0; i < 4; i++) {
+            vm.prank(countryPlayers[i]);
+            fleet.registerFleetCountry(_uuid(2000 + i), COUNTRY_US, 2);
+        }
+
+        (bytes16[] memory uuids, uint256 count) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin);
+        assertEq(count, 8);
+
+        // Slots 0-3: tier-2 country; slots 4-7: tier-1 locals.
+        for (uint256 slot = 0; slot < 8; slot++) {
+            if (slot < 4) {
+                assertEq(uuids[slot], _uuid(2000 + slot), "INVARIANT: Higher tier must come first");
+            } else {
+                assertEq(uuids[slot], _uuid(1000 + (slot - 4)), "Lower tier follows");
+            }
+        }
+    }
+
+ // ══════════════════════════════════════════════════════════════════════════════════
+ // Fuzz Tests
+ // ══════════════════════════════════════════════════════════════════════════════════
+
+    /**
+     * @notice Fuzzes bundle-size properties across random market mixes.
+     */
+    function testFuzz_bundleProperties(uint8 numLocals, uint8 numCountry) public {
+        // Keep market sizes within the addresses/tiers prepared in setUp.
+        numLocals = uint8(bound(numLocals, 1, 16));
+        numCountry = uint8(bound(numCountry, 1, 12));
+
+        FleetIdentity fleet = _deployFleet();
+        uint16 targetAdmin = adminAreas[0];
+
+        // Locals then country fleets, four per tier, ascending tiers.
+        for (uint256 i = 0; i < numLocals; i++) {
+            vm.prank(localPlayers[i % NUM_LOCAL_PLAYERS]);
+            fleet.registerFleetLocal(_uuid(8000 + i), COUNTRY_US, targetAdmin, i / 4);
+        }
+        for (uint256 i = 0; i < numCountry; i++) {
+            vm.prank(countryPlayers[i % NUM_COUNTRY_PLAYERS]);
+            fleet.registerFleetCountry(_uuid(9000 + i), COUNTRY_US, i / 4);
+        }
+
+        (, uint256 count) = fleet.buildHighestBondedUuidBundle(COUNTRY_US, targetAdmin);
+
+        // Property 1: the bundle never exceeds its hard cap.
+        assertLe(count, fleet.MAX_BONDED_UUID_BUNDLE_SIZE(), "Bundle must not exceed max");
+
+        // Property 2: the bundle is packed — min(total registered, 20).
+        uint256 totalRegistered = uint256(numLocals) + uint256(numCountry);
+        uint256 expectedMax = totalRegistered < 20 ? totalRegistered : 20;
+        assertEq(count, expectedMax, "Bundle should maximize utilization");
+    }
+
+    /**
+     * @notice The 8x country multiplier holds at any fuzzed tier.
+     */
+    function testFuzz_constantMultiplier(uint8 tier) public {
+        tier = uint8(bound(tier, 0, 20));
+        FleetIdentity fleet = _deployFleet();
+
+        uint256 baseline = fleet.tierBond(tier, false);
+        assertEq(fleet.tierBond(tier, true), baseline * 8, "8x multiplier must hold at all tiers");
+    }
+}
diff --git a/test/ServiceProvider.t.sol b/test/ServiceProvider.t.sol
new file mode 100644
index 0000000..9672dd1
--- /dev/null
+++ b/test/ServiceProvider.t.sol
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.24;
+
+import "forge-std/Test.sol";
+import "../src/swarms/ServiceProvider.sol";
+
+contract ServiceProviderTest is Test {
+ ServiceProvider provider;
+
+ address alice = address(0xA);
+ address bob = address(0xB);
+
+ string constant URL_1 = "https://backend.swarm.example.com/api/v1";
+ string constant URL_2 = "https://relay.nodle.network:8443";
+ string constant URL_3 = "https://provider.third.io";
+
+ event ProviderRegistered(address indexed owner, string url, uint256 indexed tokenId);
+ event ProviderBurned(address indexed owner, uint256 indexed tokenId);
+
+    function setUp() public {
+        // Fresh ServiceProvider per test; no constructor arguments required.
+        provider = new ServiceProvider();
+    }
+
+ // ==============================
+ // registerProvider
+ // ==============================
+
+    function test_registerProvider_mintsAndStoresURL() public {
+        vm.prank(alice);
+        uint256 tokenId = provider.registerProvider(URL_1);
+
+        // Minted to the caller, and the URL round-trips from storage.
+        assertEq(provider.ownerOf(tokenId), alice);
+        string memory stored = provider.providerUrls(tokenId);
+        assertEq(keccak256(bytes(stored)), keccak256(bytes(URL_1)));
+    }
+
+    function test_registerProvider_deterministicTokenId() public {
+        // Token id is derived purely from the URL: keccak256(url) as uint256.
+        uint256 expected = uint256(keccak256(bytes(URL_1)));
+
+        vm.prank(alice);
+        assertEq(provider.registerProvider(URL_1), expected);
+    }
+
+    function test_registerProvider_emitsEvent() public {
+        // The deterministic id lets us pin every topic before the call.
+        vm.expectEmit(true, true, true, true);
+        emit ProviderRegistered(alice, URL_1, uint256(keccak256(bytes(URL_1))));
+
+        vm.prank(alice);
+        provider.registerProvider(URL_1);
+    }
+
+    function test_registerProvider_multipleProviders() public {
+        vm.prank(alice);
+        uint256 aliceToken = provider.registerProvider(URL_1);
+        vm.prank(bob);
+        uint256 bobToken = provider.registerProvider(URL_2);
+
+        // Distinct URLs yield distinct tokens, each owned by its registrant.
+        assertTrue(aliceToken != bobToken);
+        assertEq(provider.ownerOf(aliceToken), alice);
+        assertEq(provider.ownerOf(bobToken), bob);
+    }
+
+    function test_RevertIf_registerProvider_emptyURL() public {
+        vm.prank(alice);
+        // Empty URLs are rejected with the custom EmptyURL error.
+        vm.expectRevert(ServiceProvider.EmptyURL.selector);
+        provider.registerProvider("");
+    }
+
+    function test_RevertIf_registerProvider_duplicateURL() public {
+        vm.prank(alice);
+        provider.registerProvider(URL_1);
+
+        vm.prank(bob);
+        // Same URL hashes to the same token id, so the second mint reverts.
+        // NOTE(review): bare expectRevert accepts ANY revert; consider pinning
+        // the exact ERC721 mint error once the OpenZeppelin version is confirmed.
+        vm.expectRevert(); // ERC721: token already minted
+        provider.registerProvider(URL_1);
+    }
+
+ // ==============================
+ // burn
+ // ==============================
+
+    function test_burn_deletesURLAndToken() public {
+        vm.prank(alice);
+        uint256 tokenId = provider.registerProvider(URL_1);
+
+        vm.prank(alice);
+        provider.burn(tokenId);
+
+        // Storage is cleared: the URL mapping is empty again...
+        string memory stored = provider.providerUrls(tokenId);
+        assertEq(bytes(stored).length, 0);
+
+        // ...and the token itself is gone (ownerOf reverts).
+        vm.expectRevert();
+        provider.ownerOf(tokenId);
+    }
+
+    function test_burn_emitsEvent() public {
+        vm.prank(alice);
+        uint256 tokenId = provider.registerProvider(URL_1);
+
+        // Burn must announce both the owner and the token being removed.
+        vm.expectEmit(true, true, true, true);
+        emit ProviderBurned(alice, tokenId);
+        vm.prank(alice);
+        provider.burn(tokenId);
+    }
+
+    function test_RevertIf_burn_notOwner() public {
+        vm.prank(alice);
+        uint256 aliceToken = provider.registerProvider(URL_1);
+
+        // Only the token owner may burn; any other caller is rejected.
+        vm.expectRevert(ServiceProvider.NotTokenOwner.selector);
+        vm.prank(bob);
+        provider.burn(aliceToken);
+    }
+
+    function test_burn_allowsReregistration() public {
+        vm.prank(alice);
+        uint256 originalId = provider.registerProvider(URL_1);
+        vm.prank(alice);
+        provider.burn(originalId);
+
+        // After a burn the URL is free again — bob can claim it, and the
+        // deterministic hashing scheme reproduces the exact same token id.
+        vm.prank(bob);
+        uint256 reissuedId = provider.registerProvider(URL_1);
+
+        assertEq(reissuedId, originalId);
+        assertEq(provider.ownerOf(reissuedId), bob);
+    }
+
+ // ==============================
+ // Fuzz Tests
+ // ==============================
+
+    function testFuzz_registerProvider_anyValidURL(string calldata url) public {
+        // Only the empty string is invalid; everything else must register.
+        vm.assume(bytes(url).length > 0);
+
+        vm.prank(alice);
+        uint256 tokenId = provider.registerProvider(url);
+
+        assertEq(provider.ownerOf(tokenId), alice);
+        assertEq(tokenId, uint256(keccak256(bytes(url))));
+    }
+
+    function testFuzz_burn_onlyOwner(address caller) public {
+        // Any caller other than the registrant (and the zero address, which
+        // cannot be pranked meaningfully) must be rejected.
+        vm.assume(caller != alice && caller != address(0));
+
+        vm.prank(alice);
+        uint256 tokenId = provider.registerProvider(URL_1);
+
+        vm.expectRevert(ServiceProvider.NotTokenOwner.selector);
+        vm.prank(caller);
+        provider.burn(tokenId);
+    }
+}
diff --git a/test/SwarmRegistryL1.t.sol b/test/SwarmRegistryL1.t.sol
new file mode 100644
index 0000000..41ab53e
--- /dev/null
+++ b/test/SwarmRegistryL1.t.sol
@@ -0,0 +1,1026 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.24;
+
+import "forge-std/Test.sol";
+import "../src/swarms/SwarmRegistryL1.sol";
+import "../src/swarms/FleetIdentity.sol";
+import "../src/swarms/ServiceProvider.sol";
+import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol";
+
+// Minimal ERC20 used to fund fleet bonds in tests. Minting is deliberately
+// unrestricted — test-only; never deploy as-is.
+contract MockBondTokenL1 is ERC20 {
+    constructor() ERC20("Mock Bond", "MBOND") {}
+
+    // Mint `amount` tokens to `to` with no access control.
+    function mint(address to, uint256 amount) external {
+        _mint(to, amount);
+    }
+}
+
+contract SwarmRegistryL1Test is Test {
+    // System under test plus its two on-chain dependencies.
+    SwarmRegistryL1 swarmRegistry;
+    FleetIdentity fleetContract;
+    ServiceProvider providerContract;
+    MockBondTokenL1 bondToken;
+
+    // Distinct actors: the fleet NFT owner, the provider NFT owner, and an
+    // unrelated third party used for authorization / permissionless tests.
+    address fleetOwner = address(0x1);
+    address providerOwner = address(0x2);
+    address caller = address(0x3);
+
+    // Bond amount required by FleetIdentity (passed to its constructor).
+    uint256 constant FLEET_BOND = 100 ether;
+
+    // Region constants for fleet registration
+    uint16 constant US = 840; // 840 is the ISO 3166-1 numeric code for the United States
+    uint16 constant ADMIN_CA = 6; // California
+
+    // Local mirrors of SwarmRegistryL1's events so vm.expectEmit can match
+    // them; signatures must stay in sync with the contract.
+    event SwarmRegistered(uint256 indexed swarmId, uint256 indexed fleetId, uint256 indexed providerId, address owner);
+    event SwarmStatusChanged(uint256 indexed swarmId, SwarmRegistryL1.SwarmStatus status);
+    event SwarmFilterUpdated(uint256 indexed swarmId, address indexed owner, uint32 filterSize);
+    event SwarmProviderUpdated(uint256 indexed swarmId, uint256 indexed oldProvider, uint256 indexed newProvider);
+    event SwarmDeleted(uint256 indexed swarmId, uint256 indexed fleetId, address indexed owner);
+    event SwarmPurged(uint256 indexed swarmId, uint256 indexed fleetId, address indexed purgedBy);
+
+    // Deploys the bond token, fleet, provider and registry contracts, then
+    // funds the fleet owner and pre-approves the fleet contract so bond
+    // transfers succeed in every test.
+    function setUp() public {
+        bondToken = new MockBondTokenL1();
+        fleetContract = new FleetIdentity(address(bondToken), FLEET_BOND);
+        providerContract = new ServiceProvider();
+        swarmRegistry = new SwarmRegistryL1(address(fleetContract), address(providerContract));
+
+        // Fund fleet owner and approve
+        bondToken.mint(fleetOwner, 1_000_000 ether);
+        vm.prank(fleetOwner);
+        bondToken.approve(address(fleetContract), type(uint256).max);
+    }
+
+ // ==============================
+ // Helpers
+ // ==============================
+
+    // Registers a fleet as `owner`, deriving a deterministic 16-byte fleet
+    // key from `seed`. Uses the US/California region constants; the meaning
+    // of the trailing 0 argument is defined by FleetIdentity — TODO confirm.
+    function _registerFleet(address owner, bytes memory seed) internal returns (uint256) {
+        vm.prank(owner);
+        return fleetContract.registerFleetLocal(bytes16(keccak256(seed)), US, ADMIN_CA, 0);
+    }
+
+    // Registers a service-provider NFT as `owner`, keyed by `url`.
+    function _registerProvider(address owner, string memory url) internal returns (uint256) {
+        vm.prank(owner);
+        return providerContract.registerProvider(url);
+    }
+
+    // Registers a swarm as `owner` and returns the swarm ID assigned by the
+    // registry (deterministic — see test_registerSwarm_deterministicId).
+    function _registerSwarm(
+        address owner,
+        uint256 fleetId,
+        uint256 providerId,
+        bytes memory filter,
+        uint8 fpSize,
+        SwarmRegistryL1.TagType tagType
+    ) internal returns (uint256) {
+        vm.prank(owner);
+        return swarmRegistry.registerSwarm(fleetId, providerId, filter, fpSize, tagType);
+    }
+
+    // Recomputes, test-side, the three slot indices (h1..h3) and the
+    // fingerprint that the membership filter is expected to derive from a
+    // tag ID. `m` is the number of fpSize-bit slots in the filter. Slot
+    // indices come from bits 0-31, 32-63 and 64-95 of keccak256(tagId)
+    // (each reduced mod m), the fingerprint from bits 96..96+fpSize-1.
+    // NOTE(review): assumed to mirror SwarmRegistryL1.checkMembership's
+    // hashing scheme — confirm against the contract source.
+    function getExpectedValues(bytes memory tagId, uint256 m, uint8 fpSize)
+        public
+        pure
+        returns (uint32 h1, uint32 h2, uint32 h3, uint256 fp)
+    {
+        bytes32 h = keccak256(tagId);
+        h1 = uint32(uint256(h)) % uint32(m);
+        h2 = uint32(uint256(h) >> 32) % uint32(m);
+        h3 = uint32(uint256(h) >> 64) % uint32(m);
+        uint256 fpMask = (1 << fpSize) - 1;
+        fp = (uint256(h) >> 96) & fpMask;
+    }
+
+    // Writes `value` big-endian into the 16-bit slot `slotIndex` of `data`.
+    // Solidity's bytes indexing bounds-checks, so an out-of-range slot reverts.
+    function _write16Bit(bytes memory data, uint256 slotIndex, uint16 value) internal pure {
+        uint256 bitOffset = slotIndex * 16;
+        uint256 byteOffset = bitOffset / 8;
+        data[byteOffset] = bytes1(uint8(value >> 8));
+        data[byteOffset + 1] = bytes1(uint8(value));
+    }
+
+    // Writes `value` into the 8-bit slot `slotIndex` (one slot per byte).
+    function _write8Bit(bytes memory data, uint256 slotIndex, uint8 value) internal pure {
+        data[slotIndex] = bytes1(value);
+    }
+
+ // ==============================
+ // Constructor
+ // ==============================
+
+    // The two dependency addresses are stored exactly as passed in.
+    function test_constructor_setsImmutables() public view {
+        assertEq(address(swarmRegistry.FLEET_CONTRACT()), address(fleetContract));
+        assertEq(address(swarmRegistry.PROVIDER_CONTRACT()), address(providerContract));
+    }
+
+    // A zero fleet-contract address is rejected with InvalidSwarmData.
+    function test_RevertIf_constructor_zeroFleetAddress() public {
+        vm.expectRevert(SwarmRegistryL1.InvalidSwarmData.selector);
+        new SwarmRegistryL1(address(0), address(providerContract));
+    }
+
+    // A zero provider-contract address is rejected with InvalidSwarmData.
+    function test_RevertIf_constructor_zeroProviderAddress() public {
+        vm.expectRevert(SwarmRegistryL1.InvalidSwarmData.selector);
+        new SwarmRegistryL1(address(fleetContract), address(0));
+    }
+
+    // Both addresses zero reverts as well.
+    function test_RevertIf_constructor_bothZero() public {
+        vm.expectRevert(SwarmRegistryL1.InvalidSwarmData.selector);
+        new SwarmRegistryL1(address(0), address(0));
+    }
+
+ // ==============================
+ // registerSwarm — happy path
+ // ==============================
+
+ function test_registerSwarm_basicFlow() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "my-fleet");
+ uint256 providerId = _registerProvider(providerOwner, "https://api.example.com");
+
+ uint256 swarmId = _registerSwarm(
+ fleetOwner, fleetId, providerId, new bytes(100), 16, SwarmRegistryL1.TagType.IBEACON_INCLUDES_MAC
+ );
+
+ // Swarm ID is deterministic hash of (fleetId, providerId, filter)
+ uint256 expectedId = swarmRegistry.computeSwarmId(fleetId, providerId, new bytes(100));
+ assertEq(swarmId, expectedId);
+ }
+
+ function test_registerSwarm_storesMetadataCorrectly() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.VENDOR_ID);
+
+ (
+ uint256 storedFleetId,
+ uint256 storedProviderId,
+ address filterPointer,
+ uint8 storedFpSize,
+ SwarmRegistryL1.TagType storedTagType,
+ SwarmRegistryL1.SwarmStatus storedStatus
+ ) = swarmRegistry.swarms(swarmId);
+
+ assertEq(storedFleetId, fleetId);
+ assertEq(storedProviderId, providerId);
+ assertTrue(filterPointer != address(0));
+ assertEq(storedFpSize, 8);
+ assertEq(uint8(storedTagType), uint8(SwarmRegistryL1.TagType.VENDOR_ID));
+ assertEq(uint8(storedStatus), uint8(SwarmRegistryL1.SwarmStatus.REGISTERED));
+ }
+
+ function test_registerSwarm_deterministicId() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+
+ bytes memory filter = new bytes(32);
+
+ uint256 expectedId = swarmRegistry.computeSwarmId(fleetId, providerId, filter);
+
+ uint256 swarmId = _registerSwarm(fleetOwner, fleetId, providerId, filter, 8, SwarmRegistryL1.TagType.GENERIC);
+ assertEq(swarmId, expectedId);
+ }
+
+ function test_RevertIf_registerSwarm_duplicateSwarm() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(32), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ vm.prank(fleetOwner);
+ vm.expectRevert(SwarmRegistryL1.SwarmAlreadyExists.selector);
+ swarmRegistry.registerSwarm(fleetId, providerId, new bytes(32), 8, SwarmRegistryL1.TagType.GENERIC);
+ }
+
+ function test_registerSwarm_emitsSwarmRegistered() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+
+ bytes memory filter = new bytes(50);
+ uint256 expectedId = swarmRegistry.computeSwarmId(fleetId, providerId, filter);
+
+ vm.expectEmit(true, true, true, true);
+ emit SwarmRegistered(expectedId, fleetId, providerId, fleetOwner);
+
+ _registerSwarm(fleetOwner, fleetId, providerId, filter, 16, SwarmRegistryL1.TagType.GENERIC);
+ }
+
+ function test_registerSwarm_linksFleetSwarms() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId1 = _registerProvider(providerOwner, "url1");
+ uint256 providerId2 = _registerProvider(providerOwner, "url2");
+
+ uint256 swarmId1 =
+ _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+ uint256 swarmId2 =
+ _registerSwarm(fleetOwner, fleetId, providerId2, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ assertEq(swarmRegistry.fleetSwarms(fleetId, 0), swarmId1);
+ assertEq(swarmRegistry.fleetSwarms(fleetId, 1), swarmId2);
+ }
+
+ function test_registerSwarm_allTagTypes() public {
+ uint256 fleetId1 = _registerFleet(fleetOwner, "f1");
+ uint256 fleetId2 = _registerFleet(fleetOwner, "f2");
+ uint256 fleetId3 = _registerFleet(fleetOwner, "f3");
+ uint256 fleetId4 = _registerFleet(fleetOwner, "f4");
+ uint256 providerId = _registerProvider(providerOwner, "url");
+
+ uint256 s1 = _registerSwarm(
+ fleetOwner, fleetId1, providerId, new bytes(32), 8, SwarmRegistryL1.TagType.IBEACON_PAYLOAD_ONLY
+ );
+ uint256 s2 = _registerSwarm(
+ fleetOwner, fleetId2, providerId, new bytes(32), 8, SwarmRegistryL1.TagType.IBEACON_INCLUDES_MAC
+ );
+ uint256 s3 =
+ _registerSwarm(fleetOwner, fleetId3, providerId, new bytes(32), 8, SwarmRegistryL1.TagType.VENDOR_ID);
+ uint256 s4 = _registerSwarm(fleetOwner, fleetId4, providerId, new bytes(32), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ (,,,, SwarmRegistryL1.TagType t1,) = swarmRegistry.swarms(s1);
+ (,,,, SwarmRegistryL1.TagType t2,) = swarmRegistry.swarms(s2);
+ (,,,, SwarmRegistryL1.TagType t3,) = swarmRegistry.swarms(s3);
+ (,,,, SwarmRegistryL1.TagType t4,) = swarmRegistry.swarms(s4);
+
+ assertEq(uint8(t1), uint8(SwarmRegistryL1.TagType.IBEACON_PAYLOAD_ONLY));
+ assertEq(uint8(t2), uint8(SwarmRegistryL1.TagType.IBEACON_INCLUDES_MAC));
+ assertEq(uint8(t3), uint8(SwarmRegistryL1.TagType.VENDOR_ID));
+ assertEq(uint8(t4), uint8(SwarmRegistryL1.TagType.GENERIC));
+ }
+
+ // ==============================
+ // registerSwarm — reverts
+ // ==============================
+
+ function test_RevertIf_registerSwarm_notFleetOwner() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "my-fleet");
+
+ vm.prank(caller);
+ vm.expectRevert(SwarmRegistryL1.NotFleetOwner.selector);
+ swarmRegistry.registerSwarm(fleetId, 1, new bytes(10), 16, SwarmRegistryL1.TagType.GENERIC);
+ }
+
+ function test_RevertIf_registerSwarm_fingerprintSizeZero() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+
+ vm.prank(fleetOwner);
+ vm.expectRevert(SwarmRegistryL1.InvalidFingerprintSize.selector);
+ swarmRegistry.registerSwarm(fleetId, providerId, new bytes(32), 0, SwarmRegistryL1.TagType.GENERIC);
+ }
+
+ function test_RevertIf_registerSwarm_fingerprintSizeExceedsMax() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+
+ vm.prank(fleetOwner);
+ vm.expectRevert(SwarmRegistryL1.InvalidFingerprintSize.selector);
+ swarmRegistry.registerSwarm(fleetId, providerId, new bytes(32), 17, SwarmRegistryL1.TagType.GENERIC);
+ }
+
+ function test_RevertIf_registerSwarm_emptyFilter() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+
+ vm.prank(fleetOwner);
+ vm.expectRevert(SwarmRegistryL1.InvalidFilterSize.selector);
+ swarmRegistry.registerSwarm(fleetId, providerId, new bytes(0), 8, SwarmRegistryL1.TagType.GENERIC);
+ }
+
+ function test_RevertIf_registerSwarm_filterTooLarge() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+
+ vm.prank(fleetOwner);
+ vm.expectRevert(SwarmRegistryL1.InvalidFilterSize.selector);
+ swarmRegistry.registerSwarm(fleetId, providerId, new bytes(24577), 8, SwarmRegistryL1.TagType.GENERIC);
+ }
+
+ function test_registerSwarm_maxFingerprintSize() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+
+ // fpSize=16 is MAX_FINGERPRINT_SIZE, should succeed
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(100), 16, SwarmRegistryL1.TagType.GENERIC);
+ assertTrue(swarmId != 0);
+ }
+
+ function test_registerSwarm_maxFilterSize() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+
+ // Exactly 24576 bytes should succeed
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(24576), 8, SwarmRegistryL1.TagType.GENERIC);
+ assertTrue(swarmId != 0);
+ }
+
+ // ==============================
+ // acceptSwarm / rejectSwarm
+ // ==============================
+
+ function test_acceptSwarm_setsStatusAndEmits() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ vm.expectEmit(true, true, true, true);
+ emit SwarmStatusChanged(swarmId, SwarmRegistryL1.SwarmStatus.ACCEPTED);
+
+ vm.prank(providerOwner);
+ swarmRegistry.acceptSwarm(swarmId);
+
+ (,,,,, SwarmRegistryL1.SwarmStatus status) = swarmRegistry.swarms(swarmId);
+ assertEq(uint8(status), uint8(SwarmRegistryL1.SwarmStatus.ACCEPTED));
+ }
+
+ function test_rejectSwarm_setsStatusAndEmits() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ vm.expectEmit(true, true, true, true);
+ emit SwarmStatusChanged(swarmId, SwarmRegistryL1.SwarmStatus.REJECTED);
+
+ vm.prank(providerOwner);
+ swarmRegistry.rejectSwarm(swarmId);
+
+ (,,,,, SwarmRegistryL1.SwarmStatus status) = swarmRegistry.swarms(swarmId);
+ assertEq(uint8(status), uint8(SwarmRegistryL1.SwarmStatus.REJECTED));
+ }
+
+ function test_RevertIf_acceptSwarm_notProviderOwner() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ vm.prank(caller);
+ vm.expectRevert(SwarmRegistryL1.NotProviderOwner.selector);
+ swarmRegistry.acceptSwarm(swarmId);
+ }
+
+ function test_RevertIf_rejectSwarm_notProviderOwner() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ vm.prank(fleetOwner); // fleet owner != provider owner
+ vm.expectRevert(SwarmRegistryL1.NotProviderOwner.selector);
+ swarmRegistry.rejectSwarm(swarmId);
+ }
+
+ function test_acceptSwarm_afterReject() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ vm.prank(providerOwner);
+ swarmRegistry.rejectSwarm(swarmId);
+
+ // Provider changes mind
+ vm.prank(providerOwner);
+ swarmRegistry.acceptSwarm(swarmId);
+
+ (,,,,, SwarmRegistryL1.SwarmStatus status) = swarmRegistry.swarms(swarmId);
+ assertEq(uint8(status), uint8(SwarmRegistryL1.SwarmStatus.ACCEPTED));
+ }
+
+ // ==============================
+ // checkMembership — XOR logic
+ // ==============================
+
+    // Construction: write the expected fingerprint into slot h1 only and
+    // leave h2/h3 zero, so (per the XOR scheme this test targets) the XOR
+    // of the three slots equals the fingerprint and membership passes.
+    // NOTE(review): the XOR-of-three-slots contract behavior is inferred
+    // from the test name and construction — confirm against checkMembership.
+    function test_checkMembership_XORLogic16Bit() public {
+        uint256 fleetId = _registerFleet(fleetOwner, "f1");
+        uint256 providerId = _registerProvider(providerOwner, "u1");
+
+        bytes memory tagId = hex"1122334455";
+        uint8 fpSize = 16;
+        uint256 dataLen = 100;
+        uint256 m = (dataLen * 8) / fpSize; // 50 slots
+
+        (uint32 h1, uint32 h2, uint32 h3, uint256 expectedFp) = getExpectedValues(tagId, m, fpSize);
+
+        // Skip if collision (extremely unlikely with 50 slots)
+        // NOTE(review): inputs are fixed, so this branch is deterministic —
+        // it either always or never fires for this tagId/m pair.
+        if (h1 == h2 || h1 == h3 || h2 == h3) {
+            return;
+        }
+
+        bytes memory filter = new bytes(dataLen);
+        _write16Bit(filter, h1, uint16(expectedFp));
+
+        uint256 swarmId =
+            _registerSwarm(fleetOwner, fleetId, providerId, filter, fpSize, SwarmRegistryL1.TagType.GENERIC);
+
+        // Positive check
+        assertTrue(swarmRegistry.checkMembership(swarmId, keccak256(tagId)), "Valid tag should pass");
+
+        // Negative check
+        assertFalse(swarmRegistry.checkMembership(swarmId, keccak256(hex"999999")), "Invalid tag should fail");
+    }
+
+    // Same XOR construction with 8-bit fingerprints / one slot per byte.
+    function test_checkMembership_XORLogic8Bit() public {
+        uint256 fleetId = _registerFleet(fleetOwner, "f1");
+        uint256 providerId = _registerProvider(providerOwner, "u1");
+
+        bytes memory tagId = hex"AABBCCDD";
+        uint8 fpSize = 8;
+        // SSTORE2 prepends 0x00 STOP byte, so on-chain:
+        // extcodesize = rawLen + 1, dataLen = extcodesize - 1 = rawLen
+        // But SSTORE2.read offsets reads by +1 (skips STOP byte), so
+        // the data bytes read on-chain map 1:1 to the bytes we pass in.
+        // Therefore m = (rawLen * 8) / fpSize and slot indices match directly.
+        uint256 rawLen = 80;
+        uint256 m = (rawLen * 8) / fpSize; // 80
+
+        (uint32 h1, uint32 h2, uint32 h3, uint256 expectedFp) = getExpectedValues(tagId, m, fpSize);
+
+        if (h1 == h2 || h1 == h3 || h2 == h3) {
+            return;
+        }
+
+        bytes memory filter = new bytes(rawLen);
+        _write8Bit(filter, h1, uint8(expectedFp));
+
+        uint256 swarmId =
+            _registerSwarm(fleetOwner, fleetId, providerId, filter, fpSize, SwarmRegistryL1.TagType.GENERIC);
+
+        assertTrue(swarmRegistry.checkMembership(swarmId, keccak256(tagId)), "8-bit valid tag should pass");
+        assertFalse(swarmRegistry.checkMembership(swarmId, keccak256(hex"FFFFFF")), "8-bit invalid tag should fail");
+    }
+
+    // Unknown swarm IDs revert rather than returning false.
+    function test_RevertIf_checkMembership_swarmNotFound() public {
+        vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector);
+        swarmRegistry.checkMembership(999, keccak256("anything"));
+    }
+
+    // Degenerate filter: queries must not revert regardless of outcome.
+    function test_checkMembership_allZeroFilter_returnsConsistent() public {
+        uint256 fleetId = _registerFleet(fleetOwner, "f1");
+        uint256 providerId = _registerProvider(providerOwner, "u1");
+
+        // All-zero filter: f1^f2^f3 = 0^0^0 = 0
+        // Only matches if expectedFp is also 0
+        bytes memory filter = new bytes(64);
+        uint256 swarmId = _registerSwarm(fleetOwner, fleetId, providerId, filter, 16, SwarmRegistryL1.TagType.GENERIC);
+
+        // Some tags will match (those with expectedFp=0), most won't
+        // The point is it doesn't revert
+        swarmRegistry.checkMembership(swarmId, keccak256("test1"));
+        swarmRegistry.checkMembership(swarmId, keccak256("test2"));
+    }
+
+ // ==============================
+ // Multiple swarms per fleet
+ // ==============================
+
+ function test_multipleSwarms_sameFleet() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId1 = _registerProvider(providerOwner, "url1");
+ uint256 providerId2 = _registerProvider(providerOwner, "url2");
+ uint256 providerId3 = _registerProvider(providerOwner, "url3");
+
+ uint256 s1 = _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(32), 8, SwarmRegistryL1.TagType.GENERIC);
+ uint256 s2 =
+ _registerSwarm(fleetOwner, fleetId, providerId2, new bytes(64), 16, SwarmRegistryL1.TagType.VENDOR_ID);
+ uint256 s3 = _registerSwarm(
+ fleetOwner, fleetId, providerId3, new bytes(50), 12, SwarmRegistryL1.TagType.IBEACON_PAYLOAD_ONLY
+ );
+
+ // IDs are distinct hashes
+ assertTrue(s1 != s2 && s2 != s3 && s1 != s3);
+
+ assertEq(swarmRegistry.fleetSwarms(fleetId, 0), s1);
+ assertEq(swarmRegistry.fleetSwarms(fleetId, 1), s2);
+ assertEq(swarmRegistry.fleetSwarms(fleetId, 2), s3);
+ }
+
+ // ==============================
+ // Constants
+ // ==============================
+
+ function test_constants() public view {
+ assertEq(swarmRegistry.MAX_FINGERPRINT_SIZE(), 16);
+ }
+
+ // ==============================
+ // Fuzz
+ // ==============================
+
+    // Every fpSize in [1, 16] is accepted and stored verbatim. Fleet seed
+    // and provider URL are derived from fpSize so each fuzz run registers
+    // fresh, non-colliding identities.
+    function testFuzz_registerSwarm_validFingerprintSizes(uint8 fpSize) public {
+        fpSize = uint8(bound(fpSize, 1, 16));
+
+        uint256 fleetId = _registerFleet(fleetOwner, abi.encodePacked("fleet-", fpSize));
+        uint256 providerId = _registerProvider(providerOwner, string(abi.encodePacked("url-", fpSize)));
+
+        uint256 swarmId =
+            _registerSwarm(fleetOwner, fleetId, providerId, new bytes(64), fpSize, SwarmRegistryL1.TagType.GENERIC);
+
+        (,,, uint8 storedFp,,) = swarmRegistry.swarms(swarmId);
+        assertEq(storedFp, fpSize);
+    }
+
+    // fpSize of 0 or anything above 16 is rejected with InvalidFingerprintSize.
+    function testFuzz_registerSwarm_invalidFingerprintSizes(uint8 fpSize) public {
+        vm.assume(fpSize == 0 || fpSize > 16);
+
+        uint256 fleetId = _registerFleet(fleetOwner, "f1");
+        uint256 providerId = _registerProvider(providerOwner, "url1");
+
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryL1.InvalidFingerprintSize.selector);
+        swarmRegistry.registerSwarm(fleetId, providerId, new bytes(32), fpSize, SwarmRegistryL1.TagType.GENERIC);
+    }
+
+ // ==============================
+ // updateSwarmFilter
+ // ==============================
+
+    // Updating the filter emits SwarmFilterUpdated with the new byte length
+    // and knocks an ACCEPTED swarm back to REGISTERED, forcing the provider
+    // to re-review the changed filter.
+    function test_updateSwarmFilter_updatesFilterAndResetsStatus() public {
+        uint256 fleetId = _registerFleet(fleetOwner, "f1");
+        uint256 providerId = _registerProvider(providerOwner, "url1");
+        uint256 swarmId =
+            _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        // Provider accepts
+        vm.prank(providerOwner);
+        swarmRegistry.acceptSwarm(swarmId);
+
+        // Fleet owner updates filter
+        bytes memory newFilter = new bytes(100);
+        vm.expectEmit(true, true, true, true);
+        emit SwarmFilterUpdated(swarmId, fleetOwner, 100);
+
+        vm.prank(fleetOwner);
+        swarmRegistry.updateSwarmFilter(swarmId, newFilter);
+
+        // Status should be reset to REGISTERED
+        (,,,,, SwarmRegistryL1.SwarmStatus status) = swarmRegistry.swarms(swarmId);
+        assertEq(uint8(status), uint8(SwarmRegistryL1.SwarmStatus.REGISTERED));
+    }
+
+ function test_updateSwarmFilter_changesFilterPointer() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ (,, address oldPointer,,,) = swarmRegistry.swarms(swarmId);
+
+ bytes memory newFilter = new bytes(100);
+ vm.prank(fleetOwner);
+ swarmRegistry.updateSwarmFilter(swarmId, newFilter);
+
+ (,, address newPointer,,,) = swarmRegistry.swarms(swarmId);
+ assertTrue(newPointer != oldPointer);
+ assertTrue(newPointer != address(0));
+ }
+
+ function test_RevertIf_updateSwarmFilter_swarmNotFound() public {
+ vm.prank(fleetOwner);
+ vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector);
+ swarmRegistry.updateSwarmFilter(999, new bytes(50));
+ }
+
+ function test_RevertIf_updateSwarmFilter_notFleetOwner() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ vm.prank(caller);
+ vm.expectRevert(SwarmRegistryL1.NotFleetOwner.selector);
+ swarmRegistry.updateSwarmFilter(swarmId, new bytes(100));
+ }
+
+ function test_RevertIf_updateSwarmFilter_emptyFilter() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ vm.prank(fleetOwner);
+ vm.expectRevert(SwarmRegistryL1.InvalidFilterSize.selector);
+ swarmRegistry.updateSwarmFilter(swarmId, new bytes(0));
+ }
+
+ function test_RevertIf_updateSwarmFilter_filterTooLarge() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ vm.prank(fleetOwner);
+ vm.expectRevert(SwarmRegistryL1.InvalidFilterSize.selector);
+ swarmRegistry.updateSwarmFilter(swarmId, new bytes(24577));
+ }
+
+ // ==============================
+ // updateSwarmProvider
+ // ==============================
+
+    // Swapping the provider emits SwarmProviderUpdated(old, new), stores the
+    // new provider ID, and resets an ACCEPTED swarm to REGISTERED so the new
+    // provider must accept it.
+    function test_updateSwarmProvider_updatesProviderAndResetsStatus() public {
+        uint256 fleetId = _registerFleet(fleetOwner, "f1");
+        uint256 providerId1 = _registerProvider(providerOwner, "url1");
+        uint256 providerId2 = _registerProvider(providerOwner, "url2");
+
+        uint256 swarmId =
+            _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        // Provider accepts
+        vm.prank(providerOwner);
+        swarmRegistry.acceptSwarm(swarmId);
+
+        // Fleet owner updates provider
+        vm.expectEmit(true, true, true, true);
+        emit SwarmProviderUpdated(swarmId, providerId1, providerId2);
+
+        vm.prank(fleetOwner);
+        swarmRegistry.updateSwarmProvider(swarmId, providerId2);
+
+        // Check new provider and status reset
+        (, uint256 newProviderId,,,, SwarmRegistryL1.SwarmStatus status) = swarmRegistry.swarms(swarmId);
+        assertEq(newProviderId, providerId2);
+        assertEq(uint8(status), uint8(SwarmRegistryL1.SwarmStatus.REGISTERED));
+    }
+
+ function test_RevertIf_updateSwarmProvider_swarmNotFound() public {
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+
+ vm.prank(fleetOwner);
+ vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector);
+ swarmRegistry.updateSwarmProvider(999, providerId);
+ }
+
+ function test_RevertIf_updateSwarmProvider_notFleetOwner() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId1 = _registerProvider(providerOwner, "url1");
+ uint256 providerId2 = _registerProvider(providerOwner, "url2");
+
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ vm.prank(caller);
+ vm.expectRevert(SwarmRegistryL1.NotFleetOwner.selector);
+ swarmRegistry.updateSwarmProvider(swarmId, providerId2);
+ }
+
+ function test_RevertIf_updateSwarmProvider_providerDoesNotExist() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ vm.prank(fleetOwner);
+ // ERC721 reverts before our custom error is reached
+ vm.expectRevert();
+ swarmRegistry.updateSwarmProvider(swarmId, 99999);
+ }
+
+ // ==============================
+ // deleteSwarm
+ // ==============================
+
+    // deleteSwarm emits SwarmDeleted and zeroes the swarm record; the filter
+    // pointer reverting to address(0) is used as the "record cleared" probe.
+    function test_deleteSwarm_removesSwarmAndEmits() public {
+        uint256 fleetId = _registerFleet(fleetOwner, "f1");
+        uint256 providerId = _registerProvider(providerOwner, "url1");
+        uint256 swarmId =
+            _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        vm.expectEmit(true, true, true, true);
+        emit SwarmDeleted(swarmId, fleetId, fleetOwner);
+
+        vm.prank(fleetOwner);
+        swarmRegistry.deleteSwarm(swarmId);
+
+        // Swarm should be zeroed
+        (,, address pointer,,,) = swarmRegistry.swarms(swarmId);
+        assertEq(pointer, address(0));
+    }
+
+ function test_deleteSwarm_removesFromFleetSwarms() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId1 = _registerProvider(providerOwner, "url1");
+ uint256 providerId2 = _registerProvider(providerOwner, "url2");
+
+ uint256 swarm1 =
+ _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+ uint256 swarm2 =
+ _registerSwarm(fleetOwner, fleetId, providerId2, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ // Delete first swarm
+ vm.prank(fleetOwner);
+ swarmRegistry.deleteSwarm(swarm1);
+
+ // Only swarm2 should remain in fleetSwarms
+ assertEq(swarmRegistry.fleetSwarms(fleetId, 0), swarm2);
+ vm.expectRevert();
+ swarmRegistry.fleetSwarms(fleetId, 1); // Should be out of bounds
+ }
+
+ function test_deleteSwarm_swapAndPop() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId1 = _registerProvider(providerOwner, "url1");
+ uint256 providerId2 = _registerProvider(providerOwner, "url2");
+ uint256 providerId3 = _registerProvider(providerOwner, "url3");
+
+ uint256 swarm1 =
+ _registerSwarm(fleetOwner, fleetId, providerId1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+ uint256 swarm2 =
+ _registerSwarm(fleetOwner, fleetId, providerId2, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+ uint256 swarm3 =
+ _registerSwarm(fleetOwner, fleetId, providerId3, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ // Delete middle swarm
+ vm.prank(fleetOwner);
+ swarmRegistry.deleteSwarm(swarm2);
+
+ // swarm3 should be swapped to index 1
+ assertEq(swarmRegistry.fleetSwarms(fleetId, 0), swarm1);
+ assertEq(swarmRegistry.fleetSwarms(fleetId, 1), swarm3);
+ vm.expectRevert();
+ swarmRegistry.fleetSwarms(fleetId, 2); // Should be out of bounds
+ }
+
+ function test_RevertIf_deleteSwarm_swarmNotFound() public {
+ vm.prank(fleetOwner);
+ vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector);
+ swarmRegistry.deleteSwarm(999);
+ }
+
+ function test_RevertIf_deleteSwarm_notFleetOwner() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ vm.prank(caller);
+ vm.expectRevert(SwarmRegistryL1.NotFleetOwner.selector);
+ swarmRegistry.deleteSwarm(swarmId);
+ }
+
+ function test_deleteSwarm_afterUpdate() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ // Update then delete
+ vm.prank(fleetOwner);
+ swarmRegistry.updateSwarmFilter(swarmId, new bytes(100));
+
+ vm.prank(fleetOwner);
+ swarmRegistry.deleteSwarm(swarmId);
+
+ (,, address pointer,,,) = swarmRegistry.swarms(swarmId);
+ assertEq(pointer, address(0));
+ }
+
+    // Verifies the swarmId -> array-index bookkeeping across a swap-and-pop
+    // delete: the last element (s3) takes the deleted element's slot.
+    function test_deleteSwarm_updatesSwarmIndexInFleet() public {
+        uint256 fleetId = _registerFleet(fleetOwner, "f1");
+        uint256 p1 = _registerProvider(providerOwner, "url1");
+        uint256 p2 = _registerProvider(providerOwner, "url2");
+        uint256 p3 = _registerProvider(providerOwner, "url3");
+
+        uint256 s1 = _registerSwarm(fleetOwner, fleetId, p1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+        uint256 s2 = _registerSwarm(fleetOwner, fleetId, p2, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+        uint256 s3 = _registerSwarm(fleetOwner, fleetId, p3, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        // Verify initial indices
+        assertEq(swarmRegistry.swarmIndexInFleet(s1), 0);
+        assertEq(swarmRegistry.swarmIndexInFleet(s2), 1);
+        assertEq(swarmRegistry.swarmIndexInFleet(s3), 2);
+
+        // Delete s1 — s3 should be swapped to index 0
+        vm.prank(fleetOwner);
+        swarmRegistry.deleteSwarm(s1);
+
+        assertEq(swarmRegistry.swarmIndexInFleet(s3), 0);
+        assertEq(swarmRegistry.swarmIndexInFleet(s2), 1);
+        // NOTE(review): this assertion is vacuous — s1's pre-delete index was
+        // already 0, so it cannot distinguish a deleted mapping entry from a
+        // stale one. Deleting s2 or s3 instead would make the check meaningful.
+        assertEq(swarmRegistry.swarmIndexInFleet(s1), 0); // deleted, reset to 0
+    }
+
+ // ==============================
+ // isSwarmValid
+ // ==============================
+
+ function test_isSwarmValid_bothValid() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ (bool fleetValid, bool providerValid) = swarmRegistry.isSwarmValid(swarmId);
+ assertTrue(fleetValid);
+ assertTrue(providerValid);
+ }
+
+ function test_isSwarmValid_providerBurned() public {
+ uint256 fleetId = _registerFleet(fleetOwner, "f1");
+ uint256 providerId = _registerProvider(providerOwner, "url1");
+ uint256 swarmId =
+ _registerSwarm(fleetOwner, fleetId, providerId, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+ // Burn provider
+ vm.prank(providerOwner);
+ providerContract.burn(providerId);
+
+ (bool fleetValid, bool providerValid) = swarmRegistry.isSwarmValid(swarmId);
+ assertTrue(fleetValid);
+ assertFalse(providerValid);
+ }
+
+    // Burning the fleet NFT flips only the fleet half of the validity pair.
+    function test_isSwarmValid_fleetBurned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        vm.prank(fleetOwner);
+        fleetContract.burn(fid);
+
+        (bool fleetOk, bool providerOk) = swarmRegistry.isSwarmValid(sid);
+        assertFalse(fleetOk);
+        assertTrue(providerOk);
+    }
+
+    // With both backing NFTs burned, both validity flags go false.
+    function test_isSwarmValid_bothBurned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        vm.prank(fleetOwner);
+        fleetContract.burn(fid);
+        vm.prank(providerOwner);
+        providerContract.burn(pid);
+
+        (bool fleetOk, bool providerOk) = swarmRegistry.isSwarmValid(sid);
+        assertFalse(fleetOk);
+        assertFalse(providerOk);
+    }
+
+    // Validity queries on an unknown swarm id revert with SwarmNotFound.
+    function test_RevertIf_isSwarmValid_swarmNotFound() public {
+        vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector);
+        swarmRegistry.isSwarmValid(999);
+    }
+
+ // ==============================
+ // purgeOrphanedSwarm
+ // ==============================
+
+    // Once the provider NFT is burned, any address may purge the orphaned swarm.
+    function test_purgeOrphanedSwarm_providerBurned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        vm.prank(providerOwner);
+        providerContract.burn(pid);
+
+        // An unrelated caller performs the purge and is credited in the event.
+        vm.expectEmit(true, true, true, true);
+        emit SwarmPurged(sid, fid, caller);
+
+        vm.prank(caller);
+        swarmRegistry.purgeOrphanedSwarm(sid);
+
+        // The swarm's storage pointer must be cleared afterwards.
+        (,, address pointer,,,) = swarmRegistry.swarms(sid);
+        assertEq(pointer, address(0));
+    }
+
+    // A swarm orphaned by a burned fleet can likewise be purged by anyone.
+    function test_purgeOrphanedSwarm_fleetBurned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        vm.prank(fleetOwner);
+        fleetContract.burn(fid);
+
+        vm.prank(caller);
+        swarmRegistry.purgeOrphanedSwarm(sid);
+
+        (,, address pointer,,,) = swarmRegistry.swarms(sid);
+        assertEq(pointer, address(0));
+    }
+
+    // Purging must swap-remove the swarm from the fleet's swarm array.
+    function test_purgeOrphanedSwarm_removesFromFleetSwarms() public {
+        uint256 fleetId = _registerFleet(fleetOwner, "f1");
+        uint256 p1 = _registerProvider(providerOwner, "url1");
+        uint256 p2 = _registerProvider(providerOwner, "url2");
+
+        uint256 s1 = _registerSwarm(fleetOwner, fleetId, p1, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+        uint256 s2 = _registerSwarm(fleetOwner, fleetId, p2, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        // Orphan s1 by burning its provider, then purge it.
+        vm.prank(providerOwner);
+        providerContract.burn(p1);
+
+        vm.prank(caller);
+        swarmRegistry.purgeOrphanedSwarm(s1);
+
+        // s2 (the last element) is swapped into index 0; index 1 is gone.
+        assertEq(swarmRegistry.fleetSwarms(fleetId, 0), s2);
+        // Pin the exact failure: array out-of-bounds panic (0x32), not just any
+        // revert — a bare expectRevert() would also pass on a wrong-reason revert.
+        vm.expectRevert(stdError.indexOOBError);
+        swarmRegistry.fleetSwarms(fleetId, 1);
+    }
+
+    // Purging an unknown swarm id reverts with SwarmNotFound.
+    function test_RevertIf_purgeOrphanedSwarm_swarmNotFound() public {
+        vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector);
+        swarmRegistry.purgeOrphanedSwarm(999);
+    }
+
+    // Purge is refused while both backing NFTs are intact.
+    function test_RevertIf_purgeOrphanedSwarm_swarmNotOrphaned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        vm.expectRevert(SwarmRegistryL1.SwarmNotOrphaned.selector);
+        swarmRegistry.purgeOrphanedSwarm(sid);
+    }
+
+ // ==============================
+ // Orphan guards on accept/reject/checkMembership
+ // ==============================
+
+    // acceptSwarm is blocked once the backing provider NFT has been burned.
+    function test_RevertIf_acceptSwarm_orphaned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        vm.prank(providerOwner);
+        providerContract.burn(pid);
+
+        vm.prank(providerOwner);
+        vm.expectRevert(SwarmRegistryL1.SwarmOrphaned.selector);
+        swarmRegistry.acceptSwarm(sid);
+    }
+
+    // rejectSwarm is blocked once the backing fleet NFT has been burned.
+    function test_RevertIf_rejectSwarm_orphaned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        vm.prank(fleetOwner);
+        fleetContract.burn(fid);
+
+        vm.prank(providerOwner);
+        vm.expectRevert(SwarmRegistryL1.SwarmOrphaned.selector);
+        swarmRegistry.rejectSwarm(sid);
+    }
+
+    // Membership checks refuse to run against an orphaned swarm.
+    function test_RevertIf_checkMembership_orphaned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        vm.prank(providerOwner);
+        providerContract.burn(pid);
+
+        vm.expectRevert(SwarmRegistryL1.SwarmOrphaned.selector);
+        swarmRegistry.checkMembership(sid, keccak256("test"));
+    }
+
+    // acceptSwarm is also blocked when the fleet (rather than provider) is gone.
+    function test_RevertIf_acceptSwarm_fleetBurned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        vm.prank(fleetOwner);
+        fleetContract.burn(fid);
+
+        vm.prank(providerOwner);
+        vm.expectRevert(SwarmRegistryL1.SwarmOrphaned.selector);
+        swarmRegistry.acceptSwarm(sid);
+    }
+
+    // After a purge the swarm id is unknown, so accepting it reverts NotFound.
+    function test_purge_thenAcceptReverts() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryL1.TagType.GENERIC);
+
+        vm.prank(providerOwner);
+        providerContract.burn(pid);
+
+        vm.prank(caller);
+        swarmRegistry.purgeOrphanedSwarm(sid);
+
+        vm.prank(providerOwner);
+        vm.expectRevert(SwarmRegistryL1.SwarmNotFound.selector);
+        swarmRegistry.acceptSwarm(sid);
+    }
+}
diff --git a/test/SwarmRegistryUniversal.t.sol b/test/SwarmRegistryUniversal.t.sol
new file mode 100644
index 0000000..57321c6
--- /dev/null
+++ b/test/SwarmRegistryUniversal.t.sol
@@ -0,0 +1,1162 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.24;
+
+import "forge-std/Test.sol";
+import "../src/swarms/SwarmRegistryUniversal.sol";
+import "../src/swarms/FleetIdentity.sol";
+import "../src/swarms/ServiceProvider.sol";
+import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol";
+
+// Minimal ERC20 with unrestricted minting, used only as the fleet bond token
+// in these tests. Never deploy outside a test environment.
+contract MockBondTokenUniv is ERC20 {
+    constructor() ERC20("Mock Bond", "MBOND") {}
+
+    // Open mint: lets tests fund arbitrary accounts with bond tokens.
+    function mint(address to, uint256 amount) external {
+        _mint(to, amount);
+    }
+}
+
+contract SwarmRegistryUniversalTest is Test {
+ SwarmRegistryUniversal swarmRegistry;
+ FleetIdentity fleetContract;
+ ServiceProvider providerContract;
+ MockBondTokenUniv bondToken;
+
+ address fleetOwner = address(0x1);
+ address providerOwner = address(0x2);
+ address caller = address(0x3);
+
+ uint256 constant FLEET_BOND = 100 ether;
+
+ // Region constants for fleet registration
+ uint16 constant US = 840;
+ uint16 constant ADMIN_CA = 6; // California
+
+ event SwarmRegistered(
+ uint256 indexed swarmId, uint256 indexed fleetId, uint256 indexed providerId, address owner, uint32 filterSize
+ );
+ event SwarmStatusChanged(uint256 indexed swarmId, SwarmRegistryUniversal.SwarmStatus status);
+ event SwarmFilterUpdated(uint256 indexed swarmId, address indexed owner, uint32 newFilterSize);
+ event SwarmProviderUpdated(uint256 indexed swarmId, uint256 indexed oldProviderId, uint256 indexed newProviderId);
+ event SwarmDeleted(uint256 indexed swarmId, uint256 indexed fleetId, address indexed owner);
+ event SwarmPurged(uint256 indexed swarmId, uint256 indexed fleetId, address indexed purgedBy);
+
+    // Deploys the bond token, fleet and provider registries, and the swarm
+    // registry under test, then funds the fleet owner and grants the fleet
+    // contract an unlimited allowance so fleet registrations can pull the bond.
+    function setUp() public {
+        bondToken = new MockBondTokenUniv();
+        fleetContract = new FleetIdentity(address(bondToken), FLEET_BOND);
+        providerContract = new ServiceProvider();
+        swarmRegistry = new SwarmRegistryUniversal(address(fleetContract), address(providerContract));
+
+        // Fund fleet owner and approve the fleet contract to pull the bond.
+        bondToken.mint(fleetOwner, 1_000_000 ether);
+        vm.prank(fleetOwner);
+        bondToken.approve(address(fleetContract), type(uint256).max);
+    }
+
+ // ==============================
+ // Helpers
+ // ==============================
+
+    // Registers a US/California fleet as `owner`; the fleet seed is derived
+    // from `seed` so distinct seeds yield distinct fleets.
+    function _registerFleet(address owner, bytes memory seed) internal returns (uint256) {
+        vm.prank(owner);
+        return fleetContract.registerFleetLocal(bytes16(keccak256(seed)), US, ADMIN_CA, 0);
+    }
+
+    // Registers a service provider as `owner` with the given endpoint URL.
+    function _registerProvider(address owner, string memory url) internal returns (uint256) {
+        vm.prank(owner);
+        return providerContract.registerProvider(url);
+    }
+
+    // Thin prank wrapper over swarmRegistry.registerSwarm; `owner` is expected
+    // to be the fleet owner (the registry enforces this).
+    function _registerSwarm(
+        address owner,
+        uint256 fleetId,
+        uint256 providerId,
+        bytes memory filter,
+        uint8 fpSize,
+        SwarmRegistryUniversal.TagType tagType
+    ) internal returns (uint256) {
+        vm.prank(owner);
+        return swarmRegistry.registerSwarm(fleetId, providerId, filter, fpSize, tagType);
+    }
+
+    // Off-chain mirror of the registry's tag hashing: splits keccak256(tagId)
+    // into three slot indices (mod `m`) and an fpSize-bit fingerprint.
+    // NOTE(review): the indices truncate the hash word to 32 bits BEFORE taking
+    // the modulo — this is presumably bit-identical to the contract's scheme
+    // (the XOR membership tests rely on it), so do not "simplify" the
+    // arithmetic without checking the contract side.
+    function getExpectedValues(bytes memory tagId, uint256 m, uint8 fpSize)
+        public
+        pure
+        returns (uint32 h1, uint32 h2, uint32 h3, uint256 fp)
+    {
+        bytes32 h = keccak256(tagId);
+        h1 = uint32(uint256(h)) % uint32(m);
+        h2 = uint32(uint256(h) >> 32) % uint32(m);
+        h3 = uint32(uint256(h) >> 64) % uint32(m);
+        // Fingerprint comes from bits [96, 96 + fpSize) of the hash.
+        uint256 fpMask = (1 << fpSize) - 1;
+        fp = (uint256(h) >> 96) & fpMask;
+    }
+
+    // Writes `value` big-endian into 16-bit slot `slotIndex` of `data`.
+    function _write16Bit(bytes memory data, uint256 slotIndex, uint16 value) internal pure {
+        uint256 off = slotIndex * 2; // each 16-bit slot spans two bytes
+        data[off] = bytes1(uint8(value >> 8));
+        data[off + 1] = bytes1(uint8(value));
+    }
+
+    // Writes `value` into 8-bit slot `slotIndex` (slot index == byte offset).
+    function _write8Bit(bytes memory data, uint256 slotIndex, uint8 value) internal pure {
+        data[slotIndex] = bytes1(value);
+    }
+
+ // ==============================
+ // Constructor
+ // ==============================
+
+    // The constructor wires both registry addresses into public immutables.
+    function test_constructor_setsImmutables() public view {
+        assertEq(address(swarmRegistry.FLEET_CONTRACT()), address(fleetContract));
+        assertEq(address(swarmRegistry.PROVIDER_CONTRACT()), address(providerContract));
+    }
+
+    // Deployment with a zero fleet-registry address must revert.
+    function test_RevertIf_constructor_zeroFleetAddress() public {
+        vm.expectRevert(SwarmRegistryUniversal.InvalidSwarmData.selector);
+        new SwarmRegistryUniversal(address(0), address(providerContract));
+    }
+
+    // Deployment with a zero provider-registry address must revert.
+    function test_RevertIf_constructor_zeroProviderAddress() public {
+        vm.expectRevert(SwarmRegistryUniversal.InvalidSwarmData.selector);
+        new SwarmRegistryUniversal(address(fleetContract), address(0));
+    }
+
+    // Both addresses zero is equally rejected.
+    function test_RevertIf_constructor_bothZero() public {
+        vm.expectRevert(SwarmRegistryUniversal.InvalidSwarmData.selector);
+        new SwarmRegistryUniversal(address(0), address(0));
+    }
+
+ // ==============================
+ // registerSwarm — happy path
+ // ==============================
+
+    // Registering a swarm returns the deterministic, hash-derived swarm id.
+    function test_registerSwarm_basicFlow() public {
+        uint256 fid = _registerFleet(fleetOwner, "my-fleet");
+        uint256 pid = _registerProvider(providerOwner, "https://api.example.com");
+
+        uint256 sid = _registerSwarm(
+            fleetOwner, fid, pid, new bytes(100), 16, SwarmRegistryUniversal.TagType.IBEACON_INCLUDES_MAC
+        );
+
+        // Id must equal the registry's hash of (fleetId, providerId, filter).
+        assertEq(sid, swarmRegistry.computeSwarmId(fid, pid, new bytes(100)));
+    }
+
+    // All metadata fields of a fresh swarm round-trip through swarms().
+    function test_registerSwarm_storesMetadataCorrectly() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 12, SwarmRegistryUniversal.TagType.VENDOR_ID);
+
+        (
+            uint256 gotFleet,
+            uint256 gotProvider,
+            uint32 gotFilterLen,
+            uint8 gotFpSize,
+            SwarmRegistryUniversal.TagType gotTag,
+            SwarmRegistryUniversal.SwarmStatus gotStatus
+        ) = swarmRegistry.swarms(sid);
+
+        assertEq(gotFleet, fid);
+        assertEq(gotProvider, pid);
+        assertEq(gotFilterLen, 50);
+        assertEq(gotFpSize, 12);
+        assertEq(uint8(gotTag), uint8(SwarmRegistryUniversal.TagType.VENDOR_ID));
+        // New swarms start in REGISTERED until the provider responds.
+        assertEq(uint8(gotStatus), uint8(SwarmRegistryUniversal.SwarmStatus.REGISTERED));
+    }
+
+    // The raw filter bytes are persisted verbatim and readable via getFilterData.
+    function test_registerSwarm_storesFilterData() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        // Mark first, middle and last bytes so any truncation would be caught.
+        bytes memory filter = new bytes(100);
+        filter[0] = 0xAB;
+        filter[50] = 0xCD;
+        filter[99] = 0xEF;
+
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, filter, 16, SwarmRegistryUniversal.TagType.GENERIC);
+
+        bytes memory got = swarmRegistry.getFilterData(sid);
+        assertEq(got.length, 100);
+        assertEq(uint8(got[0]), 0xAB);
+        assertEq(uint8(got[50]), 0xCD);
+        assertEq(uint8(got[99]), 0xEF);
+    }
+
+    // computeSwarmId predicts the id registerSwarm will assign.
+    function test_registerSwarm_deterministicId() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        bytes memory filter = new bytes(32);
+
+        uint256 predicted = swarmRegistry.computeSwarmId(fid, pid, filter);
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, filter, 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        assertEq(sid, predicted);
+    }
+
+    // Re-registering the identical (fleet, provider, filter) triple reverts.
+    function test_RevertIf_registerSwarm_duplicateSwarm() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        _registerSwarm(fleetOwner, fid, pid, new bytes(32), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryUniversal.SwarmAlreadyExists.selector);
+        swarmRegistry.registerSwarm(fid, pid, new bytes(32), 8, SwarmRegistryUniversal.TagType.GENERIC);
+    }
+
+    // Registration emits SwarmRegistered with the predicted id and filter size.
+    function test_registerSwarm_emitsSwarmRegistered() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        bytes memory filter = new bytes(50);
+        uint256 predicted = swarmRegistry.computeSwarmId(fid, pid, filter);
+
+        vm.expectEmit(true, true, true, true);
+        emit SwarmRegistered(predicted, fid, pid, fleetOwner, 50);
+
+        _registerSwarm(fleetOwner, fid, pid, filter, 16, SwarmRegistryUniversal.TagType.GENERIC);
+    }
+
+    // Each registration appends the new swarm id to fleetSwarms in order.
+    function test_registerSwarm_linksFleetSwarms() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pidA = _registerProvider(providerOwner, "url1");
+        uint256 pidB = _registerProvider(providerOwner, "url2");
+
+        uint256 first = _registerSwarm(fleetOwner, fid, pidA, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+        uint256 second = _registerSwarm(fleetOwner, fid, pidB, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        assertEq(swarmRegistry.fleetSwarms(fid, 0), first);
+        assertEq(swarmRegistry.fleetSwarms(fid, 1), second);
+    }
+
+    // Every TagType variant is accepted and stored verbatim. One fleet per
+    // swarm keeps the hash-derived swarm ids unique.
+    function test_registerSwarm_allTagTypes() public {
+        uint256 f1 = _registerFleet(fleetOwner, "f1");
+        uint256 f2 = _registerFleet(fleetOwner, "f2");
+        uint256 f3 = _registerFleet(fleetOwner, "f3");
+        uint256 f4 = _registerFleet(fleetOwner, "f4");
+        uint256 pid = _registerProvider(providerOwner, "url");
+
+        uint256 a = _registerSwarm(
+            fleetOwner, f1, pid, new bytes(32), 8, SwarmRegistryUniversal.TagType.IBEACON_PAYLOAD_ONLY
+        );
+        uint256 b = _registerSwarm(
+            fleetOwner, f2, pid, new bytes(32), 8, SwarmRegistryUniversal.TagType.IBEACON_INCLUDES_MAC
+        );
+        uint256 c = _registerSwarm(fleetOwner, f3, pid, new bytes(32), 8, SwarmRegistryUniversal.TagType.VENDOR_ID);
+        uint256 d = _registerSwarm(fleetOwner, f4, pid, new bytes(32), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        (,,,, SwarmRegistryUniversal.TagType ta,) = swarmRegistry.swarms(a);
+        (,,,, SwarmRegistryUniversal.TagType tb,) = swarmRegistry.swarms(b);
+        (,,,, SwarmRegistryUniversal.TagType tc,) = swarmRegistry.swarms(c);
+        (,,,, SwarmRegistryUniversal.TagType td,) = swarmRegistry.swarms(d);
+
+        assertEq(uint8(ta), uint8(SwarmRegistryUniversal.TagType.IBEACON_PAYLOAD_ONLY));
+        assertEq(uint8(tb), uint8(SwarmRegistryUniversal.TagType.IBEACON_INCLUDES_MAC));
+        assertEq(uint8(tc), uint8(SwarmRegistryUniversal.TagType.VENDOR_ID));
+        assertEq(uint8(td), uint8(SwarmRegistryUniversal.TagType.GENERIC));
+    }
+
+ // ==============================
+ // registerSwarm — reverts
+ // ==============================
+
+    // Only the fleet owner may register a swarm for that fleet.
+    function test_RevertIf_registerSwarm_notFleetOwner() public {
+        uint256 fid = _registerFleet(fleetOwner, "my-fleet");
+
+        // Provider id 1 is never registered here; presumably the owner check
+        // fires before provider validation — the expected selector relies on it.
+        vm.prank(caller);
+        vm.expectRevert(SwarmRegistryUniversal.NotFleetOwner.selector);
+        swarmRegistry.registerSwarm(fid, 1, new bytes(10), 16, SwarmRegistryUniversal.TagType.GENERIC);
+    }
+
+    // A zero-bit fingerprint is rejected.
+    function test_RevertIf_registerSwarm_fingerprintSizeZero() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryUniversal.InvalidFingerprintSize.selector);
+        swarmRegistry.registerSwarm(fid, pid, new bytes(32), 0, SwarmRegistryUniversal.TagType.GENERIC);
+    }
+
+    // 17 bits is one past MAX_FINGERPRINT_SIZE and must revert.
+    function test_RevertIf_registerSwarm_fingerprintSizeExceedsMax() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryUniversal.InvalidFingerprintSize.selector);
+        swarmRegistry.registerSwarm(fid, pid, new bytes(32), 17, SwarmRegistryUniversal.TagType.GENERIC);
+    }
+
+    // A zero-length filter is rejected.
+    function test_RevertIf_registerSwarm_emptyFilter() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryUniversal.InvalidFilterSize.selector);
+        swarmRegistry.registerSwarm(fid, pid, new bytes(0), 8, SwarmRegistryUniversal.TagType.GENERIC);
+    }
+
+    // One byte past MAX_FILTER_SIZE (24576) must revert.
+    function test_RevertIf_registerSwarm_filterTooLarge() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryUniversal.FilterTooLarge.selector);
+        swarmRegistry.registerSwarm(fid, pid, new bytes(24577), 8, SwarmRegistryUniversal.TagType.GENERIC);
+    }
+
+    // The upper fingerprint bound (16 bits) is inclusive.
+    function test_registerSwarm_maxFingerprintSize() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(100), 16, SwarmRegistryUniversal.TagType.GENERIC);
+        assertTrue(sid != 0);
+    }
+
+    // Exactly MAX_FILTER_SIZE (24576 bytes) is still accepted.
+    function test_registerSwarm_maxFilterSize() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(24576), 8, SwarmRegistryUniversal.TagType.GENERIC);
+        assertTrue(sid != 0);
+    }
+
+    // A single-byte filter is the smallest accepted size.
+    function test_registerSwarm_minFilterSize() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(1), 8, SwarmRegistryUniversal.TagType.GENERIC);
+        assertTrue(sid != 0);
+    }
+
+ // ==============================
+ // acceptSwarm / rejectSwarm
+ // ==============================
+
+    // Provider acceptance flips status to ACCEPTED and emits the change event.
+    function test_acceptSwarm_setsStatusAndEmits() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        vm.expectEmit(true, true, true, true);
+        emit SwarmStatusChanged(sid, SwarmRegistryUniversal.SwarmStatus.ACCEPTED);
+
+        vm.prank(providerOwner);
+        swarmRegistry.acceptSwarm(sid);
+
+        (,,,,, SwarmRegistryUniversal.SwarmStatus st) = swarmRegistry.swarms(sid);
+        assertEq(uint8(st), uint8(SwarmRegistryUniversal.SwarmStatus.ACCEPTED));
+    }
+
+    // Provider rejection flips status to REJECTED and emits the change event.
+    function test_rejectSwarm_setsStatusAndEmits() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        vm.expectEmit(true, true, true, true);
+        emit SwarmStatusChanged(sid, SwarmRegistryUniversal.SwarmStatus.REJECTED);
+
+        vm.prank(providerOwner);
+        swarmRegistry.rejectSwarm(sid);
+
+        (,,,,, SwarmRegistryUniversal.SwarmStatus st) = swarmRegistry.swarms(sid);
+        assertEq(uint8(st), uint8(SwarmRegistryUniversal.SwarmStatus.REJECTED));
+    }
+
+    // A random address cannot accept on the provider's behalf.
+    function test_RevertIf_acceptSwarm_notProviderOwner() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        vm.prank(caller);
+        vm.expectRevert(SwarmRegistryUniversal.NotProviderOwner.selector);
+        swarmRegistry.acceptSwarm(sid);
+    }
+
+    // The fleet owner is not the provider owner and cannot reject.
+    function test_RevertIf_rejectSwarm_notProviderOwner() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        vm.prank(fleetOwner); // fleet owner, not the provider owner
+        vm.expectRevert(SwarmRegistryUniversal.NotProviderOwner.selector);
+        swarmRegistry.rejectSwarm(sid);
+    }
+
+    // Owning the fleet grants no right to accept on the provider's behalf.
+    function test_RevertIf_acceptSwarm_fleetOwnerNotProvider() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryUniversal.NotProviderOwner.selector);
+        swarmRegistry.acceptSwarm(sid);
+    }
+
+    // A provider may change its mind: REJECTED -> ACCEPTED is allowed.
+    function test_acceptSwarm_afterReject() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        vm.prank(providerOwner);
+        swarmRegistry.rejectSwarm(sid);
+
+        vm.prank(providerOwner);
+        swarmRegistry.acceptSwarm(sid);
+
+        (,,,,, SwarmRegistryUniversal.SwarmStatus st) = swarmRegistry.swarms(sid);
+        assertEq(uint8(st), uint8(SwarmRegistryUniversal.SwarmStatus.ACCEPTED));
+    }
+
+    // The reverse transition, ACCEPTED -> REJECTED, is allowed too.
+    function test_rejectSwarm_afterAccept() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        vm.prank(providerOwner);
+        swarmRegistry.acceptSwarm(sid);
+
+        vm.prank(providerOwner);
+        swarmRegistry.rejectSwarm(sid);
+
+        (,,,,, SwarmRegistryUniversal.SwarmStatus st) = swarmRegistry.swarms(sid);
+        assertEq(uint8(st), uint8(SwarmRegistryUniversal.SwarmStatus.REJECTED));
+    }
+
+ // ==============================
+ // checkMembership — XOR logic
+ // ==============================
+
+    // End-to-end XOR-filter membership with 16-bit fingerprints: plant the
+    // expected fingerprint at slot h1 of an otherwise-zero filter, so
+    // f[h1] ^ f[h2] ^ f[h3] == fp exactly when the tag's fingerprint matches.
+    function test_checkMembership_XORLogic16Bit() public {
+        uint256 fleetId = _registerFleet(fleetOwner, "f1");
+        uint256 providerId = _registerProvider(providerOwner, "u1");
+
+        bytes memory tagId = hex"1122334455";
+        uint8 fpSize = 16;
+        uint256 dataLen = 100;
+        uint256 m = (dataLen * 8) / fpSize; // 50 slots
+
+        (uint32 h1, uint32 h2, uint32 h3, uint256 expectedFp) = getExpectedValues(tagId, m, fpSize);
+
+        // A slot collision would break the single-slot planting trick; the
+        // fixed tag seed makes this branch deterministic, so bail out quietly.
+        if (h1 == h2 || h1 == h3 || h2 == h3) {
+            return;
+        }
+
+        bytes memory filter = new bytes(dataLen);
+        _write16Bit(filter, h1, uint16(expectedFp));
+
+        uint256 swarmId =
+            _registerSwarm(fleetOwner, fleetId, providerId, filter, fpSize, SwarmRegistryUniversal.TagType.GENERIC);
+
+        bytes32 tagHash = keccak256(tagId);
+        assertTrue(swarmRegistry.checkMembership(swarmId, tagHash), "Tag should be member");
+
+        // A different tag hashes to a different fingerprint/slot pattern.
+        bytes32 fakeHash = keccak256("not-a-tag");
+        assertFalse(swarmRegistry.checkMembership(swarmId, fakeHash), "Fake tag should not be member");
+    }
+
+    // Same XOR-membership construction as the 16-bit case, but with 8-bit
+    // fingerprints and byte-wide slots.
+    function test_checkMembership_XORLogic8Bit() public {
+        uint256 fleetId = _registerFleet(fleetOwner, "f1");
+        uint256 providerId = _registerProvider(providerOwner, "u1");
+
+        bytes memory tagId = hex"AABBCCDD";
+        uint8 fpSize = 8;
+        uint256 dataLen = 80;
+        uint256 m = (dataLen * 8) / fpSize; // 80 slots
+
+        (uint32 h1, uint32 h2, uint32 h3, uint256 expectedFp) = getExpectedValues(tagId, m, fpSize);
+
+        // Deterministic early-out if the fixed seed produces a slot collision.
+        if (h1 == h2 || h1 == h3 || h2 == h3) {
+            return;
+        }
+
+        bytes memory filter = new bytes(dataLen);
+        _write8Bit(filter, h1, uint8(expectedFp));
+
+        uint256 swarmId =
+            _registerSwarm(fleetOwner, fleetId, providerId, filter, fpSize, SwarmRegistryUniversal.TagType.GENERIC);
+
+        assertTrue(swarmRegistry.checkMembership(swarmId, keccak256(tagId)), "8-bit valid tag should pass");
+        assertFalse(swarmRegistry.checkMembership(swarmId, keccak256(hex"FFFFFF")), "8-bit invalid tag should fail");
+    }
+
+    // Membership queries on an unknown swarm id revert with SwarmNotFound.
+    function test_RevertIf_checkMembership_swarmNotFound() public {
+        vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector);
+        swarmRegistry.checkMembership(999, keccak256("anything"));
+    }
+
+    // An all-zero filter yields XOR 0 in every slot; membership must still
+    // evaluate without reverting for arbitrary tags.
+    function test_checkMembership_allZeroFilter_returnsConsistent() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "u1");
+
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(64), 16, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Only checking that the calls complete; the boolean result is unspecified here.
+        swarmRegistry.checkMembership(sid, keccak256("test1"));
+        swarmRegistry.checkMembership(sid, keccak256("test2"));
+    }
+
+ // ==============================
+ // getFilterData
+ // ==============================
+
+    // getFilterData returns the exact bytes supplied at registration.
+    function test_getFilterData_returnsCorrectData() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        bytes memory filter = new bytes(100);
+        filter[0] = 0xFF;
+        filter[99] = 0x01;
+
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, filter, 16, SwarmRegistryUniversal.TagType.GENERIC);
+
+        bytes memory got = swarmRegistry.getFilterData(sid);
+        assertEq(got.length, 100);
+        assertEq(uint8(got[0]), 0xFF);
+        assertEq(uint8(got[99]), 0x01);
+    }
+
+    // Reading filter data for an unknown swarm id reverts.
+    function test_RevertIf_getFilterData_swarmNotFound() public {
+        vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector);
+        swarmRegistry.getFilterData(999);
+    }
+
+ // ==============================
+ // Multiple swarms per fleet
+ // ==============================
+
+    // One fleet can host several swarms with distinct providers/filters; ids
+    // are unique and indexed in registration order.
+    function test_multipleSwarms_sameFleet() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pidA = _registerProvider(providerOwner, "url1");
+        uint256 pidB = _registerProvider(providerOwner, "url2");
+        uint256 pidC = _registerProvider(providerOwner, "url3");
+
+        uint256 a = _registerSwarm(fleetOwner, fid, pidA, new bytes(32), 8, SwarmRegistryUniversal.TagType.GENERIC);
+        uint256 b = _registerSwarm(fleetOwner, fid, pidB, new bytes(64), 16, SwarmRegistryUniversal.TagType.VENDOR_ID);
+        uint256 c = _registerSwarm(
+            fleetOwner, fid, pidC, new bytes(50), 12, SwarmRegistryUniversal.TagType.IBEACON_PAYLOAD_ONLY
+        );
+
+        // Hash-derived ids must be pairwise distinct.
+        assertTrue(a != b && b != c && a != c);
+
+        assertEq(swarmRegistry.fleetSwarms(fid, 0), a);
+        assertEq(swarmRegistry.fleetSwarms(fid, 1), b);
+        assertEq(swarmRegistry.fleetSwarms(fid, 2), c);
+    }
+
+ // ==============================
+ // Constants
+ // ==============================
+
+    // Pin the public protocol constants so silent changes fail the suite.
+    function test_constants() public view {
+        assertEq(swarmRegistry.MAX_FINGERPRINT_SIZE(), 16);
+        assertEq(swarmRegistry.MAX_FILTER_SIZE(), 24576);
+    }
+
+ // ==============================
+ // Fuzz
+ // ==============================
+
+    // Any fingerprint size in [1, 16] registers and is stored unchanged.
+    function testFuzz_registerSwarm_validFingerprintSizes(uint8 fpSize) public {
+        fpSize = uint8(bound(fpSize, 1, 16));
+
+        // Unique fleet/provider per size keeps the hash-derived ids distinct.
+        uint256 fid = _registerFleet(fleetOwner, abi.encodePacked("fleet-", fpSize));
+        uint256 pid = _registerProvider(providerOwner, string(abi.encodePacked("url-", fpSize)));
+
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(64), fpSize, SwarmRegistryUniversal.TagType.GENERIC);
+
+        (,,, uint8 got,,) = swarmRegistry.swarms(sid);
+        assertEq(got, fpSize);
+    }
+
+    // Sizes outside [1, 16] always revert with InvalidFingerprintSize.
+    function testFuzz_registerSwarm_invalidFingerprintSizes(uint8 fpSize) public {
+        vm.assume(fpSize == 0 || fpSize > 16);
+
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryUniversal.InvalidFingerprintSize.selector);
+        swarmRegistry.registerSwarm(fid, pid, new bytes(32), fpSize, SwarmRegistryUniversal.TagType.GENERIC);
+    }
+
+    // Every filter length in [1, MAX_FILTER_SIZE] is accepted and recorded.
+    function testFuzz_registerSwarm_filterSizeRange(uint256 size) public {
+        size = bound(size, 1, 24576);
+
+        uint256 fid = _registerFleet(fleetOwner, abi.encodePacked("f-", size));
+        uint256 pid = _registerProvider(providerOwner, string(abi.encodePacked("url-", size)));
+
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(size), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        (,, uint32 got,,,) = swarmRegistry.swarms(sid);
+        assertEq(got, uint32(size));
+    }
+
+ // ==============================
+ // updateSwarmFilter
+ // ==============================
+
+    // Updating the filter stores the new bytes, emits SwarmFilterUpdated, and
+    // demotes the swarm back to REGISTERED so the provider must re-accept.
+    function test_updateSwarmFilter_updatesFilterAndResetsStatus() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        vm.prank(providerOwner);
+        swarmRegistry.acceptSwarm(sid);
+
+        // Replace the 50-byte filter with a distinct 100-byte pattern.
+        bytes memory next = new bytes(100);
+        for (uint256 i = 0; i < next.length; i++) {
+            next[i] = bytes1(uint8(i % 256));
+        }
+
+        vm.expectEmit(true, true, true, true);
+        emit SwarmFilterUpdated(sid, fleetOwner, 100);
+
+        vm.prank(fleetOwner);
+        swarmRegistry.updateSwarmFilter(sid, next);
+
+        (,, uint32 len,,, SwarmRegistryUniversal.SwarmStatus st) = swarmRegistry.swarms(sid);
+        assertEq(uint8(st), uint8(SwarmRegistryUniversal.SwarmStatus.REGISTERED));
+        assertEq(len, 100);
+    }
+
+    // The stored filter length tracks the new filter's size after an update.
+    function test_updateSwarmFilter_changesFilterLength() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid = _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        (,, uint32 lenBefore,,,) = swarmRegistry.swarms(sid);
+        assertEq(lenBefore, 50);
+
+        vm.prank(fleetOwner);
+        swarmRegistry.updateSwarmFilter(sid, new bytes(100));
+
+        (,, uint32 lenAfter,,,) = swarmRegistry.swarms(sid);
+        assertEq(lenAfter, 100);
+    }
+
+    function test_RevertIf_updateSwarmFilter_swarmNotFound() public {
+        // Updating a swarm id that was never registered must revert
+        // with the SwarmNotFound custom error.
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector);
+        swarmRegistry.updateSwarmFilter(999, new bytes(50));
+    }
+
+    function test_RevertIf_updateSwarmFilter_notFleetOwner() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // An unrelated account must not be able to replace the filter.
+        vm.prank(caller);
+        vm.expectRevert(SwarmRegistryUniversal.NotFleetOwner.selector);
+        swarmRegistry.updateSwarmFilter(sid, new bytes(100));
+    }
+
+    function test_RevertIf_updateSwarmFilter_emptyFilter() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // A zero-length filter is rejected as an invalid size.
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryUniversal.InvalidFilterSize.selector);
+        swarmRegistry.updateSwarmFilter(sid, new bytes(0));
+    }
+
+    function test_RevertIf_updateSwarmFilter_filterTooLarge() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // 24577 bytes is one past the maximum accepted elsewhere in these
+        // tests (24576), so the update must revert.
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryUniversal.FilterTooLarge.selector);
+        swarmRegistry.updateSwarmFilter(sid, new bytes(24577));
+    }
+
+ // ==============================
+ // updateSwarmProvider
+ // ==============================
+
+    function test_updateSwarmProvider_updatesProviderAndResetsStatus() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pidA = _registerProvider(providerOwner, "url1");
+        uint256 pidB = _registerProvider(providerOwner, "url2");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pidA, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Accept first so the provider swap visibly resets the status.
+        vm.prank(providerOwner);
+        swarmRegistry.acceptSwarm(sid);
+
+        vm.expectEmit(true, true, true, true);
+        emit SwarmProviderUpdated(sid, pidA, pidB);
+
+        vm.prank(fleetOwner);
+        swarmRegistry.updateSwarmProvider(sid, pidB);
+
+        // The swarm now points at the second provider and is back to REGISTERED.
+        (, uint256 storedPid,,,, SwarmRegistryUniversal.SwarmStatus status) = swarmRegistry.swarms(sid);
+        assertEq(storedPid, pidB);
+        assertEq(uint8(status), uint8(SwarmRegistryUniversal.SwarmStatus.REGISTERED));
+    }
+
+    function test_RevertIf_updateSwarmProvider_swarmNotFound() public {
+        uint256 providerId = _registerProvider(providerOwner, "url1");
+
+        // Repointing a swarm id that was never registered must revert
+        // with the SwarmNotFound custom error.
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector);
+        swarmRegistry.updateSwarmProvider(999, providerId);
+    }
+
+    function test_RevertIf_updateSwarmProvider_notFleetOwner() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pidA = _registerProvider(providerOwner, "url1");
+        uint256 pidB = _registerProvider(providerOwner, "url2");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pidA, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Only the fleet owner may repoint the swarm at a new provider.
+        vm.prank(caller);
+        vm.expectRevert(SwarmRegistryUniversal.NotFleetOwner.selector);
+        swarmRegistry.updateSwarmProvider(sid, pidB);
+    }
+
+    function test_RevertIf_updateSwarmProvider_providerDoesNotExist() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // The ownerOf lookup on the unminted provider token reverts inside
+        // ERC721 before the registry's own error is reached, so only a
+        // generic revert can be asserted here.
+        vm.prank(fleetOwner);
+        vm.expectRevert();
+        swarmRegistry.updateSwarmProvider(sid, 99999);
+    }
+
+ // ==============================
+ // deleteSwarm
+ // ==============================
+
+    function test_deleteSwarm_removesSwarmAndEmits() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        vm.expectEmit(true, true, true, true);
+        emit SwarmDeleted(sid, fid, fleetOwner);
+
+        vm.prank(fleetOwner);
+        swarmRegistry.deleteSwarm(sid);
+
+        // Deletion zeroes the stored record.
+        (uint256 storedFid,, uint32 len,,,) = swarmRegistry.swarms(sid);
+        assertEq(storedFid, 0);
+        assertEq(len, 0);
+    }
+
+    function test_deleteSwarm_removesFromFleetSwarms() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pidA = _registerProvider(providerOwner, "url1");
+        uint256 pidB = _registerProvider(providerOwner, "url2");
+        uint256 sidA =
+            _registerSwarm(fleetOwner, fid, pidA, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+        uint256 sidB =
+            _registerSwarm(fleetOwner, fid, pidB, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        vm.prank(fleetOwner);
+        swarmRegistry.deleteSwarm(sidA);
+
+        // The fleet's enumeration shrinks to a single entry holding the survivor.
+        assertEq(swarmRegistry.fleetSwarms(fid, 0), sidB);
+        vm.expectRevert(); // index 1 is now past the end of the array
+        swarmRegistry.fleetSwarms(fid, 1);
+    }
+
+    function test_deleteSwarm_swapAndPop() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pidA = _registerProvider(providerOwner, "url1");
+        uint256 pidB = _registerProvider(providerOwner, "url2");
+        uint256 pidC = _registerProvider(providerOwner, "url3");
+        uint256 sidA =
+            _registerSwarm(fleetOwner, fid, pidA, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+        uint256 sidB =
+            _registerSwarm(fleetOwner, fid, pidB, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+        uint256 sidC =
+            _registerSwarm(fleetOwner, fid, pidC, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Removing the middle element must swap the last element into its slot.
+        vm.prank(fleetOwner);
+        swarmRegistry.deleteSwarm(sidB);
+
+        assertEq(swarmRegistry.fleetSwarms(fid, 0), sidA);
+        assertEq(swarmRegistry.fleetSwarms(fid, 1), sidC);
+        vm.expectRevert(); // index 2 is out of bounds after the pop
+        swarmRegistry.fleetSwarms(fid, 2);
+    }
+
+    function test_deleteSwarm_clearsFilterData() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        // Register with a non-zero filter so the wipe is observable.
+        bytes memory filter = new bytes(50);
+        for (uint256 i = 0; i < 50; i++) {
+            filter[i] = bytes1(uint8(i));
+        }
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, filter, 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        vm.prank(fleetOwner);
+        swarmRegistry.deleteSwarm(sid);
+
+        // The recorded filter length is wiped along with the swarm.
+        (,, uint32 len,,,) = swarmRegistry.swarms(sid);
+        assertEq(len, 0);
+    }
+
+    function test_RevertIf_deleteSwarm_swarmNotFound() public {
+        // Deleting a swarm id that was never registered must revert
+        // with the SwarmNotFound custom error.
+        vm.prank(fleetOwner);
+        vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector);
+        swarmRegistry.deleteSwarm(999);
+    }
+
+    function test_RevertIf_deleteSwarm_notFleetOwner() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Only the fleet owner may delete the swarm.
+        vm.prank(caller);
+        vm.expectRevert(SwarmRegistryUniversal.NotFleetOwner.selector);
+        swarmRegistry.deleteSwarm(sid);
+    }
+
+    function test_deleteSwarm_afterUpdate() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // A filter update must not interfere with a subsequent delete.
+        vm.prank(fleetOwner);
+        swarmRegistry.updateSwarmFilter(sid, new bytes(100));
+        vm.prank(fleetOwner);
+        swarmRegistry.deleteSwarm(sid);
+
+        (uint256 storedFid,,,,,) = swarmRegistry.swarms(sid);
+        assertEq(storedFid, 0);
+    }
+
+    function test_deleteSwarm_updatesSwarmIndexInFleet() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pidA = _registerProvider(providerOwner, "url1");
+        uint256 pidB = _registerProvider(providerOwner, "url2");
+        uint256 pidC = _registerProvider(providerOwner, "url3");
+
+        uint256 sidA = _registerSwarm(fleetOwner, fid, pidA, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+        uint256 sidB = _registerSwarm(fleetOwner, fid, pidB, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+        uint256 sidC = _registerSwarm(fleetOwner, fid, pidC, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Registration order fixes the starting indices.
+        assertEq(swarmRegistry.swarmIndexInFleet(sidA), 0);
+        assertEq(swarmRegistry.swarmIndexInFleet(sidB), 1);
+        assertEq(swarmRegistry.swarmIndexInFleet(sidC), 2);
+
+        // Deleting the head swaps the tail element into slot 0.
+        vm.prank(fleetOwner);
+        swarmRegistry.deleteSwarm(sidA);
+
+        assertEq(swarmRegistry.swarmIndexInFleet(sidC), 0);
+        assertEq(swarmRegistry.swarmIndexInFleet(sidB), 1);
+        // The deleted swarm's mapping entry is reset to the default 0, which
+        // happens to coincide with sidC's live index — the assertion cannot
+        // distinguish "deleted" from "at index 0" on its own.
+        assertEq(swarmRegistry.swarmIndexInFleet(sidA), 0);
+    }
+
+ // ==============================
+ // isSwarmValid
+ // ==============================
+
+    function test_isSwarmValid_bothValid() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // With both underlying NFTs alive, both validity flags are set.
+        (bool fleetOk, bool providerOk) = swarmRegistry.isSwarmValid(sid);
+        assertTrue(fleetOk);
+        assertTrue(providerOk);
+    }
+
+    function test_isSwarmValid_providerBurned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Burning the provider token invalidates only the provider side.
+        vm.prank(providerOwner);
+        providerContract.burn(pid);
+
+        (bool fleetOk, bool providerOk) = swarmRegistry.isSwarmValid(sid);
+        assertTrue(fleetOk);
+        assertFalse(providerOk);
+    }
+
+    function test_isSwarmValid_fleetBurned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Burning the fleet token invalidates only the fleet side.
+        vm.prank(fleetOwner);
+        fleetContract.burn(fid);
+
+        (bool fleetOk, bool providerOk) = swarmRegistry.isSwarmValid(sid);
+        assertFalse(fleetOk);
+        assertTrue(providerOk);
+    }
+
+    function test_isSwarmValid_bothBurned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Burn both underlying NFTs.
+        vm.prank(fleetOwner);
+        fleetContract.burn(fid);
+        vm.prank(providerOwner);
+        providerContract.burn(pid);
+
+        (bool fleetOk, bool providerOk) = swarmRegistry.isSwarmValid(sid);
+        assertFalse(fleetOk);
+        assertFalse(providerOk);
+    }
+
+    function test_RevertIf_isSwarmValid_swarmNotFound() public {
+        // Validity queries on unknown ids revert rather than returning false.
+        vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector);
+        swarmRegistry.isSwarmValid(999);
+    }
+
+ // ==============================
+ // purgeOrphanedSwarm
+ // ==============================
+
+    function test_purgeOrphanedSwarm_providerBurned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Orphan the swarm by burning its provider token.
+        vm.prank(providerOwner);
+        providerContract.burn(pid);
+
+        vm.expectEmit(true, true, true, true);
+        emit SwarmPurged(sid, fid, caller);
+
+        // Purging is performed by an unrelated account here.
+        vm.prank(caller);
+        swarmRegistry.purgeOrphanedSwarm(sid);
+
+        (,, uint32 len,,,) = swarmRegistry.swarms(sid);
+        assertEq(len, 0);
+    }
+
+    function test_purgeOrphanedSwarm_fleetBurned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Orphan the swarm from the fleet side.
+        vm.prank(fleetOwner);
+        fleetContract.burn(fid);
+
+        vm.prank(caller);
+        swarmRegistry.purgeOrphanedSwarm(sid);
+
+        // The record is fully zeroed after the purge.
+        (uint256 storedFid,, uint32 len,,,) = swarmRegistry.swarms(sid);
+        assertEq(storedFid, 0);
+        assertEq(len, 0);
+    }
+
+    function test_purgeOrphanedSwarm_removesFromFleetSwarms() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pidA = _registerProvider(providerOwner, "url1");
+        uint256 pidB = _registerProvider(providerOwner, "url2");
+        uint256 sidA = _registerSwarm(fleetOwner, fid, pidA, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+        uint256 sidB = _registerSwarm(fleetOwner, fid, pidB, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Orphan and purge the first swarm.
+        vm.prank(providerOwner);
+        providerContract.burn(pidA);
+        vm.prank(caller);
+        swarmRegistry.purgeOrphanedSwarm(sidA);
+
+        // Swap-and-pop leaves only the second swarm, now at index 0.
+        assertEq(swarmRegistry.fleetSwarms(fid, 0), sidB);
+        vm.expectRevert(); // index 1 is out of bounds
+        swarmRegistry.fleetSwarms(fid, 1);
+    }
+
+    function test_purgeOrphanedSwarm_clearsFilterData() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+
+        // Register with a non-zero filter so the wipe is observable.
+        bytes memory filter = new bytes(50);
+        for (uint256 i = 0; i < 50; i++) {
+            filter[i] = bytes1(uint8(i));
+        }
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, filter, 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Orphan via provider burn, then purge from an unrelated account.
+        vm.prank(providerOwner);
+        providerContract.burn(pid);
+        vm.prank(caller);
+        swarmRegistry.purgeOrphanedSwarm(sid);
+
+        // The stored filter length is wiped by the purge.
+        (,, uint32 len,,,) = swarmRegistry.swarms(sid);
+        assertEq(len, 0);
+    }
+
+    function test_RevertIf_purgeOrphanedSwarm_swarmNotFound() public {
+        // Purging a swarm id that was never registered must revert.
+        vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector);
+        swarmRegistry.purgeOrphanedSwarm(999);
+    }
+
+    function test_RevertIf_purgeOrphanedSwarm_swarmNotOrphaned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Both underlying NFTs are alive, so the swarm is not purgeable.
+        vm.expectRevert(SwarmRegistryUniversal.SwarmNotOrphaned.selector);
+        swarmRegistry.purgeOrphanedSwarm(sid);
+    }
+
+ // ==============================
+ // Orphan guards on accept/reject/checkMembership
+ // ==============================
+
+    function test_RevertIf_acceptSwarm_orphaned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Burn the provider token so the swarm becomes an orphan.
+        vm.prank(providerOwner);
+        providerContract.burn(pid);
+
+        vm.prank(providerOwner);
+        vm.expectRevert(SwarmRegistryUniversal.SwarmOrphaned.selector);
+        swarmRegistry.acceptSwarm(sid);
+    }
+
+    function test_RevertIf_rejectSwarm_orphaned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Burn the fleet token so the swarm becomes an orphan.
+        vm.prank(fleetOwner);
+        fleetContract.burn(fid);
+
+        vm.prank(providerOwner);
+        vm.expectRevert(SwarmRegistryUniversal.SwarmOrphaned.selector);
+        swarmRegistry.rejectSwarm(sid);
+    }
+
+    function test_RevertIf_checkMembership_orphaned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Membership checks are refused once the provider token is burned.
+        vm.prank(providerOwner);
+        providerContract.burn(pid);
+
+        vm.expectRevert(SwarmRegistryUniversal.SwarmOrphaned.selector);
+        swarmRegistry.checkMembership(sid, keccak256("test"));
+    }
+
+    function test_RevertIf_acceptSwarm_fleetBurned() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // A burned fleet token also orphans the swarm for acceptance.
+        vm.prank(fleetOwner);
+        fleetContract.burn(fid);
+
+        vm.prank(providerOwner);
+        vm.expectRevert(SwarmRegistryUniversal.SwarmOrphaned.selector);
+        swarmRegistry.acceptSwarm(sid);
+    }
+
+    function test_purge_thenAcceptReverts() public {
+        uint256 fid = _registerFleet(fleetOwner, "f1");
+        uint256 pid = _registerProvider(providerOwner, "url1");
+        uint256 sid =
+            _registerSwarm(fleetOwner, fid, pid, new bytes(50), 8, SwarmRegistryUniversal.TagType.GENERIC);
+
+        // Orphan and purge the swarm.
+        vm.prank(providerOwner);
+        providerContract.burn(pid);
+        vm.prank(caller);
+        swarmRegistry.purgeOrphanedSwarm(sid);
+
+        // A purged swarm reads as nonexistent, not merely orphaned.
+        vm.prank(providerOwner);
+        vm.expectRevert(SwarmRegistryUniversal.SwarmNotFound.selector);
+        swarmRegistry.acceptSwarm(sid);
+    }
+}
diff --git a/test/contentsign/BaseContentSign.t.sol b/test/contentsign/BaseContentSign.t.sol
index b52438b..ef5a538 100644
--- a/test/contentsign/BaseContentSign.t.sol
+++ b/test/contentsign/BaseContentSign.t.sol
@@ -2,7 +2,7 @@
pragma solidity ^0.8.20;
-import {Test, console} from "forge-std/Test.sol";
+import {Test} from "forge-std/Test.sol";
import {BaseContentSign} from "../../src/contentsign/BaseContentSign.sol";
contract MockContentSign is BaseContentSign {
diff --git a/test/contentsign/PaymentMiddleware.t.sol b/test/contentsign/PaymentMiddleware.t.sol
index 3d5c24b..5c7bb6e 100644
--- a/test/contentsign/PaymentMiddleware.t.sol
+++ b/test/contentsign/PaymentMiddleware.t.sol
@@ -2,14 +2,13 @@
pragma solidity ^0.8.20;
-import {Test, console} from "forge-std/Test.sol";
+import {Test} from "forge-std/Test.sol";
import {BaseContentSign} from "../../src/contentsign/BaseContentSign.sol";
import {ERC20} from "@openzeppelin/contracts/token/ERC20/ERC20.sol";
import {IERC20Errors} from "@openzeppelin/contracts/interfaces/draft-IERC6093.sol";
import {ERC721} from "@openzeppelin/contracts/token/ERC721/ERC721.sol";
import {Ownable} from "@openzeppelin/contracts/access/Ownable.sol";
import {PaymentMiddleware} from "../../src/contentsign/PaymentMiddleware.sol";
-import {SafeERC20} from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol";
contract MockToken is ERC20 {
constructor() ERC20("Mock Token", "MTK") {}
diff --git a/typescript b/typescript
new file mode 100644
index 0000000..00563f8
--- /dev/null
+++ b/typescript
@@ -0,0 +1,3 @@
+Script started on Fri Feb 13 12:41:51 2026
+[1m[7m%[27m[1m[0m
[0m[27m[24m[Jalex@Alexs-MacBook-Pro-2 rollup % [K[?2004h[?2004l
+[1m[7m%[27m[1m[0m
[0m[27m[24m[Jalex@Alexs-MacBook-Pro-2 rollup % [K[?2004h
\ No newline at end of file