feat: drafted out hyper log log tombstone
This commit is contained in:
parent
ffd33049d9
commit
463706e6a5
2 changed files with 1118 additions and 0 deletions
314
posts/drafts/hyper-logLog-tombstone-garbage-collection.md
Normal file
314
posts/drafts/hyper-logLog-tombstone-garbage-collection.md
Normal file
|
|
@ -0,0 +1,314 @@
|
|||
## Abstract
|
||||
|
||||
When synchronizing records in a distributed network, a problem arises when deleting records. If, when a deletion is initiated, individual nodes within the network were only to delete their own copy of the record, it is highly likely that after the deletion other nodes would resynchronize the original data, reverting the change. This can happen because events do not occur simultaneously between nodes, or because nodes are temporarily disconnected from the network and then reconnect with an outdated state. The traditional solution to this problem is to create a "tombstone" record that is kept around after the deletion to track that we had this record in the past but it has since been deleted, so we should not recreate it.
|
||||
|
||||
While this approach works, it has the issue that every node in the network needs to indefinitely keep around an ever-growing number of tombstone records. Generally, after an arbitrarily large amount of time it can be assumed that it is safe to clear a tombstone, as there should be no remaining rogue nodes that still have the original data around.
|
||||
|
||||
The methodology in this paper revolves around using the HyperLogLog algorithm to estimate how many nodes have received a record, and then comparing that against the same estimate of how many tombstones have been created. This prunes the number of tombstones that exist within any given network down to a much smaller amount, making it possible to extend the time that we can keep at least one tombstone alive in the network while still reducing the storage overhead.
|
||||
|
||||
### Core Concept
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant A as Node A
|
||||
participant B as Node B
|
||||
participant C as Node C
|
||||
|
||||
Note over A,C: Record propagation phase
|
||||
A->>B: record + recordHLL
|
||||
B->>C: record + recordHLL
|
||||
|
||||
Note over A,C: Tombstone propagation phase
|
||||
A->>A: Create tombstone with frozenRecordHLL
|
||||
A->>B: tombstone + tombstoneHLL + frozenRecordHLL
|
||||
B->>C: tombstone + tombstoneHLL + frozenRecordHLL
|
||||
|
||||
Note over A,C: GC phase (after convergence)
|
||||
C->>C: tombstoneCount >= frozenRecordCount, become keeper
|
||||
B->>B: sees higher estimate, step down and GC
|
||||
```
|
||||
|
||||
## Data Model
|
||||
|
||||
Records and tombstones are separate entities:
|
||||
|
||||
```ts
|
||||
interface Record<Data> {
|
||||
readonly id: string;
|
||||
readonly data: Data;
|
||||
readonly recordHLL: HLL; // Tracks nodes with this record
|
||||
}
|
||||
|
||||
interface Tombstone {
|
||||
readonly id: string;
|
||||
readonly recordHLL: HLL; // Snapshot of record distribution (updated during propagation)
|
||||
readonly tombstoneHLL: HLL; // Tracks nodes with the tombstone
|
||||
readonly isKeeper: boolean; // This node continues propagating
|
||||
}
|
||||
```
|
||||
|
||||
A node stores records and tombstones in separate maps. Tombstones reference records by ID.
|
||||
|
||||
## Algorithm
|
||||
|
||||
### 1. Record Distribution
|
||||
|
||||
When a node creates or receives a record, it adds itself to the record's HLL:
|
||||
|
||||
```ts
|
||||
const receiveNewRecord = (incoming: Record, nodeId: string): Record => ({
|
||||
...incoming,
|
||||
recordHLL: hllAdd(hllClone(incoming.recordHLL), nodeId),
|
||||
});
|
||||
```
|
||||
|
||||
### 2. Tombstone Creation
|
||||
|
||||
When deleting a record, create a tombstone with the frozen recordHLL:
|
||||
|
||||
```ts
|
||||
const createTombstone = <Data>(record: Record<Data>, nodeId: string): Tombstone => ({
|
||||
id: record.id,
|
||||
recordHLL: hllClone(record.recordHLL), // Snapshot (updated during propagation)
|
||||
tombstoneHLL: hllAdd(createHLL(), nodeId),
|
||||
isKeeper: false,
|
||||
});
|
||||
```
|
||||
|
||||
### 3. Tombstone Propagation
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A[Receive tombstone message] --> B{Have this record?}
|
||||
B -->|No| C[Ignore - don't accept tombstones for unknown records]
|
||||
B -->|Yes| D[Merge HLLs]
|
||||
D --> E{tombstoneCount >= frozenRecordCount?}
|
||||
E -->|No| F[Store updated record]
|
||||
E -->|Yes| G{Already a keeper?}
|
||||
G -->|No| H[Become keeper]
|
||||
G -->|Yes| I{Incoming estimate > my previous?}
|
||||
I -->|Yes| J[Step down and GC]
|
||||
I -->|No| K[Stay as keeper]
|
||||
```
|
||||
|
||||
### 4. Garbage Collection Logic
|
||||
|
||||
```ts
|
||||
const checkGCStatus = (
|
||||
tombstone: Tombstone,
|
||||
incomingTombstoneEstimate: number | null,
|
||||
myPreviousTombstoneEstimate: number,
|
||||
myNodeId: string,
|
||||
senderNodeId: string | null
|
||||
): { shouldGC: boolean; becomeKeeper: boolean; stepDownAsKeeper: boolean } => {
|
||||
const targetCount = hllEstimate(tombstone.recordHLL);
|
||||
const tombstoneCount = hllEstimate(tombstone.tombstoneHLL);
|
||||
|
||||
if (tombstone.isKeeper) {
|
||||
// Step down if incoming estimate is higher
|
||||
if (incomingTombstoneEstimate !== null &&
|
||||
incomingTombstoneEstimate >= targetCount) {
|
||||
if (myPreviousTombstoneEstimate < incomingTombstoneEstimate) {
|
||||
return { shouldGC: true, becomeKeeper: false, stepDownAsKeeper: true };
|
||||
}
|
||||
// Tie-breaker: higher node ID steps down when estimates are equal
|
||||
if (myPreviousTombstoneEstimate === incomingTombstoneEstimate &&
|
||||
senderNodeId !== null && myNodeId > senderNodeId) {
|
||||
return { shouldGC: true, becomeKeeper: false, stepDownAsKeeper: true };
|
||||
}
|
||||
}
|
||||
return { shouldGC: false, becomeKeeper: false, stepDownAsKeeper: false };
|
||||
}
|
||||
|
||||
// Become keeper when threshold reached
|
||||
if (tombstoneCount >= targetCount) {
|
||||
return { shouldGC: false, becomeKeeper: true, stepDownAsKeeper: false };
|
||||
}
|
||||
|
||||
return { shouldGC: false, becomeKeeper: false, stepDownAsKeeper: false };
|
||||
};
|
||||
```
|
||||
|
||||
### 5. Forward on Step-Down
|
||||
|
||||
When a keeper steps down, it immediately forwards the incoming tombstone to all connected peers. This creates a cascading effect that rapidly eliminates redundant keepers:
|
||||
|
||||
```ts
|
||||
const forwardTombstoneToAllPeers = (
|
||||
network: NetworkState,
|
||||
forwardingNodeId: string,
|
||||
tombstone: Tombstone,
|
||||
excludePeerId?: string
|
||||
): NetworkState => {
|
||||
const forwardingNode = network.nodes.get(forwardingNodeId);
|
||||
if (!forwardingNode) return network;
|
||||
|
||||
for (const peerId of forwardingNode.peerIds) {
|
||||
if (peerId === excludePeerId) continue;
|
||||
|
||||
const peer = network.nodes.get(peerId);
|
||||
if (!peer || !peer.records.has(tombstone.id)) continue;
|
||||
|
||||
const updatedPeer = receiveTombstone(peer, tombstone, forwardingNodeId);
|
||||
network.nodes.set(peerId, updatedPeer);
|
||||
|
||||
// If this peer also stepped down, recursively forward
|
||||
if (!updatedPeer.tombstones.has(tombstone.id) &&
|
||||
peer.tombstones.has(tombstone.id)) {
|
||||
forwardTombstoneToAllPeers(network, peerId, tombstone, forwardingNodeId);
|
||||
}
|
||||
}
|
||||
|
||||
return network;
|
||||
};
|
||||
```
|
||||
|
||||
## Design Decisions
|
||||
|
||||
### Why Freeze the Record HLL?
|
||||
|
||||
Without a frozen snapshot, each node compares against its own local recordHLL estimate. The problem:
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
subgraph Without Frozen HLL
|
||||
A1[Node A: recordHLL=2] -->|gossip| B1[Node B: recordHLL=2]
|
||||
B1 --> C1[Both think 2 nodes have record]
|
||||
C1 --> D1[tombstoneHLL reaches 2]
|
||||
D1 --> E1[GC triggers - but Node C still has record!]
|
||||
end
|
||||
```
|
||||
|
||||
The frozen HLL captures the record count at tombstone creation time and propagates with the tombstone. All nodes compare against the same target.
|
||||
|
||||
### Why Dynamic Keeper Election?
|
||||
|
||||
Fixed originator as keeper creates a single point of failure. If the originator goes offline, no one propagates the tombstone.
|
||||
|
||||
Dynamic election means any node can become a keeper when it detects `tombstoneCount >= frozenRecordCount`. Multiple keepers provide redundancy.
|
||||
|
||||
### Why Keeper Step-Down?
|
||||
|
||||
Without step-down, every node eventually becomes a keeper (since they all eventually see the threshold condition). This means no one ever GCs.
|
||||
|
||||
Step-down creates convergence:
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
subgraph Keeper Convergence Over Time
|
||||
T0[t=0: 0 keepers]
|
||||
T1[t=1: 5 keepers - first nodes to detect threshold]
|
||||
T2[t=2: 3 keepers - 2 stepped down after seeing higher estimates]
|
||||
T3[t=3: 1 keeper - most informed node remains]
|
||||
end
|
||||
T0 --> T1 --> T2 --> T3
|
||||
```
|
||||
|
||||
### Why Node ID Tie-Breaker?
|
||||
|
||||
When HLL estimates converge (all nodes have similar tombstoneHLL values), no one can have a strictly higher estimate. Without a tie-breaker, keepers with equal estimates would never step down.
|
||||
|
||||
The lexicographic node ID comparison ensures deterministic convergence: when two keepers with equal estimates communicate, the one with the higher node ID steps down.
|
||||
|
||||
### Why Forward on Step-Down?
|
||||
|
||||
Without forwarding, keepers only step down when randomly selected for gossip. With aggressive forwarding, a stepping-down keeper immediately propagates the "winning" tombstone to all neighbors, creating a cascade effect that rapidly eliminates redundant keepers.
|
||||
|
||||
## Complete Receive Handlers
|
||||
|
||||
```ts
|
||||
interface NodeState<Data> {
|
||||
readonly records: ReadonlyMap<string, Record<Data>>;
|
||||
readonly tombstones: ReadonlyMap<string, Tombstone>;
|
||||
}
|
||||
|
||||
const receiveRecord = <Data>(
|
||||
node: NodeState<Data>,
|
||||
incoming: Record<Data>
|
||||
): NodeState<Data> => {
|
||||
const existing = node.records.get(incoming.id);
|
||||
|
||||
const updatedRecord: Record<Data> = existing
|
||||
? { ...existing, recordHLL: hllMerge(existing.recordHLL, incoming.recordHLL) }
|
||||
: incoming;
|
||||
|
||||
const newRecords = new Map(node.records);
|
||||
newRecords.set(incoming.id, updatedRecord);
|
||||
return { ...node, records: newRecords };
|
||||
};
|
||||
|
||||
const receiveTombstone = <Data>(
|
||||
node: NodeState<Data>,
|
||||
incoming: Tombstone
|
||||
): NodeState<Data> => {
|
||||
// Don't accept tombstones for unknown records
|
||||
if (!node.records.has(incoming.id)) {
|
||||
return node;
|
||||
}
|
||||
|
||||
const existing = node.tombstones.get(incoming.id);
|
||||
const previousEstimate = existing ? hllEstimate(existing.tombstoneHLL) : 0;
|
||||
|
||||
let updatedTombstone: Tombstone = existing
|
||||
? {
|
||||
...existing,
|
||||
tombstoneHLL: hllMerge(existing.tombstoneHLL, incoming.tombstoneHLL),
|
||||
recordHLL: keepHigherEstimate(existing.recordHLL, incoming.recordHLL),
|
||||
}
|
||||
: incoming;
|
||||
|
||||
const gcStatus = checkGCStatus(
|
||||
updatedTombstone,
|
||||
hllEstimate(incoming.tombstoneHLL),
|
||||
previousEstimate
|
||||
);
|
||||
|
||||
if (gcStatus.stepDownAsKeeper) {
|
||||
// GC both record and tombstone
|
||||
return deleteRecordAndTombstone(node, incoming.id);
|
||||
}
|
||||
|
||||
if (gcStatus.becomeKeeper) {
|
||||
updatedTombstone = { ...updatedTombstone, isKeeper: true };
|
||||
}
|
||||
|
||||
const newTombstones = new Map(node.tombstones);
|
||||
newTombstones.set(incoming.id, updatedTombstone);
|
||||
return { ...node, tombstones: newTombstones };
|
||||
};
|
||||
```
|
||||
|
||||
## Trade-offs
|
||||
|
||||
| Aspect | Impact |
|
||||
|--------|--------|
|
||||
| **Memory** | ~1KB per tombstone (frozen HLL at precision 10) |
|
||||
| **Bandwidth** | HLLs transmitted with each gossip message |
|
||||
| **Latency** | GC delayed until keeper convergence |
|
||||
| **Consistency** | Eventual - temporary resurrection events possible |
|
||||
|
||||
## Properties
|
||||
|
||||
- **Safety**: 100% - tombstones never prematurely deleted
|
||||
- **Liveness**: Keepers step down, enabling eventual GC
|
||||
- **Fault tolerance**: No single point of failure
|
||||
- **Convergence**: Keeper count decreases over time
|
||||
|
||||
## Simulation Results
|
||||
|
||||
A working simulation is available at `simulations/hyperloglog-tombstone/simulation.ts`.
|
||||
|
||||
| Test | Nodes | Records Deleted | Tombstones Remaining |
|
||||
|------|-------|-----------------|----------------------|
|
||||
| Single Node Deletion (50 trials) | 750 | 11 rounds | 118 (~16%) |
|
||||
| Early Tombstone | 20 | 10 rounds | 2 (10%) |
|
||||
| Bridged Network (2 clusters) | 30 | 10 rounds | 3 (10%) |
|
||||
| Concurrent Tombstones (3 deleters) | 20 | 10 rounds | 3 (15%) |
|
||||
| Network Partition and Heal | 20 | 10 rounds | 2 (10%) |
|
||||
| Sparse Network (15% connectivity) | 500 | 13 rounds | 108 (~22%) |
|
||||
|
||||
Key findings from simulation:
|
||||
- Records are consistently deleted within 10-13 gossip rounds
|
||||
- Tombstones converge to 10-22% of nodes remaining as keepers after 100 additional rounds
|
||||
- Bridged and partitioned networks converge to ~1 keeper per cluster
|
||||
- Higher connectivity leads to faster keeper convergence
|
||||
804
simulations/hyperloglog-tombstone/simulation.ts
Normal file
804
simulations/hyperloglog-tombstone/simulation.ts
Normal file
|
|
@ -0,0 +1,804 @@
|
|||
type HLLRegisters = Uint8Array;
|
||||
|
||||
interface HLL {
|
||||
registers: HLLRegisters;
|
||||
m: number;
|
||||
alphaMM: number;
|
||||
}
|
||||
|
||||
const createHLL = (precision: number = 10): HLL => {
|
||||
const m = 1 << precision;
|
||||
const alphaMM = m === 16 ? 0.673 * m * m
|
||||
: m === 32 ? 0.697 * m * m
|
||||
: m === 64 ? 0.709 * m * m
|
||||
: (0.7213 / (1 + 1.079 / m)) * m * m;
|
||||
|
||||
return { registers: new Uint8Array(m), m, alphaMM };
|
||||
};
|
||||
|
||||
const hashString = (value: string): number => {
|
||||
let hash = 0;
|
||||
for (let i = 0; i < value.length; i++) {
|
||||
hash = ((hash << 5) - hash) + value.charCodeAt(i);
|
||||
hash = hash & hash;
|
||||
}
|
||||
hash ^= hash >>> 16;
|
||||
hash = Math.imul(hash, 0x85ebca6b);
|
||||
hash ^= hash >>> 13;
|
||||
hash = Math.imul(hash, 0xc2b2ae35);
|
||||
hash ^= hash >>> 16;
|
||||
return hash >>> 0;
|
||||
};
|
||||
|
||||
const rho = (value: number): number => {
|
||||
if (value === 0) return 32;
|
||||
let count = 1;
|
||||
while ((value & 0x80000000) === 0) {
|
||||
count++;
|
||||
value <<= 1;
|
||||
}
|
||||
return count;
|
||||
};
|
||||
|
||||
const hllAdd = (hll: HLL, value: string): HLL => {
|
||||
const hash = hashString(value);
|
||||
const index = hash >>> (32 - Math.log2(hll.m));
|
||||
const w = hash << Math.log2(hll.m);
|
||||
const rank = rho(w);
|
||||
const newRegisters = new Uint8Array(hll.registers);
|
||||
newRegisters[index] = Math.max(newRegisters[index], rank);
|
||||
return { ...hll, registers: newRegisters };
|
||||
};
|
||||
|
||||
const hllEstimate = (hll: HLL): number => {
|
||||
let sum = 0;
|
||||
let zeros = 0;
|
||||
for (let i = 0; i < hll.m; i++) {
|
||||
sum += Math.pow(2, -hll.registers[i]);
|
||||
if (hll.registers[i] === 0) zeros++;
|
||||
}
|
||||
let estimate = hll.alphaMM / sum;
|
||||
if (estimate <= 2.5 * hll.m && zeros > 0) {
|
||||
estimate = hll.m * Math.log(hll.m / zeros);
|
||||
}
|
||||
return Math.round(estimate);
|
||||
};
|
||||
|
||||
const hllMerge = (a: HLL, b: HLL): HLL => {
|
||||
const newRegisters = new Uint8Array(a.m);
|
||||
for (let i = 0; i < a.m; i++) {
|
||||
newRegisters[i] = Math.max(a.registers[i], b.registers[i]);
|
||||
}
|
||||
return { ...a, registers: newRegisters };
|
||||
};
|
||||
|
||||
const hllClone = (hll: HLL): HLL => ({
|
||||
...hll,
|
||||
registers: new Uint8Array(hll.registers),
|
||||
});
|
||||
|
||||
// A replicated record as stored on a node.
interface DataRecord<Data> {
  readonly id: string;
  readonly data: Data;
  readonly recordHLL: HLL; // estimates how many nodes hold this record
}

// Deletion marker for a record.
interface Tombstone {
  readonly id: string;
  readonly frozenRecordHLL: HLL; // record-distribution snapshot used as the GC target
  readonly tombstoneHLL: HLL;    // estimates how many nodes hold this tombstone
  readonly isKeeper: boolean;    // keepers keep propagating instead of garbage collecting
}

// Per-node simulation state.
interface NodeState<Data> {
  readonly id: string;
  readonly records: ReadonlyMap<string, DataRecord<Data>>;
  readonly tombstones: ReadonlyMap<string, Tombstone>;
  readonly peerIds: readonly string[]; // ids of directly connected nodes
  // Counters used only for reporting simulation results.
  readonly stats: {
    readonly messagesReceived: number;
    readonly tombstonesGarbageCollected: number;
    readonly resurrections: number; // record arrivals for an already-tombstoned id
  };
}

// The whole simulated network.
interface NetworkState<Data> {
  readonly nodes: ReadonlyMap<string, NodeState<Data>>;
}
|
||||
|
||||
const createRecord = <Data>(id: string, data: Data, nodeId: string): DataRecord<Data> => ({
|
||||
id,
|
||||
data,
|
||||
recordHLL: hllAdd(createHLL(), nodeId),
|
||||
});
|
||||
|
||||
const createTombstone = <Data>(record: DataRecord<Data>, nodeId: string): Tombstone => ({
|
||||
id: record.id,
|
||||
frozenRecordHLL: hllClone(record.recordHLL),
|
||||
tombstoneHLL: hllAdd(createHLL(), nodeId),
|
||||
isKeeper: false,
|
||||
});
|
||||
|
||||
const createNode = <Data>(id: string): NodeState<Data> => ({
|
||||
id,
|
||||
records: new Map(),
|
||||
tombstones: new Map(),
|
||||
peerIds: [],
|
||||
stats: { messagesReceived: 0, tombstonesGarbageCollected: 0, resurrections: 0 },
|
||||
});
|
||||
|
||||
const addPeerToNode = <Data>(node: NodeState<Data>, peerId: string): NodeState<Data> => {
|
||||
if (node.peerIds.includes(peerId)) return node;
|
||||
return { ...node, peerIds: [...node.peerIds, peerId] };
|
||||
};
|
||||
|
||||
/**
 * Decides the GC-related actions for a tombstone after observing remote state.
 *
 * @param tombstone - The locally merged tombstone being evaluated.
 * @param incomingTombstoneEstimate - The sender's tombstoneHLL estimate, or
 *   null when there is no incoming tombstone to compare against.
 * @param myTombstoneEstimateBeforeMerge - This node's tombstoneHLL estimate
 *   taken before merging in the incoming sketch.
 * @param myNodeId - This node's id (used only in the tie-breaker).
 * @param senderNodeId - The sending node's id, or null when unknown.
 * @returns shouldGC (drop local record+tombstone), becomeKeeper (start
 *   propagating as keeper), stepDownAsKeeper (stop being a keeper and GC).
 *   Note: in this implementation shouldGC is set exactly when
 *   stepDownAsKeeper is set.
 */
const checkGCStatus = (
  tombstone: Tombstone,
  incomingTombstoneEstimate: number | null,
  myTombstoneEstimateBeforeMerge: number,
  myNodeId: string,
  senderNodeId: string | null
): { shouldGC: boolean; becomeKeeper: boolean; stepDownAsKeeper: boolean } => {
  // Target: how many nodes held the record when the tombstone was frozen.
  const targetCount = hllEstimate(tombstone.frozenRecordHLL);
  const tombstoneCount = hllEstimate(tombstone.tombstoneHLL);

  if (tombstone.isKeeper) {
    // Keeper step-down logic:
    // If incoming tombstone has reached the target count, compare estimates.
    // If incoming estimate >= my estimate before merge, step down.
    // Use node ID as tie-breaker: higher node ID steps down when estimates are equal.
    if (incomingTombstoneEstimate !== null && incomingTombstoneEstimate >= targetCount) {
      if (myTombstoneEstimateBeforeMerge < incomingTombstoneEstimate) {
        return { shouldGC: true, becomeKeeper: false, stepDownAsKeeper: true };
      }
      // Tie-breaker: if estimates are equal, the lexicographically higher node ID steps down
      if (myTombstoneEstimateBeforeMerge === incomingTombstoneEstimate &&
          senderNodeId !== null && myNodeId > senderNodeId) {
        return { shouldGC: true, becomeKeeper: false, stepDownAsKeeper: true };
      }
    }
    // Keeper stays a keeper when neither step-down condition fires.
    return { shouldGC: false, becomeKeeper: false, stepDownAsKeeper: false };
  }

  // Become keeper when tombstone count reaches target (all record holders have acknowledged)
  if (tombstoneCount >= targetCount) {
    return { shouldGC: false, becomeKeeper: true, stepDownAsKeeper: false };
  }

  return { shouldGC: false, becomeKeeper: false, stepDownAsKeeper: false };
};
|
||||
|
||||
/**
 * Handles an incoming record gossip message.
 *
 * If this node already holds a tombstone for the id, the record is NOT
 * re-adopted — that would be a resurrection — and only the stats counters
 * are updated. Otherwise the record is stored: an existing copy has its
 * HLL merged with the incoming one, and in both cases this node adds
 * itself to the record's HLL. Returns a new node state (no mutation).
 */
const receiveRecord = <Data>(
  node: NodeState<Data>,
  incoming: DataRecord<Data>
): NodeState<Data> => {
  const newStats = { ...node.stats, messagesReceived: node.stats.messagesReceived + 1 };

  // Tombstoned ids are never resurrected; count the attempt instead.
  if (node.tombstones.has(incoming.id)) {
    return { ...node, stats: { ...newStats, resurrections: newStats.resurrections + 1 } };
  }

  const existing = node.records.get(incoming.id);
  // Merge with any existing copy, then record this node in the HLL.
  const updatedRecord: DataRecord<Data> = existing
    ? { ...existing, recordHLL: hllAdd(hllMerge(existing.recordHLL, incoming.recordHLL), node.id) }
    : { ...incoming, recordHLL: hllAdd(hllClone(incoming.recordHLL), node.id) };

  const newRecords = new Map(node.records);
  newRecords.set(incoming.id, updatedRecord);
  return { ...node, records: newRecords, stats: newStats };
};
|
||||
|
||||
/**
 * Handles an incoming tombstone gossip message.
 *
 * Tombstones are ignored for unknown records. Otherwise the incoming
 * tombstone is merged into local state: the tombstone HLLs are unioned
 * (with this node added), and the frozen record HLL is replaced by
 * whichever candidate — existing snapshot, incoming snapshot, or the
 * live record's HLL — has the highest estimate, so the GC target never
 * shrinks. checkGCStatus then decides whether to step down (GC both the
 * record and the tombstone) or to become a keeper. The local record copy
 * is always deleted once a tombstone is held. Returns a new node state.
 *
 * @param node - Receiving node's state.
 * @param incoming - Tombstone carried by the gossip message.
 * @param senderNodeId - Id of the node that sent the message (tie-breaker input).
 */
const receiveTombstone = <Data>(
  node: NodeState<Data>,
  incoming: Tombstone,
  senderNodeId: string
): NodeState<Data> => {
  let newStats = { ...node.stats, messagesReceived: node.stats.messagesReceived + 1 };

  // Don't accept tombstones for records we never saw.
  const record = node.records.get(incoming.id);
  if (!record) {
    return { ...node, stats: newStats };
  }

  const existing = node.tombstones.get(incoming.id);

  // Union the tombstone sketches and count this node as a tombstone holder.
  const mergedTombstoneHLL = existing
    ? hllAdd(hllMerge(existing.tombstoneHLL, incoming.tombstoneHLL), node.id)
    : hllAdd(hllClone(incoming.tombstoneHLL), node.id);

  // Keep the frozen snapshot with the highest estimate so the GC target
  // only ever grows (safer: more acknowledgements required before GC).
  let bestFrozenHLL = incoming.frozenRecordHLL;
  if (existing?.frozenRecordHLL) {
    bestFrozenHLL = hllEstimate(existing.frozenRecordHLL) > hllEstimate(bestFrozenHLL)
      ? existing.frozenRecordHLL
      : bestFrozenHLL;
  }
  if (hllEstimate(record.recordHLL) > hllEstimate(bestFrozenHLL)) {
    bestFrozenHLL = hllClone(record.recordHLL);
  }

  let updatedTombstone: Tombstone = {
    id: incoming.id,
    tombstoneHLL: mergedTombstoneHLL,
    frozenRecordHLL: bestFrozenHLL,
    isKeeper: existing?.isKeeper ?? false,
  };

  // Pre-merge estimate feeds the step-down comparison in checkGCStatus.
  const myEstimateBeforeMerge = existing ? hllEstimate(existing.tombstoneHLL) : 0;

  const gcStatus = checkGCStatus(
    updatedTombstone,
    hllEstimate(incoming.tombstoneHLL),
    myEstimateBeforeMerge,
    node.id,
    senderNodeId
  );

  // Always delete the record when we have a tombstone
  const newRecords = new Map(node.records);
  newRecords.delete(incoming.id);

  if (gcStatus.stepDownAsKeeper) {
    // Step down: delete both record and tombstone
    const newTombstones = new Map(node.tombstones);
    newTombstones.delete(incoming.id);
    newStats = { ...newStats, tombstonesGarbageCollected: newStats.tombstonesGarbageCollected + 1 };
    return { ...node, records: newRecords, tombstones: newTombstones, stats: newStats };
  }

  if (gcStatus.becomeKeeper) {
    updatedTombstone = { ...updatedTombstone, isKeeper: true };
  }

  const newTombstones = new Map(node.tombstones);
  newTombstones.set(incoming.id, updatedTombstone);
  return { ...node, records: newRecords, tombstones: newTombstones, stats: newStats };
};
|
||||
|
||||
const createNetwork = <Data>(nodeCount: number, connectivityRatio: number): NetworkState<Data> => {
|
||||
let nodes = new Map<string, NodeState<Data>>();
|
||||
|
||||
for (let i = 0; i < nodeCount; i++) {
|
||||
nodes.set(`node-${i}`, createNode<Data>(`node-${i}`));
|
||||
}
|
||||
|
||||
const nodeIds = Array.from(nodes.keys());
|
||||
for (let i = 0; i < nodeIds.length; i++) {
|
||||
for (let j = i + 1; j < nodeIds.length; j++) {
|
||||
if (Math.random() < connectivityRatio) {
|
||||
nodes = new Map(nodes)
|
||||
.set(nodeIds[i], addPeerToNode(nodes.get(nodeIds[i])!, nodeIds[j]))
|
||||
.set(nodeIds[j], addPeerToNode(nodes.get(nodeIds[j])!, nodeIds[i]));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (let i = 0; i < nodeIds.length; i++) {
|
||||
const nextIdx = (i + 1) % nodeIds.length;
|
||||
nodes = new Map(nodes)
|
||||
.set(nodeIds[i], addPeerToNode(nodes.get(nodeIds[i])!, nodeIds[nextIdx]))
|
||||
.set(nodeIds[nextIdx], addPeerToNode(nodes.get(nodeIds[nextIdx])!, nodeIds[i]));
|
||||
}
|
||||
|
||||
return { nodes };
|
||||
};
|
||||
|
||||
const createBridgedNetwork = <Data>(
|
||||
clusterSize: number,
|
||||
intraClusterConnectivity: number
|
||||
): NetworkState<Data> => {
|
||||
let nodes = new Map<string, NodeState<Data>>();
|
||||
|
||||
for (let i = 0; i < clusterSize; i++) {
|
||||
nodes.set(`cluster-a-${i}`, createNode<Data>(`cluster-a-${i}`));
|
||||
nodes.set(`cluster-b-${i}`, createNode<Data>(`cluster-b-${i}`));
|
||||
}
|
||||
|
||||
const clusterA = Array.from(nodes.keys()).filter(id => id.startsWith('cluster-a'));
|
||||
const clusterB = Array.from(nodes.keys()).filter(id => id.startsWith('cluster-b'));
|
||||
|
||||
const connectCluster = (clusterIds: string[]) => {
|
||||
for (let i = 0; i < clusterIds.length; i++) {
|
||||
for (let j = i + 1; j < clusterIds.length; j++) {
|
||||
if (Math.random() < intraClusterConnectivity) {
|
||||
nodes = new Map(nodes)
|
||||
.set(clusterIds[i], addPeerToNode(nodes.get(clusterIds[i])!, clusterIds[j]))
|
||||
.set(clusterIds[j], addPeerToNode(nodes.get(clusterIds[j])!, clusterIds[i]));
|
||||
}
|
||||
}
|
||||
}
|
||||
for (let i = 0; i < clusterIds.length; i++) {
|
||||
const nextIdx = (i + 1) % clusterIds.length;
|
||||
nodes = new Map(nodes)
|
||||
.set(clusterIds[i], addPeerToNode(nodes.get(clusterIds[i])!, clusterIds[nextIdx]))
|
||||
.set(clusterIds[nextIdx], addPeerToNode(nodes.get(clusterIds[nextIdx])!, clusterIds[i]));
|
||||
}
|
||||
};
|
||||
|
||||
connectCluster(clusterA);
|
||||
connectCluster(clusterB);
|
||||
|
||||
const bridgeA = clusterA[0];
|
||||
const bridgeB = clusterB[0];
|
||||
nodes = new Map(nodes)
|
||||
.set(bridgeA, addPeerToNode(nodes.get(bridgeA)!, bridgeB))
|
||||
.set(bridgeB, addPeerToNode(nodes.get(bridgeB)!, bridgeA));
|
||||
|
||||
return { nodes };
|
||||
};
|
||||
|
||||
/**
 * Aggressively pushes a tombstone from `forwardingNodeId` to every
 * connected peer (except `excludePeerId`, typically the node we received
 * it from). Peers that no longer hold the record are skipped. When a
 * peer steps down as keeper in response (its tombstone disappears), the
 * tombstone is recursively forwarded from that peer too, producing the
 * cascading step-down described in the accompanying draft. Returns a new
 * network state.
 */
const forwardTombstoneToAllPeers = <Data>(
  network: NetworkState<Data>,
  forwardingNodeId: string,
  tombstone: Tombstone,
  excludePeerId?: string
): NetworkState<Data> => {
  const forwardingNode = network.nodes.get(forwardingNodeId);
  if (!forwardingNode) return network;

  let newNodes = new Map(network.nodes);

  for (const peerId of forwardingNode.peerIds) {
    if (peerId === excludePeerId) continue;

    // Read from newNodes so earlier iterations' updates are visible.
    const peer = newNodes.get(peerId);
    if (!peer || !peer.records.has(tombstone.id)) continue;

    const updatedPeer = receiveTombstone(peer, tombstone, forwardingNodeId);
    newNodes.set(peerId, updatedPeer);

    // If this peer also stepped down, recursively forward
    if (!updatedPeer.tombstones.has(tombstone.id) && peer.tombstones.has(tombstone.id)) {
      const result = forwardTombstoneToAllPeers({ nodes: newNodes }, peerId, tombstone, forwardingNodeId);
      newNodes = new Map(result.nodes);
    }
  }

  return { nodes: newNodes };
};
|
||||
|
||||
/**
 * One gossip exchange: `senderNodeId` picks a random peer and pushes its
 * record and/or tombstone for `recordId`. The exchange is bidirectional:
 * after the peer processes the message, the sender also merges the
 * peer's tombstone state back and re-runs the GC decision for itself,
 * possibly stepping down and cascading via forwardTombstoneToAllPeers.
 * Returns a new network state; no-ops when the sender is missing, has no
 * peers, or holds neither the record nor the tombstone.
 */
const gossipOnce = <Data>(network: NetworkState<Data>, senderNodeId: string, recordId: string): NetworkState<Data> => {
  const sender = network.nodes.get(senderNodeId);
  if (!sender || sender.peerIds.length === 0) return network;

  const record = sender.records.get(recordId);
  const tombstone = sender.tombstones.get(recordId);
  if (!record && !tombstone) return network;

  // Uniformly random peer selection.
  const peerId = sender.peerIds[Math.floor(Math.random() * sender.peerIds.length)];
  const peer = network.nodes.get(peerId);
  if (!peer) return network;

  let newNodes = new Map(network.nodes);

  // Plain record gossip (no tombstone involved).
  if (record && !tombstone) {
    const updatedPeer = receiveRecord(peer, record);
    newNodes.set(peerId, updatedPeer);
  }

  if (tombstone) {
    // receiveTombstone ignores unknown records, so deliver the record first.
    if (record && !peer.records.has(recordId)) {
      const peerWithRecord = receiveRecord(peer, record);
      newNodes.set(peerId, peerWithRecord);
    }
    const currentPeer = newNodes.get(peerId)!;
    const peerHadTombstone = currentPeer.tombstones.has(recordId);
    const updatedPeer = receiveTombstone(currentPeer, tombstone, senderNodeId);
    newNodes.set(peerId, updatedPeer);

    // If peer stepped down (had tombstone before, doesn't have it now), forward the incoming tombstone
    if (peerHadTombstone && !updatedPeer.tombstones.has(recordId)) {
      const result = forwardTombstoneToAllPeers({ nodes: newNodes }, peerId, tombstone, senderNodeId);
      newNodes = new Map(result.nodes);
    }

    // Reverse direction: merge the peer's tombstone state back into the sender.
    if (updatedPeer.tombstones.has(recordId)) {
      const peerTombstone = updatedPeer.tombstones.get(recordId)!;
      const senderEstimateBeforeMerge = hllEstimate(tombstone.tombstoneHLL);

      // Merge HLLs
      const mergedTombstoneHLL = hllMerge(tombstone.tombstoneHLL, peerTombstone.tombstoneHLL);
      const bestFrozenHLL = hllEstimate(peerTombstone.frozenRecordHLL) > hllEstimate(tombstone.frozenRecordHLL)
        ? peerTombstone.frozenRecordHLL
        : tombstone.frozenRecordHLL;

      let updatedSenderTombstone: Tombstone = {
        ...tombstone,
        tombstoneHLL: mergedTombstoneHLL,
        frozenRecordHLL: bestFrozenHLL,
      };

      // Check if sender should step down (peer has higher estimate or wins tie-breaker)
      const gcStatus = checkGCStatus(
        updatedSenderTombstone,
        hllEstimate(peerTombstone.tombstoneHLL),
        senderEstimateBeforeMerge,
        senderNodeId,
        peerId
      );

      if (gcStatus.stepDownAsKeeper) {
        // Sender steps down - remove their tombstone
        const currentSender = newNodes.get(senderNodeId)!;
        const newSenderTombstones = new Map(currentSender.tombstones);
        newSenderTombstones.delete(recordId);
        const newSenderStats = { ...currentSender.stats, tombstonesGarbageCollected: currentSender.stats.tombstonesGarbageCollected + 1 };
        newNodes.set(senderNodeId, { ...currentSender, tombstones: newSenderTombstones, stats: newSenderStats });

        // Forward the peer's tombstone to all sender's other peers
        const result = forwardTombstoneToAllPeers({ nodes: newNodes }, senderNodeId, peerTombstone, peerId);
        newNodes = new Map(result.nodes);
      } else {
        // Keep tombstone with merged data
        if (gcStatus.becomeKeeper) {
          updatedSenderTombstone = { ...updatedSenderTombstone, isKeeper: true };
        }
        const currentSender = newNodes.get(senderNodeId)!;
        const newSenderTombstones = new Map(currentSender.tombstones);
        newSenderTombstones.set(recordId, updatedSenderTombstone);
        newNodes.set(senderNodeId, { ...currentSender, tombstones: newSenderTombstones });
      }
    }
  }

  return { nodes: newNodes };
};
|
||||
|
||||
const gossipRounds = <Data>(network: NetworkState<Data>, recordId: string, rounds: number): NetworkState<Data> => {
|
||||
let state = network;
|
||||
for (let round = 0; round < rounds; round++) {
|
||||
for (const [nodeId, node] of state.nodes) {
|
||||
if (node.records.has(recordId) || node.tombstones.has(recordId)) {
|
||||
state = gossipOnce(state, nodeId, recordId);
|
||||
}
|
||||
}
|
||||
}
|
||||
return state;
|
||||
};
|
||||
|
||||
// Per-cluster (or whole-network) record/tombstone counts for reporting.
interface ClusterStats {
  name: string;           // cluster prefix, or 'all' for the whole network
  nodeCount: number;      // nodes counted in this cluster
  recordCount: number;    // nodes still holding the record
  tombstoneCount: number; // nodes still holding the tombstone
}

// Summary of one simulation scenario run.
interface SimulationResult {
  testName: string;
  recordsDeleted: boolean;       // whether the record disappeared from every node
  roundsToDeleteRecords: number; // rounds it took for full deletion (when it happened)
  totalRounds: number;           // total gossip rounds executed
  clusters: ClusterStats[];      // final per-cluster state
}
|
||||
|
||||
const getClusterStats = <Data>(
|
||||
network: NetworkState<Data>,
|
||||
recordId: string,
|
||||
clusterPrefix?: string
|
||||
): ClusterStats => {
|
||||
let recordCount = 0;
|
||||
let tombstoneCount = 0;
|
||||
let nodeCount = 0;
|
||||
|
||||
for (const [nodeId, node] of network.nodes) {
|
||||
if (clusterPrefix && !nodeId.startsWith(clusterPrefix)) continue;
|
||||
nodeCount++;
|
||||
if (node.records.has(recordId)) recordCount++;
|
||||
if (node.tombstones.has(recordId)) tombstoneCount++;
|
||||
}
|
||||
|
||||
return {
|
||||
name: clusterPrefix ?? 'all',
|
||||
nodeCount,
|
||||
recordCount,
|
||||
tombstoneCount,
|
||||
};
|
||||
};
|
||||
|
||||
const printSimulationResult = (result: SimulationResult): void => {
|
||||
console.log(`\n== ${result.testName} ==`);
|
||||
|
||||
if (result.recordsDeleted) {
|
||||
console.log(` Records deleted: YES (${result.roundsToDeleteRecords} rounds)`);
|
||||
} else {
|
||||
console.log(` Records deleted: NO`);
|
||||
}
|
||||
console.log(` Total rounds run: ${result.totalRounds}`);
|
||||
|
||||
console.log(` Final State:`);
|
||||
|
||||
for (const cluster of result.clusters) {
|
||||
const clusterLabel = cluster.name === 'all' ? 'Network' : `Cluster ${cluster.name}`;
|
||||
console.log(` ${clusterLabel} (${cluster.nodeCount} nodes):`);
|
||||
console.log(` Records: ${cluster.recordCount}`);
|
||||
console.log(` Tombstones: ${cluster.tombstoneCount}`);
|
||||
}
|
||||
};
|
||||
|
||||
/** Outcome of driving a network to convergence for one record. */
interface ConvergenceResult<Data> {
  // Network state after all gossip rounds have run.
  network: NetworkState<Data>;
  // True when no node held a live copy of the record at a convergence check.
  recordsDeleted: boolean;
  // Rounds elapsed when deletion was first observed (0 when never deleted).
  roundsToDeleteRecords: number;
  // Total rounds executed across both phases.
  totalRounds: number;
}
|
||||
|
||||
const runToConvergence = <Data>(
|
||||
network: NetworkState<Data>,
|
||||
recordId: string,
|
||||
maxRounds: number,
|
||||
extraRoundsAfterDeletion: number = 100
|
||||
): ConvergenceResult<Data> => {
|
||||
let rounds = 0;
|
||||
let state = network;
|
||||
let recordsDeleted = false;
|
||||
let roundsToDeleteRecords = 0;
|
||||
|
||||
// Phase 1: Run until records are deleted
|
||||
while (rounds < maxRounds && !recordsDeleted) {
|
||||
const stats = getClusterStats(state, recordId);
|
||||
if (stats.recordCount === 0) {
|
||||
recordsDeleted = true;
|
||||
roundsToDeleteRecords = rounds;
|
||||
}
|
||||
state = gossipRounds(state, recordId, 10);
|
||||
rounds += 10;
|
||||
}
|
||||
|
||||
// Phase 2: Continue running to let tombstones converge
|
||||
let extraRounds = 0;
|
||||
while (extraRounds < extraRoundsAfterDeletion) {
|
||||
state = gossipRounds(state, recordId, 10);
|
||||
extraRounds += 10;
|
||||
rounds += 10;
|
||||
}
|
||||
|
||||
return {
|
||||
network: state,
|
||||
recordsDeleted,
|
||||
roundsToDeleteRecords,
|
||||
totalRounds: rounds,
|
||||
};
|
||||
};
|
||||
|
||||
const addRecordToNetwork = <Data>(network: NetworkState<Data>, nodeId: string, recordId: string, data: Data): NetworkState<Data> => {
|
||||
const node = network.nodes.get(nodeId);
|
||||
if (!node) return network;
|
||||
|
||||
const newRecords = new Map(node.records);
|
||||
newRecords.set(recordId, createRecord(recordId, data, nodeId));
|
||||
const newNodes = new Map(network.nodes);
|
||||
newNodes.set(nodeId, { ...node, records: newRecords });
|
||||
return { nodes: newNodes };
|
||||
};
|
||||
|
||||
const addTombstoneToNetwork = <Data>(network: NetworkState<Data>, nodeId: string, recordId: string): NetworkState<Data> => {
|
||||
const node = network.nodes.get(nodeId);
|
||||
if (!node) return network;
|
||||
|
||||
const record = node.records.get(recordId);
|
||||
if (!record) return network;
|
||||
|
||||
const newTombstones = new Map(node.tombstones);
|
||||
newTombstones.set(recordId, createTombstone(record, nodeId));
|
||||
const newNodes = new Map(network.nodes);
|
||||
newNodes.set(nodeId, { ...node, tombstones: newTombstones });
|
||||
return { nodes: newNodes };
|
||||
};
|
||||
|
||||
const testSingleNodeDeletion = (): void => {
|
||||
const trials = 50;
|
||||
const maxRounds = 99999;
|
||||
let deletedCount = 0;
|
||||
let totalDeletionRounds = 0;
|
||||
let totalRounds = 0;
|
||||
let finalRecords = 0;
|
||||
let finalTombstones = 0;
|
||||
|
||||
for (let trial = 0; trial < trials; trial++) {
|
||||
let network = createNetwork<string>(15, 0.4);
|
||||
const recordId = `test-${trial}`;
|
||||
|
||||
network = addRecordToNetwork(network, "node-0", recordId, "Test Data");
|
||||
network = gossipRounds(network, recordId, 20);
|
||||
network = addTombstoneToNetwork(network, "node-0", recordId);
|
||||
|
||||
const result = runToConvergence(network, recordId, maxRounds);
|
||||
|
||||
if (result.recordsDeleted) {
|
||||
deletedCount++;
|
||||
totalDeletionRounds += result.roundsToDeleteRecords;
|
||||
}
|
||||
totalRounds += result.totalRounds;
|
||||
|
||||
const stats = getClusterStats(result.network, recordId);
|
||||
finalRecords += stats.recordCount;
|
||||
finalTombstones += stats.tombstoneCount;
|
||||
}
|
||||
|
||||
printSimulationResult({
|
||||
testName: `Single Node Deletion (${trials} trials)`,
|
||||
recordsDeleted: deletedCount === trials,
|
||||
roundsToDeleteRecords: deletedCount > 0 ? Math.round(totalDeletionRounds / deletedCount) : 0,
|
||||
totalRounds: Math.round(totalRounds / trials),
|
||||
clusters: [{
|
||||
name: 'all',
|
||||
nodeCount: 15 * trials,
|
||||
recordCount: finalRecords,
|
||||
tombstoneCount: finalTombstones,
|
||||
}],
|
||||
});
|
||||
};
|
||||
|
||||
const testEarlyTombstoneCreation = (): void => {
|
||||
const maxRounds = 99999;
|
||||
let network = createNetwork<string>(20, 0.4);
|
||||
const recordId = "early-tombstone";
|
||||
|
||||
// Only propagate record for 3 rounds before creating tombstone
|
||||
network = addRecordToNetwork(network, "node-0", recordId, "Test");
|
||||
network = gossipRounds(network, recordId, 3);
|
||||
network = addTombstoneToNetwork(network, "node-0", recordId);
|
||||
|
||||
const result = runToConvergence(network, recordId, maxRounds);
|
||||
|
||||
printSimulationResult({
|
||||
testName: "Early Tombstone (record partially propagated)",
|
||||
recordsDeleted: result.recordsDeleted,
|
||||
roundsToDeleteRecords: result.roundsToDeleteRecords,
|
||||
totalRounds: result.totalRounds,
|
||||
clusters: [getClusterStats(result.network, recordId)],
|
||||
});
|
||||
};
|
||||
|
||||
const testBridgedNetwork = (): void => {
|
||||
const maxRounds = 99999;
|
||||
const clusterSize = 15;
|
||||
let network = createBridgedNetwork<string>(clusterSize, 0.5);
|
||||
const recordId = "bridged-record";
|
||||
|
||||
network = addRecordToNetwork(network, "cluster-a-0", recordId, "Test Data");
|
||||
network = gossipRounds(network, recordId, 20);
|
||||
network = addTombstoneToNetwork(network, "cluster-a-0", recordId);
|
||||
|
||||
const result = runToConvergence(network, recordId, maxRounds);
|
||||
|
||||
printSimulationResult({
|
||||
testName: "Bridged Network (two clusters with single connection)",
|
||||
recordsDeleted: result.recordsDeleted,
|
||||
roundsToDeleteRecords: result.roundsToDeleteRecords,
|
||||
totalRounds: result.totalRounds,
|
||||
clusters: [
|
||||
getClusterStats(result.network, recordId, "cluster-a"),
|
||||
getClusterStats(result.network, recordId, "cluster-b"),
|
||||
],
|
||||
});
|
||||
};
|
||||
|
||||
const testConcurrentTombstones = (): void => {
|
||||
const maxRounds = 99999;
|
||||
let network = createNetwork<string>(20, 0.4);
|
||||
const recordId = "concurrent-delete";
|
||||
|
||||
network = addRecordToNetwork(network, "node-0", recordId, "Test Data");
|
||||
network = gossipRounds(network, recordId, 30);
|
||||
|
||||
network = addTombstoneToNetwork(network, "node-0", recordId);
|
||||
network = addTombstoneToNetwork(network, "node-5", recordId);
|
||||
network = addTombstoneToNetwork(network, "node-10", recordId);
|
||||
|
||||
const result = runToConvergence(network, recordId, maxRounds);
|
||||
|
||||
printSimulationResult({
|
||||
testName: "Concurrent Tombstones (3 nodes delete same record)",
|
||||
recordsDeleted: result.recordsDeleted,
|
||||
roundsToDeleteRecords: result.roundsToDeleteRecords,
|
||||
totalRounds: result.totalRounds,
|
||||
clusters: [getClusterStats(result.network, recordId)],
|
||||
});
|
||||
};
|
||||
|
||||
const testNetworkPartitionHeal = (): void => {
|
||||
const maxRounds = 99999;
|
||||
const clusterSize = 10;
|
||||
let network = createBridgedNetwork<string>(clusterSize, 0.5);
|
||||
const recordId = "partition-test";
|
||||
|
||||
network = addRecordToNetwork(network, "cluster-a-0", recordId, "Test Data");
|
||||
network = gossipRounds(network, recordId, 30);
|
||||
|
||||
// Partition the network
|
||||
const bridgeA = network.nodes.get("cluster-a-0")!;
|
||||
const bridgeB = network.nodes.get("cluster-b-0")!;
|
||||
const newBridgeAPeers = bridgeA.peerIds.filter(p => p !== "cluster-b-0");
|
||||
const newBridgeBPeers = bridgeB.peerIds.filter(p => p !== "cluster-a-0");
|
||||
|
||||
let partitionedNodes = new Map(network.nodes);
|
||||
partitionedNodes.set("cluster-a-0", { ...bridgeA, peerIds: newBridgeAPeers });
|
||||
partitionedNodes.set("cluster-b-0", { ...bridgeB, peerIds: newBridgeBPeers });
|
||||
network = { nodes: partitionedNodes };
|
||||
|
||||
network = addTombstoneToNetwork(network, "cluster-a-0", recordId);
|
||||
|
||||
// Run during partition
|
||||
const partitionResult = runToConvergence(network, recordId, 500);
|
||||
network = partitionResult.network;
|
||||
|
||||
// Heal the network
|
||||
const healedBridgeA = network.nodes.get("cluster-a-0")!;
|
||||
const healedBridgeB = network.nodes.get("cluster-b-0")!;
|
||||
let healedNodes = new Map(network.nodes);
|
||||
healedNodes.set("cluster-a-0", addPeerToNode(healedBridgeA, "cluster-b-0"));
|
||||
healedNodes.set("cluster-b-0", addPeerToNode(healedBridgeB, "cluster-a-0"));
|
||||
network = { nodes: healedNodes };
|
||||
|
||||
const result = runToConvergence(network, recordId, maxRounds);
|
||||
|
||||
printSimulationResult({
|
||||
testName: "Network Partition and Heal",
|
||||
recordsDeleted: result.recordsDeleted,
|
||||
roundsToDeleteRecords: partitionResult.roundsToDeleteRecords + result.roundsToDeleteRecords,
|
||||
totalRounds: partitionResult.totalRounds + result.totalRounds,
|
||||
clusters: [
|
||||
getClusterStats(result.network, recordId, "cluster-a"),
|
||||
getClusterStats(result.network, recordId, "cluster-b"),
|
||||
],
|
||||
});
|
||||
};
|
||||
|
||||
const testSparseNetwork = (): void => {
|
||||
const trials = 20;
|
||||
const maxRounds = 99999;
|
||||
let deletedCount = 0;
|
||||
let totalDeletionRounds = 0;
|
||||
let totalRounds = 0;
|
||||
let finalRecords = 0;
|
||||
let finalTombstones = 0;
|
||||
|
||||
for (let trial = 0; trial < trials; trial++) {
|
||||
let network = createNetwork<string>(25, 0.15);
|
||||
const recordId = `sparse-${trial}`;
|
||||
|
||||
network = addRecordToNetwork(network, "node-0", recordId, "Test");
|
||||
network = gossipRounds(network, recordId, 50);
|
||||
network = addTombstoneToNetwork(network, "node-0", recordId);
|
||||
|
||||
const result = runToConvergence(network, recordId, maxRounds);
|
||||
|
||||
if (result.recordsDeleted) {
|
||||
deletedCount++;
|
||||
totalDeletionRounds += result.roundsToDeleteRecords;
|
||||
}
|
||||
totalRounds += result.totalRounds;
|
||||
|
||||
const stats = getClusterStats(result.network, recordId);
|
||||
finalRecords += stats.recordCount;
|
||||
finalTombstones += stats.tombstoneCount;
|
||||
}
|
||||
|
||||
printSimulationResult({
|
||||
testName: `Sparse Network (${trials} trials, 15% connectivity)`,
|
||||
recordsDeleted: deletedCount === trials,
|
||||
roundsToDeleteRecords: deletedCount > 0 ? Math.round(totalDeletionRounds / deletedCount) : 0,
|
||||
totalRounds: Math.round(totalRounds / trials),
|
||||
clusters: [{
|
||||
name: 'all',
|
||||
nodeCount: 25 * trials,
|
||||
recordCount: finalRecords,
|
||||
tombstoneCount: finalTombstones,
|
||||
}],
|
||||
});
|
||||
};
|
||||
|
||||
const runAllTests = (): void => {
|
||||
console.log("=== HyperLogLog Tombstone Simulation ===");
|
||||
|
||||
testSingleNodeDeletion();
|
||||
testEarlyTombstoneCreation();
|
||||
testBridgedNetwork();
|
||||
testConcurrentTombstones();
|
||||
testNetworkPartitionHeal();
|
||||
testSparseNetwork();
|
||||
|
||||
console.log("\n=== Simulation Complete ===");
|
||||
};
|
||||
|
||||
// Kick off the full simulation suite when this file is executed directly.
runAllTests();
|
||||
Loading…
Add table
Add a link
Reference in a new issue