feat: drafted out hyper log log tombstone
This commit is contained in:
parent
ffd33049d9
commit
463706e6a5
2 changed files with 1118 additions and 0 deletions
804
simulations/hyperloglog-tombstone/simulation.ts
Normal file
804
simulations/hyperloglog-tombstone/simulation.ts
Normal file
|
|
@ -0,0 +1,804 @@
|
|||
// Backing store for a HyperLogLog sketch: one byte per register, holding the
// maximum observed rank (leading-zero count + 1) of hashes routed there.
type HLLRegisters = Uint8Array;

// A HyperLogLog cardinality sketch.
interface HLL {
  registers: HLLRegisters;
  // Number of registers; always a power of two (m = 2^precision).
  m: number;
  // Precomputed bias-correction constant alpha_m * m^2 used by the estimator.
  alphaMM: number;
}
|
||||
|
||||
const createHLL = (precision: number = 10): HLL => {
|
||||
const m = 1 << precision;
|
||||
const alphaMM = m === 16 ? 0.673 * m * m
|
||||
: m === 32 ? 0.697 * m * m
|
||||
: m === 64 ? 0.709 * m * m
|
||||
: (0.7213 / (1 + 1.079 / m)) * m * m;
|
||||
|
||||
return { registers: new Uint8Array(m), m, alphaMM };
|
||||
};
|
||||
|
||||
const hashString = (value: string): number => {
|
||||
let hash = 0;
|
||||
for (let i = 0; i < value.length; i++) {
|
||||
hash = ((hash << 5) - hash) + value.charCodeAt(i);
|
||||
hash = hash & hash;
|
||||
}
|
||||
hash ^= hash >>> 16;
|
||||
hash = Math.imul(hash, 0x85ebca6b);
|
||||
hash ^= hash >>> 13;
|
||||
hash = Math.imul(hash, 0xc2b2ae35);
|
||||
hash ^= hash >>> 16;
|
||||
return hash >>> 0;
|
||||
};
|
||||
|
||||
const rho = (value: number): number => {
|
||||
if (value === 0) return 32;
|
||||
let count = 1;
|
||||
while ((value & 0x80000000) === 0) {
|
||||
count++;
|
||||
value <<= 1;
|
||||
}
|
||||
return count;
|
||||
};
|
||||
|
||||
const hllAdd = (hll: HLL, value: string): HLL => {
|
||||
const hash = hashString(value);
|
||||
const index = hash >>> (32 - Math.log2(hll.m));
|
||||
const w = hash << Math.log2(hll.m);
|
||||
const rank = rho(w);
|
||||
const newRegisters = new Uint8Array(hll.registers);
|
||||
newRegisters[index] = Math.max(newRegisters[index], rank);
|
||||
return { ...hll, registers: newRegisters };
|
||||
};
|
||||
|
||||
const hllEstimate = (hll: HLL): number => {
|
||||
let sum = 0;
|
||||
let zeros = 0;
|
||||
for (let i = 0; i < hll.m; i++) {
|
||||
sum += Math.pow(2, -hll.registers[i]);
|
||||
if (hll.registers[i] === 0) zeros++;
|
||||
}
|
||||
let estimate = hll.alphaMM / sum;
|
||||
if (estimate <= 2.5 * hll.m && zeros > 0) {
|
||||
estimate = hll.m * Math.log(hll.m / zeros);
|
||||
}
|
||||
return Math.round(estimate);
|
||||
};
|
||||
|
||||
const hllMerge = (a: HLL, b: HLL): HLL => {
|
||||
const newRegisters = new Uint8Array(a.m);
|
||||
for (let i = 0; i < a.m; i++) {
|
||||
newRegisters[i] = Math.max(a.registers[i], b.registers[i]);
|
||||
}
|
||||
return { ...a, registers: newRegisters };
|
||||
};
|
||||
|
||||
const hllClone = (hll: HLL): HLL => ({
|
||||
...hll,
|
||||
registers: new Uint8Array(hll.registers),
|
||||
});
|
||||
|
||||
// A replicated data record. The embedded sketch approximately counts how
// many distinct nodes have held a copy of this record.
interface DataRecord<Data> {
  readonly id: string;
  readonly data: Data;
  // Sketch of node IDs that have stored this record.
  readonly recordHLL: HLL;
}

// Deletion marker for a record. GC is driven by comparing the two sketches:
// once tombstoneHLL's estimate reaches frozenRecordHLL's estimate, every
// known holder of the record is believed to have acknowledged the delete.
interface Tombstone {
  readonly id: string;
  // Snapshot of the record's holder sketch, frozen when the delete was issued.
  readonly frozenRecordHLL: HLL;
  // Sketch of node IDs that have processed this tombstone.
  readonly tombstoneHLL: HLL;
  // A designated "keeper" retains the tombstone after others GC it, so a
  // straggling copy of the record can still be re-deleted.
  readonly isKeeper: boolean;
}

// One simulated gossip participant. State is immutable; every update returns
// a new NodeState value.
interface NodeState<Data> {
  readonly id: string;
  readonly records: ReadonlyMap<string, DataRecord<Data>>;
  readonly tombstones: ReadonlyMap<string, Tombstone>;
  // IDs of directly connected gossip partners.
  readonly peerIds: readonly string[];
  // Reporting counters only; the protocol itself does not read them.
  readonly stats: {
    readonly messagesReceived: number;
    readonly tombstonesGarbageCollected: number;
    readonly resurrections: number;
  };
}

// The whole simulated cluster, keyed by node ID.
interface NetworkState<Data> {
  readonly nodes: ReadonlyMap<string, NodeState<Data>>;
}
|
||||
|
||||
const createRecord = <Data>(id: string, data: Data, nodeId: string): DataRecord<Data> => ({
|
||||
id,
|
||||
data,
|
||||
recordHLL: hllAdd(createHLL(), nodeId),
|
||||
});
|
||||
|
||||
const createTombstone = <Data>(record: DataRecord<Data>, nodeId: string): Tombstone => ({
|
||||
id: record.id,
|
||||
frozenRecordHLL: hllClone(record.recordHLL),
|
||||
tombstoneHLL: hllAdd(createHLL(), nodeId),
|
||||
isKeeper: false,
|
||||
});
|
||||
|
||||
const createNode = <Data>(id: string): NodeState<Data> => ({
|
||||
id,
|
||||
records: new Map(),
|
||||
tombstones: new Map(),
|
||||
peerIds: [],
|
||||
stats: { messagesReceived: 0, tombstonesGarbageCollected: 0, resurrections: 0 },
|
||||
});
|
||||
|
||||
const addPeerToNode = <Data>(node: NodeState<Data>, peerId: string): NodeState<Data> => {
|
||||
if (node.peerIds.includes(peerId)) return node;
|
||||
return { ...node, peerIds: [...node.peerIds, peerId] };
|
||||
};
|
||||
|
||||
// Decide whether a tombstone holder should garbage-collect, become the
// designated "keeper", or step down as an existing keeper after merging with
// a peer's tombstone.
//
// Protocol: a non-keeper becomes keeper once its tombstone sketch estimate
// reaches the frozen record sketch's estimate (all known record holders have
// seen the delete). Two competing keepers resolve by comparing pre-merge
// estimates, then by node ID, so exactly one of them steps down and GCs.
const checkGCStatus = (
  tombstone: Tombstone,
  // Estimate of the incoming (sender's) tombstone sketch, or null when there
  // is no incoming tombstone to compare against.
  incomingTombstoneEstimate: number | null,
  // This node's own tombstone estimate BEFORE the merge, so the comparison is
  // not skewed by data we just absorbed from the sender.
  myTombstoneEstimateBeforeMerge: number,
  myNodeId: string,
  senderNodeId: string | null
): { shouldGC: boolean; becomeKeeper: boolean; stepDownAsKeeper: boolean } => {
  // How many nodes held the record vs. how many have seen the tombstone.
  const targetCount = hllEstimate(tombstone.frozenRecordHLL);
  const tombstoneCount = hllEstimate(tombstone.tombstoneHLL);

  if (tombstone.isKeeper) {
    // Keeper step-down logic:
    // If incoming tombstone has reached the target count, compare estimates.
    // If incoming estimate >= my estimate before merge, step down.
    // Use node ID as tie-breaker: higher node ID steps down when estimates are equal.
    if (incomingTombstoneEstimate !== null && incomingTombstoneEstimate >= targetCount) {
      if (myTombstoneEstimateBeforeMerge < incomingTombstoneEstimate) {
        return { shouldGC: true, becomeKeeper: false, stepDownAsKeeper: true };
      }
      // Tie-breaker: if estimates are equal, the lexicographically higher node ID steps down
      if (myTombstoneEstimateBeforeMerge === incomingTombstoneEstimate &&
          senderNodeId !== null && myNodeId > senderNodeId) {
        return { shouldGC: true, becomeKeeper: false, stepDownAsKeeper: true };
      }
    }
    // Keeper holds on: either the sender has not reached the target, or the
    // keeper wins the comparison/tie-break.
    return { shouldGC: false, becomeKeeper: false, stepDownAsKeeper: false };
  }

  // Become keeper when tombstone count reaches target (all record holders have acknowledged)
  if (tombstoneCount >= targetCount) {
    return { shouldGC: false, becomeKeeper: true, stepDownAsKeeper: false };
  }

  // Not enough acknowledgements yet: keep gossiping the tombstone.
  return { shouldGC: false, becomeKeeper: false, stepDownAsKeeper: false };
};
|
||||
|
||||
const receiveRecord = <Data>(
|
||||
node: NodeState<Data>,
|
||||
incoming: DataRecord<Data>
|
||||
): NodeState<Data> => {
|
||||
const newStats = { ...node.stats, messagesReceived: node.stats.messagesReceived + 1 };
|
||||
|
||||
if (node.tombstones.has(incoming.id)) {
|
||||
return { ...node, stats: { ...newStats, resurrections: newStats.resurrections + 1 } };
|
||||
}
|
||||
|
||||
const existing = node.records.get(incoming.id);
|
||||
const updatedRecord: DataRecord<Data> = existing
|
||||
? { ...existing, recordHLL: hllAdd(hllMerge(existing.recordHLL, incoming.recordHLL), node.id) }
|
||||
: { ...incoming, recordHLL: hllAdd(hllClone(incoming.recordHLL), node.id) };
|
||||
|
||||
const newRecords = new Map(node.records);
|
||||
newRecords.set(incoming.id, updatedRecord);
|
||||
return { ...node, records: newRecords, stats: newStats };
|
||||
};
|
||||
|
||||
const receiveTombstone = <Data>(
|
||||
node: NodeState<Data>,
|
||||
incoming: Tombstone,
|
||||
senderNodeId: string
|
||||
): NodeState<Data> => {
|
||||
let newStats = { ...node.stats, messagesReceived: node.stats.messagesReceived + 1 };
|
||||
|
||||
const record = node.records.get(incoming.id);
|
||||
if (!record) {
|
||||
return { ...node, stats: newStats };
|
||||
}
|
||||
|
||||
const existing = node.tombstones.get(incoming.id);
|
||||
|
||||
const mergedTombstoneHLL = existing
|
||||
? hllAdd(hllMerge(existing.tombstoneHLL, incoming.tombstoneHLL), node.id)
|
||||
: hllAdd(hllClone(incoming.tombstoneHLL), node.id);
|
||||
|
||||
let bestFrozenHLL = incoming.frozenRecordHLL;
|
||||
if (existing?.frozenRecordHLL) {
|
||||
bestFrozenHLL = hllEstimate(existing.frozenRecordHLL) > hllEstimate(bestFrozenHLL)
|
||||
? existing.frozenRecordHLL
|
||||
: bestFrozenHLL;
|
||||
}
|
||||
if (hllEstimate(record.recordHLL) > hllEstimate(bestFrozenHLL)) {
|
||||
bestFrozenHLL = hllClone(record.recordHLL);
|
||||
}
|
||||
|
||||
let updatedTombstone: Tombstone = {
|
||||
id: incoming.id,
|
||||
tombstoneHLL: mergedTombstoneHLL,
|
||||
frozenRecordHLL: bestFrozenHLL,
|
||||
isKeeper: existing?.isKeeper ?? false,
|
||||
};
|
||||
|
||||
const myEstimateBeforeMerge = existing ? hllEstimate(existing.tombstoneHLL) : 0;
|
||||
|
||||
const gcStatus = checkGCStatus(
|
||||
updatedTombstone,
|
||||
hllEstimate(incoming.tombstoneHLL),
|
||||
myEstimateBeforeMerge,
|
||||
node.id,
|
||||
senderNodeId
|
||||
);
|
||||
|
||||
// Always delete the record when we have a tombstone
|
||||
const newRecords = new Map(node.records);
|
||||
newRecords.delete(incoming.id);
|
||||
|
||||
if (gcStatus.stepDownAsKeeper) {
|
||||
// Step down: delete both record and tombstone
|
||||
const newTombstones = new Map(node.tombstones);
|
||||
newTombstones.delete(incoming.id);
|
||||
newStats = { ...newStats, tombstonesGarbageCollected: newStats.tombstonesGarbageCollected + 1 };
|
||||
return { ...node, records: newRecords, tombstones: newTombstones, stats: newStats };
|
||||
}
|
||||
|
||||
if (gcStatus.becomeKeeper) {
|
||||
updatedTombstone = { ...updatedTombstone, isKeeper: true };
|
||||
}
|
||||
|
||||
const newTombstones = new Map(node.tombstones);
|
||||
newTombstones.set(incoming.id, updatedTombstone);
|
||||
return { ...node, records: newRecords, tombstones: newTombstones, stats: newStats };
|
||||
};
|
||||
|
||||
const createNetwork = <Data>(nodeCount: number, connectivityRatio: number): NetworkState<Data> => {
|
||||
let nodes = new Map<string, NodeState<Data>>();
|
||||
|
||||
for (let i = 0; i < nodeCount; i++) {
|
||||
nodes.set(`node-${i}`, createNode<Data>(`node-${i}`));
|
||||
}
|
||||
|
||||
const nodeIds = Array.from(nodes.keys());
|
||||
for (let i = 0; i < nodeIds.length; i++) {
|
||||
for (let j = i + 1; j < nodeIds.length; j++) {
|
||||
if (Math.random() < connectivityRatio) {
|
||||
nodes = new Map(nodes)
|
||||
.set(nodeIds[i], addPeerToNode(nodes.get(nodeIds[i])!, nodeIds[j]))
|
||||
.set(nodeIds[j], addPeerToNode(nodes.get(nodeIds[j])!, nodeIds[i]));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (let i = 0; i < nodeIds.length; i++) {
|
||||
const nextIdx = (i + 1) % nodeIds.length;
|
||||
nodes = new Map(nodes)
|
||||
.set(nodeIds[i], addPeerToNode(nodes.get(nodeIds[i])!, nodeIds[nextIdx]))
|
||||
.set(nodeIds[nextIdx], addPeerToNode(nodes.get(nodeIds[nextIdx])!, nodeIds[i]));
|
||||
}
|
||||
|
||||
return { nodes };
|
||||
};
|
||||
|
||||
const createBridgedNetwork = <Data>(
|
||||
clusterSize: number,
|
||||
intraClusterConnectivity: number
|
||||
): NetworkState<Data> => {
|
||||
let nodes = new Map<string, NodeState<Data>>();
|
||||
|
||||
for (let i = 0; i < clusterSize; i++) {
|
||||
nodes.set(`cluster-a-${i}`, createNode<Data>(`cluster-a-${i}`));
|
||||
nodes.set(`cluster-b-${i}`, createNode<Data>(`cluster-b-${i}`));
|
||||
}
|
||||
|
||||
const clusterA = Array.from(nodes.keys()).filter(id => id.startsWith('cluster-a'));
|
||||
const clusterB = Array.from(nodes.keys()).filter(id => id.startsWith('cluster-b'));
|
||||
|
||||
const connectCluster = (clusterIds: string[]) => {
|
||||
for (let i = 0; i < clusterIds.length; i++) {
|
||||
for (let j = i + 1; j < clusterIds.length; j++) {
|
||||
if (Math.random() < intraClusterConnectivity) {
|
||||
nodes = new Map(nodes)
|
||||
.set(clusterIds[i], addPeerToNode(nodes.get(clusterIds[i])!, clusterIds[j]))
|
||||
.set(clusterIds[j], addPeerToNode(nodes.get(clusterIds[j])!, clusterIds[i]));
|
||||
}
|
||||
}
|
||||
}
|
||||
for (let i = 0; i < clusterIds.length; i++) {
|
||||
const nextIdx = (i + 1) % clusterIds.length;
|
||||
nodes = new Map(nodes)
|
||||
.set(clusterIds[i], addPeerToNode(nodes.get(clusterIds[i])!, clusterIds[nextIdx]))
|
||||
.set(clusterIds[nextIdx], addPeerToNode(nodes.get(clusterIds[nextIdx])!, clusterIds[i]));
|
||||
}
|
||||
};
|
||||
|
||||
connectCluster(clusterA);
|
||||
connectCluster(clusterB);
|
||||
|
||||
const bridgeA = clusterA[0];
|
||||
const bridgeB = clusterB[0];
|
||||
nodes = new Map(nodes)
|
||||
.set(bridgeA, addPeerToNode(nodes.get(bridgeA)!, bridgeB))
|
||||
.set(bridgeB, addPeerToNode(nodes.get(bridgeB)!, bridgeA));
|
||||
|
||||
return { nodes };
|
||||
};
|
||||
|
||||
// Push `tombstone` from `forwardingNodeId` to every peer that still holds the
// live record, skipping `excludePeerId` (normally the node it was received
// from). If delivering the tombstone makes a peer step down as keeper (its
// stored tombstone disappears), recurse from that peer so the delete keeps
// spreading past it.
// NOTE(review): recursion is bounded because a peer is only visited while it
// still holds the live record, which receiveTombstone removes — confirm this
// holds when records are re-gossiped concurrently.
const forwardTombstoneToAllPeers = <Data>(
  network: NetworkState<Data>,
  forwardingNodeId: string,
  tombstone: Tombstone,
  excludePeerId?: string
): NetworkState<Data> => {
  const forwardingNode = network.nodes.get(forwardingNodeId);
  if (!forwardingNode) return network;

  let newNodes = new Map(network.nodes);

  for (const peerId of forwardingNode.peerIds) {
    if (peerId === excludePeerId) continue;

    // Only peers that still hold a live copy of the record need the delete.
    const peer = newNodes.get(peerId);
    if (!peer || !peer.records.has(tombstone.id)) continue;

    const updatedPeer = receiveTombstone(peer, tombstone, forwardingNodeId);
    newNodes.set(peerId, updatedPeer);

    // If this peer also stepped down, recursively forward
    if (!updatedPeer.tombstones.has(tombstone.id) && peer.tombstones.has(tombstone.id)) {
      const result = forwardTombstoneToAllPeers({ nodes: newNodes }, peerId, tombstone, forwardingNodeId);
      newNodes = new Map(result.nodes);
    }
  }

  return { nodes: newNodes };
};
|
||||
|
||||
// One gossip exchange: `senderNodeId` picks a uniformly random peer and pushes
// its copy of `recordId` (the live record, the tombstone, or both). The
// exchange is bidirectional for tombstones: after the peer processes ours, the
// sender merges the peer's tombstone state back into its own and may itself
// become keeper or step down. No-op if the sender has no peers or holds
// neither the record nor a tombstone.
const gossipOnce = <Data>(network: NetworkState<Data>, senderNodeId: string, recordId: string): NetworkState<Data> => {
  const sender = network.nodes.get(senderNodeId);
  if (!sender || sender.peerIds.length === 0) return network;

  const record = sender.records.get(recordId);
  const tombstone = sender.tombstones.get(recordId);
  if (!record && !tombstone) return network;

  // Uniformly random push target.
  const peerId = sender.peerIds[Math.floor(Math.random() * sender.peerIds.length)];
  const peer = network.nodes.get(peerId);
  if (!peer) return network;

  let newNodes = new Map(network.nodes);

  // Plain record push (no tombstone on the sender's side).
  if (record && !tombstone) {
    const updatedPeer = receiveRecord(peer, record);
    newNodes.set(peerId, updatedPeer);
  }

  if (tombstone) {
    // If we hold both record and tombstone, make sure the peer has the record
    // first, so the tombstone has something to act on.
    if (record && !peer.records.has(recordId)) {
      const peerWithRecord = receiveRecord(peer, record);
      newNodes.set(peerId, peerWithRecord);
    }
    const currentPeer = newNodes.get(peerId)!;
    const peerHadTombstone = currentPeer.tombstones.has(recordId);
    const updatedPeer = receiveTombstone(currentPeer, tombstone, senderNodeId);
    newNodes.set(peerId, updatedPeer);

    // If peer stepped down (had tombstone before, doesn't have it now), forward the incoming tombstone
    if (peerHadTombstone && !updatedPeer.tombstones.has(recordId)) {
      const result = forwardTombstoneToAllPeers({ nodes: newNodes }, peerId, tombstone, senderNodeId);
      newNodes = new Map(result.nodes);
    }

    // Reverse direction: pull the peer's tombstone state back into the sender.
    if (updatedPeer.tombstones.has(recordId)) {
      const peerTombstone = updatedPeer.tombstones.get(recordId)!;
      // Sender's estimate before absorbing the peer's sketch (for tie-breaks).
      const senderEstimateBeforeMerge = hllEstimate(tombstone.tombstoneHLL);

      // Merge HLLs
      const mergedTombstoneHLL = hllMerge(tombstone.tombstoneHLL, peerTombstone.tombstoneHLL);
      const bestFrozenHLL = hllEstimate(peerTombstone.frozenRecordHLL) > hllEstimate(tombstone.frozenRecordHLL)
        ? peerTombstone.frozenRecordHLL
        : tombstone.frozenRecordHLL;

      let updatedSenderTombstone: Tombstone = {
        ...tombstone,
        tombstoneHLL: mergedTombstoneHLL,
        frozenRecordHLL: bestFrozenHLL,
      };

      // Check if sender should step down (peer has higher estimate or wins tie-breaker)
      const gcStatus = checkGCStatus(
        updatedSenderTombstone,
        hllEstimate(peerTombstone.tombstoneHLL),
        senderEstimateBeforeMerge,
        senderNodeId,
        peerId
      );

      if (gcStatus.stepDownAsKeeper) {
        // Sender steps down - remove their tombstone
        const currentSender = newNodes.get(senderNodeId)!;
        const newSenderTombstones = new Map(currentSender.tombstones);
        newSenderTombstones.delete(recordId);
        const newSenderStats = { ...currentSender.stats, tombstonesGarbageCollected: currentSender.stats.tombstonesGarbageCollected + 1 };
        newNodes.set(senderNodeId, { ...currentSender, tombstones: newSenderTombstones, stats: newSenderStats });

        // Forward the peer's tombstone to all sender's other peers
        const result = forwardTombstoneToAllPeers({ nodes: newNodes }, senderNodeId, peerTombstone, peerId);
        newNodes = new Map(result.nodes);
      } else {
        // Keep tombstone with merged data
        if (gcStatus.becomeKeeper) {
          updatedSenderTombstone = { ...updatedSenderTombstone, isKeeper: true };
        }
        const currentSender = newNodes.get(senderNodeId)!;
        const newSenderTombstones = new Map(currentSender.tombstones);
        newSenderTombstones.set(recordId, updatedSenderTombstone);
        newNodes.set(senderNodeId, { ...currentSender, tombstones: newSenderTombstones });
      }
    }
  }

  return { nodes: newNodes };
};
|
||||
|
||||
const gossipRounds = <Data>(network: NetworkState<Data>, recordId: string, rounds: number): NetworkState<Data> => {
|
||||
let state = network;
|
||||
for (let round = 0; round < rounds; round++) {
|
||||
for (const [nodeId, node] of state.nodes) {
|
||||
if (node.records.has(recordId) || node.tombstones.has(recordId)) {
|
||||
state = gossipOnce(state, nodeId, recordId);
|
||||
}
|
||||
}
|
||||
}
|
||||
return state;
|
||||
};
|
||||
|
||||
// Per-cluster (or whole-network) occupancy counts for one record ID.
interface ClusterStats {
  name: string;
  nodeCount: number;
  // Nodes still holding a live copy of the record.
  recordCount: number;
  // Nodes still holding a tombstone for the record.
  tombstoneCount: number;
}

// Summary of one scenario, consumed by printSimulationResult.
interface SimulationResult {
  testName: string;
  recordsDeleted: boolean;
  // Gossip rounds until the last live copy disappeared (0 when never/average).
  roundsToDeleteRecords: number;
  totalRounds: number;
  clusters: ClusterStats[];
}
|
||||
|
||||
const getClusterStats = <Data>(
|
||||
network: NetworkState<Data>,
|
||||
recordId: string,
|
||||
clusterPrefix?: string
|
||||
): ClusterStats => {
|
||||
let recordCount = 0;
|
||||
let tombstoneCount = 0;
|
||||
let nodeCount = 0;
|
||||
|
||||
for (const [nodeId, node] of network.nodes) {
|
||||
if (clusterPrefix && !nodeId.startsWith(clusterPrefix)) continue;
|
||||
nodeCount++;
|
||||
if (node.records.has(recordId)) recordCount++;
|
||||
if (node.tombstones.has(recordId)) tombstoneCount++;
|
||||
}
|
||||
|
||||
return {
|
||||
name: clusterPrefix ?? 'all',
|
||||
nodeCount,
|
||||
recordCount,
|
||||
tombstoneCount,
|
||||
};
|
||||
};
|
||||
|
||||
const printSimulationResult = (result: SimulationResult): void => {
|
||||
console.log(`\n== ${result.testName} ==`);
|
||||
|
||||
if (result.recordsDeleted) {
|
||||
console.log(` Records deleted: YES (${result.roundsToDeleteRecords} rounds)`);
|
||||
} else {
|
||||
console.log(` Records deleted: NO`);
|
||||
}
|
||||
console.log(` Total rounds run: ${result.totalRounds}`);
|
||||
|
||||
console.log(` Final State:`);
|
||||
|
||||
for (const cluster of result.clusters) {
|
||||
const clusterLabel = cluster.name === 'all' ? 'Network' : `Cluster ${cluster.name}`;
|
||||
console.log(` ${clusterLabel} (${cluster.nodeCount} nodes):`);
|
||||
console.log(` Records: ${cluster.recordCount}`);
|
||||
console.log(` Tombstones: ${cluster.tombstoneCount}`);
|
||||
}
|
||||
};
|
||||
|
||||
// Outcome of runToConvergence: the final network plus deletion timing.
interface ConvergenceResult<Data> {
  network: NetworkState<Data>;
  // Whether every live copy of the record disappeared within the round budget.
  recordsDeleted: boolean;
  // Rounds elapsed when the last live copy disappeared (0 if it never did).
  roundsToDeleteRecords: number;
  totalRounds: number;
}
|
||||
|
||||
const runToConvergence = <Data>(
|
||||
network: NetworkState<Data>,
|
||||
recordId: string,
|
||||
maxRounds: number,
|
||||
extraRoundsAfterDeletion: number = 100
|
||||
): ConvergenceResult<Data> => {
|
||||
let rounds = 0;
|
||||
let state = network;
|
||||
let recordsDeleted = false;
|
||||
let roundsToDeleteRecords = 0;
|
||||
|
||||
// Phase 1: Run until records are deleted
|
||||
while (rounds < maxRounds && !recordsDeleted) {
|
||||
const stats = getClusterStats(state, recordId);
|
||||
if (stats.recordCount === 0) {
|
||||
recordsDeleted = true;
|
||||
roundsToDeleteRecords = rounds;
|
||||
}
|
||||
state = gossipRounds(state, recordId, 10);
|
||||
rounds += 10;
|
||||
}
|
||||
|
||||
// Phase 2: Continue running to let tombstones converge
|
||||
let extraRounds = 0;
|
||||
while (extraRounds < extraRoundsAfterDeletion) {
|
||||
state = gossipRounds(state, recordId, 10);
|
||||
extraRounds += 10;
|
||||
rounds += 10;
|
||||
}
|
||||
|
||||
return {
|
||||
network: state,
|
||||
recordsDeleted,
|
||||
roundsToDeleteRecords,
|
||||
totalRounds: rounds,
|
||||
};
|
||||
};
|
||||
|
||||
const addRecordToNetwork = <Data>(network: NetworkState<Data>, nodeId: string, recordId: string, data: Data): NetworkState<Data> => {
|
||||
const node = network.nodes.get(nodeId);
|
||||
if (!node) return network;
|
||||
|
||||
const newRecords = new Map(node.records);
|
||||
newRecords.set(recordId, createRecord(recordId, data, nodeId));
|
||||
const newNodes = new Map(network.nodes);
|
||||
newNodes.set(nodeId, { ...node, records: newRecords });
|
||||
return { nodes: newNodes };
|
||||
};
|
||||
|
||||
const addTombstoneToNetwork = <Data>(network: NetworkState<Data>, nodeId: string, recordId: string): NetworkState<Data> => {
|
||||
const node = network.nodes.get(nodeId);
|
||||
if (!node) return network;
|
||||
|
||||
const record = node.records.get(recordId);
|
||||
if (!record) return network;
|
||||
|
||||
const newTombstones = new Map(node.tombstones);
|
||||
newTombstones.set(recordId, createTombstone(record, nodeId));
|
||||
const newNodes = new Map(network.nodes);
|
||||
newNodes.set(nodeId, { ...node, tombstones: newTombstones });
|
||||
return { nodes: newNodes };
|
||||
};
|
||||
|
||||
const testSingleNodeDeletion = (): void => {
|
||||
const trials = 50;
|
||||
const maxRounds = 99999;
|
||||
let deletedCount = 0;
|
||||
let totalDeletionRounds = 0;
|
||||
let totalRounds = 0;
|
||||
let finalRecords = 0;
|
||||
let finalTombstones = 0;
|
||||
|
||||
for (let trial = 0; trial < trials; trial++) {
|
||||
let network = createNetwork<string>(15, 0.4);
|
||||
const recordId = `test-${trial}`;
|
||||
|
||||
network = addRecordToNetwork(network, "node-0", recordId, "Test Data");
|
||||
network = gossipRounds(network, recordId, 20);
|
||||
network = addTombstoneToNetwork(network, "node-0", recordId);
|
||||
|
||||
const result = runToConvergence(network, recordId, maxRounds);
|
||||
|
||||
if (result.recordsDeleted) {
|
||||
deletedCount++;
|
||||
totalDeletionRounds += result.roundsToDeleteRecords;
|
||||
}
|
||||
totalRounds += result.totalRounds;
|
||||
|
||||
const stats = getClusterStats(result.network, recordId);
|
||||
finalRecords += stats.recordCount;
|
||||
finalTombstones += stats.tombstoneCount;
|
||||
}
|
||||
|
||||
printSimulationResult({
|
||||
testName: `Single Node Deletion (${trials} trials)`,
|
||||
recordsDeleted: deletedCount === trials,
|
||||
roundsToDeleteRecords: deletedCount > 0 ? Math.round(totalDeletionRounds / deletedCount) : 0,
|
||||
totalRounds: Math.round(totalRounds / trials),
|
||||
clusters: [{
|
||||
name: 'all',
|
||||
nodeCount: 15 * trials,
|
||||
recordCount: finalRecords,
|
||||
tombstoneCount: finalTombstones,
|
||||
}],
|
||||
});
|
||||
};
|
||||
|
||||
const testEarlyTombstoneCreation = (): void => {
|
||||
const maxRounds = 99999;
|
||||
let network = createNetwork<string>(20, 0.4);
|
||||
const recordId = "early-tombstone";
|
||||
|
||||
// Only propagate record for 3 rounds before creating tombstone
|
||||
network = addRecordToNetwork(network, "node-0", recordId, "Test");
|
||||
network = gossipRounds(network, recordId, 3);
|
||||
network = addTombstoneToNetwork(network, "node-0", recordId);
|
||||
|
||||
const result = runToConvergence(network, recordId, maxRounds);
|
||||
|
||||
printSimulationResult({
|
||||
testName: "Early Tombstone (record partially propagated)",
|
||||
recordsDeleted: result.recordsDeleted,
|
||||
roundsToDeleteRecords: result.roundsToDeleteRecords,
|
||||
totalRounds: result.totalRounds,
|
||||
clusters: [getClusterStats(result.network, recordId)],
|
||||
});
|
||||
};
|
||||
|
||||
const testBridgedNetwork = (): void => {
|
||||
const maxRounds = 99999;
|
||||
const clusterSize = 15;
|
||||
let network = createBridgedNetwork<string>(clusterSize, 0.5);
|
||||
const recordId = "bridged-record";
|
||||
|
||||
network = addRecordToNetwork(network, "cluster-a-0", recordId, "Test Data");
|
||||
network = gossipRounds(network, recordId, 20);
|
||||
network = addTombstoneToNetwork(network, "cluster-a-0", recordId);
|
||||
|
||||
const result = runToConvergence(network, recordId, maxRounds);
|
||||
|
||||
printSimulationResult({
|
||||
testName: "Bridged Network (two clusters with single connection)",
|
||||
recordsDeleted: result.recordsDeleted,
|
||||
roundsToDeleteRecords: result.roundsToDeleteRecords,
|
||||
totalRounds: result.totalRounds,
|
||||
clusters: [
|
||||
getClusterStats(result.network, recordId, "cluster-a"),
|
||||
getClusterStats(result.network, recordId, "cluster-b"),
|
||||
],
|
||||
});
|
||||
};
|
||||
|
||||
const testConcurrentTombstones = (): void => {
|
||||
const maxRounds = 99999;
|
||||
let network = createNetwork<string>(20, 0.4);
|
||||
const recordId = "concurrent-delete";
|
||||
|
||||
network = addRecordToNetwork(network, "node-0", recordId, "Test Data");
|
||||
network = gossipRounds(network, recordId, 30);
|
||||
|
||||
network = addTombstoneToNetwork(network, "node-0", recordId);
|
||||
network = addTombstoneToNetwork(network, "node-5", recordId);
|
||||
network = addTombstoneToNetwork(network, "node-10", recordId);
|
||||
|
||||
const result = runToConvergence(network, recordId, maxRounds);
|
||||
|
||||
printSimulationResult({
|
||||
testName: "Concurrent Tombstones (3 nodes delete same record)",
|
||||
recordsDeleted: result.recordsDeleted,
|
||||
roundsToDeleteRecords: result.roundsToDeleteRecords,
|
||||
totalRounds: result.totalRounds,
|
||||
clusters: [getClusterStats(result.network, recordId)],
|
||||
});
|
||||
};
|
||||
|
||||
// Scenario: the record propagates across both clusters, then the single
// bridge edge is removed, cluster A deletes the record while partitioned, and
// finally the bridge is restored. Checks that the tombstone crosses the
// healed bridge and clears cluster B's copies.
const testNetworkPartitionHeal = (): void => {
  const maxRounds = 99999;
  const clusterSize = 10;
  let network = createBridgedNetwork<string>(clusterSize, 0.5);
  const recordId = "partition-test";

  network = addRecordToNetwork(network, "cluster-a-0", recordId, "Test Data");
  network = gossipRounds(network, recordId, 30);

  // Partition the network
  // (cluster-a-0 / cluster-b-0 are the bridge endpoints by construction).
  const bridgeA = network.nodes.get("cluster-a-0")!;
  const bridgeB = network.nodes.get("cluster-b-0")!;
  const newBridgeAPeers = bridgeA.peerIds.filter(p => p !== "cluster-b-0");
  const newBridgeBPeers = bridgeB.peerIds.filter(p => p !== "cluster-a-0");

  let partitionedNodes = new Map(network.nodes);
  partitionedNodes.set("cluster-a-0", { ...bridgeA, peerIds: newBridgeAPeers });
  partitionedNodes.set("cluster-b-0", { ...bridgeB, peerIds: newBridgeBPeers });
  network = { nodes: partitionedNodes };

  // Delete while the clusters cannot communicate.
  network = addTombstoneToNetwork(network, "cluster-a-0", recordId);

  // Run during partition
  const partitionResult = runToConvergence(network, recordId, 500);
  network = partitionResult.network;

  // Heal the network
  const healedBridgeA = network.nodes.get("cluster-a-0")!;
  const healedBridgeB = network.nodes.get("cluster-b-0")!;
  let healedNodes = new Map(network.nodes);
  healedNodes.set("cluster-a-0", addPeerToNode(healedBridgeA, "cluster-b-0"));
  healedNodes.set("cluster-b-0", addPeerToNode(healedBridgeB, "cluster-a-0"));
  network = { nodes: healedNodes };

  const result = runToConvergence(network, recordId, maxRounds);

  printSimulationResult({
    testName: "Network Partition and Heal",
    recordsDeleted: result.recordsDeleted,
    // Rounds from the partitioned and healed phases are summed for reporting.
    roundsToDeleteRecords: partitionResult.roundsToDeleteRecords + result.roundsToDeleteRecords,
    totalRounds: partitionResult.totalRounds + result.totalRounds,
    clusters: [
      getClusterStats(result.network, recordId, "cluster-a"),
      getClusterStats(result.network, recordId, "cluster-b"),
    ],
  });
};
|
||||
|
||||
const testSparseNetwork = (): void => {
|
||||
const trials = 20;
|
||||
const maxRounds = 99999;
|
||||
let deletedCount = 0;
|
||||
let totalDeletionRounds = 0;
|
||||
let totalRounds = 0;
|
||||
let finalRecords = 0;
|
||||
let finalTombstones = 0;
|
||||
|
||||
for (let trial = 0; trial < trials; trial++) {
|
||||
let network = createNetwork<string>(25, 0.15);
|
||||
const recordId = `sparse-${trial}`;
|
||||
|
||||
network = addRecordToNetwork(network, "node-0", recordId, "Test");
|
||||
network = gossipRounds(network, recordId, 50);
|
||||
network = addTombstoneToNetwork(network, "node-0", recordId);
|
||||
|
||||
const result = runToConvergence(network, recordId, maxRounds);
|
||||
|
||||
if (result.recordsDeleted) {
|
||||
deletedCount++;
|
||||
totalDeletionRounds += result.roundsToDeleteRecords;
|
||||
}
|
||||
totalRounds += result.totalRounds;
|
||||
|
||||
const stats = getClusterStats(result.network, recordId);
|
||||
finalRecords += stats.recordCount;
|
||||
finalTombstones += stats.tombstoneCount;
|
||||
}
|
||||
|
||||
printSimulationResult({
|
||||
testName: `Sparse Network (${trials} trials, 15% connectivity)`,
|
||||
recordsDeleted: deletedCount === trials,
|
||||
roundsToDeleteRecords: deletedCount > 0 ? Math.round(totalDeletionRounds / deletedCount) : 0,
|
||||
totalRounds: Math.round(totalRounds / trials),
|
||||
clusters: [{
|
||||
name: 'all',
|
||||
nodeCount: 25 * trials,
|
||||
recordCount: finalRecords,
|
||||
tombstoneCount: finalTombstones,
|
||||
}],
|
||||
});
|
||||
};
|
||||
|
||||
const runAllTests = (): void => {
|
||||
console.log("=== HyperLogLog Tombstone Simulation ===");
|
||||
|
||||
testSingleNodeDeletion();
|
||||
testEarlyTombstoneCreation();
|
||||
testBridgedNetwork();
|
||||
testConcurrentTombstones();
|
||||
testNetworkPartitionHeal();
|
||||
testSparseNetwork();
|
||||
|
||||
console.log("\n=== Simulation Complete ===");
|
||||
};
|
||||
|
||||
runAllTests();
|
||||
Loading…
Add table
Add a link
Reference in a new issue