Created: 2025-01-09
Version: 2.0
Status: Implementation Planning
Author: Milo with TQ
Version 2 restructures the implementation to prioritize visibility and control. We build monitoring first, add test controls second, and only connect to real memory creation after validating the system works. This approach ensures we can see what's happening at every step and tune the system based on observed behavior.
// Ensure all Memory nodes have required properties for monitoring
// Required properties for energy system:
{
id: string (unique),
content: string,
tier: string ('working' | 'shortTerm' | 'longTerm' | 'archived*' | 'expired*'), // * expands to the prior tier, e.g. 'archivedWorking'
energy: float,
created: datetime,
lastAccessed: datetime,
accessCount: integer,
sessionId: string (optional),
instanceId: string (optional),
promotedFrom: string (optional),
isTest: boolean (default false), // Critical for safe testing
validFrom: datetime,
validTo: datetime (null for current)
}
// Additional monitoring metadata
{
lastDecay: datetime, // When energy was last decayed
lastPromotion: datetime, // When last promoted
decayRate: float, // Current decay rate applied
peakEnergy: float, // Highest energy achieved
promotionAttempts: integer // Times evaluated for promotion
}
# Memory Status Overview
brainBridge --memory-status
# Returns: tier distribution, total count, test vs real ratio
# Energy Analysis
brainBridge --memory-energy [--tier=working|shortTerm|longTerm] [--test-only]
# Returns: energy histogram, average energy, decay predictions
# Promotion Activity
brainBridge --memory-promotions [--last-hours=24]
# Returns: recent promotions, success rate, blocked promotions
# System Health
brainBridge --memory-health
# Returns: configuration status, last run times, error counts
# Detailed Memory Inspection
brainBridge --memory-inspect <id>
# Returns: full memory properties, relationships, history
// MEMORY STATUS QUERY
MATCH (m:Memory)
WHERE m.validTo IS NULL
RETURN
m.tier as tier,
m.isTest as isTest,
count(m) as count,
avg(m.energy) as avgEnergy,
max(m.energy) as maxEnergy,
min(m.energy) as minEnergy,
percentileCont(m.energy, 0.5) as medianEnergy
ORDER BY tier;
// ENERGY DISTRIBUTION QUERY
MATCH (m:Memory)
WHERE m.validTo IS NULL AND m.tier = $tier
WITH m.energy as energy
RETURN
floor(energy) as energyBucket,
count(*) as frequency
ORDER BY energyBucket;
// PROMOTION READINESS QUERY
MATCH (config:ConsolidationConfig)
WHERE config.validTo IS NULL
WITH config
MATCH (m:Memory)
WHERE m.validTo IS NULL
RETURN
m.tier as currentTier,
CASE
WHEN m.tier = 'working' AND m.energy > config.workingToShortThreshold THEN 'ready'
WHEN m.tier = 'shortTerm' AND m.energy > config.shortToLongThreshold THEN 'ready'
ELSE 'not ready'
END as promotionStatus,
count(m) as count;
// DECAY PREDICTION QUERY
MATCH (config:ConsolidationConfig)
WHERE config.validTo IS NULL
WITH config
MATCH (m:Memory)
WHERE m.validTo IS NULL AND m.energy IS NOT NULL
WITH m, config,
CASE m.tier
WHEN 'working' THEN config.workingDecay
WHEN 'shortTerm' THEN config.shortTermDecay
WHEN 'longTerm' THEN config.longTermDecay
END as decayConstant,
// Total elapsed hours (duration.between(...).hours only returns the hours component, excluding whole days)
duration.inSeconds(m.lastAccessed, datetime()).seconds / 3600.0 as hoursSinceAccess
RETURN
m.id,
m.tier,
m.energy as currentEnergy,
m.energy * exp(-1 * hoursSinceAccess * decayConstant) as predictedEnergy,
CASE
WHEN m.energy * exp(-1 * hoursSinceAccess * decayConstant) < 0.1 THEN 'will expire'
ELSE 'will survive'
END as prediction;
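Since decay is exponential, each decay constant maps to a half-life of ln(2) / constant, which gives an easy sanity check on these predictions. Using the default constants listed under tuning below (0.5 and 0.05 per hour; Cypher's log() is the natural logarithm):
// Half-life in hours implied by the default decay constants
RETURN
log(2) / 0.5 as workingHalfLifeHours, // ~1.4 hours
log(2) / 0.05 as shortTermHalfLifeHours; // ~13.9 hours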
# Create test memory with specific properties
brainBridge --test-memory "content" [--energy=1.0] [--tier=working]
# Returns: created memory ID
# Boost test memory (simulate access)
brainBridge --boost-memory <id> [--amount=1.0]
# Returns: new energy level
# List all test memories
brainBridge --list-test-memories
# Returns: all memories where isTest=true
# Delete test memories
brainBridge --delete-test-memories [--all | --id=<id>]
# Returns: count deleted
# Create test scenario
brainBridge --test-scenario <scenario-name>
# Scenarios: 'high-access', 'gradual-decay', 'promotion-ready', 'cluster-formation'
// CREATE TEST MEMORY
CREATE (m:Memory:TestMemory {
id: randomUUID(),
content: $content,
tier: coalesce($tier, 'working'),
energy: coalesce($energy, 1.0),
created: datetime(),
lastAccessed: datetime(),
accessCount: 1,
sessionId: $sessionId,
instanceId: 'test-harness',
isTest: true, // Critical flag
validFrom: datetime(),
validTo: null
})
RETURN m;
// BOOST TEST MEMORY
MATCH (m:Memory {id: $memoryId, isTest: true})
WHERE m.validTo IS NULL
// Compute the boosted energy first so peakEnergy reflects the post-boost value
WITH m, m.energy + $boostAmount as newEnergy
SET m.energy = newEnergy,
m.lastAccessed = datetime(),
m.accessCount = m.accessCount + 1,
m.peakEnergy = CASE
WHEN newEnergy > coalesce(m.peakEnergy, 0)
THEN newEnergy
ELSE m.peakEnergy
END
RETURN m;
// DELETE TEST MEMORIES (soft delete: closes the validity interval rather than removing nodes)
MATCH (m:Memory {isTest: true})
WHERE m.validTo IS NULL
AND ($deleteAll = true OR m.id = $memoryId)
SET m.validTo = datetime()
RETURN count(m) as deletedCount;
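If soft-deleted test memories ever clutter the monitoring queries, a physical cleanup is also possible. This is an optional alternative to the soft delete above, not part of the plan itself:
// HARD DELETE TEST MEMORIES (optional): physically remove test nodes and their relationships
MATCH (m:Memory {isTest: true})
DETACH DELETE m;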
// TEST SCENARIO: HIGH ACCESS
UNWIND range(1, 5) as i
CREATE (m:Memory:TestMemory {
id: 'test-high-access-' + toString(i),
content: 'High access test memory ' + toString(i),
tier: 'working',
energy: toFloat(i), // Varying energy levels
created: datetime() - duration({hours: i}),
lastAccessed: datetime() - duration({minutes: i * 10}),
accessCount: 10 - i,
isTest: true,
validFrom: datetime(),
validTo: null
})
RETURN count(m) as created;
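The remaining scenarios would follow the same pattern. As an illustration only, a 'promotion-ready' seed might create working memories sitting just above the promotion threshold (the 2.0 default from the tuning section is assumed here):
// TEST SCENARIO: PROMOTION READY (illustrative sketch)
UNWIND range(1, 3) as i
CREATE (m:Memory:TestMemory {
id: 'test-promotion-ready-' + toString(i),
content: 'Promotion-ready test memory ' + toString(i),
tier: 'working',
energy: 2.0 + toFloat(i) * 0.5, // just above the default workingToShortThreshold of 2.0
created: datetime() - duration({hours: i}),
lastAccessed: datetime(),
accessCount: 5,
isTest: true,
validFrom: datetime(),
validTo: null
})
RETURN count(m) as created;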
# Individual consolidation processes
brainBridge --decay-energy [--test-only]
brainBridge --promote-memories [--test-only] [--tier=working|shortTerm]
brainBridge --cluster-memories [--test-only]
brainBridge --clean-expired [--test-only]
# Combined consolidation
brainBridge --consolidate-all [--test-only] [--dry-run]
# Force specific memory promotion
brainBridge --force-promote <id>
# Reset memory energy
brainBridge --reset-energy <id> --energy=<value>
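No query is shown for --reset-energy, so here is a minimal sketch of what it might run; the $memoryId and $energy parameter names are assumptions taken from the CLI flags:
// RESET MEMORY ENERGY (sketch backing the --reset-energy control)
MATCH (m:Memory {id: $memoryId})
WHERE m.validTo IS NULL
SET m.energy = $energy,
m.lastDecay = datetime() // so the next decay pass starts from the reset value
RETURN m.id, m.energy;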
// DECAY ENERGY (with test isolation)
MATCH (config:ConsolidationConfig)
WHERE config.validTo IS NULL
WITH config
MATCH (m:Memory)
WHERE m.validTo IS NULL
AND m.energy IS NOT NULL
AND ($testOnly = false OR m.isTest = true)
WITH m, config,
CASE m.tier
WHEN 'working' THEN config.workingDecay
WHEN 'shortTerm' THEN config.shortTermDecay
WHEN 'longTerm' THEN config.longTermDecay
END as decayConstant,
// Total elapsed hours since the last decay pass (falls back to lastAccessed on first run)
duration.inSeconds(coalesce(m.lastDecay, m.lastAccessed), datetime()).seconds / 3600.0 as hoursSinceDecay
SET m.energy = m.energy * exp(-1 * hoursSinceDecay * decayConstant),
m.lastDecay = datetime()
RETURN count(m) as memoriesDecayed, avg(m.energy) as avgEnergyAfterDecay;
// PROMOTE MEMORIES (with test isolation and validation)
MATCH (config:ConsolidationConfig)
WHERE config.validTo IS NULL
WITH config
MATCH (m:Memory)
WHERE m.validTo IS NULL
AND ($testOnly = false OR m.isTest = true)
AND (
(m.tier = 'working' AND m.energy > config.workingToShortThreshold) OR
(m.tier = 'shortTerm' AND m.energy > config.shortToLongThreshold)
)
AND NOT (m)-[:PROMOTED_TO]->()
WITH m, config,
CASE m.tier
WHEN 'working' THEN 'shortTerm'
WHEN 'shortTerm' THEN 'longTerm'
END as newTier
// LIMIT cannot reference query variables, so apply the batch size by slicing a collected list
WITH config, collect({m: m, newTier: newTier}) as candidates
UNWIND candidates[0..toInteger(config.promotionBatchSize)] as candidate
WITH candidate.m as m, candidate.newTier as newTier
CREATE (promoted:Memory {
id: randomUUID(),
content: m.content,
tier: newTier,
energy: m.energy,
created: m.created,
promotedFrom: m.id,
lastAccessed: datetime(),
accessCount: m.accessCount,
isTest: m.isTest,
validFrom: datetime(),
validTo: null,
lastPromotion: datetime()
})
CREATE (m)-[:PROMOTED_TO {at: datetime(), energy: m.energy}]->(promoted)
SET m.tier = 'archived' + toUpper(substring(m.tier, 0, 1)) + substring(m.tier, 1), // e.g. 'archivedWorking'
m.validTo = datetime(),
m.promotionAttempts = coalesce(m.promotionAttempts, 0) + 1
RETURN count(promoted) as memoriesPromoted, collect(DISTINCT promoted.tier) as toTiers;
// DRY RUN MODE (preview without changes)
MATCH (config:ConsolidationConfig)
WHERE config.validTo IS NULL
WITH config
MATCH (m:Memory)
WHERE m.validTo IS NULL
AND ($testOnly = false OR m.isTest = true)
RETURN
m.id,
m.tier,
m.energy,
CASE
WHEN m.tier = 'working' AND m.energy > config.workingToShortThreshold THEN 'would promote to shortTerm'
WHEN m.tier = 'shortTerm' AND m.energy > config.shortToLongThreshold THEN 'would promote to longTerm'
WHEN m.energy < 0.1 THEN 'would expire'
ELSE 'no action'
END as action;
// Mark session as ended
MATCH (s:Session {id: $sessionId})
SET s.ended = datetime()
WITH s
// Trigger consolidation for session memories
MATCH (m:Memory {sessionId: s.id, tier: 'working'})
WHERE m.validTo IS NULL AND m.energy > 1.5 // Lower threshold at boundary
// Queue for promotion
SET m.queuedForPromotion = true
RETURN count(m) as memoriesQueued;
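Since visibility comes first, a small read-only query can show what a session boundary actually queued before the next consolidation pass picks it up:
// INSPECT QUEUED PROMOTIONS
MATCH (m:Memory {queuedForPromotion: true, tier: 'working'})
WHERE m.validTo IS NULL
RETURN m.id, m.sessionId, m.energy, m.lastAccessed
ORDER BY m.energy DESC;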
// Modified memory creation for real memories (after testing)
CREATE (m:Memory {
id: randomUUID(),
content: $content,
type: $type, // 'crystallization', 'learning', etc.
tier: 'working',
energy: 1.0,
created: datetime(),
lastAccessed: datetime(),
accessCount: 1,
sessionId: $sessionId,
instanceId: $instanceId,
isTest: false, // Real memory
validFrom: datetime(),
validTo: null
})
RETURN m;
NOT YET - first prove the system works manually.
Future automation will follow once the manual consolidation runs above have been validated.
// Old memories continue to work
MATCH (c:Crystallization)
WHERE c.validTo IS NULL AND c.tier IS NULL
// These remain untouched
// New memories use energy system
CREATE (m:Memory {tier: 'working', energy: 1.0, ...})
// These enter the new system
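To watch the split while both populations coexist, a count like the following may help; it assumes legacy nodes are exactly the Crystallization nodes that still lack a tier property:
// LEGACY VS ENERGY-SYSTEM COUNT
MATCH (c:Crystallization)
WHERE c.validTo IS NULL AND c.tier IS NULL
WITH count(c) as legacyUntouched
MATCH (m:Memory)
WHERE m.validTo IS NULL AND m.tier IS NOT NULL
RETURN legacyUntouched, count(m) as inEnergySystem;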
// Calculate initial energy for old memories
MATCH (c:Crystallization)
WHERE c.validTo IS NULL AND c.tier IS NULL // only legacy nodes not yet migrated
SET c.tier = 'longTerm', // All old crystallizations are long-term
c.energy = 10.0, // High energy (they survived)
c.lastAccessed = coalesce(c.lastAccessed, c.created),
c.accessCount = 1,
c.isTest = false
RETURN count(c) as migratedMemories;
Based on monitoring, adjust these parameters:
// If too many memories promote too quickly
INCREASE workingToShortThreshold (default: 2.0)
INCREASE shortToLongThreshold (default: 5.0)
// If memories decay too fast
DECREASE workingDecay (default: 0.5)
DECREASE shortTermDecay (default: 0.05)
// If consolidation is too slow
INCREASE promotionBatchSize (default: 10)
DECREASE promotionIntervalMinutes (default: 10)
// If clusters form too aggressively
INCREASE coactivationThreshold (default: 3)
INCREASE semanticSimilarityThreshold (default: 0.7)
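The plan does not show how a tuning change is recorded. Assuming ConsolidationConfig uses the same validFrom/validTo pattern as Memory (which the queries above imply), a change could be applied without losing history roughly like this; the new threshold value is just an example:
// APPLY A CONFIG CHANGE (sketch): close the current config and create a successor
MATCH (old:ConsolidationConfig)
WHERE old.validTo IS NULL
SET old.validTo = datetime()
WITH old
CREATE (new:ConsolidationConfig)
SET new = properties(old)
SET new.workingToShortThreshold = 2.5, // example: raise if promotions happen too quickly
new.validFrom = datetime(),
new.validTo = null
RETURN new;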
End of Implementation Plan v2