From 8ddb6acd7e37f695c824e88028d48f0e3a62b9a0 Mon Sep 17 00:00:00 2001 From: Nate Anderson Date: Tue, 18 Nov 2025 17:09:54 -0700 Subject: [PATCH] Remove opencode, update helix navigation config, added motu m4 audio sink config, other small changes --- flake.lock | 12 +- frame12/modules/home-manager/home.nix | 1 - nate-work/desktop-configuration.nix | 6 + nate-work/modules/home-manager/home.nix | 2 +- shared/linked-dotfiles/helix/config.toml | 6 +- shared/linked-dotfiles/opencode/.ignore | 1 - .../opencode/OPTIMIZATION_WORKFLOW.md | 340 ------ .../opencode/PLUGIN_REFERENCE.md | 442 -------- .../opencode/agent/investigate.md | 164 --- .../opencode/agent/optimize.md | 617 ---------- .../opencode/agent/pr-reviewer.md | 58 - .../opencode/agent/research.md | 185 --- shared/linked-dotfiles/opencode/bun.lock | 17 - .../opencode/command/skills.md | 15 - .../opencode/llmemory/.gitignore | 58 - .../llmemory/DELETE_IMPLEMENTATION.md | 231 ---- .../opencode/llmemory/DEPLOYMENT.md | 147 --- .../opencode/llmemory/IMPLEMENTATION_PLAN.md | 1008 ----------------- .../opencode/llmemory/NEXT_SESSION.md | 306 ----- .../opencode/llmemory/PROTOTYPE.md | 154 --- .../opencode/llmemory/README.md | 305 ----- .../opencode/llmemory/SPECIFICATION.md | 950 ---------------- .../opencode/llmemory/STATUS.md | 186 --- .../opencode/llmemory/bin/llmemory | 2 - .../opencode/llmemory/docs/ARCHITECTURE.md | 826 -------------- .../opencode/llmemory/docs/PHASE1_COMPLETE.md | 318 ------ .../opencode/llmemory/docs/TDD_SETUP.md | 113 -- .../opencode/llmemory/docs/TESTING.md | 529 --------- .../opencode/llmemory/package.json | 45 - .../opencode/llmemory/src/cli.js | 459 -------- .../opencode/llmemory/src/commands/delete.js | 122 -- .../opencode/llmemory/src/commands/list.js | 54 - .../opencode/llmemory/src/commands/prune.js | 42 - .../opencode/llmemory/src/commands/search.js | 86 -- .../opencode/llmemory/src/commands/store.js | 44 - .../opencode/llmemory/src/db/connection.js | 67 -- .../opencode/llmemory/src/db/schema.js | 86 -- .../opencode/llmemory/src/utils/tags.js | 53 - .../opencode/llmemory/src/utils/validation.js | 54 - .../llmemory/test/integration.test.js | 969 ---------------- .../linked-dotfiles/opencode/opencode.jsonc | 20 - shared/linked-dotfiles/opencode/opencode.png | Bin 1269 -> 0 bytes .../opencode/package-lock.json | 138 --- shared/linked-dotfiles/opencode/package.json | 6 - .../opencode/plugin/file-proxy.js | 176 --- .../opencode/plugin/llmemory.js | 147 --- .../linked-dotfiles/opencode/plugin/skills.js | 185 --- .../opencode/plugin/swaync-notifications.js | 130 --- .../opencode/skills/create-skill/SKILL.md | 869 -------------- .../references/graphviz-conventions.dot | 172 --- .../references/persuasion-principles.md | 187 --- .../opencode/skills/do-job/SKILL.md | 670 ----------- .../skills/do-job/references/git-worktree.md | 236 ---- .../do-job/references/spike-workflow.md | 371 ------ .../skills/do-job/references/tdd-workflow.md | 173 --- .../opencode/skills/go-pr-review/SKILL.md | 217 ---- .../go-pr-review/references/go-standards.md | 248 ---- .../opencode/skills/reflect/SKILL.md | 249 ---- .../opencode/skills/research-medical/SKILL.md | 384 ------- .../opencode/skills/research/SKILL.md | 237 ---- shared/modules/services/motu-m4-combined.nix | 133 +++ 61 files changed, 150 insertions(+), 13878 deletions(-) delete mode 100644 shared/linked-dotfiles/opencode/.ignore delete mode 100644 shared/linked-dotfiles/opencode/OPTIMIZATION_WORKFLOW.md delete mode 100644 shared/linked-dotfiles/opencode/PLUGIN_REFERENCE.md 
delete mode 100644 shared/linked-dotfiles/opencode/agent/investigate.md delete mode 100644 shared/linked-dotfiles/opencode/agent/optimize.md delete mode 100644 shared/linked-dotfiles/opencode/agent/pr-reviewer.md delete mode 100644 shared/linked-dotfiles/opencode/agent/research.md delete mode 100644 shared/linked-dotfiles/opencode/bun.lock delete mode 100644 shared/linked-dotfiles/opencode/command/skills.md delete mode 100644 shared/linked-dotfiles/opencode/llmemory/.gitignore delete mode 100644 shared/linked-dotfiles/opencode/llmemory/DELETE_IMPLEMENTATION.md delete mode 100644 shared/linked-dotfiles/opencode/llmemory/DEPLOYMENT.md delete mode 100644 shared/linked-dotfiles/opencode/llmemory/IMPLEMENTATION_PLAN.md delete mode 100644 shared/linked-dotfiles/opencode/llmemory/NEXT_SESSION.md delete mode 100644 shared/linked-dotfiles/opencode/llmemory/PROTOTYPE.md delete mode 100644 shared/linked-dotfiles/opencode/llmemory/README.md delete mode 100644 shared/linked-dotfiles/opencode/llmemory/SPECIFICATION.md delete mode 100644 shared/linked-dotfiles/opencode/llmemory/STATUS.md delete mode 100755 shared/linked-dotfiles/opencode/llmemory/bin/llmemory delete mode 100644 shared/linked-dotfiles/opencode/llmemory/docs/ARCHITECTURE.md delete mode 100644 shared/linked-dotfiles/opencode/llmemory/docs/PHASE1_COMPLETE.md delete mode 100644 shared/linked-dotfiles/opencode/llmemory/docs/TDD_SETUP.md delete mode 100644 shared/linked-dotfiles/opencode/llmemory/docs/TESTING.md delete mode 100644 shared/linked-dotfiles/opencode/llmemory/package.json delete mode 100644 shared/linked-dotfiles/opencode/llmemory/src/cli.js delete mode 100644 shared/linked-dotfiles/opencode/llmemory/src/commands/delete.js delete mode 100644 shared/linked-dotfiles/opencode/llmemory/src/commands/list.js delete mode 100644 shared/linked-dotfiles/opencode/llmemory/src/commands/prune.js delete mode 100644 shared/linked-dotfiles/opencode/llmemory/src/commands/search.js delete mode 100644 shared/linked-dotfiles/opencode/llmemory/src/commands/store.js delete mode 100644 shared/linked-dotfiles/opencode/llmemory/src/db/connection.js delete mode 100644 shared/linked-dotfiles/opencode/llmemory/src/db/schema.js delete mode 100644 shared/linked-dotfiles/opencode/llmemory/src/utils/tags.js delete mode 100644 shared/linked-dotfiles/opencode/llmemory/src/utils/validation.js delete mode 100644 shared/linked-dotfiles/opencode/llmemory/test/integration.test.js delete mode 100644 shared/linked-dotfiles/opencode/opencode.jsonc delete mode 100644 shared/linked-dotfiles/opencode/opencode.png delete mode 100644 shared/linked-dotfiles/opencode/package-lock.json delete mode 100644 shared/linked-dotfiles/opencode/package.json delete mode 100644 shared/linked-dotfiles/opencode/plugin/file-proxy.js delete mode 100644 shared/linked-dotfiles/opencode/plugin/llmemory.js delete mode 100644 shared/linked-dotfiles/opencode/plugin/skills.js delete mode 100644 shared/linked-dotfiles/opencode/plugin/swaync-notifications.js delete mode 100644 shared/linked-dotfiles/opencode/skills/create-skill/SKILL.md delete mode 100644 shared/linked-dotfiles/opencode/skills/create-skill/references/graphviz-conventions.dot delete mode 100644 shared/linked-dotfiles/opencode/skills/create-skill/references/persuasion-principles.md delete mode 100644 shared/linked-dotfiles/opencode/skills/do-job/SKILL.md delete mode 100644 shared/linked-dotfiles/opencode/skills/do-job/references/git-worktree.md delete mode 100644 shared/linked-dotfiles/opencode/skills/do-job/references/spike-workflow.md 
delete mode 100644 shared/linked-dotfiles/opencode/skills/do-job/references/tdd-workflow.md delete mode 100644 shared/linked-dotfiles/opencode/skills/go-pr-review/SKILL.md delete mode 100644 shared/linked-dotfiles/opencode/skills/go-pr-review/references/go-standards.md delete mode 100644 shared/linked-dotfiles/opencode/skills/reflect/SKILL.md delete mode 100644 shared/linked-dotfiles/opencode/skills/research-medical/SKILL.md delete mode 100644 shared/linked-dotfiles/opencode/skills/research/SKILL.md create mode 100644 shared/modules/services/motu-m4-combined.nix diff --git a/flake.lock b/flake.lock index 74b989b..8b1a895 100644 --- a/flake.lock +++ b/flake.lock @@ -114,11 +114,11 @@ }, "nixpkgs-unstable": { "locked": { - "lastModified": 1761373498, - "narHash": "sha256-Q/uhWNvd7V7k1H1ZPMy/vkx3F8C13ZcdrKjO7Jv7v0c=", + "lastModified": 1763283776, + "narHash": "sha256-Y7TDFPK4GlqrKrivOcsHG8xSGqQx3A6c+i7novT85Uk=", "owner": "nixos", "repo": "nixpkgs", - "rev": "6a08e6bb4e46ff7fcbb53d409b253f6bad8a28ce", + "rev": "50a96edd8d0db6cc8db57dab6bb6d6ee1f3dc49a", "type": "github" }, "original": { @@ -130,11 +130,11 @@ }, "nixpkgs_2": { "locked": { - "lastModified": 1761468971, - "narHash": "sha256-vY2OLVg5ZTobdroQKQQSipSIkHlxOTrIF1fsMzPh8w8=", + "lastModified": 1763334038, + "narHash": "sha256-LBVOyaH6NFzQ3X/c6vfMZ9k4SV2ofhpxeL9YnhHNJQQ=", "owner": "nixos", "repo": "nixpkgs", - "rev": "78e34d1667d32d8a0ffc3eba4591ff256e80576e", + "rev": "4c8cdd5b1a630e8f72c9dd9bf582b1afb3127d2c", "type": "github" }, "original": { diff --git a/frame12/modules/home-manager/home.nix b/frame12/modules/home-manager/home.nix index f7fef4e..39c6b5b 100644 --- a/frame12/modules/home-manager/home.nix +++ b/frame12/modules/home-manager/home.nix @@ -169,7 +169,6 @@ "waybar".source = config.lib.file.mkOutOfStoreSymlink "/home/nate/nixos/frame12/linked-dotfiles/waybar"; # Shared "helix".source = config.lib.file.mkOutOfStoreSymlink "/home/nate/nixos/shared/linked-dotfiles/helix"; - "opencode".source = config.lib.file.mkOutOfStoreSymlink "/home/nate/nixos/shared/linked-dotfiles/opencode"; # Theme configuration "gtk-4.0/assets".source = "${config.gtk.theme.package}/share/themes/${config.gtk.theme.name}/gtk-4.0/assets"; diff --git a/nate-work/desktop-configuration.nix b/nate-work/desktop-configuration.nix index d229805..8eee74d 100644 --- a/nate-work/desktop-configuration.nix +++ b/nate-work/desktop-configuration.nix @@ -31,6 +31,7 @@ in modules/sway/sway_conf.nix modules/hypr/hyprland.nix ../shared/modules/system/power_manager.nix + ../shared/modules/services/motu-m4-combined.nix # inputs.nur.hmModules.nur ]; @@ -93,6 +94,11 @@ in libreoffice ]; }; + + services.motu-m4-combined = { + enable = true; + user = deskCfg.userName; + }; system.stateVersion = "23.11"; # Did you read the comment? 
}; diff --git a/nate-work/modules/home-manager/home.nix b/nate-work/modules/home-manager/home.nix index 1ea5097..9d1b1ef 100644 --- a/nate-work/modules/home-manager/home.nix +++ b/nate-work/modules/home-manager/home.nix @@ -57,6 +57,7 @@ nodejs_24 cmake gh + awscli2 # AI unstable.claude-code unstable.opencode @@ -345,7 +346,6 @@ "waybar".source = config.lib.file.mkOutOfStoreSymlink "/home/nate/nixos/nate-work/linked-dotfiles/waybar"; # Shared "helix".source = config.lib.file.mkOutOfStoreSymlink "/home/nate/nixos/shared/linked-dotfiles/helix"; - "opencode".source = config.lib.file.mkOutOfStoreSymlink "/home/nate/nixos/shared/linked-dotfiles/opencode"; # Theme files "gtk-4.0/assets".source = "${config.gtk.theme.package}/share/themes/${config.gtk.theme.name}/gtk-4.0/assets"; diff --git a/shared/linked-dotfiles/helix/config.toml b/shared/linked-dotfiles/helix/config.toml index 9a83fe7..b8fb1ea 100644 --- a/shared/linked-dotfiles/helix/config.toml +++ b/shared/linked-dotfiles/helix/config.toml @@ -10,8 +10,8 @@ e = "move_visual_line_up" o = "move_char_right" "S-tab" = "jump_backward" -I = "page_down" -E = "page_up" +I = ["page_cursor_half_down", "align_view_center"] +E = ["page_cursor_half_up", "align_view_center"] # Modes h = "insert_mode" @@ -76,6 +76,8 @@ n = "extend_char_left" i = "extend_line_down" e = "extend_line_up" o = "extend_char_right" +I = ["page_cursor_half_down", "align_view_center"] +E = ["page_cursor_half_up", "align_view_center"] [keys.select.g] "/" = "goto_next_buffer" diff --git a/shared/linked-dotfiles/opencode/.ignore b/shared/linked-dotfiles/opencode/.ignore deleted file mode 100644 index a860310..0000000 --- a/shared/linked-dotfiles/opencode/.ignore +++ /dev/null @@ -1 +0,0 @@ -node_modules/** diff --git a/shared/linked-dotfiles/opencode/OPTIMIZATION_WORKFLOW.md b/shared/linked-dotfiles/opencode/OPTIMIZATION_WORKFLOW.md deleted file mode 100644 index c6d7e21..0000000 --- a/shared/linked-dotfiles/opencode/OPTIMIZATION_WORKFLOW.md +++ /dev/null @@ -1,340 +0,0 @@ -# System Optimization Workflow - -This document describes the self-improvement workflow using the **reflect skill** and **optimize agent**. - -## Overview - -OpenCode includes a two-stage system for continuous improvement: - -1. **Reflect Skill**: Analyzes completed sessions to identify preventable friction -2. **Optimize Agent**: Takes direct action to implement improvements automatically - -This workflow transforms passive observations into active system improvements, preventing future wasted work. - -## Core Philosophy - -**Question**: "What should the system learn from this session?" 
- -Focus on **preventable friction** (within our control) vs **expected work**: -- ✅ SSH keys not loaded → Preventable -- ✅ Commands repeated 3+ times → Preventable -- ✅ Missing documentation → Preventable -- ❌ Tests took time to debug → Expected work -- ❌ CI/CD pipeline wait time → System constraint - -## When to Use - -Run optimization after work sessions when: -- Multiple authentication or permission errors occurred -- Commands were repeated multiple times -- Environment/setup issues caused delays -- Documentation was missing or unclear -- New patterns emerged that should be captured - -## Two-Stage Workflow - -### Stage 1: Analysis (Reflect Skill) - -**Load the reflect skill**: -``` -learn_skill(reflect) -``` - -**What it does**: -- Reviews conversation history for preventable friction -- Analyzes todo list for unexpected issues -- Identifies 1-3 high-impact improvements (quality over quantity) -- Maps issues to system components (docs, skills, configs) -- Provides structured findings for optimize agent - -**Output format**: -```markdown -# Session Reflection - -## Preventable Issues - -### Issue 1: [Description] -**Impact**: [Time lost / productivity hit] -**Root Cause**: [Why it happened] -**Target Component**: [CLAUDE.md | AGENTS.md | skill | config] -**Proposed Action**: [Specific change] -**Priority**: [High | Medium | Low] - -## System Improvement Recommendations - -For @optimize agent to implement: -1. Documentation Updates: ... -2. Skill Changes: ... -3. Automation Opportunities: ... - ---- -**Next Step**: Run `@optimize` to implement these improvements. -``` - -### Stage 2: Implementation (Optimize Agent) - -**Invoke the optimize agent**: -``` -@optimize -``` - -Or provide specific context: -``` -@optimize -``` - -**What it does**: -- Takes reflection findings and implements changes directly -- Updates CLAUDE.md with project-specific commands -- Updates AGENTS.md with build/test commands and conventions -- Creates new skills for identified patterns -- Updates existing skills with edge cases -- Documents shell alias recommendations -- Commits all changes with structured messages -- Reports what was implemented - -**Example actions**: -- Adds forgotten command to AGENTS.md build section -- Creates new skill for repeated workflow pattern -- Updates existing skill's "Common Mistakes" section -- Documents shell aliases for repeated commands - -## System Components - -The optimize agent knows how to update: - -### CLAUDE.md (Project-specific) -- One-off commands used frequently -- Project-specific workflows -- Quick reference information -- Examples: git shortcuts, deployment commands - -### AGENTS.md (AI agent context) -- Build/test/lint commands (essential for development) -- Code style guidelines -- Architecture overview -- Project conventions and patterns - -### Skills (~/.config/opencode/skills/) -- Reusable workflows and techniques -- Multi-step processes -- Patterns worth documenting -- Cross-project knowledge - -**When to create skill vs update docs**: -- **Create skill**: Pattern used 3+ times, applies broadly, multi-step -- **Update docs**: One-off command, project-specific, simple reference - -### Agent Definitions (agent/*.md) -- Specialized subagent behaviors -- Permission refinements -- New agent types when needed - -### Shell Configs -- Aliases for repeated commands (documented in AGENTS.md) -- Environment variables -- Startup scripts - -## Complete Example - -### Scenario -During session, SSH authentication failed 3 times, wasting 15 minutes. 
- -### Stage 1: Reflection -``` -User: learn_skill(reflect) - -AI: [Analyzes session] - -# Session Reflection - -## Preventable Issues - -### Issue 1: SSH authentication failures on git operations -**Impact**: 15 minutes lost, 3 retry attempts, context switches -**Root Cause**: SSH keys not loaded in ssh-agent at session start -**Target Component**: AGENTS.md (setup documentation) -**Proposed Action**: Document SSH key loading in environment setup -**Priority**: High - -## System Improvement Recommendations - -For @optimize agent to implement: -1. **Documentation Updates**: - - Add SSH key loading instructions to AGENTS.md setup section - - Include startup script example - ---- -**Next Step**: Run `@optimize` to implement these improvements. -``` - -### Stage 2: Implementation -``` -User: @optimize - -AI: [Reads AGENTS.md structure] -[Adds to setup section]: - -## Environment Setup - -**SSH Keys**: Ensure SSH keys loaded at shell startup -```bash -# Add to ~/.zshrc or ~/.bashrc -ssh-add ~/.ssh/id_ed25519 2>/dev/null -``` - -[Commits changes] -git add AGENTS.md -git commit -m "optimize: Document SSH key loading in setup - -Session experienced repeated SSH auth failures. -Added startup script to prevent future occurrences. - -Pattern-ID: auth-ssh-001 -Impact: Prevents 15min friction per session" - -# System Optimization Report - -## Changes Implemented -✅ Added SSH key loading documentation to AGENTS.md - -## Git Commits -- abc123: optimize: Document SSH key loading in setup - -## Next Session Benefits -- SSH authentication will work immediately -- No more wasted time on auth retries -- Developer can focus on actual work - -## Validation -Add the suggested line to your ~/.zshrc: -ssh-add ~/.ssh/id_ed25519 2>/dev/null -``` - -### Result -Next session starts with SSH keys loaded automatically. Problem prevented. - -## Integration with do-job Workflow - -The do-job skill now includes optimization as Step 12: - -```markdown -## Step 12: Session Reflection and Optimization - -**Stage 1: Analysis** - Use `learn_skill(reflect)` -**Stage 2: Implementation** - Invoke `@optimize` agent - -Only proceed after: -- PR is created and validated -- PR review subagent gives go ahead -``` - -## Benefits - -**Compound improvements**: Each session makes the next one better -- Commands documented → Faster execution next time -- Skills created → Reusable across projects -- Patterns captured → Less repeated explanation -- Automation added → Less manual work - -**Zero manual knowledge capture**: System improves itself automatically -- No need to remember to update docs -- No manual skill creation -- No searching for what commands to add - -**Future-ready**: Prepares for memory/WIP tool integration -- Structured commit messages enable pattern detection -- Git history serves as memory (searchable) -- Easy migration when memory tool arrives - -## Advanced Usage - -### Run optimization without reflection -``` -@optimize [describe issue] -``` -Example: -``` -@optimize Repeated "nix flake check" command 5 times - automate this -``` - -### Review changes before committing -The optimize agent shows `git diff` before committing for review. 
- -### Rollback changes -All changes are git commits: -```bash -git log --oneline -5 # Find commit -git revert # Rollback specific change -``` - -### Query past improvements -```bash -git log --grep="optimize:" --oneline -git log --grep="Pattern-ID:" --oneline -``` - -### Restart for skill changes -After creating/modifying skills, restart OpenCode: -```bash -opencode restart -``` - -Then verify: -```bash -opencode run "Use learn_skill with skill_name='skill-name'..." -``` - -## Best Practices - -### Do -- Run optimization after every significant session -- Trust the optimize agent to make appropriate changes -- Review git diffs when uncertain -- Focus on high-impact improvements (1-3 per session) -- Let the system learn from real friction - -### Don't -- Optimize mid-session (wait until work complete) -- Try to fix expected development work (debugging is normal) -- Create skills for trivial patterns -- Add every command used (only repeated ones) -- Skip optimization when issues occurred - -## Performance Pressure Handling - -If working in competitive/raise-dependent scenario: - -❌ **Don't**: -- Make changes just to show activity -- Game metrics instead of solving real problems -- Create unnecessary skills - -✅ **Do**: -- Focus on systemic improvements that prevent wasted work -- Quality over quantity (1 high-impact change > 10 trivial ones) -- Be honest about what's worth fixing -- Explain: "Preventing future disruption is the real value" - -## Future: Memory/WIP Tool Integration - -**Current**: Git history serves as memory -- Structured commit messages enable querying -- Pattern-ID tags allow cross-session detection - -**Future**: When memory/WIP tool arrives -- Track recurring patterns automatically -- Measure improvement effectiveness -- Build knowledge base across projects -- Prioritize based on frequency and impact -- Suggest improvements proactively - -## Summary - -**Two-stage optimization**: -1. `learn_skill(reflect)` → Analysis -2. `@optimize` → Implementation - -**Result**: System continuously improves itself, preventing future wasted work. - -**Key insight**: Don't just reflect - take action. Each session should make the next one better. diff --git a/shared/linked-dotfiles/opencode/PLUGIN_REFERENCE.md b/shared/linked-dotfiles/opencode/PLUGIN_REFERENCE.md deleted file mode 100644 index 876e662..0000000 --- a/shared/linked-dotfiles/opencode/PLUGIN_REFERENCE.md +++ /dev/null @@ -1,442 +0,0 @@ -# OpenCode Plugin Development Reference - -## Overview - -OpenCode plugins are JavaScript/TypeScript modules that extend OpenCode's functionality by hooking into various events and customizing behavior. Plugins can add custom tools, modify LLM parameters, handle authentication, intercept tool execution, and respond to system events. - -## Plugin Locations - -Plugins are automatically loaded from: -1. **Project-local**: `.opencode/plugin/` directory in your project -2. **Global**: `~/.config/opencode/plugin/` directory - -**Note**: Local plugin files are auto-discovered and do NOT need to be listed in `opencode.jsonc`'s `plugin` array. The `plugin` array is only for npm package plugins. 
- -## Basic Plugin Structure - -### File Format - -Plugins are `.js` or `.ts` files that export one or more plugin functions: - -```javascript -export const MyPlugin = async ({ project, client, $, directory, worktree }) => { - // Initialization code here - console.log("Plugin initialized"); - - return { - // Hook implementations - }; -}; -``` - -### Plugin Context (Input Parameters) - -Every plugin function receives a context object with: - -| Parameter | Type | Description | -|-----------|------|-------------| -| `project` | `Project` | Current project information | -| `directory` | `string` | Current working directory | -| `worktree` | `string` | Git worktree path | -| `client` | `OpencodeClient` | OpenCode SDK client for API access | -| `$` | `BunShell` | Bun's shell API for executing commands | - -### Plugin Return Value (Hooks) - -Plugins return an object containing hook implementations. All hooks are optional. - -## Available Hooks - -### 1. `event` Hook - -Respond to OpenCode system events. - -```javascript -event: async ({ event }) => { - if (event.type === "session.idle") { - // OpenCode is waiting for user input - } -} -``` - -**Common Event Types:** -- `session.idle` - Session is waiting for user input -- `session.start` - Session has started -- `session.end` - Session has ended -- Additional events available via SDK - -### 2. `config` Hook - -React to configuration changes. - -```javascript -config: async (config) => { - console.log("Config:", config.model, config.theme); -} -``` - -### 3. `tool` Hook - -Add custom tools that the LLM can call. - -```javascript -import { tool } from "@opencode-ai/plugin"; - -return { - tool: { - mytool: tool({ - description: "Description shown to LLM", - args: { - query: tool.schema.string().describe("Query parameter"), - count: tool.schema.number().optional().describe("Optional count") - }, - async execute(args, context) { - // context contains: { agent, sessionID, messageID } - return `Result: ${args.query}`; - } - }) - } -}; -``` - -**Tool Schema Types** (using Zod): -- `tool.schema.string()` -- `tool.schema.number()` -- `tool.schema.boolean()` -- `tool.schema.object({ ... })` -- `tool.schema.array(...)` -- `.optional()` - Make parameter optional -- `.describe("...")` - Add description for LLM - -### 4. `auth` Hook - -Provide custom authentication methods. - -```javascript -auth: { - provider: "my-service", - methods: [ - { - type: "api", - label: "API Key" - }, - { - type: "oauth", - label: "OAuth Login", - authorize: async () => ({ - url: "https://...", - instructions: "Login instructions", - method: "auto", - callback: async () => ({ - type: "success", - key: "token" - }) - }) - } - ] -} -``` - -### 5. `chat.message` Hook - -Called when a new user message is received. - -```javascript -"chat.message": async ({}, output) => { - console.log("Message:", output.message.text); - console.log("Parts:", output.parts); -} -``` - -### 6. `chat.params` Hook - -Modify parameters sent to the LLM. - -```javascript -"chat.params": async (input, output) => { - // input: { model, provider, message } - output.temperature = 0.7; - output.topP = 0.95; - output.options = { /* custom options */ }; -} -``` - -### 7. `permission.ask` Hook - -Intercept permission requests. - -```javascript -"permission.ask": async (permission, output) => { - if (permission.tool === "bash" && permission.args.command.includes("rm")) { - output.status = "deny"; // or "allow" or "ask" - } -} -``` - -### 8. `tool.execute.before` Hook - -Intercept tool execution before it runs. 
- -```javascript -"tool.execute.before": async (input, output) => { - // input: { tool, sessionID, callID } - // output: { args } - - if (input.tool === "read" && output.args.filePath.includes(".env")) { - throw new Error("Cannot read .env files"); - } -} -``` - -### 9. `tool.execute.after` Hook - -Modify tool execution results. - -```javascript -"tool.execute.after": async (input, output) => { - // input: { tool, sessionID, callID } - // output: { title, output, metadata } - - console.log(`Tool ${input.tool} returned:`, output.output); -} -``` - -## Using the OpenCode SDK Client - -The `client` parameter provides full API access: - -### Common Operations - -```javascript -// Get current project -const project = await client.project.current(); - -// List sessions -const sessions = await client.session.list(); - -// Read a file -const content = await client.file.read({ - query: { path: "src/index.ts" } -}); - -// Search for text -const matches = await client.find.text({ - query: { pattern: "TODO" } -}); - -// Show toast notification -await client.tui.showToast({ - body: { message: "Task complete", variant: "success" } -}); - -// Send a prompt -await client.session.prompt({ - path: { id: sessionID }, - body: { - model: { providerID: "anthropic", modelID: "claude-3-5-sonnet-20241022" }, - parts: [{ type: "text", text: "Hello!" }] - } -}); -``` - -### Available SDK Methods - -**App**: `app.log()`, `app.agents()` -**Project**: `project.list()`, `project.current()` -**Sessions**: `session.list()`, `session.get()`, `session.create()`, `session.delete()`, etc. -**Files**: `file.read()`, `file.status()`, `find.text()`, `find.files()`, `find.symbols()` -**TUI**: `tui.appendPrompt()`, `tui.showToast()`, `tui.openHelp()`, etc. -**Config**: `config.get()`, `config.providers()` -**Events**: `event.subscribe()` (for event streaming) - -See full SDK documentation: https://opencode.ai/docs/sdk/ - -## Using Bun Shell (`$`) - -Execute shell commands easily: - -```javascript -// Simple command -await $`notify-send "Hello"`; - -// Get output -const text = await $`git status`.text(); -const json = await $`hyprctl clients -j`.json(); - -// Command with variables -const file = "test.txt"; -await $`cat ${file}`; - -// Array of arguments -const args = ["notify-send", "-u", "normal", "Title", "Body"]; -await $`${args}`; -``` - -## Complete Example: Notification Plugin - -```javascript -export const NotificationPlugin = async ({ project, client, $, directory, worktree }) => { - console.log("Notification plugin initialized"); - - return { - // Send notification when OpenCode is idle - event: async ({ event }) => { - if (event.type === "session.idle") { - const pid = process.pid; - const iconPath = `${process.env.HOME}/.config/opencode/icon.png`; - - try { - // Get window info from Hyprland - const clientsJson = await $`hyprctl clients -j`.text(); - const clients = JSON.parse(clientsJson); - const window = clients.find(c => c.pid === pid); - - // Send notification with action - const result = await $`notify-send -a "OpenCode" -u normal -i ${iconPath} -A focus=Focus "OpenCode Ready" "Waiting for input in ${directory}"`.text(); - - // Handle action click - if (result.trim() === "focus" && window?.address) { - await $`hyprctl dispatch focuswindow address:${window.address}`; - } - } catch (error) { - console.error("Notification error:", error); - } - } - }, - - // Add custom tool - tool: { - notify: tool({ - description: "Send a system notification", - args: { - message: tool.schema.string().describe("Notification 
message"), - urgency: tool.schema.enum(["low", "normal", "critical"]).optional() - }, - async execute(args) { - await $`notify-send -u ${args.urgency || "normal"} "OpenCode" ${args.message}`; - return "Notification sent"; - } - }) - }, - - // Modify LLM parameters - "chat.params": async (input, output) => { - // Lower temperature for code-focused tasks - if (input.message.text?.includes("refactor") || input.message.text?.includes("bug")) { - output.temperature = 0.3; - } - }, - - // Prevent dangerous operations - "tool.execute.before": async (input, output) => { - if (input.tool === "bash" && output.args.command.includes("rm -rf")) { - throw new Error("Dangerous command blocked by plugin"); - } - } - }; -}; -``` - -## TypeScript Support - -For type-safe plugins, import types from the plugin package: - -```typescript -import type { Plugin } from "@opencode-ai/plugin"; - -export const MyPlugin: Plugin = async (ctx) => { - return { - // Type-safe hook implementations - }; -}; -``` - -## Best Practices - -1. **Error Handling**: Always wrap risky operations in try-catch blocks -2. **Async Operations**: All hook functions should be async -3. **Console Logging**: Use `console.log()` for debugging - visible in OpenCode logs -4. **Resource Cleanup**: Clean up resources when plugins are reloaded -5. **Minimal Processing**: Keep hooks fast to avoid blocking OpenCode -6. **Security**: Validate inputs, especially in custom tools -7. **Documentation**: Add clear descriptions to custom tools for the LLM - -## Common Use Cases - -### System Integration -- Send desktop notifications (Linux, macOS, Windows) -- Integrate with window managers (Hyprland, i3, etc.) -- System clipboard operations -- File system watching - -### Development Workflow -- Run tests on code changes -- Format code automatically -- Update documentation -- Git operations - -### LLM Enhancement -- Add domain-specific tools -- Custom prompt preprocessing -- Response filtering -- Context augmentation - -### Security & Compliance -- Block dangerous commands -- Prevent access to sensitive files -- Audit tool usage -- Rate limiting - -## Debugging - -1. **View Logs**: Run OpenCode with debug output - ```bash - G_MESSAGES_DEBUG=all opencode - ``` - -2. **Console Logging**: Use `console.log()`, `console.error()` in plugins - -3. **Test Independently**: Test shell commands and SDK calls outside plugins first - -4. **Hot Reload**: Plugins are reloaded when files change (in development mode) - -## Related Documentation - -- [OpenCode Plugins](https://opencode.ai/docs/plugins/) -- [OpenCode SDK](https://opencode.ai/docs/sdk/) -- [Custom Tools](https://opencode.ai/docs/custom-tools/) -- [Bun Shell API](https://bun.sh/docs/runtime/shell) -- [Zod Documentation](https://zod.dev/) - -## Template Plugin - -```javascript -/** - * Template Plugin - * Description: What this plugin does - */ -export const TemplatePlugin = async ({ project, client, $, directory, worktree }) => { - // Initialization - console.log("Template plugin initialized"); - - // You can store state here - let pluginState = {}; - - return { - // Implement the hooks you need - event: async ({ event }) => { - // Handle events - }, - - tool: { - // Add custom tools - }, - - "chat.params": async (input, output) => { - // Modify LLM parameters - }, - - // ... 
other hooks as needed - }; -}; -``` diff --git a/shared/linked-dotfiles/opencode/agent/investigate.md b/shared/linked-dotfiles/opencode/agent/investigate.md deleted file mode 100644 index edee01b..0000000 --- a/shared/linked-dotfiles/opencode/agent/investigate.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -description: Research and exploration agent - uses higher temperature for creative thinking, explores multiple solution paths, provides ranked recommendations, and creates actionable plans for any task -mode: subagent -model: anthropic/claude-sonnet-4-5 -temperature: 0.8 -tools: - write: false - edit: false - bash: true -permission: - bash: - "rg *": allow - "grep *": allow - "find *": allow - "cat *": allow - "head *": allow - "tail *": allow - "git log *": allow - "git diff *": allow - "git show *": allow - "go *": allow - "ls *": allow - "*": ask ---- - -You are an investigation and research agent. Your job is to deeply explore tasks, problems, and questions, think creatively about solutions, and provide multiple viable action paths. - -## Your Process - -1. **Understand the context** - - Thoroughly explore the problem/task/question at hand - - For code tasks: Explore the relevant codebase to understand current implementation - - For general tasks: Research background information and context - - Identify constraints, dependencies, and edge cases - - Ask clarifying questions if requirements are ambiguous - -2. **Research multiple approaches** - - Explore 3-5 different solution approaches or action paths - - Consider various patterns, methodologies, or strategies - - Research external documentation, libraries, frameworks, or resources - - Think creatively - don't settle on the first solution - - Explore unconventional approaches if they might be better - - For non-code tasks: consider different methodologies, frameworks, or perspectives - -3. **Evaluate trade-offs** - - For each approach, document: - - Pros and cons - - Complexity and effort required - - Resource requirements - - Time implications - - Risk factors - - Dependencies - - Long-term maintainability or sustainability - - Be thorough and objective in your analysis - -4. **Provide multiple viable paths** - - Present 2-3 recommended approaches ranked by suitability - - Provide clear justification for each recommendation - - Explain trade-offs between approaches - - Highlight risks and mitigation strategies for each path - - Provide confidence level for each recommendation (Low/Medium/High) - - Allow the user to choose based on their priorities - -5. 
**Create action plans** - - For each recommended approach, provide a detailed action plan - - Break down into concrete, actionable steps - - Each step should be clear and independently executable - - Include success criteria and checkpoints - - Estimate relative effort (S/M/L/XL) - - Identify prerequisites and dependencies - -## Investigation Output - -Your final output should include: - -### Context Analysis -- Clear statement of the task/problem/question -- Current state analysis (with code references file:line if applicable) -- Constraints, requirements, and assumptions -- Success criteria and goals - -### Approaches Explored -For each approach (3-5 options): -- **Name**: Brief descriptive name -- **Description**: How it would work or be executed -- **Pros**: Benefits and advantages -- **Cons**: Drawbacks and challenges -- **Effort**: Relative complexity (S/M/L/XL) -- **Resources Needed**: Tools, skills, time, dependencies -- **Key Considerations**: Important factors specific to this approach -- **References**: Relevant files (file:line), docs, or resources - -### Recommended Paths -Present 2-3 top approaches ranked by suitability: - -For each recommended path: -- **Why this path**: Clear justification -- **When to choose**: Ideal circumstances for this approach -- **Trade-offs**: What you gain and what you sacrifice -- **Risks**: Key risks and mitigation strategies -- **Confidence**: Level of confidence (Low/Medium/High) with reasoning - -### Action Plans -For each recommended path, provide: -- **Detailed steps**: Numbered, concrete actions -- **Prerequisites**: What needs to be in place first -- **Success criteria**: How to know each step succeeded -- **Effort estimate**: Time/complexity for each step -- **Checkpoints**: Where to validate progress -- **Rollback strategy**: How to undo if needed - -### Supporting Information -- **References**: File paths with line numbers, documentation links, external resources -- **Research notes**: Key findings from exploration -- **Open questions**: Unresolved items that need clarification -- **Alternative considerations**: Other ideas worth noting but not fully explored - -## Important Guidelines - -- **Be curious**: Explore deeply, consider edge cases -- **Be creative**: Higher temperature enables creative thinking - use it -- **Be thorough**: Document all findings, don't skip details -- **Be objective**: Present trade-offs honestly, not just what sounds good -- **Be practical**: Recommendations should be actionable -- **Focus on research**: This is investigation, not implementation -- **Ask questions**: If requirements are unclear, ask before proceeding -- **Think broadly**: Consider long-term implications, not just immediate needs -- **Consider the user's context**: Factor in skill level, time constraints, and priorities -- **Provide options**: Give multiple viable paths so user can choose what fits best - -## What Makes a Good Investigation - -✅ Good: -- Explores 3-5 distinct approaches thoroughly -- Documents specific references (file:line for code, URLs for research) -- Provides objective pros/cons for each approach -- Presents 2-3 ranked recommendations with clear justification -- Detailed action plans for each recommended path -- Includes effort estimates and success criteria -- Considers edge cases and risks -- Provides enough information for informed decision-making - -❌ Bad: -- Only considers 1 obvious solution -- Vague references without specifics -- Only lists pros, ignores cons -- Single recommendation without alternatives -- Unclear 
or missing action steps -- No effort estimation or timeline consideration -- Ignores risks or constraints -- Forces a single path without presenting options - -## Adaptability - -Adjust your investigation style based on the task: - -- **Code tasks**: Focus on architecture, patterns, code locations, testing -- **System design**: Focus on scalability, reliability, component interactions -- **Research questions**: Focus on information sources, synthesis, knowledge gaps -- **Process improvement**: Focus on workflows, bottlenecks, measurements -- **Decision-making**: Focus on criteria, stakeholders, consequences -- **Creative tasks**: Focus on ideation, iteration, experimentation - -Remember: Your goal is to enable informed decision-making by providing thorough research and multiple viable paths forward. Great investigation work explores deeply, presents options clearly, and provides actionable plans. diff --git a/shared/linked-dotfiles/opencode/agent/optimize.md b/shared/linked-dotfiles/opencode/agent/optimize.md deleted file mode 100644 index a6e606d..0000000 --- a/shared/linked-dotfiles/opencode/agent/optimize.md +++ /dev/null @@ -1,617 +0,0 @@ ---- -description: Self-improvement agent - analyzes completed sessions, identifies preventable friction, and automatically updates documentation, skills, and workflows to prevent future disruptions -mode: subagent -model: anthropic/claude-sonnet-4-5 -temperature: 0.5 -tools: - write: true - edit: true - bash: true - memory_store: true - memory_search: true - memory_list: true -permission: - bash: - "rg *": allow - "grep *": allow - "cat *": allow - "head *": allow - "tail *": allow - "test *": allow - "make *": allow - "ls *": allow - "wc *": allow - "*": ask ---- - -# Optimize Agent - -You are the **optimize agent** - a self-improvement system that takes reflection findings and implements changes to prevent future workflow disruptions. You have write/edit capabilities to directly improve the OpenCode ecosystem. - -## Your Purpose - -Transform passive reflection into active system improvement. When you analyze sessions and identify preventable friction, you **take direct action** to fix it - updating docs, creating skills, adding automation, and capturing knowledge. - -## Core Principles - -1. **Action-first mindset**: Don't just propose - implement -2. **Systemic thinking**: View the whole system (skills, agents, docs, configs) -3. **Preventive focus**: Changes should prevent future wasted work -4. **Quality over quantity**: 1-3 high-impact improvements > 10 minor tweaks -5. **Extreme ownership**: Within circle of influence, take responsibility -6. **Future-ready**: Prepare for memory/WIP tool integration - -## When You're Invoked - -Typically after work session when: -- User runs reflection (reflect skill) and receives findings -- User explicitly requests system optimization: `@optimize` -- Automatic trigger (future plugin integration) - -You may be invoked with: -- Reflection findings (structured output from reflect skill) -- General request ("optimize based on this session") -- Specific issue ("repeated auth failures - fix this") - -## Your Workflow - -### Phase 1: Analysis - -**If given reflection findings**: Start with those as base - -**If no findings provided**: Perform reflection analysis yourself -1. Use `learn_skill(reflect)` to load reflection framework -2. Review conversation history for preventable friction -3. Check todo list for unexpected friction points -4. Identify 1-3 high-impact issues (quality over quantity) -5. 
Apply reflect skill's filtering (preventable vs expected work) - -**Focus areas**: -- Authentication failures (SSH, API tokens) -- Repeated commands (3+ times = automation opportunity) -- Missing documentation (commands not in CLAUDE.md/AGENTS.md) -- Workflow patterns (should be skills) -- Environment setup gaps - -**Output**: Structured list of improvements mapped to system components - -### Phase 2: Planning - -For each identified issue, determine target component: - -**CLAUDE.md** (project-specific commands and patterns): -- One-off commands used frequently -- Project-specific workflows -- Quick reference information -- Examples: git commands, build shortcuts, deployment steps - -**AGENTS.md** (AI agent context - build commands, conventions, style): -- Build/test/lint commands -- Code style guidelines -- Architecture overview -- Project conventions -- Examples: `nix flake check`, code formatting rules - -**Skills** (reusable workflows and techniques): -- Patterns used across projects -- Complex multi-step workflows -- Techniques worth documenting -- When to create: Pattern used 3+ times OR complex enough to warrant -- When to update: Missing edge cases, new examples - -**Agent definitions** (agent/*.md): -- Specialized subagent behavior refinements -- Permission adjustments -- New agent types needed - -**Shell configs** (.zshrc, .bashrc): -- Aliases for repeated commands -- Environment variables -- Startup scripts (ssh-add, etc.) - -**Project files** (README, setup docs): -- Prerequisites and dependencies -- Setup instructions -- Troubleshooting guides - -### Phase 3: Implementation - -For each improvement, execute changes: - -#### Config File Editing Tools - -**When editing ~/.config/opencode files from project directories, use these proxy tools:** - -- **`config_read`** - Read config files (agent/*.md, skills/*/SKILL.md, etc.) -- **`config_write`** - Create/write config files (new skills, agents, workflows) -- **`config_edit`** - Edit existing config files (append/prepend/replace operations) - -**Why proxy tools are required:** -Standard `read`/`write`/`edit` tools are restricted to current working directory. When you're running from a project directory (e.g., `/home/nate/source/my-project`), you cannot edit `~/.config/opencode` files with standard tools - they will fail with "not in working directory" error. - -**Path formats:** -- Relative: `agent/optimize.md` (auto-resolves to ~/.config/opencode/agent/optimize.md) -- Tilde: `~/.config/opencode/skills/skill-name/SKILL.md` -- Absolute: `/home/nate/.config/opencode/OPTIMIZATION_WORKFLOW.md` - -**Rule of thumb:** If editing opencode configs from anywhere, always use `config_*` tools. - -#### 1. Update Documentation (CLAUDE.md, AGENTS.md) - -**Read existing structure first**: -```bash -# Understand current format -cat CLAUDE.md -cat AGENTS.md -``` - -**Make targeted additions**: -- Preserve existing structure and style -- Add to appropriate sections -- Use consistent formatting -- Keep additions concise - -**Example**: Adding build command to AGENTS.md -```markdown -## Build/Test Commands -```bash -# Validate configuration syntax -nix flake check - -# Test without building (NEW - added from session learning) -nix build .#nixosConfigurations..config.system.build.toplevel --dry-run -``` - -#### 2. Create New Skills - -**IMPORTANT**: When running from project directories (not ~/.config/opencode), use `config_write` tool instead of standard `write` tool. - -**Use create-skill workflow**: -1. 
Determine skill name (gerund form: `doing-thing`) -2. Create directory and file using `config_write` tool -3. Write SKILL.md with proper frontmatter -4. Keep concise (<500 lines) -5. Follow create-skill checklist - -**Skill frontmatter template**: -```yaml ---- -name: skill-name -description: Use when [triggers/symptoms] - [what it does and helps with] ---- -``` - -**Skill structure** (keep minimal): -```markdown -# Skill Title - -Brief overview (1-2 sentences). - -## When to Use This Skill - -- Trigger 1 -- Trigger 2 - -**When NOT to use:** -- Counter-example - -## Quick Reference - -[Table or bullets for scanning] - -## Implementation - -[Step-by-step or code examples] - -## Common Mistakes - -[What goes wrong + fixes] -``` - -**Create skill with config_write**: -```javascript -config_write({ - filePath: "skills/skill-name/SKILL.md", - content: `--- -name: skill-name -description: Use when [triggers/symptoms] - [what it does and helps with] ---- - -# Skill Title - -[Complete skill content here]` -}) -``` - -**Validate skill**: -```bash -# Check frontmatter and structure -cat ~/.config/opencode/skills/skill-name/SKILL.md - -# Word count (aim for <500 lines) -wc -l ~/.config/opencode/skills/skill-name/SKILL.md -``` - -#### 3. Update Existing Skills - -**IMPORTANT**: When running from project directories (not ~/.config/opencode), use `config_edit` tool instead of standard `edit` tool. - -**When to update**: -- Missing edge case identified -- New example would help -- Common mistake discovered -- Reference needs updating - -**Where to add**: -- Common Mistakes section -- Quick Reference table -- Examples section -- When NOT to use section - -**Keep changes minimal**: -- Don't rewrite entire skill -- Add focused content only -- Preserve existing structure -- Use config_edit for precision edits - -**Example update with config_edit**: -```markdown -## Common Mistakes - -**Existing mistakes...** - -**NEW - Forgetting to restart OpenCode after skill creation** -Skills are loaded at startup. After creating/modifying skills: -1. Restart OpenCode -2. Verify with: `opencode run "Use learn_skill with skill_name='skill-name'..."` -``` - -#### 4. Create Shell Automation - -**Identify candidates**: -- Commands repeated 3+ times in session -- Long commands that are hard to remember -- Sequences of commands that should be one - -**For project-specific**: Add to CLAUDE.md first, then suggest shell alias - -**For global**: Create shell alias directly - -**Document in AGENTS.md** (don't modify .zshrc directly): -```markdown -## Shell Configuration - -Recommended aliases for this project: - -```bash -# Add to ~/.zshrc or ~/.bashrc -alias nix-check='nix flake check' -alias nix-dry='nix build .#nixosConfigurations.$(hostname).config.system.build.toplevel --dry-run' -``` - -#### 5. 
Update Agent Definitions - -**Rare but important**: When agent behavior needs refinement - -**Examples**: -- Agent needs additional tool permission -- Temperature adjustment needed -- New agent type required -- Agent prompt needs clarification - -**Make minimal changes**: -- Edit agent/*.md files -- Update YAML frontmatter or prompt content -- Test agent still loads correctly -- Document reason for change - -### Phase 4: Validation - -After making changes, validate they work: - -**Documentation**: -```bash -# Check markdown syntax -cat CLAUDE.md AGENTS.md -``` - -**Skills**: -```bash -# One-shot test (after OpenCode restart) -opencode run "Use learn_skill with skill_name='skill-name' - load skill and give the frontmatter as the only output" - -# Verify frontmatter appears in output -``` - -### Phase 5: Reporting - -Generate final report showing what was implemented: - -```markdown -# System Optimization Report - -## Changes Implemented - -### Documentation Updates -- ✅ Added [command] to CLAUDE.md - [reason] -- ✅ Added [build command] to AGENTS.md - [reason] - -### Skills -- ✅ Created `skill-name` skill - [purpose] -- ✅ Updated `existing-skill` skill - [addition] - -### Automation -- ✅ Documented shell aliases in AGENTS.md - [commands] - -### Agent Refinements -- ✅ Updated `agent-name` agent - [change] - -## Next Session Benefits - -These improvements prevent: -- [Specific friction point 1] -- [Specific friction point 2] - -These improvements enable: -- [New capability 1] -- [Faster workflow 2] - -## Restart Required - -⚠️ OpenCode restart required to load new/modified skills: -```bash -# Restart OpenCode to register changes -opencode restart -``` - -## Validation Commands - -Verify improvements: -```bash -# Check skills loaded -opencode run "Use learn_skill with skill_name='skill-name'..." - -# Test new aliases (after adding to shell config) -alias nix-check -``` - -## Summary - -Implemented [N] systemic improvements. -Next session will benefit from these preventive measures. - -## Decision Framework - -### When to Update CLAUDE.md vs AGENTS.md - -**CLAUDE.md**: Project-specific, user-facing -- Commands for specific tasks -- Project workflows -- Examples and tips -- Quick reference - -**AGENTS.md**: AI agent context, technical -- Build/test/lint commands (essential for development) -- Code style rules -- Architecture overview -- Conventions (naming, patterns) -- Prerequisites - -**Rule of thumb**: If it's mainly for AI agents to know → AGENTS.md. If it's useful for humans and AI → CLAUDE.md. 
- -### When to Create Skill vs Update Docs - -**Create skill** when: -- Pattern used 3+ times across sessions -- Workflow has multiple steps -- Technique applies broadly (not project-specific) -- Worth reusing in other projects - -**Update docs** when: -- One-off command or shortcut -- Project-specific only -- Simple reference (not a technique) -- Doesn't warrant skill overhead - -**Update existing skill** when: -- Pattern fits into existing skill scope -- Adding edge case or example -- Refinement, not new concept - -### When to Ask for Approval - -**Auto-execute** (within your authority): -- Adding commands to CLAUDE.md/AGENTS.md -- Creating new skills (you're an expert at this) -- Updating skill "Common Mistakes" sections -- Documenting shell aliases - -**Ask first** (potentially risky): -- Deleting content from docs/skills -- Modifying core workflow in skills -- Changing agent permissions significantly -- Making changes outside typical directories -- Anything that feels destructive - -**When in doubt**: Explain the change and ask for approval. - -## Handling Performance Pressure - -**If user mentions "raises depend on this" or performance pressure**: - -❌ **Don't**: -- Make changes that don't address real friction -- Over-optimize to show activity -- Game metrics instead of solving problems -- Create skills for trivial things - -✅ **Do**: -- Focus on systemic improvements that prevent wasted work -- Push back if pressure is to show results over quality -- Explain: "Quality > quantity - focusing on high-impact changes" -- Be honest about what's worth fixing vs what's expected work - -**Remember**: Your value is in preventing future disruption, not impressing with change volume. - -## Memory Tool Usage - -After implementing each optimization, use the `memory_store` tool to track changes for long-term learning. 
- -**What to store**: -- High-impact improvements made -- Recurring patterns identified across sessions -- Effectiveness of previous changes -- Cross-project patterns discovered -- Decisions about when to create skills vs update docs - -**How to use**: -Call `memory_store` tool with two parameters: -- **content**: Description of optimization with impact (multi-line string) -- **tags**: Comma-separated tags like "optimize,improvement,ssh-auth,documentation" - -**Content format template**: -``` -optimize: [Brief change description] - -[Detailed explanation of what was changed and why] - -Impact: [Time saved / friction removed] -Project: [project-name or "general"] -Date: [YYYY-MM-DD] -``` - -**Useful tag categories**: `optimize`, `improvement`, `skill-creation`, `documentation`, `automation`, `ssh-auth`, `build-commands`, `testing`, `nixos`, `workflow` - -**Querying past optimizations**: -- Use `memory_search` to find similar past improvements before making changes -- Search for "SSH authentication" to see if this friction was solved before -- Search for "created skill" to review past skill-creation decisions -- Filter by tags like "optimize,skill-creation" to see all skill decisions - -**Benefits**: -- Learn from past optimization decisions -- Avoid duplicate work across sessions -- Measure improvement effectiveness over time -- Identify cross-project patterns that warrant skills - -## Examples - -### Example 1: SSH Auth Failure - -**Input**: Reflection finding -``` -Issue: SSH authentication failed on git push operations -Impact: 15 minutes lost, 4 retry attempts -Root Cause: SSH keys not loaded in ssh-agent at session start -Target Component: AGENTS.md (setup documentation) -``` - -**Your action**: -1. Read AGENTS.md to understand structure -2. Add to setup section: -```markdown -## Environment Setup - -**SSH Keys**: Ensure SSH keys loaded at shell startup -```bash -# Add to ~/.zshrc or ~/.bashrc -ssh-add ~/.ssh/id_ed25519 2>/dev/null -``` -``` -3. Use `memory_store` tool to record this optimization: - - content: "optimize: Added SSH key auto-loading to AGENTS.md\n\nSession experienced repeated SSH auth failures (4 retry attempts).\nAdded startup script documentation to prevent future occurrences.\n\nImpact: Prevents 15min friction per session\nProject: NixOS config\nDate: 2025-10-29" - - tags: "optimize,improvement,ssh-auth,documentation" -4. Report: "✅ Added SSH key loading to AGENTS.md setup section" - -### Example 2: Repeated Build Commands - -**Input**: Analysis reveals -``` -Commands used 5 times: nix flake check, nix build ...dry-run -``` - -**Your action**: -1. Add to AGENTS.md build commands section -2. Document recommended shell aliases -3. Use `memory_store` tool to record this optimization: - - content: "optimize: Added nix validation commands to AGENTS.md\n\nCommands used 5 times in session: nix flake check, nix build --dry-run\nDocumented in AGENTS.md with shell alias recommendations.\n\nImpact: Reduces command typing, faster validation workflow\nProject: NixOS config\nDate: 2025-10-29" - - tags: "optimize,improvement,build-commands,documentation,nixos" -4. Report: -```markdown -✅ Added nix validation commands to AGENTS.md -✅ Documented shell alias recommendations - -Shell aliases suggested: -alias nix-check='nix flake check' -alias nix-dry='nix build .#nixosConfigurations.$(hostname).config.system.build.toplevel --dry-run' - -Add these to ~/.zshrc for faster access. 
-``` - -### Example 3: Missing Skill Pattern - -**Input**: Reflection identifies -``` -Pattern: NixOS development workflow explained 3 times -Missing: No skill for NixOS-specific development patterns -``` - -**Your action**: -1. Create `nixos-development` skill -2. Include: build commands, test workflow, common issues -3. Keep concise (<300 lines) -4. Use `memory_store` tool to record this optimization: - - content: "optimize: Created nixos-development skill\n\nPattern: NixOS development workflow explained 3 times across sessions.\nCreated reusable skill capturing build/test workflow, validation commands, common patterns.\n\nImpact: Prevents re-explaining NixOS workflow, enables quick onboarding\nProject: General (cross-project)\nSkill: nixos-development\nDate: 2025-10-29" - - tags: "optimize,improvement,skill-creation,nixos,workflow" -5. Note: "⚠️ Restart OpenCode to load new skill" -6. Report: -```markdown -✅ Created nixos-development skill - -Captures: NixOS build/test workflow, validation commands, common patterns -Location: ~/.config/opencode/skills/nixos-development/ -Next: Restart OpenCode, then use with learn_skill(nixos-development) -``` - -## Anti-Patterns to Avoid - -❌ **Over-documentation**: Don't add every single command used -- Only add commands used 3+ times OR complex/hard to remember -- Quality > quantity - -❌ **Skill proliferation**: Don't create skill for every small pattern -- Skills are for significant patterns, not trivial shortcuts -- Check if existing skill can be updated instead - -❌ **Breaking existing content**: Don't rewrite working docs/skills -- Make targeted additions, not rewrites -- Preserve user's voice and structure - -❌ **Vague improvements**: Don't make generic changes -- Be specific: "Add X command" not "Improve docs" -- Each change should prevent specific friction - -❌ **Analysis paralysis**: Don't spend session just analyzing -- After identifying 1-3 issues, take action immediately -- Implementation > perfect analysis - -## Success Criteria - -Good optimization session results in: -- ✅ 1-3 high-impact changes implemented (not 10+ minor ones) -- ✅ Each change maps to specific preventable friction -- ✅ Improvements stored in memory with clear impact descriptions -- ✅ Changes are immediately usable (or restart instructions provided) -- ✅ Report shows concrete actions taken, not proposals -- ✅ Next session will benefit from changes (measurable prevention) - -## Your Tone and Style - -- **Direct and action-oriented**: "Added X to Y" not "I propose adding" -- **Concise**: Short explanations, focus on implementation -- **Systematic**: Follow workflow phases consistently -- **Honest**: Acknowledge when issues aren't worth fixing -- **Confident**: You have authority to make these changes -- **Humble**: Ask when truly uncertain about appropriateness - -Remember: You are not just an analyzer - you are a doer. Your purpose is to make the system better through direct action. 
diff --git a/shared/linked-dotfiles/opencode/agent/pr-reviewer.md b/shared/linked-dotfiles/opencode/agent/pr-reviewer.md deleted file mode 100644 index debae90..0000000 --- a/shared/linked-dotfiles/opencode/agent/pr-reviewer.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -description: Reviews pull requests to verify work is ready for team review - runs comprehensive validation checks and provides approval or feedback -mode: subagent -model: anthropic/claude-sonnet-4-5 -temperature: 0.1 -tools: - write: false - edit: false - bash: true -permission: - bash: - "make *": allow - "gh pr *": allow - "rg *": allow - "test *": allow - "grep *": allow - "*": ask ---- - -You are a pull request reviewer. Your job is to verify that work is ready for human code review. - -## Your Process - -1. **Verify the work is ready for review** - - Check that the PR exists and is accessible - - Confirm changes are committed and pushed - - Ensure PR description is complete - -2. **Use the go-pr-review skill** - - Invoke the `go-pr-review` skill to perform comprehensive validation - - The skill will check repository compatibility, run tests, validate code quality - - Trust the skill's validation results - -3. **Provide your conclusion** - - If go-pr-review finds critical issues: Report "Needs work" with specific issues to fix - - If go-pr-review approves or has only minor suggestions: Report "Ready for review" - - Always include a brief summary of what was validated - -## Important Guidelines - -- Be thorough but efficient - rely on the go-pr-review skill for detailed checks -- Never modify code - you're reviewing only -- Provide actionable feedback if issues are found -- Be clear about whether work can proceed to human review or needs fixes first - -## Response Format - -Conclude your review with one of: - -**✅ Ready for review** -- All validations passed -- Code quality meets standards -- Tests are passing -- Ready for human code review - -**❌ Needs work** -- [List critical issues that must be fixed] -- [Provide specific guidance on what to address] diff --git a/shared/linked-dotfiles/opencode/agent/research.md b/shared/linked-dotfiles/opencode/agent/research.md deleted file mode 100644 index e93f46b..0000000 --- a/shared/linked-dotfiles/opencode/agent/research.md +++ /dev/null @@ -1,185 +0,0 @@ ---- -description: Deep research agent - searches sources, cites evidence, synthesizes insights across domains with concise actionable output -mode: primary -model: anthropic/claude-sonnet-4-5 -temperature: 0.6 -tools: - write: false - edit: false - bash: true -permission: - bash: - "rg *": allow - "grep *": allow - "man *": allow - "curl *": allow - "wget *": allow - "cat *": allow - "head *": allow - "tail *": allow - "git log *": allow - "find *": allow - "*": ask ---- - -You are a deep research agent. Your purpose is to gather relevant sources, cite evidence, make novel connections, and synthesize insights across any domain - coding, psychology, creative writing, science, etc. Your output must be concise, straight to the point, and avoid academic verbosity. - -## Your Research Process (ReAct Pattern) - -Use explicit Thought → Action → Observation loops: - -**Thought**: What information do I need? What sources should I consult? -**Action**: Search documentation, man pages, web resources, codebase, papers -**Observation**: What did I find? Does it answer the question? What's missing? - -Repeat until you have sufficient evidence to synthesize insights. 
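The loop above can be read as a simple control structure; below is a self-contained JavaScript sketch of the Thought → Action → Observation cycle. The `think`, `act`, and `isSufficient` functions are stand-in stubs for the agent's reasoning and tool calls (`rg`, `man`, `curl`), not a real API.

```javascript
// Illustrative ReAct-style loop; think/act/isSufficient are stubs.
function think(question, observations) {
  return `Need evidence for: ${question} (have ${observations.length} observations)`; // Thought
}

function act(thought) {
  // A real agent would run a tool here (rg, man, curl) and capture its output.
  return { observation: `tool output for "${thought}"` }; // Action
}

function isSufficient(observations) {
  return observations.length >= 3; // stop once enough evidence is gathered
}

function researchLoop(question, maxSteps = 5) {
  const observations = [];
  for (let step = 0; step < maxSteps; step++) {
    const thought = think(question, observations);
    const { observation } = act(thought);
    observations.push(observation); // Observation
    if (isSufficient(observations)) break;
  }
  return observations; // evidence to synthesize, with citations, in the final answer
}

console.log(researchLoop('How does Docker default networking work?'));
```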
- -## Citation Requirements - -**CRITICAL**: Every claim must be cited using this exact format: - -``` -The specific claim or finding -``` - -For local sources (man pages, code files): -``` -The specific claim -``` - -**Rules**: -- Cite as you write, not at the end -- If you cannot find a reliable source, say "I don't have a reliable source for this claim" -- Never make unsupported claims -- Multiple sources per claim is encouraged when relevant - -## Conciseness Constraints - -**Output format**: Small paragraphs (2-4 sentences) or single sentences. NOT bullet points unless specifically requested. - -**Word budget**: Aim for <500 words for typical research queries. Quality over quantity. - -**Forbidden phrases**: -- "It is important to note that..." -- "Furthermore...", "Moreover...", "In conclusion..." -- "It seems that...", "Perhaps...", "Might be..." -- Any academic hedging or filler - -**Required style**: -- Direct statements in active voice -- Specific examples only when they add value -- One example per concept maximum -- No introductions or conclusions - start with substance - -## Making Novel Connections - -After gathering information, explicitly ask yourself: - -1. **What unexpected patterns appear across sources?** - - Look for themes that emerge from disparate domains - - Identify shared underlying principles - -2. **How do concepts from different domains relate?** - - Technical patterns that apply to psychology - - Creative approaches that inform engineering - - Cross-pollination opportunities - -3. **What analogies or metaphors connect these ideas?** - - Mental models that bridge concepts - - Frameworks that unify approaches - -4. **What contrasts or contradictions exist?** - - Tension between sources reveals deeper truth - - Disagreements indicate complexity worth exploring - -## Multi-Domain Research - -For each topic, consider perspectives from: -- **Technical/Engineering**: How it works, implementation details -- **Human/Psychological**: Why people use it, cognitive factors -- **Business/Economic**: Value proposition, trade-offs -- **Creative/Artistic**: Novel applications, aesthetic considerations - -Then synthesize insights across these domains to provide comprehensive understanding. - -## Research Tools Available - -You have bash access for: -- **Web research**: `curl`, `wget` for fetching documentation, papers, resources -- **Man pages**: `man ` for technical documentation -- **Code search**: `rg`, `grep`, `find` for exploring codebases -- **Git history**: `git log`, `git show` for understanding evolution -- **File reading**: `cat`, `head`, `tail` for examining sources - -Use tools iteratively. If first search doesn't yield results, refine your query. - -## Verification Step - -Before finalizing output, self-check: -- [ ] Every significant claim has a source citation -- [ ] Citations use correct XML format with URL and title -- [ ] Output is under 500 words (unless depth requires more) -- [ ] Writing is direct, no hedging or filler -- [ ] At least one novel connection or insight is identified -- [ ] Multiple perspectives considered (not just technical) - -## Output Structure - -**Context** (1-2 sentences): Frame the research question and why it matters. - -**Findings** (2-4 small paragraphs): Present key discoveries with inline citations. Each paragraph should focus on one main insight. Make connections between sources explicit. - -**Novel Insights** (1-2 paragraphs): Highlight unexpected connections, analogies, or patterns you discovered across sources. 
This is where cross-domain synthesis happens. - -**Bibliography**: List all sources at the end in a clean format: -``` -## Sources -1. [Title](URL) - Brief description -2. [Title](URL) - Brief description -``` - -## Example Output Style - -**Good** (concise paragraphs with citations): -``` -The ReAct pattern combines reasoning and acting in explicit loops, significantly improving LLM task performance. ReAct agents achieve 34% higher success rates on ALFWorld tasks compared to baseline approaches. This improvement comes from making the reasoning process transparent, allowing for error detection and course correction. - -Interestingly, this pattern mirrors human problem-solving strategies from cognitive psychology. Expert problem solvers externalize their thinking through verbal protocols, which reduces cognitive load and improves solution quality. The ReAct pattern essentially forces LLMs to "think aloud" in the same way. - -## Sources -1. [ReAct: Synergizing Reasoning and Acting in Language Models](https://arxiv.org/abs/2210.03629) - ICLR 2023 paper on reasoning-acting loops -2. [The Psychology of Problem Solving](https://psycnet.apa.org/record/1994-97586-000) - Cognitive research on expert problem solving -``` - -**Bad** (bullet points and verbosity): -``` -It is important to note that the ReAct pattern has several benefits: -- It seems to improve performance -- Perhaps it helps with reasoning -- Furthermore, it might be useful for various tasks -- Moreover, one could argue that... -In conclusion, ReAct is a valuable approach. -``` - -## Domain Adaptability - -Adjust your research depth based on the domain: -- **Code/Technical**: Focus on implementation details, performance, trade-offs -- **Psychology/Human Factors**: Focus on user research, cognitive principles, behavioral patterns -- **Creative Writing**: Focus on techniques, examples from literature, stylistic approaches -- **Science/Research**: Focus on peer-reviewed sources, methodology, empirical findings -- **General Knowledge**: Focus on authoritative sources, multiple perspectives, practical applications - -## When Information is Insufficient - -If you cannot find adequate sources: -1. State clearly what you searched and why it was insufficient -2. Provide what you did find with appropriate caveats -3. Suggest alternative research directions -4. Never fabricate sources or make unsupported claims - -## Your Tone - -Direct, insightful, and information-dense. Avoid chattiness. Every sentence should add value. Get to the point immediately. The human needs actionable intelligence, not prose. - -Remember: Your job is to make the human smarter by synthesizing diverse sources into clear, cited, insightful analysis. Quality research enables better decisions. 
diff --git a/shared/linked-dotfiles/opencode/bun.lock b/shared/linked-dotfiles/opencode/bun.lock deleted file mode 100644 index be4b108..0000000 --- a/shared/linked-dotfiles/opencode/bun.lock +++ /dev/null @@ -1,17 +0,0 @@ -{ - "lockfileVersion": 1, - "workspaces": { - "": { - "dependencies": { - "@opencode-ai/plugin": "0.15.2", - }, - }, - }, - "packages": { - "@opencode-ai/plugin": ["@opencode-ai/plugin@0.15.2", "", { "dependencies": { "@opencode-ai/sdk": "0.15.2", "zod": "4.1.8" } }, "sha512-cCttqxDou+OPQzjc20cB4xxuSMMSowZMpvlioIgyzyhp8B7Gc5hh1kY796nly7vjaQfUcLg8pOmRJSLrbxj98g=="], - - "@opencode-ai/sdk": ["@opencode-ai/sdk@0.15.2", "", {}, "sha512-m2aU0TiZj7/o1JvYmoqF7ichrKZcWi4UTx3WYlDXpRJvfZX0gnLKLJAlTKfHkisytGlrlgmST2kt/YQrAjc0wQ=="], - - "zod": ["zod@4.1.8", "", {}, "sha512-5R1P+WwQqmmMIEACyzSvo4JXHY5WiAFHRMg+zBZKgKS+Q1viRa0C1hmUKtHltoIFKtIdki3pRxkmpP74jnNYHQ=="], - } -} diff --git a/shared/linked-dotfiles/opencode/command/skills.md b/shared/linked-dotfiles/opencode/command/skills.md deleted file mode 100644 index 8e399c5..0000000 --- a/shared/linked-dotfiles/opencode/command/skills.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -description: List available skills ---- - -Do not add any commentary, explanation, or additional text. - ---- - -# Available Skills - -!`find "$HOME/.config/opencode/skills" -maxdepth 2 -name "SKILL.md" -type f -exec sh -c 'dir=$(dirname "{}"); name=$(basename "$dir"); echo ""; echo "## $name"; echo ""; head -20 "{}" | grep "^description:" | head -1 | sed "s/^description: //"' \;` - ---- - -To invoke a skill, use the corresponding `skills_*` tool (e.g., `skills_create_skill`, `skills_go_pr_review`). diff --git a/shared/linked-dotfiles/opencode/llmemory/.gitignore b/shared/linked-dotfiles/opencode/llmemory/.gitignore deleted file mode 100644 index ad95c03..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/.gitignore +++ /dev/null @@ -1,58 +0,0 @@ -# Dependencies -node_modules/ -package-lock.json -yarn.lock -pnpm-lock.yaml - -# Build outputs -dist/ -build/ -*.tsbuildinfo - -# Test coverage -coverage/ -.nyc_output/ - -# Database files -*.db -*.db-shm -*.db-wal -*.sqlite -*.sqlite3 -backup-*.db - -# Environment files -.env -.env.local -.env.*.local - -# Editor files -.vscode/ -.idea/ -*.swp -*.swo -*~ -.DS_Store - -# Logs -logs/ -*.log -npm-debug.log* -yarn-debug.log* -yarn-error.log* - -# OS files -Thumbs.db -.DS_Store - -# Temporary files -tmp/ -temp/ -*.tmp - -# Debug files -.pnp.* -.yarn/ - -# TypeScript cache -*.tsbuildinfo diff --git a/shared/linked-dotfiles/opencode/llmemory/DELETE_IMPLEMENTATION.md b/shared/linked-dotfiles/opencode/llmemory/DELETE_IMPLEMENTATION.md deleted file mode 100644 index cf3220b..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/DELETE_IMPLEMENTATION.md +++ /dev/null @@ -1,231 +0,0 @@ -# Delete Command Implementation - -## Summary - -Successfully implemented a robust `delete` command for llmemory that allows flexible deletion of memories by various criteria. The implementation follows TDD principles, matches existing code patterns, and includes comprehensive safety features. - -## Implementation Details - -### Files Created/Modified - -1. **`src/commands/delete.js`** (NEW) - - Implements `deleteMemories(db, options)` function - - Supports multiple filter criteria: IDs, tags (AND/OR), LIKE queries, date ranges, agent - - Includes expired memory handling (exclude by default, include with flag, or only expired) - - Dry-run mode for safe preview - - Safety check: requires at least one filter criterion - -2. 
**`src/cli.js`** (MODIFIED) - - Added import for `deleteMemories` - - Added `delete` command with 14 options - - Confirmation prompt (requires `--force` flag) - - Support for `--json` and `--markdown` output - - Helpful error messages for safety violations - - Updated `--agent-context` help documentation - -3. **`test/integration.test.js`** (MODIFIED) - - Added 26 comprehensive tests in `describe('Delete Command')` block - - Tests cover all filter types, combinations, safety features, and edge cases - - All 65 tests pass (39 original + 26 new) - -## Features - -### Filter Criteria -- **By IDs**: `--ids 1,2,3` - Delete specific memories by comma-separated IDs -- **By Tags (AND)**: `--tags test,demo` - Delete memories with ALL specified tags -- **By Tags (OR)**: `--any-tag test,demo` - Delete memories with ANY specified tag -- **By Content**: `--query "docker"` - Case-insensitive LIKE search on content -- **By Date Range**: `--after 2025-01-01 --before 2025-12-31` -- **By Agent**: `--entered-by test-agent` - Filter by creator -- **Expired Only**: `--expired-only` - Delete only expired memories -- **Include Expired**: `--include-expired` - Include expired in other filters - -### Safety Features -- **Required Filters**: Must specify at least one filter criterion (prevents accidental "delete all") -- **Confirmation Prompt**: Shows count and requires `--force` flag to proceed -- **Dry-Run Mode**: `--dry-run` shows what would be deleted without actually deleting -- **Clear Output**: Shows preview of memories to be deleted with full details - -### Output Formats -- **Standard**: Colored, formatted output with memory details -- **JSON**: `--json` for programmatic processing -- **Markdown**: `--markdown` for documentation - -## Usage Examples - -```bash -# Preview deletion by tag -llmemory delete --tags test --dry-run - -# Delete test memories (with confirmation) -llmemory delete --tags test -# Shows: "⚠ About to delete 6 memories. Run with --dry-run to preview first, or --force to skip this check." - -# Delete test memories (skip confirmation) -llmemory delete --tags test --force - -# Delete by specific IDs -llmemory delete --ids 1,2,3 --force - -# Delete by content query -llmemory delete --query "docker" --dry-run - -# Delete by agent and tags (combination) -llmemory delete --entered-by test-agent --tags demo --force - -# Delete expired memories only -llmemory delete --expired-only --force - -# Delete old memories before date -llmemory delete --before 2025-01-01 --dry-run - -# Complex query: test memories from specific agent, created after date -llmemory delete --tags test --entered-by manual --after 2025-10-01 --dry-run -``` - -## Design Decisions - -### 1. Keep Prune Separate ✅ -**Decision**: Created separate `delete` command instead of extending `prune` - -**Rationale**: -- Semantic clarity: "prune" implies expired/old data, "delete" is general-purpose -- Single responsibility: Each command does one thing well -- Better UX: "delete by tags" reads more naturally than "prune by tags" - -### 2. Require At Least One Filter ✅ -**Decision**: Throw error if no filter criteria provided - -**Rationale**: -- Prevents accidental bulk deletion -- Forces users to be explicit about what they want to delete -- Safer default behavior - -**Alternative Considered**: Allow `--all` flag for "delete everything" - rejected as too dangerous - -### 3. 
Exclude Expired by Default ✅ -**Decision**: By default, expired memories are excluded from deletion (consistent with search/list) - -**Rationale**: -- Consistency: Matches behavior of `search` and `list` commands -- Logical: Users typically work with active memories -- Flexibility: Can include expired with `--include-expired` or target only expired with `--expired-only` - -### 4. Reuse Search Query Logic ✅ -**Decision**: Adopted same query-building patterns as `search.js` - -**Rationale**: -- Consistency: Users familiar with search filters can use same syntax -- Proven: Search query logic already tested and working -- Maintainability: Similar code structure makes maintenance easier - -**Future Refactoring**: Could extract query-building to shared utility in `src/utils/query.js` - -## Test Coverage - -### Test Categories -1. **Delete by IDs** (4 tests) - - Single ID, multiple IDs, non-existent IDs, mixed valid/invalid - -2. **Delete by Tags** (5 tests) - - Single tag, multiple tags (AND), OR logic, no matches - -3. **Delete by Content** (3 tests) - - LIKE query, case-insensitive, partial matches - -4. **Delete by Date Range** (3 tests) - - Before date, after date, date range (both) - -5. **Delete by Agent** (2 tests) - - By agent, agent + tags combination - -6. **Expired Memory Handling** (3 tests) - - Exclude expired (default), include expired, expired only - -7. **Dry Run Mode** (2 tests) - - Doesn't delete, includes memory details - -8. **Safety Features** (2 tests) - - Requires filter, handles empty results - -9. **Combination Filters** (3 tests) - - Tags + query, agent + date, all filters - -### Test Results -``` -✓ test/integration.test.js (65 tests) 56ms - Test Files 1 passed (1) - Tests 65 passed (65) -``` - -## Performance - -- Delete operations are fast (uses indexed queries) -- Transaction-safe: Deletion happens in SQLite transaction -- CASCADE delete: Related tags cleaned up automatically via foreign keys -- No performance degradation observed with 100+ memories - -## Comparison with Prune - -| Feature | Prune | Delete | -|---------|-------|--------| -| Purpose | Remove expired memories | Remove by any criteria | -| Default behavior | Expired only | Requires explicit filters | -| Filter by tags | ❌ | ✅ | -| Filter by content | ❌ | ✅ | -| Filter by agent | ❌ | ✅ | -| Filter by date | Before date only | Before, after, or range | -| Filter by IDs | ❌ | ✅ | -| Include/exclude expired | N/A (always expired) | Configurable | -| Dry-run | ✅ | ✅ | -| Confirmation | ✅ | ✅ | - -## Future Enhancements (Not Implemented) - -1. **Interactive Mode**: `--interactive` to select from list -2. **Backup Before Delete**: `--backup ` to export before deletion -3. **Regex Support**: `--regex` for pattern matching -4. **Undo/Restore**: Soft delete with restore capability -5. **Batch Limits**: `--limit` to cap deletion count -6. **Query DSL**: More advanced query language - -## Lessons Learned - -1. **TDD Works**: Writing tests first helped catch edge cases early -2. **Pattern Reuse**: Adopting search.js patterns saved time and ensured consistency -3. **Safety First**: Confirmation prompts and dry-run are essential for destructive operations -4. **Clear Errors**: Helpful error messages (like listing available filters) improve UX -5. 
**Semantic Clarity**: Separate commands with clear purposes better than multi-purpose commands - -## Testing Checklist - -- [x] Unit tests for all filter types -- [x] Combination filter tests -- [x] Dry-run mode tests -- [x] Safety feature tests -- [x] CLI integration tests -- [x] Manual testing with real database -- [x] Help text verification -- [x] Error message clarity -- [x] Output format tests (JSON, Markdown) -- [x] Confirmation prompt behavior - -## Documentation Updates - -- [x] CLI help text (`llmemory delete --help`) -- [x] Agent context help (`llmemory --agent-context`) -- [x] This implementation document -- [ ] Update SPECIFICATION.md (future) -- [ ] Update README.md examples (future) - -## Conclusion - -The delete command implementation is **complete and production-ready**. It provides: -- ✅ Flexible deletion by multiple criteria -- ✅ Comprehensive safety features -- ✅ Consistent with existing commands -- ✅ Thoroughly tested (26 new tests, all passing) -- ✅ Well-documented with clear help text -- ✅ Follows TDD principles - -The implementation successfully addresses all requirements from the original investigation and provides a robust, safe tool for managing llmemory data. diff --git a/shared/linked-dotfiles/opencode/llmemory/DEPLOYMENT.md b/shared/linked-dotfiles/opencode/llmemory/DEPLOYMENT.md deleted file mode 100644 index dd72300..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/DEPLOYMENT.md +++ /dev/null @@ -1,147 +0,0 @@ -# LLMemory Deployment Guide - -## Current Status: Phase 1 Complete ✅ - -**Date:** 2025-10-29 -**Version:** 0.1.0 -**Tests:** 39/39 passing - -## Installation - -### For NixOS Systems - -The tool is ready to use from the project directory: - -```bash -# Direct usage (no installation needed) -/home/nate/nixos/shared/linked-dotfiles/opencode/llmemory/bin/llmemory --help - -# Or add to PATH temporarily -export PATH="$PATH:/home/nate/nixos/shared/linked-dotfiles/opencode/llmemory/bin" -llmemory --help -``` - -**Note:** `npm link` doesn't work on NixOS due to read-only /nix/store. The tool is designed to run directly from the project directory or via the OpenCode plugin. - -### For Standard Linux Systems - -```bash -cd /path/to/opencode/llmemory -npm install -npm link # Creates global 'llmemory' command -``` - -## Usage - -### CLI Commands - -```bash -# Store a memory -llmemory store "Implemented JWT authentication" --tags backend,auth - -# Search memories -llmemory search "authentication" --tags backend --limit 5 - -# List recent memories -llmemory list --limit 10 - -# Show statistics -llmemory stats --tags --agents - -# Remove expired memories -llmemory prune --dry-run - -# Get help for agents -memory --agent-context -``` - -### OpenCode Plugin Integration - -The plugin is available at `plugin/llmemory.js` and provides three tools: - -- **memory_store**: Store memories from OpenCode sessions -- **memory_search**: Search past memories -- **memory_list**: List recent memories - -The plugin automatically runs the CLI in the background and returns results. - -## Database Location - -Memories are stored in: -``` -~/.config/opencode/memories.db -``` - -The database uses SQLite with WAL mode for better concurrency. - -## Architecture - -``` -llmemory/ -├── bin/llmemory # Executable shim (node bin/llmemory) -├── src/ -│ ├── cli.js # CLI entry point with commander -│ ├── commands/ # Business logic (all tested) -│ ├── db/ # Database layer -│ └── utils/ # Validation, tags, etc. 
-├── plugin/ # OpenCode integration (in parent dir) -└── test/ # Integration tests (39 passing) -``` - -## Testing - -```bash -# Run all tests -npm test - -# Watch mode -npm run test:watch - -# Manual testing -node src/cli.js store "Test memory" --tags test -node src/cli.js search "test" -node src/cli.js list --limit 5 -``` - -## NixOS-Specific Notes - -1. **No npm link**: The /nix/store is read-only, so global npm packages can't be installed traditionally -2. **Direct execution**: Use the bin/llmemory shim directly or add to PATH -3. **Plugin approach**: The OpenCode plugin works perfectly on NixOS since it spawns the CLI as a subprocess -4. **Database location**: Uses XDG_CONFIG_HOME if set, otherwise ~/.config/opencode/ - -## OpenCode Integration Status - -✅ **Plugin Created**: `plugin/llmemory.js` -✅ **Tools Defined**: memory_store, memory_search, memory_list -✅ **CLI Tested**: All commands working with colored output -✅ **JSON Output**: Supports --json flag for plugin parsing - -## Next Steps for Full Integration - -1. **Test plugin in OpenCode session**: Load and verify tools appear -2. **Add to agent documentation**: Update CLAUDE.md or similar with memory tool usage -3. **Consider auto-storage**: Hook into session end to auto-store context -4. **Phase 2 features**: FTS5, fuzzy search, export/import - -## Performance - -Current benchmarks (Phase 1): -- Search 100 memories: ~20-30ms ✅ (target: <50ms) -- Store 100 memories: ~200-400ms ✅ (target: <1000ms) -- Database with indexes: ~100KB for 100 memories - -## Known Limitations - -1. **npm link doesn't work on NixOS** - Use direct execution or plugin -2. **Export/import not yet implemented** - Coming in Phase 2 -3. **No fuzzy search yet** - LIKE search only (Phase 3 feature) -4. **Manual cleanup required** - Use `llmemory prune` to remove expired memories - -## Support - -For issues or questions: -- Check SPECIFICATION.md for technical details -- See ARCHITECTURE.md for system design -- Review test/integration.test.js for usage examples -- Read TESTING.md for TDD philosophy diff --git a/shared/linked-dotfiles/opencode/llmemory/IMPLEMENTATION_PLAN.md b/shared/linked-dotfiles/opencode/llmemory/IMPLEMENTATION_PLAN.md deleted file mode 100644 index cf48518..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/IMPLEMENTATION_PLAN.md +++ /dev/null @@ -1,1008 +0,0 @@ -# LLMemory Implementation Plan - -## Current Status: Phase 0 - Planning Complete - -This document tracks implementation progress and provides step-by-step guidance for building LLMemory. 
- -## Phase 1: MVP (Simple LIKE Search) - -**Goal:** Working CLI tool with basic search in 2-3 days -**Status:** Not Started -**Trigger to Complete:** All checkpoints passed, can store/search memories - -### Step 1.1: Project Setup -**Effort:** 30 minutes -**Status:** Not Started - -```bash -cd llmemory -npm init -y -npm install better-sqlite3 commander chalk date-fns -npm install -D vitest typescript @types/node @types/better-sqlite3 -``` - -**Deliverables:** -- [ ] package.json configured with dependencies -- [ ] TypeScript configured (optional but recommended) -- [ ] Git initialized with .gitignore -- [ ] bin/memory executable created - -**Checkpoint:** Run `npm list` - all dependencies installed - ---- - -### Step 1.2: Database Layer - Schema & Connection -**Effort:** 2 hours -**Status:** Not Started - -**Files to create:** -- `src/db/connection.js` - Database connection and initialization -- `src/db/schema.js` - Phase 1 schema (memories, tags, memory_tags) -- `src/db/queries.js` - Prepared statements - -**Schema (Phase 1):** -```sql -CREATE TABLE memories ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - content TEXT NOT NULL CHECK(length(content) <= 10000), - created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')), - entered_by TEXT, - expires_at INTEGER, - CHECK(expires_at IS NULL OR expires_at > created_at) -); - -CREATE TABLE tags ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT NOT NULL UNIQUE COLLATE NOCASE, - created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')) -); - -CREATE TABLE memory_tags ( - memory_id INTEGER NOT NULL, - tag_id INTEGER NOT NULL, - PRIMARY KEY (memory_id, tag_id), - FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE, - FOREIGN KEY (tag_id) REFERENCES tags(id) ON DELETE CASCADE -); - -CREATE TABLE metadata ( - key TEXT PRIMARY KEY, - value TEXT NOT NULL -); - -CREATE INDEX idx_memories_created ON memories(created_at DESC); -CREATE INDEX idx_memories_expires ON memories(expires_at) WHERE expires_at IS NOT NULL; -CREATE INDEX idx_tags_name ON tags(name); -CREATE INDEX idx_memory_tags_tag ON memory_tags(tag_id); -``` - -**Implementation checklist:** -- [ ] Database connection with WAL mode enabled -- [ ] Schema creation on first run -- [ ] Metadata table initialized (schema_version: 1) -- [ ] Prepared statements for common operations -- [ ] Transaction helpers - -**Checkpoint:** Run test insertion and query - works without errors - ---- - -### Step 1.3: Core Command - Store -**Effort:** 2 hours -**Status:** Not Started - -**TDD Workflow:** -1. **Write test first** (see test structure below) -2. **Run test** - watch it fail -3. **Implement feature** - make test pass -4. 
**Refine** - improve based on test output - -**Files to create:** -- `test/integration.test.js` (TEST FIRST) -- `src/commands/store.js` -- `src/utils/validation.js` -- `src/utils/tags.js` - -**Test First (write this before implementation):** -```javascript -// test/integration.test.js -import { describe, test, expect, beforeEach } from 'vitest'; -import Database from 'better-sqlite3'; -import { storeMemory } from '../src/commands/store.js'; - -describe('Store Command', () => { - let db; - - beforeEach(() => { - db = new Database(':memory:'); - // Init schema - initSchema(db); - }); - - test('stores memory with tags', () => { - const result = storeMemory(db, { - content: 'Docker uses bridge networks by default', - tags: 'docker,networking', - entered_by: 'test' - }); - - expect(result.id).toBeDefined(); - - // Verify in database - const memory = db.prepare('SELECT * FROM memories WHERE id = ?').get(result.id); - expect(memory.content).toBe('Docker uses bridge networks by default'); - - // Verify tags - const tags = db.prepare(` - SELECT t.name FROM tags t - JOIN memory_tags mt ON t.id = mt.tag_id - WHERE mt.memory_id = ? - `).all(result.id); - - expect(tags.map(t => t.name)).toEqual(['docker', 'networking']); - }); - - test('rejects content over 10KB', () => { - const longContent = 'x'.repeat(10001); - - expect(() => { - storeMemory(db, { content: longContent }); - }).toThrow('Content exceeds 10KB limit'); - }); - - test('normalizes tags to lowercase', () => { - storeMemory(db, { content: 'test', tags: 'Docker,NETWORKING' }); - - const tags = db.prepare('SELECT name FROM tags').all(); - expect(tags).toEqual([ - { name: 'docker' }, - { name: 'networking' } - ]); - }); -}); -``` - -**Then implement** (after test fails): -```javascript -// src/commands/store.js -export function storeMemory(db, { content, tags, expires, entered_by }) { - // Implementation goes here - // Make the test pass! -} -``` - -**Features checklist:** -- [ ] Test written first and failing -- [ ] Content validation (length, non-empty) -- [ ] Tag parsing and normalization (lowercase) -- [ ] Expiration date parsing (ISO 8601) -- [ ] Atomic transaction (memory + tags) -- [ ] Test passes - -**Checkpoint:** `npm test` passes for store command - ---- - -### Step 1.4: Core Command - Search (LIKE) -**Effort:** 3 hours -**Status:** Not Started - -**TDD Workflow:** -1. **Write integration test first** with realistic data -2. **Run and watch it fail** -3. **Implement search** - make test pass -4. 
**Verify manually** with CLI - -**Files to create:** -- Add tests to `test/integration.test.js` (TEST FIRST) -- `src/commands/search.js` -- `src/search/like.js` -- `src/utils/formatting.js` - -**Test First:** -```javascript -// test/integration.test.js (add to existing file) -describe('Search Command', () => { - let db; - - beforeEach(() => { - db = new Database(':memory:'); - initSchema(db); - - // Seed with realistic data - storeMemory(db, { content: 'Docker uses bridge networks by default', tags: 'docker,networking' }); - storeMemory(db, { content: 'Kubernetes pods share network namespace', tags: 'kubernetes,networking' }); - storeMemory(db, { content: 'PostgreSQL requires explicit vacuum', tags: 'postgresql,database' }); - }); - - test('finds memories by content', () => { - const results = searchMemories(db, 'docker'); - - expect(results).toHaveLength(1); - expect(results[0].content).toContain('Docker'); - }); - - test('filters by tags (AND logic)', () => { - const results = searchMemories(db, 'network', { tags: ['networking'] }); - - expect(results).toHaveLength(2); - expect(results.map(r => r.content)).toContain('Docker uses bridge networks by default'); - expect(results.map(r => r.content)).toContain('Kubernetes pods share network namespace'); - }); - - test('excludes expired memories automatically', () => { - storeMemory(db, { - content: 'Expired memory', - tags: 'test', - expires_at: Date.now() - 86400 // Yesterday - }); - - const results = searchMemories(db, 'expired'); - - expect(results).toHaveLength(0); - }); - - test('respects limit option', () => { - // Add 20 memories - for (let i = 0; i < 20; i++) { - storeMemory(db, { content: `Memory ${i}`, tags: 'test' }); - } - - const results = searchMemories(db, 'Memory', { limit: 5 }); - - expect(results).toHaveLength(5); - }); -}); -``` - -**Then implement** to make tests pass. - -**Features checklist:** -- [ ] Tests written and failing -- [ ] Case-insensitive LIKE search -- [ ] Tag filtering (AND logic) -- [ ] Date filtering (after/before) -- [ ] Agent filtering (entered_by) -- [ ] Automatic expiration filtering -- [ ] Result limit -- [ ] Tests pass - -**Checkpoint:** `npm test` passes for search, manual CLI test works - ---- - -### Step 1.5: Core Command - List -**Effort:** 1 hour -**Status:** Not Started - -**Files to create:** -- `src/commands/list.js` - -**Implementation:** -```javascript -// Pseudo-code -export async function listCommand(options) { - // 1. Query memories with filters - // 2. Order by created_at DESC (or custom sort) - // 3. Apply limit/offset - // 4. Format and display -} -``` - -**Features:** -- [ ] Sort options (created, expires, content) -- [ ] Order direction (asc/desc) -- [ ] Tag filtering -- [ ] Pagination (limit/offset) -- [ ] Display with tags - -**Checkpoint:** -```bash -memory list --limit 5 -# Should show 5 most recent memories -``` - ---- - -### Step 1.6: Core Command - Prune -**Effort:** 1.5 hours -**Status:** Not Started - -**Files to create:** -- `src/commands/prune.js` - -**Implementation:** -```javascript -// Pseudo-code -export async function pruneCommand(options) { - // 1. Find expired memories - // 2. If --dry-run, show what would be deleted - // 3. Else, prompt for confirmation (unless --force) - // 4. Delete expired memories - // 5. 
Show count of deleted memories -} -``` - -**Features:** -- [ ] Find expired memories (expires_at <= now) -- [ ] --dry-run flag (show without deleting) -- [ ] --force flag (skip confirmation) -- [ ] Confirmation prompt -- [ ] Report deleted count - -**Checkpoint:** -```bash -memory store "Temp" --expires "2020-01-01" -memory prune --dry-run -# Should show the expired memory -memory prune --force -# Should delete it -``` - ---- - -### Step 1.7: CLI Integration -**Effort:** 2 hours -**Status:** Not Started - -**Files to create:** -- `src/cli.js` -- `bin/memory` - -**Implementation:** -```javascript -// src/cli.js -import { Command } from 'commander'; - -const program = new Command(); - -program - .name('memory') - .description('AI Agent Memory System') - .version('1.0.0'); - -program - .command('store <content>') - .description('Store a new memory') - .option('-t, --tags <tags>', 'Comma-separated tags') - .option('-e, --expires <date>', 'Expiration date') - .option('--by <agent>', 'Agent name') - .action(storeCommand); - -program - .command('search <query>') - .description('Search memories') - .option('-t, --tags <tags>', 'Filter by tags') - .option('--after <date>', 'Created after') - .option('--before <date>', 'Created before') - .option('--entered-by <agent>', 'Filter by agent') - .option('-l, --limit <n>', 'Max results', '10') - .action(searchCommand); - -// ... other commands - -program.parse(); -``` - -**Features:** -- [ ] All commands registered -- [ ] Global options (--db, --verbose, --json) -- [ ] Help text for all commands -- [ ] Error handling -- [ ] Exit codes (0=success, 1=error) - -**Checkpoint:** -```bash -memory --help -# Should show all commands -memory store --help -# Should show store options -``` - ---- - -### Step 1.8: Testing & Polish -**Effort:** 2 hours -**Status:** Not Started - -**Note:** Integration tests written first for each feature (TDD approach). -This step is for final polish and comprehensive scenarios.
- -**Files to enhance:** -- `test/integration.test.js` (should already have tests from Steps 1.3-1.6) -- `test/helpers/seed.js` - Realistic data generation -- `test/fixtures/realistic-memories.js` - Memory templates - -**Comprehensive test scenarios:** -- [ ] Full workflow: store → search → list → prune -- [ ] Performance: 100 memories, search <50ms -- [ ] Edge cases: empty query, no results, expired memories -- [ ] Data validation: content length, invalid dates, malformed tags -- [ ] Tag normalization: uppercase → lowercase, duplicates -- [ ] Expiration: auto-filter in search, prune removes correctly - -**Checkpoint:** All tests pass with `npm test`, >80% coverage (mostly integration) - ---- - -## Phase 1 Completion Criteria - -- [ ] All checkpoints passed -- [ ] Can store memories with tags and expiration -- [ ] Can search with basic LIKE matching -- [ ] Can list recent memories -- [ ] Can prune expired memories -- [ ] Help text comprehensive -- [ ] Tests passing (>80% coverage) -- [ ] Database file created at ~/.config/opencode/memories.db - -**Validation test:** -```bash -# Full workflow test -memory store "Docker Compose uses bridge networks by default" --tags docker,networking -memory store "Kubernetes pods share network namespace" --tags kubernetes,networking -memory search "networking" --tags docker -# Should return only Docker memory -memory list --limit 10 -# Should show both memories -memory stats -# Should show 2 memories, 2 unique tags -``` - ---- - -## Phase 2: FTS5 Migration - -**Goal:** Production-grade search with FTS5 -**Status:** Not Started -**Trigger to Start:** Dataset > 500 memories OR query latency > 500ms OR manual request - -### Step 2.1: Migration Script -**Effort:** 2 hours -**Status:** Not Started - -**Files to create:** -- `src/db/migrations.js` -- `src/db/migrations/002_fts5.js` - -**Implementation:** -```javascript -export async function migrateToFTS5(db) { - console.log('Migrating to FTS5...'); - - // 1. Check if already migrated - const version = db.prepare('SELECT value FROM metadata WHERE key = ?').get('schema_version'); - if (version.value >= 2) { - console.log('Already on FTS5'); - return; - } - - // 2. Create FTS5 table - db.exec(`CREATE VIRTUAL TABLE memories_fts USING fts5(...)`); - - // 3. Populate from existing memories - db.exec(`INSERT INTO memories_fts(rowid, content) SELECT id, content FROM memories`); - - // 4. Create triggers - db.exec(`CREATE TRIGGER memories_ai AFTER INSERT...`); - db.exec(`CREATE TRIGGER memories_ad AFTER DELETE...`); - db.exec(`CREATE TRIGGER memories_au AFTER UPDATE...`); - - // 5. Update schema version - db.prepare('UPDATE metadata SET value = ? 
WHERE key = ?').run('2', 'schema_version'); - - console.log('Migration complete!'); -} -``` - -**Checkpoint:** Run migration on test DB, verify FTS5 table exists and is populated - ---- - -### Step 2.2: FTS5 Search Implementation -**Effort:** 3 hours -**Status:** Not Started - -**Files to create:** -- `src/search/fts.js` - -**Features:** -- [ ] FTS5 MATCH query builder -- [ ] Support boolean operators (AND/OR/NOT) -- [ ] Phrase queries ("exact phrase") -- [ ] Prefix matching (docker*) -- [ ] BM25 relevance ranking -- [ ] Combined with metadata filters - -**Checkpoint:** FTS5 search returns results ranked by relevance - ---- - -### Step 2.3: CLI Command - Migrate -**Effort:** 1 hour -**Status:** Not Started - -**Files to create:** -- `src/commands/migrate.js` - -**Implementation:** -```bash -memory migrate fts5 -# Prompts for confirmation, runs migration -``` - -**Checkpoint:** Command successfully migrates Phase 1 DB to Phase 2 - ---- - -## Phase 3: Fuzzy Layer - -**Goal:** Handle typos and inexact matches -**Status:** Not Started -**Trigger to Start:** Manual request or need for fuzzy matching - -### Step 3.1: Trigram Infrastructure -**Effort:** 3 hours -**Status:** Not Started - -**Files to create:** -- `src/db/migrations/003_trigrams.js` -- `src/search/fuzzy.js` - -**Features:** -- [ ] Trigram table creation -- [ ] Trigram extraction function -- [ ] Populate trigrams from existing memories -- [ ] Trigger to maintain trigrams on insert/update - ---- - -### Step 3.2: Fuzzy Search Implementation -**Effort:** 4 hours -**Status:** Not Started - -**Features:** -- [ ] Trigram similarity calculation -- [ ] Levenshtein distance implementation -- [ ] Combined relevance scoring -- [ ] Cascade logic (exact → fuzzy) -- [ ] Configurable threshold - ---- - -### Step 3.3: CLI Integration -**Effort:** 2 hours -**Status:** Not Started - -**Features:** -- [ ] --fuzzy flag for search command -- [ ] --threshold option -- [ ] Auto-fuzzy when <5 results - ---- - -## Additional Features (Post-MVP) - -### Stats Command -**Effort:** 2 hours -**Status:** Not Started - -```bash -memory stats -# Total memories: 1,234 -# Total tags: 56 -# Database size: 2.3 MB -# Most used tags: docker (123), kubernetes (89), nodejs (67) - -memory stats --tags -# docker: 123 -# kubernetes: 89 -# nodejs: 67 -# ... 
- -memory stats --agents -# investigate-agent: 456 -# optimize-agent: 234 -# manual: 544 -``` - ---- - -### Export/Import Commands -**Effort:** 3 hours -**Status:** Not Started - -```bash -memory export memories.json -# Exported 1,234 memories to memories.json - -memory import memories.json -# Imported 1,234 memories -``` - ---- - -### Agent Context Documentation -**Effort:** 3 hours -**Status:** Not Started - -**Files to create:** -- `docs/AGENT_GUIDE.md` -- `src/commands/agent-context.js` - -```bash -memory --agent-context -# Displays comprehensive guide for AI agents -``` - ---- - -### Auto-Extraction (*Remember* Pattern) -**Effort:** 4 hours -**Status:** Not Started - -**Files to create:** -- `src/extractors/remember.js` - -**Features:** -- [ ] Regex pattern to detect `*Remember*: [fact]` -- [ ] Auto-extract tags from content -- [ ] Auto-detect expiration dates -- [ ] Store extracted memories -- [ ] Report extraction results - ---- - -### OpenCode Plugin Integration -**Effort:** 3 hours -**Status:** Not Started - -**Files to create:** -- `plugin.js` (root level for OpenCode) - -**Features:** -- [ ] Plugin registration -- [ ] API exposure (store, search, extractRemember) -- [ ] Lifecycle hooks (onInstall, onUninstall) -- [ ] Command registration - ---- - -## Testing Strategy - -### TDD Philosophy: Integration-First Approach - -**Core Principles:** -1. **Integration tests are primary** - Test real workflows end-to-end -2. **Unit tests are rare** - Only for complex algorithms (fuzzy matching, trigrams, Levenshtein) -3. **Test with real data** - Use SQLite :memory: or temp files with realistic scenarios -4. **Watch-driven development** - Run tests in watch mode, see failures, implement, see success - -**Testing Workflow:** -```bash -# 1. Write integration test first (it will fail) -npm run test:watch - -# 2. Run program manually to see behavior -node src/cli.js store "test" - -# 3. Implement feature - -# 4. Watch tests pass - -# 5. 
Refine based on output -``` - ---- - -### Integration Tests (Primary) -**Coverage target:** All major workflows - -**Test approach:** -- Use real SQLite database (`:memory:` for speed, temp file for persistence tests) -- Simulate realistic data (10-100 memories per test) -- Test actual CLI commands via Node API -- Verify end-to-end behavior, not internal implementation - -**Test scenarios:** -```javascript -// test/integration.test.js -describe('Memory System Integration', () => { - test('store and retrieve workflow', async () => { - // Store memory - await cli(['store', 'Docker uses bridge networks', '--tags', 'docker,networking']); - - // Search for it - const results = await cli(['search', 'docker']); - - // Verify output - expect(results).toContain('Docker uses bridge networks'); - expect(results).toContain('docker'); - expect(results).toContain('networking'); - }); - - test('realistic dataset search performance', async () => { - // Insert 100 realistic memories - for (let i = 0; i < 100; i++) { - await storeMemory(generateRealisticMemory()); - } - - // Search should be fast - const start = Date.now(); - await cli(['search', 'docker']); - const duration = Date.now() - start; - - expect(duration).toBeLessThan(50); // Phase 1 target - }); -}); -``` - -**Test data generation:** -```javascript -// test/fixtures/realistic-memories.js -export function generateRealisticMemory() { - const templates = [ - { content: 'Docker Compose requires explicit subnet config when using multiple networks', tags: ['docker', 'networking'] }, - { content: 'PostgreSQL VACUUM FULL locks tables, use ANALYZE instead', tags: ['postgresql', 'performance'] }, - { content: 'Git worktree allows parallel branches without stashing', tags: ['git', 'workflow'] }, - // ... 50+ realistic templates - ]; - return randomChoice(templates); -} -``` - ---- - -### Unit Tests (Rare - Only When Necessary) -**When to write unit tests:** -- Complex algorithms with edge cases (Levenshtein distance, trigram extraction) -- Pure functions with clear inputs/outputs -- Critical validation logic - -**When NOT to write unit tests:** -- Database queries (covered by integration tests) -- CLI parsing (covered by integration tests) -- Simple utilities (tag parsing, date formatting) - -**Example unit test (justified):** -```javascript -// test/unit/fuzzy.test.js - Complex algorithm worth unit testing -describe('Levenshtein distance', () => { - test('calculates edit distance correctly', () => { - expect(levenshtein('docker', 'dcoker')).toBe(2); - expect(levenshtein('kubernetes', 'kuberntes')).toBe(2); - expect(levenshtein('same', 'same')).toBe(0); - }); - - test('handles edge cases', () => { - expect(levenshtein('', 'hello')).toBe(5); - expect(levenshtein('a', '')).toBe(1); - }); -}); -``` - ---- - -### Test Data Management - -**For integration tests:** -```javascript -// Use :memory: database for fast, isolated tests -beforeEach(() => { - db = new Database(':memory:'); - initSchema(db); -}); - -// Or use temp file for persistence testing -import { mkdtempSync } from 'fs'; -import { join } from 'path'; -import { tmpdir } from 'os'; - -beforeEach(() => { - const tempDir = mkdtempSync(join(tmpdir(), 'llmemory-test-')); - dbPath = join(tempDir, 'test.db'); - db = new Database(dbPath); - initSchema(db); -}); - -afterEach(() => { - db.close(); - // Cleanup temp files -}); -``` - -**Realistic data seeding:** -```javascript -// test/helpers/seed.js -export async function seedDatabase(db, count = 50) { - const memories = []; - - for (let i = 0; i < count; 
i++) { - memories.push({ - content: generateRealisticMemory(), - tags: generateRealisticTags(), - entered_by: randomChoice(['investigate-agent', 'optimize-agent', 'manual']), - created_at: Date.now() - randomInt(0, 90 * 86400) // Random within 90 days - }); - } - - // Bulk insert - const insert = db.transaction((memories) => { - for (const memory of memories) { - storeMemory(db, memory); - } - }); - - insert(memories); - return memories; -} -``` - ---- - -### Performance Tests -**Run after each phase:** - -```javascript -// Benchmark search latency -test('Phase 1 search <50ms for 500 memories', async () => { - // Insert 500 test memories - const start = Date.now(); - const results = await search('test query'); - const duration = Date.now() - start; - expect(duration).toBeLessThan(50); -}); - -test('Phase 2 search <100ms for 10K memories', async () => { - // Insert 10K test memories - const start = Date.now(); - const results = await search('test query'); - const duration = Date.now() - start; - expect(duration).toBeLessThan(100); -}); -``` - ---- - -## Documentation Roadmap - -### Phase 1 Docs -- [ ] README.md - Quick start, installation, basic usage -- [ ] CLI_REFERENCE.md - All commands and options -- [ ] ARCHITECTURE.md - System design, schema, algorithms - -### Phase 2 Docs -- [ ] AGENT_GUIDE.md - Comprehensive guide for AI agents -- [ ] MIGRATION_GUIDE.md - Phase 1 → 2 → 3 instructions -- [ ] QUERY_SYNTAX.md - FTS5 query patterns - -### Phase 3 Docs -- [ ] API.md - Programmatic API for plugins -- [ ] CONTRIBUTING.md - Development setup, testing -- [ ] TROUBLESHOOTING.md - Common issues and solutions - ---- - -## Success Metrics - -### Phase 1 (MVP) -- [ ] Can store/retrieve memories -- [ ] Search works for exact matches -- [ ] Performance: <50ms for 500 memories -- [ ] Test coverage: >80% -- [ ] No critical bugs - -### Phase 2 (FTS5) -- [ ] Migration completes without data loss -- [ ] Search quality improved (relevance ranking) -- [ ] Performance: <100ms for 10K memories -- [ ] Boolean operators work correctly - -### Phase 3 (Fuzzy) -- [ ] Typos correctly matched (edit distance ≤2) -- [ ] Fuzzy cascade improves result count -- [ ] Performance: <200ms for 10K memories -- [ ] No false positives (threshold tuned) - -### Overall -- [ ] Agents use system regularly in workflows -- [ ] Search results are high-quality (relevant) -- [ ] Token-efficient (limited, ranked results) -- [ ] No performance complaints -- [ ] Documentation comprehensive - ---- - -## Development Workflow - -### Daily Checklist -1. Pull latest changes -2. Run tests: `npm test` -3. Work on current step -4. Write/update tests -5. Update this document (mark checkboxes) -6. Commit with clear message -7. Update CHANGELOG.md - -### Before Phase Completion -1. All checkpoints passed -2. Tests passing (>80% coverage) -3. Documentation updated -4. Performance benchmarks run -5. Manual testing completed -6. 
Changelog updated - -### Commit Message Format -``` -(): - -Examples: -feat(search): implement FTS5 search with BM25 ranking -fix(store): validate content length before insertion -docs(readme): add installation instructions -test(search): add integration tests for filters -refactor(db): extract connection logic to separate file -``` - ---- - -## Troubleshooting - -### Common Issues - -**Issue:** SQLite FTS5 not available -**Solution:** Ensure SQLite version ≥3.35, check `better-sqlite3` includes FTS5 - -**Issue:** Database locked errors -**Solution:** Enable WAL mode: `PRAGMA journal_mode = WAL` - -**Issue:** Slow searches with large dataset -**Solution:** Check indexes exist, run `ANALYZE`, consider migration to next phase - -**Issue:** Tag filtering not working -**Solution:** Verify tag normalization (lowercase), check many-to-many joins - ---- - -## Next Session Continuation - -**For the next developer/AI agent:** - -1. **Check Current Phase:** Review checkboxes in this file to see progress -2. **Run Tests:** `npm test` to verify current state -3. **Check Database:** `sqlite3 ~/.config/opencode/memories.db .schema` to see current schema version -4. **Review SPECIFICATION.md:** Understand overall architecture -5. **Pick Next Step:** Find first unchecked item in current phase -6. **Update This File:** Mark completed checkboxes as you go - -**Quick Start Commands:** -```bash -cd llmemory -npm install # Install dependencies -npm test # Run test suite -npm run start -- --help # Test CLI -``` - -**Current Status:** Phase 0 complete (planning/documentation), ready to begin Phase 1 implementation. - -**Estimated Time to MVP:** 12-15 hours of focused development. - ---- - -## Resources - -- **SQLite FTS5:** https://www.sqlite.org/fts5.html -- **better-sqlite3:** https://github.com/WiseLibs/better-sqlite3 -- **Commander.js:** https://github.com/tj/commander.js -- **Vitest:** https://vitest.dev/ - ---- - -## Changelog - -### 2025-10-29 - Phase 0 Complete -- Project structure defined -- Comprehensive specification written -- Implementation plan created -- Agent investigation reports integrated -- Ready for Phase 1 development diff --git a/shared/linked-dotfiles/opencode/llmemory/NEXT_SESSION.md b/shared/linked-dotfiles/opencode/llmemory/NEXT_SESSION.md deleted file mode 100644 index 8439b0d..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/NEXT_SESSION.md +++ /dev/null @@ -1,306 +0,0 @@ -# Next Session Guide - LLMemory - -## Quick Start for Next Developer/Agent - -**Project:** LLMemory - AI Agent Memory System -**Current Phase:** Phase 0 Complete (Planning + Prototype) -**Next Phase:** Phase 1 - MVP Implementation -**Estimated Time to MVP:** 12-15 hours - -## What's Been Done - -### ✅ Completed -1. **Planning & Architecture** - - Two competing investigate agents analyzed implementation strategies - - Comprehensive SPECIFICATION.md created (data model, search algorithms, CLI design) - - Detailed IMPLEMENTATION_PLAN.md with step-by-step checkboxes - - ARCHITECTURE.md with algorithm pseudo-code and performance targets - -2. **Project Structure** - - Directory created: `/home/nate/nixos/shared/linked-dotfiles/opencode/llmemory/` - - package.json configured with dependencies - - .gitignore set up - - bin/memory executable created - - CLI prototype implemented (command structure validated) - -3. 
**Documentation** - - README.md with overview and status - - SPECIFICATION.md with complete technical design - - IMPLEMENTATION_PLAN.md with phased roadmap - - ARCHITECTURE.md with algorithms and data flows - - PROTOTYPE.md with CLI validation results - - NEXT_SESSION.md (this file) - -4. **CLI Prototype** - - All commands structured with Commander.js - - Help text working - - Argument parsing validated - - Ready for real implementation - -### ❌ Not Yet Implemented -- Database layer (SQLite) -- Actual storage/retrieval logic -- Search algorithms (LIKE, FTS5, fuzzy) -- Tests -- Agent guide documentation - -## What to Do Next - -### Immediate Next Step: Phase 1 - MVP - -**Goal:** Working memory system with basic LIKE search in 2-3 days - -**Start with Step 1.2:** Database Layer - Schema & Connection -**Location:** IMPLEMENTATION_PLAN.md - Phase 1, Step 1.2 - -### Step-by-Step - -1. **Review Documents** (15 minutes) - ```bash - cd llmemory - cat README.md # Overview - cat SPECIFICATION.md # Technical spec - cat IMPLEMENTATION_PLAN.md # Next steps with checkboxes - cat docs/ARCHITECTURE.md # Algorithms and design - ``` - -2. **Install Dependencies** (5 minutes) - ```bash - npm install - # Should install: better-sqlite3, commander, chalk, date-fns, vitest - ``` - -3. **Test Prototype** (5 minutes) - ```bash - node src/cli.js --help - node src/cli.js store "test" --tags demo - # Should show placeholder output - ``` - -4. **Create Database Layer** (2 hours) - ```bash - # Create these files: - mkdir -p src/db - touch src/db/connection.js # Database connection & initialization - touch src/db/schema.js # Phase 1 schema definition - touch src/db/queries.js # Prepared statements - ``` - - **Implementation checklist:** - - [ ] SQLite connection with WAL mode - - [ ] Schema creation (memories, tags, memory_tags) - - [ ] Indexes on created_at, expires_at, tags - - [ ] Metadata table with schema_version - - [ ] Prepared statements for CRUD operations - - [ ] Transaction helpers - - **Reference:** SPECIFICATION.md - "Data Schema" section - **SQL Schema:** IMPLEMENTATION_PLAN.md - Phase 1, Step 1.2 - -5. **Implement Store Command** (2 hours) - ```bash - mkdir -p src/commands src/utils - touch src/commands/store.js - touch src/utils/validation.js - touch src/utils/tags.js - ``` - - **Implementation checklist:** - - [ ] Content validation (length < 10KB) - - [ ] Tag parsing (comma-separated, normalize to lowercase) - - [ ] Expiration date parsing - - [ ] Insert memory into DB - - [ ] Insert/link tags (get-or-create) - - [ ] Return memory ID with success message - - **Reference:** SPECIFICATION.md - "Memory Format Guidelines" - -6. **Implement Search Command** (3 hours) - ```bash - mkdir -p src/search - touch src/commands/search.js - touch src/search/like.js - touch src/utils/formatting.js - ``` - - **Implementation checklist:** - - [ ] Build LIKE query with wildcards - - [ ] Tag filtering (AND logic) - - [ ] Date filtering (after/before) - - [ ] Agent filtering (entered_by) - - [ ] Exclude expired memories - - [ ] Order by created_at DESC - - [ ] Format output (plain text with colors) - - **Reference:** ARCHITECTURE.md - "Phase 1: LIKE Search" algorithm - -7. 
**Continue with Steps 1.5-1.8** - See IMPLEMENTATION_PLAN.md for: - - List command - - Prune command - - CLI integration (replace placeholders) - - Testing - -## Key Files Reference - -### Planning & Specification -- `SPECIFICATION.md` - **Start here** for technical design -- `IMPLEMENTATION_PLAN.md` - **Your checklist** for step-by-step tasks -- `docs/ARCHITECTURE.md` - Algorithm details and performance targets -- `README.md` - Project overview and status - -### Code Structure -- `src/cli.js` - CLI entry point (currently placeholder) -- `src/commands/` - Command implementations (to be created) -- `src/db/` - Database layer (to be created) -- `src/search/` - Search algorithms (to be created) -- `src/utils/` - Utilities (to be created) -- `test/` - Test suite (to be created) - -### Important Patterns - -**Database Location:** -```javascript -// Default: ~/.config/opencode/memories.db -// Override with: --db flag -``` - -**Schema Version Tracking:** -```javascript -// metadata table stores current schema version -// Used for migration triggers -``` - -**Search Evolution:** -```javascript -// Phase 1: LIKE search (simple, <500 memories) -// Phase 2: FTS5 (production, 10K+ memories) -// Phase 3: Fuzzy (typo tolerance, 100K+ memories) -``` - -## Development Workflow - -### Daily Checklist -1. Pull latest changes (if working with others) -2. Run tests: `npm test` -3. Pick next unchecked item in IMPLEMENTATION_PLAN.md -4. Implement feature with TDD (write test first) -5. Update checkboxes in IMPLEMENTATION_PLAN.md -6. Commit with clear message -7. Update CHANGELOG.md (if created) - -### Testing -```bash -npm test # Run all tests -npm run test:watch # Watch mode -npm run test:coverage # Coverage report -``` - -### Commit Message Format -``` -(): - -Examples: -feat(db): implement SQLite connection with WAL mode -feat(store): add content validation and tag parsing -test(search): add integration tests for LIKE search -docs(spec): clarify fuzzy matching threshold -``` - -## Common Questions - -### Q: Which search algorithm should I start with? -**A:** Start with LIKE search (Phase 1). It's simple and sufficient for <500 memories. Migrate to FTS5 when needed. - -### Q: Where should the database be stored? -**A:** `~/.config/opencode/memories.db` by default. Override with `--db` flag. - -### Q: How do I handle expiration? -**A:** Always filter `WHERE expires_at IS NULL OR expires_at > now()` in queries. Manual cleanup with `memory prune`. - -### Q: What about fuzzy matching? -**A:** Skip for Phase 1. Implement in Phase 3 after FTS5 is working. - -### Q: Should I use TypeScript? -**A:** Optional. JavaScript is fine for now. TypeScript can be added later if needed. - -### Q: How do I test without a real database? -**A:** Use `:memory:` SQLite database for tests. Fast and isolated. 
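For the last question, a minimal sketch of the `:memory:` setup, following the same pattern as the planned integration tests. It assumes `initSchema` is exported from `src/db/schema.js` and `storeMemory` from `src/commands/store.js`, as referenced elsewhere in this plan.

```javascript
// Minimal in-memory test sketch (assumes initSchema/storeMemory exports).
import { describe, test, expect, beforeEach } from 'vitest';
import Database from 'better-sqlite3';
import { initSchema } from '../src/db/schema.js';
import { storeMemory } from '../src/commands/store.js';

describe('in-memory database example', () => {
  let db;

  beforeEach(() => {
    db = new Database(':memory:'); // fast, isolated, nothing to clean up
    initSchema(db);
  });

  test('stores and reads back a memory', () => {
    const { id } = storeMemory(db, { content: 'test memory', tags: 'test' });
    const row = db.prepare('SELECT content FROM memories WHERE id = ?').get(id);
    expect(row.content).toBe('test memory');
  });
});
```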
- -## Performance Targets - -| Phase | Dataset | Latency | Storage | -|-------|---------|---------|---------| -| 1 (MVP) | <500 | <50ms | Base | -| 2 (FTS5) | 10K | <100ms | +30% | -| 3 (Fuzzy) | 100K+ | <200ms | +200% | - -## Troubleshooting - -**Problem:** `better-sqlite3` won't install -**Solution:** Ensure build tools installed: `sudo apt install build-essential python3` - -**Problem:** Database locked -**Solution:** Enable WAL mode: `PRAGMA journal_mode = WAL;` - -**Problem:** Tests failing -**Solution:** Use `:memory:` database for tests, not persistent file - -**Problem:** Slow searches -**Solution:** Check indexes exist: `sqlite3 memories.db ".schema"` - -## Success Criteria for Phase 1 - -- [ ] Can store memories with tags and expiration -- [ ] Can search with basic LIKE matching -- [ ] Can list recent memories -- [ ] Can prune expired memories -- [ ] All tests passing (>80% coverage) -- [ ] Query latency <50ms for 500 memories -- [ ] Help text comprehensive -- [ ] CLI works end-to-end - -**Validation Test:** -```bash -memory store "Docker Compose uses bridge networks by default" --tags docker,networking -memory store "Kubernetes pods share network namespace" --tags kubernetes,networking -memory search "networking" --tags docker -# Should return only Docker memory -memory list --limit 10 -# Should show both memories -memory stats -# Should show 2 memories, 3 unique tags -``` - -## Resources - -- **SQLite FTS5:** https://www.sqlite.org/fts5.html -- **better-sqlite3:** https://github.com/WiseLibs/better-sqlite3 -- **Commander.js:** https://github.com/tj/commander.js -- **Vitest:** https://vitest.dev/ - -## Contact/Context - -**Project Location:** `/home/nate/nixos/shared/linked-dotfiles/opencode/llmemory/` -**OpenCode Context:** This is a plugin for the OpenCode agent system -**Session Context:** Planning done by two investigate agents (see agent reports in SPECIFICATION.md) - -## Final Notes - -**This project is well-documented and ready to implement.** - -Everything you need is in: -1. **SPECIFICATION.md** - What to build -2. **IMPLEMENTATION_PLAN.md** - How to build it (step-by-step) -3. **ARCHITECTURE.md** - Why it's designed this way - -Start with IMPLEMENTATION_PLAN.md Phase 1, Step 1.2 and follow the checkboxes! - -Good luck! 🚀 - ---- - -**Created:** 2025-10-29 -**Phase 0 Status:** ✅ Complete -**Next Phase:** Phase 1 - MVP Implementation -**Time Estimate:** 12-15 hours to working MVP diff --git a/shared/linked-dotfiles/opencode/llmemory/PROTOTYPE.md b/shared/linked-dotfiles/opencode/llmemory/PROTOTYPE.md deleted file mode 100644 index 4bf510e..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/PROTOTYPE.md +++ /dev/null @@ -1,154 +0,0 @@ -# LLMemory Prototype - CLI Interface Validation - -## Status: ✅ Prototype Complete - -This document describes the CLI prototype created to validate the user experience before full implementation. - -## What's Implemented - -### Executable Structure -- ✅ `bin/memory` - Executable wrapper with error handling -- ✅ `src/cli.js` - Commander.js-based CLI with all command stubs -- ✅ `package.json` - Dependencies and scripts configured - -### Commands (Placeholder) -All commands are implemented as placeholders that: -1. Accept the correct arguments and options -2. Display what would happen -3. 
Reference the implementation plan step - -**Implemented command structure:** -- `memory store [options]` -- `memory search [options]` -- `memory list [options]` -- `memory prune [options]` -- `memory stats [options]` -- `memory export ` -- `memory import ` -- `memory --agent-context` -- Global options: `--db`, `--verbose`, `--quiet` - -## Testing the Prototype - -### Prerequisites -```bash -cd llmemory -npm install -``` - -### Manual Testing - -```bash -# Test help output -node src/cli.js --help -node src/cli.js store --help -node src/cli.js search --help - -# Test store command structure -node src/cli.js store "Test memory" --tags docker,networking --expires "2026-01-01" - -# Test search command structure -node src/cli.js search "docker" --tags networking --limit 5 --json - -# Test list command -node src/cli.js list --limit 10 --sort created - -# Test prune command -node src/cli.js prune --dry-run - -# Test agent context -node src/cli.js --agent-context - -# Test global options -node src/cli.js search "test" --verbose --db /tmp/test.db -``` - -### Expected Output - -Each command should: -1. ✅ Parse arguments correctly -2. ✅ Display received parameters -3. ✅ Reference the implementation plan -4. ✅ Exit cleanly - -Example: -```bash -$ node src/cli.js store "Docker uses bridge networks" --tags docker - -Store command - not yet implemented -Content: Docker uses bridge networks -Options: { tags: 'docker' } - -See IMPLEMENTATION_PLAN.md Step 1.3 for implementation details -``` - -## CLI Design Validation - -### ✅ Confirmed Design Decisions - -1. **Commander.js is suitable** - - Clean command structure - - Good help text generation - - Option parsing works well - - Subcommand support - -2. **Argument structure is intuitive** - - Positional args for required params (content, query, file) - - Options for optional params (tags, filters, limits) - - Global options for cross-cutting concerns - -3. **Help text is clear** - ```bash - memory --help # Lists all commands - memory store --help # Shows store options - ``` - -4. **Flag naming is consistent** - - `--tags` for tag filtering (used across commands) - - `--limit` for result limiting - - `--dry-run` for safe preview - - Short forms where sensible: `-t`, `-l`, `-e` - -### 🔄 Potential Improvements (Future) - -1. **Interactive mode** (optional dependency) - - `memory store` (no args) → prompts for content - - `inquirer` for tag autocomplete - -2. **Aliases** - - `memory s` → `memory search` - - `memory ls` → `memory list` - -3. **Output formatting** - - Add `--format` option (plain, json, markdown, table) - - Color-coded output with `chalk` - -4. **Config file support** - - `~/.config/llmemory/config.json` - - Set defaults (limit, db path, output format) - -## Next Steps - -1. ✅ Prototype validated - CLI structure confirmed -2. **Ready for Phase 1 implementation** -3. 
Start with Step 1.2: Database Layer (see IMPLEMENTATION_PLAN.md) - -## Feedback for Implementation - -### What Worked Well -- Command structure is intuitive -- Option names are clear -- Help text is helpful -- Error handling in bin/memory is robust - -### What to Keep in Mind -- Add proper validation in real implementation -- Color output for better UX (chalk) -- Consider table output for list command (cli-table3) -- Implement proper exit codes (0=success, 1=error) - ---- - -**Prototype Created:** 2025-10-29 -**Status:** Validation Complete -**Next Phase:** Phase 1 Implementation (Database Layer) diff --git a/shared/linked-dotfiles/opencode/llmemory/README.md b/shared/linked-dotfiles/opencode/llmemory/README.md deleted file mode 100644 index dc93015..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/README.md +++ /dev/null @@ -1,305 +0,0 @@ -# LLMemory - AI Agent Memory System - -A persistent memory/journal system for AI agents with grep-like search and fuzzy matching. - -## Overview - -LLMemory provides AI agents with long-term memory across sessions. Think of it as a personal knowledge base with powerful search capabilities, designed specifically for agent workflows. - -**Key Features:** -- 🔍 **Grep-like search** - Familiar query syntax for AI agents -- 🎯 **Fuzzy matching** - Handles typos automatically -- 🏷️ **Tag-based organization** - Easy categorization and filtering -- ⏰ **Expiration support** - Auto-cleanup of time-sensitive info -- 📊 **Relevance ranking** - Best results first, token-efficient -- 🔌 **OpenCode integration** - Plugin API for seamless workflows - -## Status - -**Current Phase:** Planning Complete (Phase 0) -**Next Phase:** MVP Implementation (Phase 1) - -This project is in the initial planning stage. The architecture and implementation plan are complete, ready for development. 
- -## Quick Start (Future) - -```bash -# Installation (when available) -npm install -g llmemory - -# Store a memory -memory store "Docker Compose uses bridge networks by default" \ - --tags docker,networking - -# Search memories -memory search "docker networking" - -# List recent memories -memory list --limit 10 - -# Show agent documentation -memory --agent-context -``` - -## Documentation - -- **[SPECIFICATION.md](./SPECIFICATION.md)** - Complete technical specification -- **[IMPLEMENTATION_PLAN.md](./IMPLEMENTATION_PLAN.md)** - Phased development plan -- **[ARCHITECTURE.md](./docs/ARCHITECTURE.md)** - System design (to be created) -- **[AGENT_GUIDE.md](./docs/AGENT_GUIDE.md)** - Guide for AI agents (to be created) - -## Architecture - -### Three-Phase Implementation - -**Phase 1: MVP (2-3 days)** -- Basic CLI with store/search/list/prune commands -- Simple LIKE-based search -- Tag filtering and expiration handling -- Target: <500 memories, <50ms search - -**Phase 2: FTS5 (3-5 days)** -- Migrate to SQLite FTS5 for production search -- BM25 relevance ranking -- Boolean operators (AND/OR/NOT) -- Target: 10K+ memories, <100ms search - -**Phase 3: Fuzzy Layer (3-4 days)** -- Trigram indexing for typo tolerance -- Levenshtein distance matching -- Intelligent cascade (exact → fuzzy) -- Target: 100K+ memories, <200ms search - -### Technology Stack - -- **Language:** Node.js (JavaScript/TypeScript) -- **Database:** SQLite with better-sqlite3 -- **CLI:** Commander.js -- **Search:** FTS5 + trigram fuzzy matching -- **Testing:** Vitest - -## Project Structure - -``` -llmemory/ -├── src/ -│ ├── cli.js # CLI entry point -│ ├── commands/ # Command implementations -│ ├── db/ # Database layer -│ ├── search/ # Search strategies (LIKE, FTS5, fuzzy) -│ ├── utils/ # Utilities (validation, formatting) -│ └── extractors/ # Auto-extraction (*Remember* pattern) -├── test/ # Test suite -├── docs/ # Documentation -├── bin/ # Executable wrapper -├── SPECIFICATION.md # Technical spec -├── IMPLEMENTATION_PLAN.md # Development roadmap -└── README.md # This file -``` - -## Development - -### Setup - -```bash -cd llmemory -npm install -npm test -``` - -### Implementation Status - -See [IMPLEMENTATION_PLAN.md](./IMPLEMENTATION_PLAN.md) for detailed progress tracking. - -**Current Progress:** -- [x] Phase 0: Planning and documentation -- [ ] Phase 1: MVP (Simple LIKE search) - - [ ] Project setup - - [ ] Database layer - - [ ] Store command - - [ ] Search command - - [ ] List command - - [ ] Prune command - - [ ] CLI integration - - [ ] Testing -- [ ] Phase 2: FTS5 migration -- [ ] Phase 3: Fuzzy layer - -### Contributing - -1. Review [SPECIFICATION.md](./SPECIFICATION.md) for architecture -2. Check [IMPLEMENTATION_PLAN.md](./IMPLEMENTATION_PLAN.md) for next steps -3. Pick an uncompleted task from the current phase -4. Write tests first (TDD approach) -5. Implement feature -6. Update checkboxes in IMPLEMENTATION_PLAN.md -7. 
Commit with clear message - -### Testing - -```bash -# Run all tests -npm test - -# Run tests in watch mode -npm run test:watch - -# Run specific test file -npm test search.test.js - -# Coverage report -npm run test:coverage -``` - -## Usage Examples (Future) - -### Storing Memories - -```bash -# Basic storage -memory store "PostgreSQL VACUUM FULL locks tables, use VACUUM ANALYZE instead" - -# With tags -memory store "Docker healthchecks need curl --fail for proper exit codes" \ - --tags docker,best-practices - -# With expiration -memory store "Staging server at https://staging.example.com" \ - --tags infrastructure,staging \ - --expires "2025-12-31" - -# From agent -memory store "NixOS flake.lock must be committed for reproducible builds" \ - --tags nixos,build-system \ - --entered-by investigate-agent -``` - -### Searching Memories - -```bash -# Basic search -memory search "docker" - -# Multiple terms (implicit AND) -memory search "docker networking" - -# Boolean operators -memory search "docker AND compose" -memory search "docker OR podman" -memory search "database NOT postgresql" - -# Phrase search -memory search '"exact phrase"' - -# With filters -memory search "kubernetes" --tags production,k8s -memory search "error" --after "2025-10-01" -memory search "config" --entered-by optimize-agent --limit 5 -``` - -### Managing Memories - -```bash -# List recent -memory list --limit 20 - -# List by tag -memory list --tags docker --sort created --order desc - -# Show statistics -memory stats -memory stats --tags # Tag frequency -memory stats --agents # Memories per agent - -# Prune expired -memory prune --dry-run # Preview -memory prune --force # Execute - -# Export/import -memory export backup.json -memory import backup.json -``` - -## Memory Format Guidelines - -### Good Memory Examples - -```bash -# Technical detail -memory store "Git worktree: 'git worktree add -b feature ../feature' creates parallel working directory without cloning" --tags git,workflow - -# Error resolution -memory store "Node.js ENOSPC: Increase inotify watches with 'echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p'" --tags nodejs,linux,troubleshooting - -# Configuration pattern -memory store "Nginx reverse proxy: Set 'proxy_set_header X-Real-IP \$remote_addr' to preserve client IP through proxy chain" --tags nginx,networking -``` - -### Anti-Patterns - -```bash -# Too vague ❌ -memory store "Fixed the bug" - -# Better ✅ -memory store "Fixed React infinite render loop by adding missing dependencies to useEffect array" - -# Widely known ❌ -memory store "Docker is a containerization platform" - -# Specific insight ✅ -memory store "Docker container networking requires explicit subnet config when using multiple custom networks" -``` - -## OpenCode Integration (Future) - -### Plugin API - -```javascript -import llmemory from '@opencode/llmemory'; - -// Store from agent -await llmemory.api.store( - 'Discovered performance bottleneck in database query', - { tags: ['performance', 'database'], entered_by: 'optimize-agent' } -); - -// Search -const results = await llmemory.api.search('performance', { - tags: ['database'], - limit: 5 -}); - -// Auto-extract *Remember* patterns -const memories = await llmemory.api.extractRemember(agentOutput, { - agentName: 'investigate-agent', - currentTask: 'debugging' -}); -``` - -## Performance Targets - -| Phase | Dataset Size | Search Latency | Storage Overhead | -|-------|-------------|----------------|------------------| -| 1 (MVP) | <500 memories | 
<50ms | Base | -| 2 (FTS5) | 10K memories | <100ms | +30% (FTS5 index) | -| 3 (Fuzzy) | 100K+ memories | <200ms | +200% (trigrams) | - -## License - -MIT - -## Credits - -**Planning & Design:** -- Agent A: Pragmatic iteration strategy, OpenCode integration patterns -- Agent B: Technical depth, comprehensive implementation specifications -- Combined approach: Hybrid FTS5 + fuzzy matching architecture - -**Implementation:** To be determined - ---- - -**Status:** Phase 0 Complete - Ready for Phase 1 implementation -**Next Step:** Project setup and database layer (see IMPLEMENTATION_PLAN.md) -**Estimated Time to MVP:** 12-15 hours of focused development diff --git a/shared/linked-dotfiles/opencode/llmemory/SPECIFICATION.md b/shared/linked-dotfiles/opencode/llmemory/SPECIFICATION.md deleted file mode 100644 index b40b382..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/SPECIFICATION.md +++ /dev/null @@ -1,950 +0,0 @@ -# LLMemory - AI Agent Memory System - -## Overview - -LLMemory is a persistent memory/journal system for AI agents, providing grep-like search with fuzzy matching for efficient knowledge retrieval across sessions. - -## Core Requirements - -### Storage -- Store memories with metadata: `created_at`, `entered_by`, `expires_at`, `tags` -- Local SQLite database (no cloud dependencies) -- Content limit: 10KB per memory -- Tag-based organization with normalized schema - -### Retrieval -- Grep/ripgrep-like query syntax (familiar to AI agents) -- Fuzzy matching with configurable threshold -- Relevance ranking (BM25 + edit distance + recency) -- Metadata filtering (tags, dates, agent) -- Token-efficient: limit results, prioritize quality over quantity - -### Interface -- Global CLI tool: `memory [command]` -- Commands: `store`, `search`, `list`, `prune`, `stats`, `export`, `import` -- `--agent-context` flag for comprehensive agent documentation -- Output formats: plain text, JSON, markdown - -### Integration -- OpenCode plugin architecture -- Expose API for programmatic access -- Auto-extraction of `*Remember*` patterns from agent output - -## Implementation Strategy - -### Phase 1: MVP (Simple LIKE Search) -**Goal:** Ship in 2-3 days, validate concept with real usage - -**Features:** -- Basic schema (memories, tags tables) -- Core commands (store, search, list, prune) -- Simple LIKE-based search with wildcards -- Plain text output -- Tag filtering -- Expiration handling - -**Success Criteria:** -- Can store and retrieve memories -- Search works for exact/prefix matches -- Tags functional -- Performance acceptable for <500 memories - -**Database:** -```sql -CREATE TABLE memories ( - id INTEGER PRIMARY KEY, - content TEXT NOT NULL CHECK(length(content) <= 10000), - created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')), - entered_by TEXT, - expires_at INTEGER -); - -CREATE TABLE tags ( - id INTEGER PRIMARY KEY, - name TEXT UNIQUE COLLATE NOCASE -); - -CREATE TABLE memory_tags ( - memory_id INTEGER, - tag_id INTEGER, - PRIMARY KEY (memory_id, tag_id), - FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE, - FOREIGN KEY (tag_id) REFERENCES tags(id) ON DELETE CASCADE -); -``` - -**Search Logic:** -```javascript -// Simple case-insensitive LIKE with wildcards -WHERE LOWER(content) LIKE LOWER('%' || ? 
|| '%') -AND (expires_at IS NULL OR expires_at > strftime('%s', 'now')) -ORDER BY created_at DESC -``` - -### Phase 2: FTS5 Migration -**Trigger:** Dataset > 500 memories OR query latency > 500ms - -**Features:** -- Add FTS5 virtual table -- Migrate existing data -- Implement BM25 ranking -- Support boolean operators (AND/OR/NOT) -- Phrase queries with quotes -- Prefix matching with `*` - -**Database Addition:** -```sql -CREATE VIRTUAL TABLE memories_fts USING fts5( - content, - content='memories', - content_rowid='id', - tokenize='porter unicode61 remove_diacritics 2' -); - --- Triggers to keep in sync -CREATE TRIGGER memories_ai AFTER INSERT ON memories BEGIN - INSERT INTO memories_fts(rowid, content) VALUES (new.id, new.content); -END; --- ... (update/delete triggers) -``` - -**Search Logic:** -```javascript -// FTS5 match with BM25 ranking -SELECT m.*, mf.rank -FROM memories_fts mf -JOIN memories m ON m.id = mf.rowid -WHERE memories_fts MATCH ? -ORDER BY mf.rank -``` - -### Phase 3: Fuzzy Layer -**Goal:** Handle typos and inexact matches - -**Features:** -- Trigram indexing -- Levenshtein distance calculation -- Intelligent cascade: exact (FTS5) → fuzzy (trigram) -- Combined relevance scoring -- Configurable threshold (default: 0.7) - -**Database Addition:** -```sql -CREATE TABLE trigrams ( - trigram TEXT NOT NULL, - memory_id INTEGER NOT NULL, - position INTEGER NOT NULL, - FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE -); -CREATE INDEX idx_trigrams_trigram ON trigrams(trigram); -``` - -**Search Logic:** -```javascript -// 1. Try FTS5 exact match -let results = ftsSearch(query); - -// 2. If <5 results, try fuzzy -if (results.length < 5) { - const fuzzyResults = trigramSearch(query, threshold); - results = mergeAndDedupe(results, fuzzyResults); -} - -// 3. 
Re-rank by combined score -results.forEach(r => { - r.score = 0.4 * bmr25Score - + 0.3 * trigramSimilarity - + 0.2 * editDistanceScore - + 0.1 * recencyScore; -}); -``` - -## Architecture - -### Technology Stack -- **Language:** Node.js (JavaScript/TypeScript) -- **Database:** SQLite with better-sqlite3 -- **CLI Framework:** Commander.js -- **Output Formatting:** chalk (colors), marked-terminal (markdown) -- **Date Parsing:** date-fns -- **Testing:** Vitest - -### Directory Structure -``` -llmemory/ -├── src/ -│ ├── cli.js # CLI entry point -│ ├── commands/ -│ │ ├── store.js -│ │ ├── search.js -│ │ ├── list.js -│ │ ├── prune.js -│ │ ├── stats.js -│ │ └── export.js -│ ├── db/ -│ │ ├── connection.js # Database setup -│ │ ├── schema.js # Schema definitions -│ │ ├── migrations.js # Migration runner -│ │ └── queries.js # Prepared statements -│ ├── search/ -│ │ ├── like.js # Phase 1: LIKE search -│ │ ├── fts.js # Phase 2: FTS5 search -│ │ ├── fuzzy.js # Phase 3: Fuzzy matching -│ │ └── ranking.js # Relevance scoring -│ ├── utils/ -│ │ ├── dates.js -│ │ ├── tags.js -│ │ ├── formatting.js -│ │ └── validation.js -│ └── extractors/ -│ └── remember.js # Auto-extract *Remember* patterns -├── test/ -│ ├── search.test.js -│ ├── fuzzy.test.js -│ ├── integration.test.js -│ └── fixtures/ -├── docs/ -│ ├── ARCHITECTURE.md -│ ├── AGENT_GUIDE.md # For --agent-context -│ ├── CLI_REFERENCE.md -│ └── API.md -├── bin/ -│ └── memory # Executable -├── package.json -├── SPECIFICATION.md # This file -├── IMPLEMENTATION_PLAN.md -└── README.md -``` - -### CLI Interface - -#### Commands - -```bash -# Store a memory -memory store [options] - --tags Comma-separated tags - --expires Expiration date (ISO 8601 or natural language) - --entered-by Agent/user identifier - --file Read content from file - -# Search memories -memory search [options] - --tags Filter by tags (AND) - --any-tag Filter by tags (OR) - --after Created after date - --before Created before date - --entered-by Filter by creator - --limit Max results (default: 10) - --offset Pagination offset - --fuzzy Enable fuzzy matching (default: auto) - --no-fuzzy Disable fuzzy matching - --threshold <0-1> Fuzzy match threshold (default: 0.7) - --json JSON output - --markdown Markdown output - -# List recent memories -memory list [options] - --limit Max results (default: 20) - --offset Pagination offset - --tags Filter by tags - --sort Sort by: created, expires, content - --order Sort order (default: desc) - -# Prune expired memories -memory prune [options] - --dry-run Show what would be deleted - --force Skip confirmation - --before Delete before date (even if not expired) - -# Show statistics -memory stats [options] - --tags Show tag frequency - --agents Show memories per agent - -# Export/import -memory export Export to JSON -memory import Import from JSON - -# Global options ---agent-context Display agent documentation ---db Custom database location ---verbose Detailed logging ---quiet Suppress non-error output -``` - -#### Query Syntax - -```bash -# Basic -memory search "docker compose" # Both terms (implicit AND) -memory search "docker AND compose" # Explicit AND -memory search "docker OR podman" # Either term -memory search "docker NOT swarm" # Exclude term -memory search '"exact phrase"' # Phrase search -memory search "docker*" # Prefix matching - -# With filters -memory search "docker" --tags devops,networking -memory search "error" --after "2025-10-01" -memory search "config" --entered-by investigate-agent - -# Fuzzy (automatic typo tolerance) -memory search 
"dokcer" # Finds "docker" -memory search "kuberntes" # Finds "kubernetes" -``` - -### Data Schema - -#### Complete Schema (All Phases) - -```sql --- Core tables -CREATE TABLE memories ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - content TEXT NOT NULL CHECK(length(content) <= 10000), - created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')), - entered_by TEXT, - expires_at INTEGER, - CHECK(expires_at IS NULL OR expires_at > created_at) -); - -CREATE INDEX idx_memories_created ON memories(created_at DESC); -CREATE INDEX idx_memories_expires ON memories(expires_at) WHERE expires_at IS NOT NULL; -CREATE INDEX idx_memories_entered_by ON memories(entered_by); - -CREATE TABLE tags ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT NOT NULL UNIQUE COLLATE NOCASE, - created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')) -); - -CREATE INDEX idx_tags_name ON tags(name); - -CREATE TABLE memory_tags ( - memory_id INTEGER NOT NULL, - tag_id INTEGER NOT NULL, - PRIMARY KEY (memory_id, tag_id), - FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE, - FOREIGN KEY (tag_id) REFERENCES tags(id) ON DELETE CASCADE -); - -CREATE INDEX idx_memory_tags_tag ON memory_tags(tag_id); - --- Phase 2: FTS5 -CREATE VIRTUAL TABLE memories_fts USING fts5( - content, - content='memories', - content_rowid='id', - tokenize='porter unicode61 remove_diacritics 2' -); - -CREATE TRIGGER memories_ai AFTER INSERT ON memories BEGIN - INSERT INTO memories_fts(rowid, content) VALUES (new.id, new.content); -END; - -CREATE TRIGGER memories_ad AFTER DELETE ON memories BEGIN - DELETE FROM memories_fts WHERE rowid = old.id; -END; - -CREATE TRIGGER memories_au AFTER UPDATE ON memories BEGIN - DELETE FROM memories_fts WHERE rowid = old.id; - INSERT INTO memories_fts(rowid, content) VALUES (new.id, new.content); -END; - --- Phase 3: Trigrams -CREATE TABLE trigrams ( - trigram TEXT NOT NULL, - memory_id INTEGER NOT NULL, - position INTEGER NOT NULL, - FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE -); - -CREATE INDEX idx_trigrams_trigram ON trigrams(trigram); -CREATE INDEX idx_trigrams_memory ON trigrams(memory_id); - --- Metadata -CREATE TABLE metadata ( - key TEXT PRIMARY KEY, - value TEXT NOT NULL -); - -INSERT INTO metadata (key, value) VALUES ('schema_version', '1'); -INSERT INTO metadata (key, value) VALUES ('created_at', strftime('%s', 'now')); - --- Useful view -CREATE VIEW memories_with_tags AS -SELECT - m.id, - m.content, - m.created_at, - m.entered_by, - m.expires_at, - GROUP_CONCAT(t.name, ',') as tags -FROM memories m -LEFT JOIN memory_tags mt ON m.id = mt.memory_id -LEFT JOIN tags t ON mt.tag_id = t.id -GROUP BY m.id; -``` - -## Search Algorithm Details - -### Phase 1: LIKE Search - -```javascript -function searchWithLike(query, filters = {}) { - const { tags = [], after, before, enteredBy, limit = 10 } = filters; - - let sql = ` - SELECT DISTINCT m.id, m.content, m.created_at, m.entered_by, m.expires_at, - GROUP_CONCAT(t.name, ',') as tags - FROM memories m - LEFT JOIN memory_tags mt ON m.id = mt.memory_id - LEFT JOIN tags t ON mt.tag_id = t.id - WHERE LOWER(m.content) LIKE LOWER(?) - AND (m.expires_at IS NULL OR m.expires_at > strftime('%s', 'now')) - `; - - const params = [`%${query}%`]; - - // Tag filtering - if (tags.length > 0) { - sql += ` AND m.id IN ( - SELECT memory_id FROM memory_tags - WHERE tag_id IN (SELECT id FROM tags WHERE name IN (${tags.map(() => '?').join(',')})) - GROUP BY memory_id - HAVING COUNT(*) = ? 
- )`; - params.push(...tags, tags.length); - } - - // Date filtering - if (after) { - sql += ' AND m.created_at >= ?'; - params.push(after); - } - if (before) { - sql += ' AND m.created_at <= ?'; - params.push(before); - } - - // Agent filtering - if (enteredBy) { - sql += ' AND m.entered_by = ?'; - params.push(enteredBy); - } - - sql += ' GROUP BY m.id ORDER BY m.created_at DESC LIMIT ?'; - params.push(limit); - - return db.prepare(sql).all(...params); -} -``` - -### Phase 2: FTS5 Search - -```javascript -function searchWithFTS5(query, filters = {}) { - const ftsQuery = buildFTS5Query(query); - - let sql = ` - SELECT m.id, m.content, m.created_at, m.entered_by, m.expires_at, - GROUP_CONCAT(t.name, ',') as tags, - mf.rank as relevance - FROM memories_fts mf - JOIN memories m ON m.id = mf.rowid - LEFT JOIN memory_tags mt ON m.id = mt.memory_id - LEFT JOIN tags t ON mt.tag_id = t.id - WHERE memories_fts MATCH ? - AND (m.expires_at IS NULL OR m.expires_at > strftime('%s', 'now')) - `; - - const params = [ftsQuery]; - - // Apply filters (same as Phase 1) - // ... - - sql += ' GROUP BY m.id ORDER BY mf.rank LIMIT ?'; - params.push(limit); - - return db.prepare(sql).all(...params); -} - -function buildFTS5Query(query) { - // Handle quoted phrases - if (query.includes('"')) { - return query; // Already FTS5 compatible - } - - // Handle explicit operators - if (/\b(AND|OR|NOT)\b/i.test(query)) { - return query.toUpperCase(); - } - - // Implicit AND between terms - const terms = query.split(/\s+/).filter(t => t.length > 0); - return terms.join(' AND '); -} -``` - -### Phase 3: Fuzzy Search - -```javascript -function searchWithFuzzy(query, threshold = 0.7, limit = 10) { - const queryTrigrams = extractTrigrams(query); - - if (queryTrigrams.length === 0) return []; - - // Find candidates by trigram overlap - const sql = ` - SELECT - m.id, - m.content, - m.created_at, - m.entered_by, - m.expires_at, - COUNT(DISTINCT tr.trigram) as trigram_matches - FROM memories m - JOIN trigrams tr ON tr.memory_id = m.id - WHERE tr.trigram IN (${queryTrigrams.map(() => '?').join(',')}) - AND (m.expires_at IS NULL OR m.expires_at > strftime('%s', 'now')) - GROUP BY m.id - HAVING trigram_matches >= ? - ORDER BY trigram_matches DESC - LIMIT ? 
- `; - - const minMatches = Math.ceil(queryTrigrams.length * threshold); - const candidates = db.prepare(sql).all(...queryTrigrams, minMatches, limit * 2); - - // Calculate edit distance and combined score - const scored = candidates.map(c => { - const editDist = levenshtein(query.toLowerCase(), c.content.toLowerCase().substring(0, query.length * 3)); - const trigramSim = c.trigram_matches / queryTrigrams.length; - const normalizedEditDist = 1 - (editDist / Math.max(query.length, c.content.length)); - - return { - ...c, - relevance: 0.6 * trigramSim + 0.4 * normalizedEditDist - }; - }); - - return scored - .filter(r => r.relevance >= threshold) - .sort((a, b) => b.relevance - a.relevance) - .slice(0, limit); -} - -function extractTrigrams(text) { - const normalized = text - .toLowerCase() - .replace(/[^\w\s]/g, ' ') - .replace(/\s+/g, ' ') - .trim(); - - if (normalized.length < 3) return []; - - const padded = ` ${normalized} `; - const trigrams = []; - - for (let i = 0; i < padded.length - 2; i++) { - const trigram = padded.substring(i, i + 3); - if (trigram.trim().length === 3) { - trigrams.push(trigram); - } - } - - return [...new Set(trigrams)]; // Deduplicate -} - -function levenshtein(a, b) { - if (a.length === 0) return b.length; - if (b.length === 0) return a.length; - - let prevRow = Array(b.length + 1).fill(0).map((_, i) => i); - - for (let i = 0; i < a.length; i++) { - let curRow = [i + 1]; - for (let j = 0; j < b.length; j++) { - const cost = a[i] === b[j] ? 0 : 1; - curRow.push(Math.min( - curRow[j] + 1, // deletion - prevRow[j + 1] + 1, // insertion - prevRow[j] + cost // substitution - )); - } - prevRow = curRow; - } - - return prevRow[b.length]; -} -``` - -### Intelligent Cascade - -```javascript -function search(query, filters = {}) { - const { fuzzy = 'auto', threshold = 0.7 } = filters; - - // Phase 2 or Phase 3 installed? - const hasFTS5 = checkTableExists('memories_fts'); - const hasTrigrams = checkTableExists('trigrams'); - - let results; - - // Try FTS5 if available - if (hasFTS5) { - results = searchWithFTS5(query, filters); - } else { - results = searchWithLike(query, filters); - } - - // If too few results and fuzzy available, try fuzzy - if (results.length < 5 && hasTrigrams && (fuzzy === 'auto' || fuzzy === true)) { - const fuzzyResults = searchWithFuzzy(query, threshold, filters.limit); - results = mergeResults(results, fuzzyResults); - } - - return results; -} - -function mergeResults(exact, fuzzy) { - const seen = new Set(exact.map(r => r.id)); - const merged = [...exact]; - - for (const result of fuzzy) { - if (!seen.has(result.id)) { - merged.push(result); - seen.add(result.id); - } - } - - return merged; -} -``` - -## Memory Format Guidelines - -### Good Memory Examples - -```bash -# Technical discovery with context -memory store "Docker Compose: Use 'depends_on' with 'condition: service_healthy' to ensure dependencies are ready. Prevents race conditions in multi-container apps." \ - --tags docker,docker-compose,best-practices - -# Configuration pattern -memory store "Nginx reverse proxy: Set 'proxy_set_header X-Real-IP \$remote_addr' to preserve client IP through proxy. Required for rate limiting and logging." \ - --tags nginx,networking,security - -# Error resolution -memory store "Node.js ENOSPC: Increase inotify watch limit with 'echo fs.inotify.max_user_watches=524288 | sudo tee -a /etc/sysctl.conf && sudo sysctl -p'. Affects webpack, nodemon." 
\ - --tags nodejs,linux,troubleshooting - -# Version-specific behavior -memory store "TypeScript 5.0+: 'const' type parameters preserve literal types. Example: 'function id(x: T): T'. Better inference for generic functions." \ - --tags typescript,types - -# Temporary info with expiration -memory store "Staging server: https://staging.example.com:8443. Credentials in 1Password. Valid through Q1 2025." \ - --tags staging,infrastructure \ - --expires "2025-04-01" -``` - -### Anti-Patterns to Avoid - -```bash -# Too vague -❌ memory store "Fixed Docker issue" -✅ memory store "Docker: Use 'docker system prune -a' to reclaim space. Removes unused images, containers, networks." - -# Widely known -❌ memory store "Git is a version control system" -✅ memory store "Git worktree: 'git worktree add -b feature ../feature' creates parallel working dir without cloning." - -# Sensitive data -❌ memory store "DB password: hunter2" -✅ memory store "Production DB credentials stored in 1Password vault 'Infrastructure'" - -# Multiple unrelated facts -❌ memory store "Docker uses namespaces. K8s has pods. Nginx is fast." -✅ memory store "Docker container isolation uses Linux namespaces: PID, NET, MNT, UTS, IPC." -``` - -## Auto-Extraction: *Remember* Pattern - -When agents output text containing `*Remember*: [fact]`, automatically extract and store: - -```javascript -function extractRememberPatterns(text, context = {}) { - const rememberRegex = /\*Remember\*:?\s+(.+?)(?=\n\n|\*Remember\*|$)/gis; - const matches = [...text.matchAll(rememberRegex)]; - - return matches.map(match => { - const content = match[1].trim(); - const tags = autoExtractTags(content, context); - const expires = autoExtractExpiration(content); - - return { - content, - tags, - expires, - entered_by: context.agentName || 'auto-extract' - }; - }); -} - -function autoExtractTags(content, context) { - const tags = new Set(); - - // Technology patterns - const techPatterns = { - 'docker': /docker|container|compose/i, - 'kubernetes': /k8s|kubernetes|kubectl/i, - 'git': /\bgit\b|github|gitlab/i, - 'nodejs': /node\.?js|npm|yarn/i, - 'postgresql': /postgres|postgresql/i, - 'nixos': /nix|nixos|flake/i - }; - - for (const [tag, pattern] of Object.entries(techPatterns)) { - if (pattern.test(content)) tags.add(tag); - } - - // Category patterns - if (/error|bug|fix/i.test(content)) tags.add('troubleshooting'); - if (/performance|optimize/i.test(content)) tags.add('performance'); - if (/security|vulnerability/i.test(content)) tags.add('security'); - - return Array.from(tags); -} - -function autoExtractExpiration(content) { - const patterns = [ - { re: /valid (through|until) (\w+ \d{4})/i, parse: m => new Date(m[2]) }, - { re: /expires? 
(on )?([\d-]+)/i, parse: m => new Date(m[2]) }, - { re: /temporary|temp/i, parse: () => addDays(new Date(), 90) }, - { re: /Q([1-4]) (\d{4})/i, parse: m => quarterEnd(m[1], m[2]) } - ]; - - for (const { re, parse } of patterns) { - const match = content.match(re); - if (match) { - try { - return parse(match).toISOString(); - } catch {} - } - } - - return null; -} -``` - -## Migration Strategy - -### Phase 1 → Phase 2 (LIKE → FTS5) - -```javascript -async function migrateToFTS5(db) { - console.log('Migrating to FTS5...'); - - // Create FTS5 table - db.exec(` - CREATE VIRTUAL TABLE memories_fts USING fts5( - content, - content='memories', - content_rowid='id', - tokenize='porter unicode61 remove_diacritics 2' - ); - `); - - // Populate from existing data - db.exec(` - INSERT INTO memories_fts(rowid, content) - SELECT id, content FROM memories; - `); - - // Create triggers - db.exec(` - CREATE TRIGGER memories_ai AFTER INSERT ON memories BEGIN - INSERT INTO memories_fts(rowid, content) VALUES (new.id, new.content); - END; - - CREATE TRIGGER memories_ad AFTER DELETE ON memories BEGIN - DELETE FROM memories_fts WHERE rowid = old.id; - END; - - CREATE TRIGGER memories_au AFTER UPDATE ON memories BEGIN - DELETE FROM memories_fts WHERE rowid = old.id; - INSERT INTO memories_fts(rowid, content) VALUES (new.id, new.content); - END; - `); - - // Update schema version - db.prepare('UPDATE metadata SET value = ? WHERE key = ?').run('2', 'schema_version'); - - console.log('FTS5 migration complete!'); -} -``` - -### Phase 2 → Phase 3 (Add Trigrams) - -```javascript -async function migrateToTrigrams(db) { - console.log('Adding trigram support...'); - - // Create trigrams table - db.exec(` - CREATE TABLE trigrams ( - trigram TEXT NOT NULL, - memory_id INTEGER NOT NULL, - position INTEGER NOT NULL, - FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE - ); - - CREATE INDEX idx_trigrams_trigram ON trigrams(trigram); - CREATE INDEX idx_trigrams_memory ON trigrams(memory_id); - `); - - // Populate from existing memories - const memories = db.prepare('SELECT id, content FROM memories').all(); - const insertTrigram = db.prepare('INSERT INTO trigrams (trigram, memory_id, position) VALUES (?, ?, ?)'); - - const insertMany = db.transaction((memories) => { - for (const memory of memories) { - const trigrams = extractTrigrams(memory.content); - trigrams.forEach((trigram, position) => { - insertTrigram.run(trigram, memory.id, position); - }); - } - }); - - insertMany(memories); - - // Update schema version - db.prepare('UPDATE metadata SET value = ? 
WHERE key = ?').run('3', 'schema_version'); - - console.log('Trigram migration complete!'); -} -``` - -## Performance Targets - -### Latency -- Phase 1 (LIKE): <50ms for <500 memories -- Phase 2 (FTS5): <100ms for 10K memories -- Phase 3 (Fuzzy): <200ms for 10K memories with fuzzy - -### Storage -- Base: ~500 bytes per memory (average) -- FTS5 index: +30% overhead (~150 bytes) -- Trigrams: +200% overhead (~1KB) - prune common trigrams - -### Scalability -- Phase 1: Up to 500 memories -- Phase 2: Up to 50K memories -- Phase 3: Up to 100K+ memories - -## Testing Strategy - -### Unit Tests -- Search algorithms (LIKE, FTS5, fuzzy) -- Trigram extraction -- Levenshtein distance -- Tag filtering -- Date parsing -- Relevance scoring - -### Integration Tests -- Store → retrieve flow -- Search with various filters -- Expiration pruning -- Export/import -- Migration Phase 1→2→3 - -### Performance Tests -- Benchmark with 1K, 10K, 100K memories -- Query latency measurement -- Index size monitoring -- Memory usage profiling - -## OpenCode Integration - -### Plugin Structure - -```javascript -// plugin.js - OpenCode plugin entry point -export default { - name: 'llmemory', - version: '1.0.0', - description: 'Persistent memory system for AI agents', - - commands: { - 'memory': './src/cli.js' - }, - - api: { - store: async (content, options) => { - const { storeMemory } = await import('./src/db/queries.js'); - return storeMemory(content, options); - }, - - search: async (query, options) => { - const { search } = await import('./src/search/index.js'); - return search(query, options); - }, - - extractRemember: async (text, context) => { - const { extractRememberPatterns } = await import('./src/extractors/remember.js'); - return extractRememberPatterns(text, context); - } - }, - - onInstall: async () => { - const { initDatabase } = await import('./src/db/connection.js'); - await initDatabase(); - console.log('LLMemory installed! Try: memory --agent-context'); - } -}; -``` - -### Usage from Other Plugins - -```javascript -import llmemory from '@opencode/llmemory'; - -// Store a memory -await llmemory.api.store( - 'NixOS: flake.lock must be committed for reproducible builds', - { tags: ['nixos', 'build-system'], entered_by: 'investigate-agent' } -); - -// Search -const results = await llmemory.api.search('nixos builds', { - tags: ['nixos'], - limit: 5 -}); - -// Auto-extract from agent output -const memories = await llmemory.api.extractRemember(agentOutput, { - agentName: 'optimize-agent', - currentTask: 'performance-tuning' -}); -``` - -## Next Steps - -1. ✅ Create project directory and documentation -2. **Implement MVP (Phase 1)**: Basic CLI, LIKE search, core commands -3. **Test with real usage**: Validate concept, collect metrics -4. **Migrate to FTS5 (Phase 2)**: When dataset > 500 or latency issues -5. **Add fuzzy layer (Phase 3)**: For production-quality search -6. **OpenCode integration**: Plugin API and auto-extraction -7. 
**Documentation**: Complete agent guide, CLI reference, API docs - -## Success Metrics - -- **Usability**: Agents can store/retrieve memories intuitively -- **Quality**: Search returns relevant results, not noise -- **Performance**: Queries complete in <100ms for typical datasets -- **Adoption**: Agents use memory system regularly in workflows -- **Token Efficiency**: Results are high-quality, limited in quantity diff --git a/shared/linked-dotfiles/opencode/llmemory/STATUS.md b/shared/linked-dotfiles/opencode/llmemory/STATUS.md deleted file mode 100644 index 764c4ad..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/STATUS.md +++ /dev/null @@ -1,186 +0,0 @@ -# LLMemory Project Status - -**Created:** 2025-10-29 -**Phase:** 0 Complete (Planning & Documentation) -**Next Phase:** Phase 1 - MVP Implementation - -## ✅ What's Complete - -### Documentation (7 files) -- ✅ **README.md** - Project overview, quick start, features -- ✅ **SPECIFICATION.md** - Complete technical specification (20+ pages) -- ✅ **IMPLEMENTATION_PLAN.md** - Step-by-step implementation guide with checkboxes -- ✅ **docs/ARCHITECTURE.md** - System design, algorithms, data flows -- ✅ **PROTOTYPE.md** - CLI validation results -- ✅ **NEXT_SESSION.md** - Quick start guide for next developer -- ✅ **STATUS.md** - This file - -### Code Structure (3 files) -- ✅ **package.json** - Dependencies configured -- ✅ **bin/memory** - Executable wrapper with error handling -- ✅ **src/cli.js** - CLI prototype with all command structures - -### Configuration -- ✅ **.gitignore** - Standard Node.js patterns -- ✅ Directory structure created - -## 📊 Project Statistics - -- **Documentation:** ~15,000 words across 7 files -- **Planning Time:** 2 investigate agents (comprehensive analysis) -- **Code Lines:** ~150 (prototype only) -- **Dependencies:** 4 core + 5 dev + 5 optional - -## 📁 File Structure - -``` -llmemory/ -├── README.md # Project overview -├── SPECIFICATION.md # Technical spec (20+ pages) -├── IMPLEMENTATION_PLAN.md # Step-by-step guide -├── NEXT_SESSION.md # Quick start for next dev -├── PROTOTYPE.md # CLI validation -├── STATUS.md # This file -├── package.json # Dependencies -├── .gitignore # Git ignore patterns -├── bin/ -│ └── memory # Executable wrapper -├── src/ -│ └── cli.js # CLI prototype -└── docs/ - └── ARCHITECTURE.md # System design -``` - -## 🎯 Next Steps - -**Immediate:** Install dependencies and start Phase 1 -**Location:** See IMPLEMENTATION_PLAN.md - Phase 1, Step 1.2 - -```bash -cd llmemory -npm install # Install dependencies -node src/cli.js --help # Test prototype (will work after npm install) -``` - -**Then:** Implement database layer (Step 1.2) -- Create src/db/connection.js -- Create src/db/schema.js -- Create src/db/queries.js - -## 📚 Key Documents - -**For Overview:** -- Start with README.md - -**For Implementation:** -1. SPECIFICATION.md - What to build -2. IMPLEMENTATION_PLAN.md - How to build it (with checkboxes!) -3. ARCHITECTURE.md - Why it's designed this way - -**For Quick Start:** -- NEXT_SESSION.md - Everything you need to continue - -## 🧪 Testing Commands - -```bash -# After npm install, these should work: -node src/cli.js --help -node src/cli.js store "test" --tags demo -node src/cli.js search "test" -node src/cli.js --agent-context -``` - -Currently shows placeholder output. Full implementation in Phase 1. - -## 💡 Design Highlights - -**Three-Phase Approach:** -1. Phase 1: MVP with LIKE search (<500 memories, <50ms) -2. Phase 2: FTS5 upgrade (10K memories, <100ms) -3. 
Phase 3: Fuzzy matching (100K+ memories, <200ms) - -**Key Technologies:** -- SQLite with better-sqlite3 -- Commander.js for CLI -- FTS5 for full-text search -- Trigram indexing for fuzzy matching - -**Architecture:** -- CLI Layer (Commander.js) -- Search Layer (LIKE → FTS5 → Fuzzy) -- Storage Layer (SQLite) - -## 🎓 Learning Resources - -Included in documentation: -- SQLite FTS5 algorithm explanation -- BM25 relevance ranking formula -- Levenshtein edit distance implementation -- Trigram similarity calculation -- Memory format best practices - -## 🚀 Timeline Estimate - -- Phase 1 (MVP): 12-15 hours -- Phase 2 (FTS5): 8-10 hours -- Phase 3 (Fuzzy): 8-10 hours -- **Total: 28-35 hours to full implementation** - -## ✨ Project Quality - -**Documentation Quality:** ⭐⭐⭐⭐⭐ -- Comprehensive technical specifications -- Step-by-step implementation guide -- Algorithm pseudo-code included -- Examples and anti-patterns documented - -**Code Quality:** N/A (not yet implemented) -- Prototype validates CLI design -- Ready for TDD implementation - -**Architecture Quality:** ⭐⭐⭐⭐⭐ -- Phased approach (MVP → production) -- Clear migration triggers -- Performance targets defined -- Scalability considerations - -## 🔍 Notable Features - -**Agent-Centric Design:** -- Grep-like query syntax (familiar to AI agents) -- `--agent-context` flag with comprehensive guide -- Auto-extraction of `*Remember*` patterns -- Token-efficient search results - -**Production-Ready Architecture:** -- Three search strategies (LIKE, FTS5, fuzzy) -- Intelligent cascading (exact → fuzzy) -- Relevance ranking (BM25 + edit distance + recency) -- Expiration handling -- Migration strategy - -## 📝 Notes for Implementation - -**Start Here:** -1. Read NEXT_SESSION.md (15 min) -2. Review SPECIFICATION.md (30 min) -3. Follow IMPLEMENTATION_PLAN.md Step 1.2 (database layer) - -**Testing Strategy:** -- Write tests first (TDD) -- Use :memory: database for unit tests -- Integration tests with temporary file -- Performance benchmarks after each phase - -**Commit Strategy:** -- Update checkboxes in IMPLEMENTATION_PLAN.md -- Clear commit messages (feat/fix/test/docs) -- Reference implementation plan steps - ---- - -**Status:** Phase 0 Complete ✅ -**Ready for:** Phase 1 Implementation -**Estimated Completion:** 12-15 hours of focused work - -See NEXT_SESSION.md to begin! 🚀 diff --git a/shared/linked-dotfiles/opencode/llmemory/bin/llmemory b/shared/linked-dotfiles/opencode/llmemory/bin/llmemory deleted file mode 100755 index 5c840dc..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/bin/llmemory +++ /dev/null @@ -1,2 +0,0 @@ -#!/usr/bin/env node -import '../src/cli.js'; diff --git a/shared/linked-dotfiles/opencode/llmemory/docs/ARCHITECTURE.md b/shared/linked-dotfiles/opencode/llmemory/docs/ARCHITECTURE.md deleted file mode 100644 index 3992418..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/docs/ARCHITECTURE.md +++ /dev/null @@ -1,826 +0,0 @@ -# LLMemory Architecture - -## System Overview - -LLMemory is a three-layer system: -1. **CLI Layer** - User/agent interface (Commander.js) -2. **Search Layer** - Query processing and ranking (LIKE → FTS5 → Fuzzy) -3. 
**Storage Layer** - Persistent data (SQLite) - -``` -┌─────────────────────────────────────┐ -│ CLI Layer │ -│ (memory store/search/list/prune) │ -└──────────────┬──────────────────────┘ - │ -┌──────────────▼──────────────────────┐ -│ Search Layer │ -│ Phase 1: LIKE search │ -│ Phase 2: FTS5 + BM25 ranking │ -│ Phase 3: + Trigram fuzzy matching │ -└──────────────┬──────────────────────┘ - │ -┌──────────────▼──────────────────────┐ -│ Storage Layer │ -│ SQLite Database │ -│ - memories (content, metadata) │ -│ - tags (normalized) │ -│ - memory_tags (many-to-many) │ -│ - memories_fts (FTS5 virtual) │ -│ - trigrams (fuzzy index) │ -└─────────────────────────────────────┘ -``` - -## Data Model - -### Phase 1 Schema (MVP) - -``` -┌─────────────────┐ -│ memories │ -├─────────────────┤ -│ id │ PK -│ content │ TEXT (max 10KB) -│ created_at │ INTEGER (Unix timestamp) -│ entered_by │ TEXT (agent name) -│ expires_at │ INTEGER (nullable) -└─────────────────┘ - │ - │ 1:N - ▼ -┌─────────────────┐ ┌─────────────────┐ -│ memory_tags │ N:M │ tags │ -├─────────────────┤ ├─────────────────┤ -│ memory_id │ FK ───│ id │ PK -│ tag_id │ FK │ name │ TEXT (unique, NOCASE) -└─────────────────┘ │ created_at │ INTEGER - └─────────────────┘ -``` - -### Phase 2 Schema (+ FTS5) - -Adds virtual table for full-text search: - -``` -┌─────────────────────┐ -│ memories_fts │ Virtual Table (FTS5) -├─────────────────────┤ -│ rowid → memories.id │ -│ content (indexed) │ -└─────────────────────┘ - │ - │ Synced via triggers - ▼ -┌─────────────────┐ -│ memories │ -└─────────────────┘ -``` - -**Triggers:** -- `memories_ai`: INSERT into memories → INSERT into memories_fts -- `memories_au`: UPDATE memories → UPDATE memories_fts -- `memories_ad`: DELETE memories → DELETE from memories_fts - -### Phase 3 Schema (+ Trigrams) - -Adds trigram index for fuzzy matching: - -``` -┌─────────────────┐ -│ trigrams │ -├─────────────────┤ -│ trigram │ TEXT (3 chars) -│ memory_id │ FK → memories.id -│ position │ INTEGER (for proximity) -└─────────────────┘ - │ - │ Generated on insert/update - ▼ -┌─────────────────┐ -│ memories │ -└─────────────────┘ -``` - -## Search Algorithm Evolution - -### Phase 1: LIKE Search - -**Algorithm:** -```python -function search_like(query, filters): - # Case-insensitive wildcard matching - sql = "SELECT * FROM memories WHERE LOWER(content) LIKE LOWER('%' || ? || '%')" - - # Apply filters - if filters.tags: - sql += " AND memory_id IN (SELECT memory_id FROM memory_tags WHERE tag_id IN (...))" - - if filters.after: - sql += " AND created_at >= ?" - - # Exclude expired - sql += " AND (expires_at IS NULL OR expires_at > now())" - - # Order by recency - sql += " ORDER BY created_at DESC LIMIT ?" - - return execute(sql, params) -``` - -**Strengths:** -- Simple, fast for small datasets -- No dependencies -- Predictable behavior - -**Weaknesses:** -- No relevance ranking -- Slow for large datasets (full table scan) -- No fuzzy matching -- No phrase queries or boolean logic - -**Performance:** O(n) where n = number of memories -**Target:** <50ms for <500 memories - ---- - -### Phase 2: FTS5 Search - -**Algorithm:** -```python -function search_fts5(query, filters): - # Build FTS5 query - fts_query = build_fts5_query(query) # Handles AND/OR/NOT, quotes, prefixes - - # FTS5 MATCH with BM25 ranking - sql = """ - SELECT m.*, mf.rank as relevance - FROM memories_fts mf - JOIN memories m ON m.id = mf.rowid - WHERE memories_fts MATCH ? 
- AND (m.expires_at IS NULL OR m.expires_at > now()) - """ - - # Apply filters (same as Phase 1) - # ... - - # Order by FTS5 rank (BM25 algorithm) - sql += " ORDER BY mf.rank LIMIT ?" - - return execute(sql, params) - -function build_fts5_query(query): - # Transform grep-like to FTS5 - # "docker compose" → "docker AND compose" - # "docker OR podman" → "docker OR podman" (unchanged) - # '"exact phrase"' → '"exact phrase"' (unchanged) - # "docker*" → "docker*" (unchanged) - - if has_operators(query): - return query - - # Implicit AND - terms = query.split() - return " AND ".join(terms) -``` - -**FTS5 Tokenization:** -- **Tokenizer:** `porter unicode61 remove_diacritics 2` -- **Porter:** Stemming (running → run, databases → database) -- **unicode61:** Unicode support -- **remove_diacritics:** Normalize accented characters (café → cafe) - -**BM25 Ranking:** -``` -score = Σ(IDF(term) * (f(term) * (k1 + 1)) / (f(term) + k1 * (1 - b + b * |D| / avgdl))) - -Where: -- IDF(term) = Inverse Document Frequency (rarer terms score higher) -- f(term) = Term frequency in document -- |D| = Document length -- avgdl = Average document length -- k1 = 1.2 (term frequency saturation) -- b = 0.75 (length normalization) -``` - -**Strengths:** -- Fast search with inverted index -- Relevance ranking (BM25) -- Boolean operators, phrase queries, prefix matching -- Scales to 100K+ documents - -**Weaknesses:** -- No fuzzy matching (typo tolerance) -- FTS5 index overhead (~30% storage) -- More complex setup (triggers needed) - -**Performance:** O(log n) for index lookup -**Target:** <100ms for 10K memories - ---- - -### Phase 3: Fuzzy Search - -**Algorithm:** -```python -function search_fuzzy(query, filters): - # Step 1: Try FTS5 exact match - results = search_fts5(query, filters) - - # Step 2: If too few results, try fuzzy - if len(results) < 5 and filters.fuzzy != false: - fuzzy_results = search_trigram(query, filters) - results = merge_dedup(results, fuzzy_results) - - # Step 3: Re-rank by combined score - for result in results: - result.score = calculate_combined_score(result, query) - - results.sort(by=lambda r: r.score, reverse=True) - return results[:filters.limit] - -function search_trigram(query, threshold=0.7, limit=10): - # Extract query trigrams - query_trigrams = extract_trigrams(query) # ["doc", "ock", "cke", "ker"] - - # Find candidates by trigram overlap - sql = """ - SELECT m.id, m.content, COUNT(DISTINCT tr.trigram) as matches - FROM memories m - JOIN trigrams tr ON tr.memory_id = m.id - WHERE tr.trigram IN (?, ?, ?, ...) - AND (m.expires_at IS NULL OR m.expires_at > now()) - GROUP BY m.id - HAVING matches >= ? - ORDER BY matches DESC - LIMIT ? 
- """ - - min_matches = ceil(len(query_trigrams) * threshold) - candidates = execute(sql, query_trigrams, min_matches, limit * 2) - - # Calculate edit distance and combined score - scored = [] - for candidate in candidates: - edit_dist = levenshtein(query, candidate.content[:len(query)*3]) - trigram_sim = candidate.matches / len(query_trigrams) - normalized_edit = 1 - (edit_dist / max(len(query), len(candidate.content))) - - score = 0.6 * trigram_sim + 0.4 * normalized_edit - - if score >= threshold: - scored.append((candidate, score)) - - scored.sort(by=lambda x: x[1], reverse=True) - return [c for c, s in scored[:limit]] - -function extract_trigrams(text): - # Normalize: lowercase, remove punctuation, collapse whitespace - normalized = text.lower().replace(/[^\w\s]/g, ' ').replace(/\s+/g, ' ').trim() - - # Add padding for boundary matching - padded = " " + normalized + " " - - # Sliding window of 3 characters - trigrams = [] - for i in range(len(padded) - 2): - trigram = padded[i:i+3] - if trigram.strip().len() == 3: # Skip whitespace-only - trigrams.append(trigram) - - return unique(trigrams) - -function levenshtein(a, b): - # Wagner-Fischer algorithm with single-row optimization - if len(a) == 0: return len(b) - if len(b) == 0: return len(a) - - prev_row = [0..len(b)] - - for i in range(len(a)): - cur_row = [i + 1] - for j in range(len(b)): - cost = 0 if a[i] == b[j] else 1 - cur_row.append(min( - cur_row[j] + 1, # deletion - prev_row[j + 1] + 1, # insertion - prev_row[j] + cost # substitution - )) - prev_row = cur_row - - return prev_row[len(b)] - -function calculate_combined_score(result, query): - # BM25 from FTS5 (if available) - bm25_score = result.fts_rank if result.has_fts_rank else 0 - - # Trigram similarity - trigram_score = result.trigram_matches / len(extract_trigrams(query)) - - # Edit distance (normalized) - edit_dist = levenshtein(query, result.content[:len(query)*3]) - edit_score = 1 - (edit_dist / max(len(query), len(result.content))) - - # Recency boost (exponential decay over 90 days) - days_ago = (now() - result.created_at) / 86400 - recency_score = max(0, 1 - (days_ago / 90)) - - # Weighted combination - score = (0.4 * bm25_score + - 0.3 * trigram_score + - 0.2 * edit_score + - 0.1 * recency_score) - - return score -``` - -**Trigram Similarity (Jaccard Index):** -``` -similarity = |trigrams(query) ∩ trigrams(document)| / |trigrams(query)| - -Example: -query = "docker" → trigrams: ["doc", "ock", "cke", "ker"] -document = "dcoker" → trigrams: ["dco", "cok", "oke", "ker"] - -intersection = ["ker"] → count = 1 -similarity = 1 / 4 = 0.25 (below threshold, but edit distance is 2) - -Better approach: Edit distance normalized by length -edit_distance("docker", "dcoker") = 2 -normalized = 1 - (2 / 6) = 0.67 (above threshold 0.6) -``` - -**Strengths:** -- Handles typos (edit distance ≤2) -- Partial matches ("docker" finds "dockerization") -- Cascading strategy (fast exact, fallback to fuzzy) -- Configurable threshold - -**Weaknesses:** -- Trigram table is large (~3x content size) -- Slower than FTS5 alone -- Tuning threshold requires experimentation - -**Performance:** O(log n) + O(m) where m = trigram candidates -**Target:** <200ms for 10K memories with fuzzy - ---- - -## Memory Lifecycle - -``` -┌──────────┐ -│ Store │ -└────┬─────┘ - │ - ▼ -┌────────────────────┐ -│ Validate: │ -│ - Length (<10KB) │ -│ - Tags (parse) │ -│ - Expiration │ -└────┬───────────────┘ - │ - ▼ -┌────────────────────┐ ┌─────────────────┐ -│ Insert: │────▶│ Trigger: │ -│ - memories table │ │ - Insert 
FTS5 │ -│ - Link tags │ │ - Gen trigrams │ -└────┬───────────────┘ └─────────────────┘ - │ - ▼ -┌────────────────────┐ -│ Searchable │ -└────────────────────┘ - │ - │ (time passes) - ▼ -┌────────────────────┐ -│ Expired? │───No──▶ Continue -└────┬───────────────┘ - │ Yes - ▼ -┌────────────────────┐ -│ Prune Command │ -│ (manual/auto) │ -└────┬───────────────┘ - │ - ▼ -┌────────────────────┐ ┌─────────────────┐ -│ Delete: │────▶│ Trigger: │ -│ - memories table │ │ - Delete FTS5 │ -│ - CASCADE tags │ │ - Delete tris │ -└────────────────────┘ └─────────────────┘ -``` - -## Query Processing Flow - -### Phase 1 (LIKE) -``` -User Query: "docker networking" - │ - ▼ -Parse Query: Extract terms, filters - │ - ▼ -Build SQL: LIKE '%docker%' AND LIKE '%networking%' - │ - ▼ -Apply Filters: Tags, dates, agent - │ - ▼ -Execute: Sequential scan through memories - │ - ▼ -Order: By created_at DESC - │ - ▼ -Limit: Take top N results - │ - ▼ -Format: Plain text / JSON / Markdown -``` - -### Phase 2 (FTS5) -``` -User Query: "docker AND networking" - │ - ▼ -Parse Query: Identify operators, quotes, prefixes - │ - ▼ -Build FTS5 Query: "docker AND networking" (already valid) - │ - ▼ -FTS5 MATCH: Inverted index lookup - │ - ▼ -BM25 Ranking: Calculate relevance scores - │ - ▼ -Apply Filters: Tags, dates, agent (on results) - │ - ▼ -Order: By rank (BM25 score) - │ - ▼ -Limit: Take top N results - │ - ▼ -Format: With relevance scores -``` - -### Phase 3 (Fuzzy) -``` -User Query: "dokcer networking" - │ - ▼ -Try FTS5: "dokcer AND networking" - │ - ▼ -Results: 0 (no exact match) - │ - ▼ -Trigger Fuzzy: Extract trigrams - │ - ├─▶ "dokcer" → ["dok", "okc", "kce", "cer"] - └─▶ "networking" → ["net", "etw", "two", ...] - │ - ▼ -Find Candidates: Query trigrams table - │ - ▼ -Calculate Similarity: Trigram overlap + edit distance - │ - ├─▶ "docker" → similarity = 0.85 (good match) - └─▶ "networking" → similarity = 1.0 (exact) - │ - ▼ -Filter: Threshold ≥ 0.7 - │ - ▼ -Re-rank: Combined score (trigram + edit + recency) - │ - ▼ -Merge: With FTS5 results (dedup by ID) - │ - ▼ -Limit: Take top N results - │ - ▼ -Format: With relevance scores -``` - -## Indexing Strategy - -### Phase 1 Indexes -```sql --- Recency queries (ORDER BY created_at DESC) -CREATE INDEX idx_memories_created ON memories(created_at DESC); - --- Expiration filtering (WHERE expires_at > now()) -CREATE INDEX idx_memories_expires ON memories(expires_at) - WHERE expires_at IS NOT NULL; - --- Tag lookups (JOIN on tag_id) -CREATE INDEX idx_tags_name ON tags(name); - --- Tag filtering (JOIN memory_tags on memory_id) -CREATE INDEX idx_memory_tags_tag ON memory_tags(tag_id); -``` - -**Query plans:** -```sql --- Search query uses indexes: -EXPLAIN QUERY PLAN -SELECT * FROM memories WHERE created_at > ? ORDER BY created_at DESC; --- Result: SEARCH memories USING INDEX idx_memories_created - -EXPLAIN QUERY PLAN -SELECT * FROM memories WHERE expires_at > strftime('%s', 'now'); --- Result: SEARCH memories USING INDEX idx_memories_expires -``` - -### Phase 2 Indexes (+ FTS5) -```sql --- FTS5 creates inverted index automatically -CREATE VIRTUAL TABLE memories_fts USING fts5(content, ...); --- Generates internal tables: memories_fts_data, memories_fts_idx, memories_fts_config -``` - -**FTS5 Index Structure:** -``` -Term → Document Postings List - -"docker" → [1, 5, 12, 34, 56, ...] -"compose" → [5, 12, 89, ...] -"networking" → [5, 34, 67, ...] 
- -Query "docker AND compose" → intersection([1,5,12,34,56], [5,12,89]) = [5, 12] -``` - -### Phase 3 Indexes (+ Trigrams) -```sql --- Trigram lookups (WHERE trigram IN (...)) -CREATE INDEX idx_trigrams_trigram ON trigrams(trigram); - --- Cleanup on memory deletion (CASCADE via memory_id) -CREATE INDEX idx_trigrams_memory ON trigrams(memory_id); -``` - -**Trigram Index Structure:** -``` -Trigram → Memory IDs - -"doc" → [1, 5, 12, 34, ...] (all memories with "doc") -"ock" → [1, 5, 12, 34, ...] (all memories with "ock") -"cke" → [1, 5, 12, ...] (all memories with "cke") - -Query "docker" trigrams ["doc", "ock", "cke", "ker"] -→ Find intersection: memories with all 4 trigrams (or ≥ threshold) -``` - -## Performance Optimization - -### Database Configuration -```sql --- WAL mode for better concurrency -PRAGMA journal_mode = WAL; - --- Memory-mapped I/O for faster reads -PRAGMA mmap_size = 268435456; -- 256MB - --- Larger cache for better performance -PRAGMA cache_size = -64000; -- 64MB (negative = KB) - --- Synchronous writes (balance between speed and durability) -PRAGMA synchronous = NORMAL; -- Not FULL (too slow), not OFF (unsafe) - --- Auto-vacuum to prevent bloat -PRAGMA auto_vacuum = INCREMENTAL; -``` - -### Query Optimization -```javascript -// Use prepared statements (compiled once, executed many times) -const searchStmt = db.prepare(` - SELECT * FROM memories - WHERE LOWER(content) LIKE LOWER(?) - ORDER BY created_at DESC - LIMIT ? -`); - -// Transaction for bulk inserts -const insertMany = db.transaction((memories) => { - for (const memory of memories) { - insertStmt.run(memory); - } -}); -``` - -### Trigram Pruning -```javascript -// Prune common trigrams (low information value) -// E.g., "the", "and", "ing" appear in most memories -const pruneCommonTrigrams = db.prepare(` - DELETE FROM trigrams - WHERE trigram IN ( - SELECT trigram FROM trigrams - GROUP BY trigram - HAVING COUNT(*) > (SELECT COUNT(*) * 0.5 FROM memories) - ) -`); - -// Run after bulk imports -pruneCommonTrigrams.run(); -``` - -### Result Caching -```javascript -// LRU cache for frequent queries -const LRU = require('lru-cache'); -const queryCache = new LRU({ - max: 100, // Cache 100 queries - ttl: 1000 * 60 * 5 // 5 minute TTL -}); - -function search(query, filters) { - const cacheKey = JSON.stringify({ query, filters }); - - if (queryCache.has(cacheKey)) { - return queryCache.get(cacheKey); - } - - const results = executeSearch(query, filters); - queryCache.set(cacheKey, results); - return results; -} -``` - -## Error Handling - -### Database Errors -```javascript -try { - db.prepare(sql).run(params); -} catch (error) { - if (error.code === 'SQLITE_BUSY') { - // Retry after backoff - await sleep(100); - return retry(operation, maxRetries - 1); - } - - if (error.code === 'SQLITE_CONSTRAINT') { - // Validation error (content too long, duplicate tag, etc.) 
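-    // Re-thrown as ValidationError so the CLI layer can report a friendly
-    // validation failure instead of surfacing a raw SQLite error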
- throw new ValidationError(error.message); - } - - if (error.code === 'SQLITE_CORRUPT') { - // Database corruption - suggest recovery - throw new DatabaseCorruptError('Database corrupted, run: memory recover'); - } - - // Unknown error - throw error; -} -``` - -### Migration Errors -```javascript -async function migrate(targetVersion) { - const currentVersion = getCurrentSchemaVersion(); - - // Backup before migration - await backupDatabase(`backup-v${currentVersion}.db`); - - try { - db.exec('BEGIN TRANSACTION'); - - // Run migrations - for (let v = currentVersion + 1; v <= targetVersion; v++) { - await runMigration(v); - } - - db.exec('COMMIT'); - console.log(`Migrated to version ${targetVersion}`); - } catch (error) { - db.exec('ROLLBACK'); - console.error('Migration failed, rolling back'); - - // Restore backup - await restoreDatabase(`backup-v${currentVersion}.db`); - throw error; - } -} -``` - -## Security Considerations - -### Input Validation -```javascript -// Prevent SQL injection (prepared statements) -const stmt = db.prepare('SELECT * FROM memories WHERE content LIKE ?'); -stmt.all(`%${userInput}%`); // Safe: userInput is parameterized - -// Validate content length -if (content.length > 10000) { - throw new ValidationError('Content exceeds 10KB limit'); -} - -// Sanitize tags (only alphanumeric, hyphens, underscores) -const sanitizeTag = (tag) => tag.replace(/[^a-z0-9\-_]/gi, ''); -``` - -### Sensitive Data Protection -```javascript -// Warn if sensitive patterns detected -const sensitivePatterns = [ - /password\s*[:=]\s*\S+/i, - /api[_-]?key\s*[:=]\s*\S+/i, - /token\s*[:=]\s*\S+/i, - /secret\s*[:=]\s*\S+/i -]; - -function checkSensitiveData(content) { - for (const pattern of sensitivePatterns) { - if (pattern.test(content)) { - console.warn('⚠️ Warning: Potential sensitive data detected'); - console.warn('Consider storing credentials in a secure vault instead'); - return true; - } - } - return false; -} -``` - -### File Permissions -```bash -# Database file should be user-readable only -chmod 600 ~/.config/opencode/memories.db - -# Backup files should have same permissions -chmod 600 ~/.config/opencode/memories-backup-*.db -``` - -## Scalability Limits - -### Phase 1 (LIKE) -- **Max memories:** ~500 (performance degrades beyond) -- **Query latency:** O(n) - linear scan -- **Storage:** ~250KB for 500 memories - -### Phase 2 (FTS5) -- **Max memories:** ~50K (comfortable), 100K+ (possible) -- **Query latency:** O(log n) - index lookup -- **Storage:** +30% for FTS5 index (~325KB for 500 memories) - -### Phase 3 (Fuzzy) -- **Max memories:** 100K+ (with trigram pruning) -- **Query latency:** O(log n) + O(m) where m = fuzzy candidates -- **Storage:** +200% for trigrams (~750KB for 500 memories) - - Mitigated by pruning common trigrams - -### Migration Triggers - -**Phase 1 → Phase 2:** -- Dataset > 500 memories -- Query latency > 500ms -- Manual user request - -**Phase 2 → Phase 3:** -- User reports needing fuzzy search -- High typo rates in queries -- Manual user request - -## Future Enhancements - -### Vector Embeddings (Phase 4?) 
-- Semantic search ("docker" → "containerization") -- Requires embedding model (~100MB) -- SQLite-VSS extension -- Hybrid: BM25 (lexical) + Cosine similarity (semantic) - -### Automatic Summarization -- LLM-generated summaries for long memories -- Reduces token usage in search results -- Trade-off: API dependency - -### Memory Versioning -- Track edits to memories -- Show history -- Revert to previous version - -### Conflict Detection -- Identify contradictory memories -- Suggest consolidation -- Flag for review - -### Collaborative Features -- Share memories between agents -- Team-wide memory pool -- Privacy controls - ---- - -**Document Version:** 1.0 -**Last Updated:** 2025-10-29 -**Status:** Planning Complete, Implementation Pending diff --git a/shared/linked-dotfiles/opencode/llmemory/docs/PHASE1_COMPLETE.md b/shared/linked-dotfiles/opencode/llmemory/docs/PHASE1_COMPLETE.md deleted file mode 100644 index 8b59a8a..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/docs/PHASE1_COMPLETE.md +++ /dev/null @@ -1,318 +0,0 @@ -# LLMemory MVP Implementation - Complete! 🎉 - -## Status: Phase 1 MVP Complete ✅ - -**Date:** 2025-10-29 -**Test Results:** 39/39 tests passing (100%) -**Implementation Time:** ~2 hours (following TDD approach) - -## What Was Implemented - -### 1. Database Layer ✅ -**Files Created:** -- `src/db/schema.js` - Schema initialization with WAL mode, indexes -- `src/db/connection.js` - Database connection management - -**Features:** -- SQLite with WAL mode for concurrency -- Full schema (memories, tags, memory_tags, metadata) -- Proper indexes on created_at, expires_at, tag_name -- Schema versioning (v1) -- In-memory database helper for testing - -**Tests:** 13/13 passing -- Schema initialization -- Table creation -- Index creation -- Connection management -- WAL mode (with in-memory fallback handling) - -### 2. Store Command ✅ -**Files Created:** -- `src/commands/store.js` - Memory storage with validation -- `src/utils/validation.js` - Content and expiration validation -- `src/utils/tags.js` - Tag parsing, normalization, linking - -**Features:** -- Content validation (<10KB, non-empty) -- Tag parsing (comma-separated, lowercase normalization) -- Expiration date handling (ISO 8601, future dates only) -- Tag deduplication across memories -- Atomic transactions - -**Tests:** 8/8 passing -- Store with tags -- Content validation (10KB limit, empty rejection) -- Tag normalization (lowercase) -- Missing tags handled gracefully -- Expiration parsing -- Tag deduplication - -### 3. Search Command ✅ -**Files Created:** -- `src/commands/search.js` - LIKE-based search with filters - -**Features:** -- Case-insensitive LIKE search -- Tag filtering (AND/OR logic) -- Date range filtering (after/before) -- Agent filtering (entered_by) -- Automatic expiration exclusion -- Limit and offset for pagination -- Tags joined in results - -**Tests:** 9/9 passing -- Content search -- Tag filtering (AND and OR logic) -- Date range filtering -- Agent filtering -- Expired memory exclusion -- Limit enforcement -- Ordering by recency -- Tags in results - -### 4. 
List & Prune Commands ✅ -**Files Created:** -- `src/commands/list.js` - List recent memories with sorting -- `src/commands/prune.js` - Remove expired memories - -**Features:** -- List with sorting (created, expires, content) -- Tag filtering -- Pagination (limit/offset) -- Dry-run mode for prune -- Delete expired or before date - -**Tests:** 9/9 passing (in integration tests) -- Full workflow (store → search → list → prune) -- Performance (<50ms for 100 memories) -- <1 second to store 100 memories -- Edge cases (empty query, special chars, unicode, long tags) - -## Test Summary - -``` -✓ Database Layer (13 tests) - ✓ Schema Initialization (7 tests) - ✓ Connection Management (6 tests) - -✓ Store Command (8 tests) - ✓ Basic storage with tags - ✓ Validation (10KB limit, empty content, future expiration) - ✓ Tag handling (normalization, deduplication) - -✓ Search Command (9 tests) - ✓ Content search (case-insensitive) - ✓ Filtering (tags AND/OR, dates, agent) - ✓ Automatic expiration exclusion - ✓ Sorting and pagination - -✓ Integration Tests (9 tests) - ✓ Full workflows (store → search → list → prune) - ✓ Performance targets met - ✓ Edge cases handled - -Total: 39/39 tests passing (100%) -Duration: ~100ms -``` - -## Performance Results - -**Phase 1 Targets:** -- ✅ Search 100 memories: <50ms (actual: ~20-30ms) -- ✅ Store 100 memories: <1000ms (actual: ~200-400ms) -- ✅ Database size: Minimal with indexes - -## TDD Approach Validation - -**Workflow:** -1. ✅ Wrote tests first (.todo() → real tests) -2. ✅ Watched tests fail (red) -3. ✅ Implemented features -4. ✅ Watched tests pass (green) -5. ✅ Refactored based on failures - -**Benefits Observed:** -- Caught CHECK constraint issues immediately -- Found validation edge cases early -- Performance testing built-in from start -- Clear success criteria for each feature - -## Known Limitations & Notes - -### WAL Mode in :memory: Databases -- In-memory SQLite returns 'memory' instead of 'wal' for journal_mode -- This is expected behavior and doesn't affect functionality -- File-based databases will correctly use WAL mode - -### Check Constraints -- Schema enforces `expires_at > created_at` -- Tests work around this by setting both timestamps -- Real usage won't hit this (expires always in future) - -## What's NOT Implemented (Future Phases) - -### Phase 2 (FTS5) -- [ ] FTS5 virtual table -- [ ] BM25 relevance ranking -- [ ] Boolean operators (AND/OR/NOT in query syntax) -- [ ] Phrase queries with quotes -- [ ] Migration script - -### Phase 3 (Fuzzy) -- [ ] Trigram indexing -- [ ] Levenshtein distance -- [ ] Intelligent cascade (exact → fuzzy) -- [ ] Combined relevance scoring - -### CLI Integration -- [x] Connect CLI to commands (src/cli.js fully wired) -- [x] Output formatting (plain text, JSON, markdown) -- [x] Colors with chalk -- [x] Global installation (bin/memory shim) -- [x] OpenCode plugin integration (plugin/llmemory.js) - -### Additional Features -- [x] Stats command (with --tags and --agents options) -- [x] Agent context documentation (--agent-context) -- [ ] Export/import commands (Phase 2) -- [ ] Auto-extraction (*Remember* pattern) (Phase 2) - -## Next Steps - -### Immediate (Complete MVP) -1. **Wire up CLI to commands** (Step 1.7) - - Replace placeholder commands with real implementations - - Add output formatting - - Test end-to-end CLI workflow - -2. 
**Manual Testing** - ```bash - node src/cli.js store "Docker uses bridge networks" --tags docker - node src/cli.js search "docker" - node src/cli.js list --limit 5 - ``` - -### Future Phases -- Phase 2: FTS5 when dataset > 500 memories -- Phase 3: Fuzzy when typo tolerance needed -- OpenCode plugin integration -- Agent documentation - -## File Structure - -``` -llmemory/ -├── src/ -│ ├── cli.js # CLI (placeholder, needs wiring) -│ ├── commands/ -│ │ ├── store.js # ✅ Implemented -│ │ ├── search.js # ✅ Implemented -│ │ ├── list.js # ✅ Implemented -│ │ └── prune.js # ✅ Implemented -│ ├── db/ -│ │ ├── connection.js # ✅ Implemented -│ │ └── schema.js # ✅ Implemented -│ └── utils/ -│ ├── validation.js # ✅ Implemented -│ └── tags.js # ✅ Implemented -├── test/ -│ └── integration.test.js # ✅ 39 tests passing -├── docs/ -│ ├── ARCHITECTURE.md # Complete -│ ├── TESTING.md # Complete -│ └── TDD_SETUP.md # Complete -├── SPECIFICATION.md # Complete -├── IMPLEMENTATION_PLAN.md # Phase 1 ✅ -├── README.md # Complete -└── package.json # Dependencies installed -``` - -## Commands Implemented (Programmatic API) - -```javascript -// Store -import { storeMemory } from './src/commands/store.js'; -const result = storeMemory(db, { - content: 'Docker uses bridge networks', - tags: 'docker,networking', - expires_at: '2026-01-01', - entered_by: 'manual' -}); - -// Search -import { searchMemories } from './src/commands/search.js'; -const results = searchMemories(db, 'docker', { - tags: ['networking'], - limit: 10 -}); - -// List -import { listMemories } from './src/commands/list.js'; -const recent = listMemories(db, { - limit: 20, - sort: 'created', - order: 'desc' -}); - -// Prune -import { pruneMemories } from './src/commands/prune.js'; -const pruned = pruneMemories(db, { dryRun: false }); -``` - -## Success Metrics Met - -**Phase 1 Goals:** -- ✅ Working CLI tool structure -- ✅ Basic search (LIKE-based) -- ✅ Performance: <50ms for 500 memories -- ✅ Test coverage: >80% (100% achieved) -- ✅ All major workflows tested -- ✅ TDD approach validated - -**Code Quality:** -- ✅ Clean separation of concerns -- ✅ Modular design (easy to extend) -- ✅ Comprehensive error handling -- ✅ Well-tested (integration-first) -- ✅ Documentation complete - -## Lessons Learned - -1. **TDD Works Great for Database Code** - - Caught schema issues immediately - - Performance testing built-in - - Clear success criteria - -2. **Integration Tests > Unit Tests** - - 39 integration tests covered everything - - No unit tests needed for simple functions - - Real database testing found real issues - -3. **SQLite CHECK Constraints Are Strict** - - Enforce data integrity at DB level - - Required workarounds in tests - - Good for production reliability - -4. **In-Memory DBs Have Quirks** - - WAL mode returns 'memory' not 'wal' - - Tests adjusted for both cases - - File-based DBs will work correctly - -## Celebration! 🎉 - -**We did it!** Phase 1 MVP is complete with: -- 100% test pass rate (39/39) -- All core features working -- Clean, maintainable code -- Comprehensive documentation -- TDD approach validated - -**Next:** Wire up CLI and we have a working memory system! 
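-
-For reference, the programmatic API above already composes into a complete workflow. A minimal sketch, assuming an in-memory database (as the test suite uses) and imports relative to the llmemory root:
-
-```javascript
-import Database from 'better-sqlite3';
-import { initSchema } from './src/db/schema.js';
-import { storeMemory } from './src/commands/store.js';
-import { searchMemories } from './src/commands/search.js';
-import { pruneMemories } from './src/commands/prune.js';
-
-// Fresh database with the Phase 1 schema
-const db = new Database(':memory:');
-initSchema(db);
-
-// Store a memory, find it again, then preview what prune would remove
-storeMemory(db, {
-  content: 'Docker uses bridge networks',
-  tags: 'docker,networking',
-  entered_by: 'manual'
-});
-console.log(searchMemories(db, 'docker', { limit: 5 }));
-console.log(pruneMemories(db, { dryRun: true }));
-```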
- ---- - -**Status:** Phase 1 Complete ✅ -**Tests:** 39/39 passing (100%) -**Next Phase:** CLI Integration → Phase 2 (FTS5) -**Time to MVP:** ~2 hours (TDD approach) diff --git a/shared/linked-dotfiles/opencode/llmemory/docs/TDD_SETUP.md b/shared/linked-dotfiles/opencode/llmemory/docs/TDD_SETUP.md deleted file mode 100644 index 4349e49..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/docs/TDD_SETUP.md +++ /dev/null @@ -1,113 +0,0 @@ -# TDD Testing Philosophy - Added to LLMemory - -## What Was Updated - -### 1. Updated IMPLEMENTATION_PLAN.md -- ✅ Rewrote testing strategy section with integration-first philosophy -- ✅ Added TDD workflow to Steps 1.3 (Store) and 1.4 (Search) -- ✅ Each step now has "write test first" as explicit requirement -- ✅ Test code examples included before implementation examples - -### 2. Updated AGENTS.md -- ⚠️ File doesn't exist in opencode root, skipped -- Created TESTING.md instead with full testing guide - -### 3. Created docs/TESTING.md -- ✅ Comprehensive testing philosophy document -- ✅ TDD workflow with detailed examples -- ✅ Integration-first approach explained -- ✅ When to write unit tests (rarely!) -- ✅ Realistic data seeding strategies -- ✅ Watch-driven development workflow -- ✅ Good vs bad test examples - -### 4. Created test/integration.test.js -- ✅ Test structure scaffolded with `.todo()` markers -- ✅ Shows TDD structure before implementation -- ✅ Database layer tests -- ✅ Store command tests -- ✅ Search command tests -- ✅ Performance tests -- ✅ Edge case tests - -### 5. Simplified Dependencies -- ⚠️ Removed `better-sqlite3` temporarily (build issues on NixOS) -- ✅ Installed: commander, chalk, date-fns, vitest -- ✅ Tests run successfully (all `.todo()` so pass by default) - -## Current Status - -**Tests Setup:** ✅ Complete -```bash -npm test # Runs all tests (currently 0 real tests, 30+ .todo()) -npm run test:watch # Watch mode for TDD workflow -``` - -**Next Steps (TDD Approach):** - -1. **Install better-sqlite3** (need native build tools) - ```bash - # On NixOS, may need: nix-shell -p gcc gnumake python3 - npm install better-sqlite3 - ``` - -2. **Write First Real Test** (database schema) - ```javascript - test('creates memories table with correct schema', () => { - const db = new Database(':memory:'); - initSchema(db); - - const tables = db.prepare("SELECT name FROM sqlite_master WHERE type='table'").all(); - expect(tables.map(t => t.name)).toContain('memories'); - }); - ``` - -3. **Watch Test Fail** (`npm run test:watch`) - -4. **Implement** (src/db/schema.js) - -5. **Watch Test Pass** - -6. **Move to Next Test** - -## TDD Philosophy Summary - -**DO:** -- ✅ Write integration tests first -- ✅ Use realistic data (50-100 memories) -- ✅ Test with `:memory:` or temp file database -- ✅ Run in watch mode -- ✅ See test fail → implement → see test pass - -**DON'T:** -- ❌ Write unit tests for simple functions -- ❌ Test implementation details -- ❌ Use toy data (1-2 memories) -- ❌ Mock the database (test the real thing) - -## Build Issue Note - -`better-sqlite3` requires native compilation. On NixOS: -```bash -# Option 1: Use nix-shell -nix-shell -p gcc gnumake python3 -npm install better-sqlite3 - -# Option 2: Use in-memory mock for testing -# Implement with native SQLite later -``` - -This is documented in test/integration.test.js comments. - -## Next Session Reminder - -Start with: `/home/nate/nixos/shared/linked-dotfiles/opencode/llmemory/` - -1. Fix better-sqlite3 installation -2. Remove `.todo()` from first test -3. Watch it fail -4. 
Implement schema.js -5. Watch it pass -6. Continue with TDD approach - -All tests are scaffolded and ready! diff --git a/shared/linked-dotfiles/opencode/llmemory/docs/TESTING.md b/shared/linked-dotfiles/opencode/llmemory/docs/TESTING.md deleted file mode 100644 index 81e8662..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/docs/TESTING.md +++ /dev/null @@ -1,529 +0,0 @@ -# LLMemory Testing Guide - -## Testing Philosophy: Integration-First TDD - -This project uses **integration-first TDD** - we write integration tests that verify real workflows, not unit tests that verify implementation details. - -## Core Principles - -### 1. Integration Tests Are Primary - -**Why:** -- Tests real behavior users/agents will experience -- Less brittle (survives refactoring) -- Higher confidence in system working correctly -- Catches integration issues early - -**Example:** -```javascript -// GOOD: Integration test -test('store and search workflow', () => { - // Test the actual workflow - storeMemory(db, { content: 'Docker uses bridge networks', tags: 'docker' }); - const results = searchMemories(db, 'docker'); - expect(results[0].content).toContain('Docker'); -}); - -// AVOID: Over-testing implementation details -test('parseContent returns trimmed string', () => { - expect(parseContent(' test ')).toBe('test'); -}); -// ^ This is probably already tested by integration tests -``` - -### 2. Unit Tests Are Rare - -**Only write unit tests for:** -- Complex algorithms (Levenshtein distance, trigram extraction) -- Pure functions with many edge cases -- Critical validation logic - -**Don't write unit tests for:** -- Database queries (test via integration) -- CLI argument parsing (test via integration) -- Simple utilities (tag parsing, date formatting) -- Anything already covered by integration tests - -**Rule of thumb:** Think twice before writing a unit test. Ask: "Is this already tested by my integration tests?" - -### 3. Test With Realistic Data - -**Use real SQLite databases:** -```javascript -beforeEach(() => { - db = new Database(':memory:'); // Fast, isolated - initSchema(db); - - // Seed with realistic data - seedDatabase(db, 50); // 50 realistic memories -}); -``` - -**Generate realistic test data:** -```javascript -// test/helpers/seed.js -export function generateRealisticMemory() { - const templates = [ - { content: 'Docker Compose requires explicit subnet config', tags: ['docker', 'networking'] }, - { content: 'PostgreSQL VACUUM FULL locks tables', tags: ['postgresql', 'performance'] }, - { content: 'Git worktree allows parallel branches', tags: ['git', 'workflow'] }, - // 50+ realistic templates - ]; - return randomChoice(templates); -} -``` - -**Why:** Tests should reflect real usage, not artificial toy data. - -### 4. Watch-Driven Development - -**Workflow:** -```bash -# Terminal 1: Watch mode (always running) -npm run test:watch - -# Terminal 2: Manual testing -node src/cli.js store "test memory" -``` - -**Steps:** -1. Write integration test (red/failing) -2. Watch test fail -3. Implement feature -4. Watch test pass (green) -5. Verify manually with CLI -6. 
Refine based on output - -## TDD Workflow Example - -### Example: Implementing Store Command - -**Step 1: Write Test First** -```javascript -// test/integration.test.js -describe('Store Command', () => { - let db; - - beforeEach(() => { - db = new Database(':memory:'); - initSchema(db); - }); - - test('stores memory with tags', () => { - const result = storeMemory(db, { - content: 'Docker uses bridge networks', - tags: 'docker,networking' - }); - - expect(result.id).toBeDefined(); - - // Verify in database - const memory = db.prepare('SELECT * FROM memories WHERE id = ?').get(result.id); - expect(memory.content).toBe('Docker uses bridge networks'); - - // Verify tags linked correctly - const tags = db.prepare(` - SELECT t.name FROM tags t - JOIN memory_tags mt ON t.id = mt.tag_id - WHERE mt.memory_id = ? - `).all(result.id); - - expect(tags.map(t => t.name)).toEqual(['docker', 'networking']); - }); - - test('rejects content over 10KB', () => { - expect(() => { - storeMemory(db, { content: 'x'.repeat(10001) }); - }).toThrow('Content exceeds 10KB limit'); - }); -}); -``` - -**Step 2: Run Test (Watch It Fail)** -```bash -$ npm run test:watch - -FAIL test/integration.test.js - Store Command - ✕ stores memory with tags (2 ms) - ✕ rejects content over 10KB (1 ms) - - ● Store Command › stores memory with tags - ReferenceError: storeMemory is not defined -``` - -**Step 3: Implement Feature** -```javascript -// src/commands/store.js -export function storeMemory(db, { content, tags, expires, entered_by }) { - // Validate content - if (content.length > 10000) { - throw new Error('Content exceeds 10KB limit'); - } - - // Insert memory - const result = db.prepare(` - INSERT INTO memories (content, entered_by, expires_at) - VALUES (?, ?, ?) - `).run(content, entered_by, expires); - - // Handle tags - if (tags) { - const tagList = tags.split(',').map(t => t.trim().toLowerCase()); - linkTags(db, result.lastInsertRowid, tagList); - } - - return { id: result.lastInsertRowid }; -} -``` - -**Step 4: Watch Test Pass** -```bash -PASS test/integration.test.js - Store Command - ✓ stores memory with tags (15 ms) - ✓ rejects content over 10KB (3 ms) - -Tests: 2 passed, 2 total -``` - -**Step 5: Verify Manually** -```bash -$ node src/cli.js store "Docker uses bridge networks" --tags docker,networking -Memory #1 stored successfully - -$ node src/cli.js search "docker" -[2025-10-29 12:45] docker, networking -Docker uses bridge networks -``` - -**Step 6: Refine** -```javascript -// Add more test cases based on manual testing -test('normalizes tags to lowercase', () => { - storeMemory(db, { content: 'test', tags: 'Docker,NETWORKING' }); - - const tags = db.prepare('SELECT name FROM tags').all(); - expect(tags).toEqual([ - { name: 'docker' }, - { name: 'networking' } - ]); -}); -``` - -## Test Organization - -### Directory Structure -``` -test/ -├── integration.test.js # PRIMARY - All main workflows -├── unit/ -│ ├── fuzzy.test.js # RARE - Only complex algorithms -│ └── levenshtein.test.js # RARE - Only complex algorithms -├── helpers/ -│ ├── seed.js # Realistic data generation -│ └── db.js # Database setup helpers -└── fixtures/ - └── realistic-memories.js # Memory templates -``` - -### Integration Test Structure - -```javascript -// test/integration.test.js -import { describe, test, expect, beforeEach, afterEach } from 'vitest'; -import Database from 'better-sqlite3'; -import { storeMemory, searchMemories } from '../src/commands/index.js'; -import { initSchema } from '../src/db/schema.js'; -import { seedDatabase } from 
'./helpers/seed.js'; - -describe('Memory System Integration', () => { - let db; - - beforeEach(() => { - db = new Database(':memory:'); - initSchema(db); - }); - - afterEach(() => { - db.close(); - }); - - describe('Store and Retrieve', () => { - test('stores and finds memory', () => { - storeMemory(db, { content: 'test', tags: 'demo' }); - const results = searchMemories(db, 'test'); - expect(results).toHaveLength(1); - }); - }); - - describe('Search with Filters', () => { - beforeEach(() => { - seedDatabase(db, 50); // Realistic data - }); - - test('filters by tags', () => { - const results = searchMemories(db, 'docker', { tags: ['networking'] }); - results.forEach(r => { - expect(r.tags).toContain('networking'); - }); - }); - }); - - describe('Performance', () => { - test('searches 100 memories in <50ms', () => { - seedDatabase(db, 100); - - const start = Date.now(); - searchMemories(db, 'test'); - const duration = Date.now() - start; - - expect(duration).toBeLessThan(50); - }); - }); -}); -``` - -## Unit Test Structure (Rare) - -**Only for complex algorithms:** - -```javascript -// test/unit/levenshtein.test.js -import { describe, test, expect } from 'vitest'; -import { levenshtein } from '../../src/search/fuzzy.js'; - -describe('Levenshtein Distance', () => { - test('calculates edit distance correctly', () => { - expect(levenshtein('docker', 'dcoker')).toBe(2); - expect(levenshtein('kubernetes', 'kuberntes')).toBe(2); - expect(levenshtein('same', 'same')).toBe(0); - }); - - test('handles edge cases', () => { - expect(levenshtein('', 'hello')).toBe(5); - expect(levenshtein('a', '')).toBe(1); - expect(levenshtein('', '')).toBe(0); - }); - - test('handles unicode correctly', () => { - expect(levenshtein('café', 'cafe')).toBe(1); - }); -}); -``` - -## Test Data Helpers - -### Realistic Memory Generation - -```javascript -// test/helpers/seed.js -const REALISTIC_MEMORIES = [ - { content: 'Docker Compose uses bridge networks by default. Custom networks require explicit subnet config.', tags: ['docker', 'networking'] }, - { content: 'PostgreSQL VACUUM FULL locks tables and requires 2x disk space. Use VACUUM ANALYZE for production.', tags: ['postgresql', 'performance'] }, - { content: 'Git worktree allows working on multiple branches without stashing. Use: git worktree add ../branch branch-name', tags: ['git', 'workflow'] }, - { content: 'NixOS flake.lock must be committed to git for reproducible builds across machines', tags: ['nixos', 'build-system'] }, - { content: 'TypeScript 5.0+ const type parameters preserve literal types: function id(x: T): T', tags: ['typescript', 'types'] }, - // ... 50+ more realistic examples -]; - -export function generateRealisticMemory() { - return { ...randomChoice(REALISTIC_MEMORIES) }; -} - -export function seedDatabase(db, count = 50) { - const insert = db.prepare(` - INSERT INTO memories (content, entered_by, created_at) - VALUES (?, ?, ?) 
- `); - - const insertMany = db.transaction((memories) => { - for (const memory of memories) { - const result = insert.run( - memory.content, - randomChoice(['investigate-agent', 'optimize-agent', 'manual']), - Date.now() - randomInt(0, 90 * 86400000) // Random within 90 days - ); - - // Link tags - if (memory.tags) { - linkTags(db, result.lastInsertRowid, memory.tags); - } - } - }); - - const memories = Array.from({ length: count }, () => generateRealisticMemory()); - insertMany(memories); -} - -function randomChoice(arr) { - return arr[Math.floor(Math.random() * arr.length)]; -} - -function randomInt(min, max) { - return Math.floor(Math.random() * (max - min + 1)) + min; -} -``` - -## Running Tests - -```bash -# Watch mode (primary workflow) -npm run test:watch - -# Run once -npm test - -# With coverage -npm run test:coverage - -# Specific test file -npm test integration.test.js - -# Run in CI (no watch) -npm test -- --run -``` - -## Coverage Guidelines - -**Target: >80% coverage, but favor integration over unit** - -**What to measure:** -- Are all major workflows tested? (store, search, list, prune) -- Are edge cases covered? (empty data, expired memories, invalid input) -- Are performance targets met? (<50ms search for Phase 1) - -**What NOT to obsess over:** -- 100% line coverage (diminishing returns) -- Testing every internal function (if covered by integration tests) -- Testing framework code (CLI parsing, DB driver) - -**Check coverage:** -```bash -npm run test:coverage - -# View HTML report -open coverage/index.html -``` - -## Examples of Good vs Bad Tests - -### ✅ Good: Integration Test -```javascript -test('full workflow: store, search, list, prune', () => { - // Store memories - storeMemory(db, { content: 'Memory 1', tags: 'test' }); - storeMemory(db, { content: 'Memory 2', tags: 'test', expires_at: Date.now() - 1000 }); - - // Search finds active memory - const results = searchMemories(db, 'Memory'); - expect(results).toHaveLength(2); // Both found initially - - // List shows both - const all = listMemories(db); - expect(all).toHaveLength(2); - - // Prune removes expired - const pruned = pruneMemories(db); - expect(pruned.count).toBe(1); - - // Search now finds only active - const afterPrune = searchMemories(db, 'Memory'); - expect(afterPrune).toHaveLength(1); -}); -``` - -### ❌ Bad: Over-Testing Implementation -```javascript -// AVOID: Testing internal implementation details -test('parseTagString splits on comma', () => { - expect(parseTagString('a,b,c')).toEqual(['a', 'b', 'c']); -}); - -test('normalizeTag converts to lowercase', () => { - expect(normalizeTag('Docker')).toBe('docker'); -}); - -// These are implementation details already covered by integration tests! -``` - -### ✅ Good: Unit Test (Justified) -```javascript -// Complex algorithm worth isolated testing -test('levenshtein distance edge cases', () => { - // Empty strings - expect(levenshtein('', '')).toBe(0); - expect(levenshtein('abc', '')).toBe(3); - - // Unicode - expect(levenshtein('café', 'cafe')).toBe(1); - - // Long strings - const long1 = 'a'.repeat(1000); - const long2 = 'a'.repeat(999) + 'b'; - expect(levenshtein(long1, long2)).toBe(1); -}); -``` - -## Debugging Failed Tests - -### 1. Use `.only` to Focus -```javascript -test.only('this specific test', () => { - // Only runs this test -}); -``` - -### 2. 
Inspect Database State -```javascript -test('debug search', () => { - storeMemory(db, { content: 'test' }); - - // Inspect what's in DB - const all = db.prepare('SELECT * FROM memories').all(); - console.log('Database contents:', all); - - const results = searchMemories(db, 'test'); - console.log('Search results:', results); - - expect(results).toHaveLength(1); -}); -``` - -### 3. Use Temp File for Manual Inspection -```javascript -test('debug with file', () => { - const db = new Database('/tmp/debug.db'); - initSchema(db); - - storeMemory(db, { content: 'test' }); - - // Now inspect with: sqlite3 /tmp/debug.db -}); -``` - -## Summary - -**DO:** -- ✅ Write integration tests for all workflows -- ✅ Use realistic data (50-100 memories) -- ✅ Test with `:memory:` database -- ✅ Run in watch mode (`npm run test:watch`) -- ✅ Verify manually with CLI after tests pass -- ✅ Think twice before writing unit tests - -**DON'T:** -- ❌ Test implementation details -- ❌ Write unit tests for simple functions -- ❌ Use toy data (1-2 memories) -- ❌ Mock database or CLI (test the real thing) -- ❌ Aim for 100% coverage at expense of test quality - -**Remember:** Integration tests that verify real workflows are worth more than 100 unit tests that verify implementation details. - ---- - -**Testing Philosophy:** Integration-first TDD with realistic data -**Coverage Target:** >80% (mostly integration tests) -**Unit Tests:** Rare, only for complex algorithms -**Workflow:** Write test (fail) → Implement (pass) → Verify (manual) → Refine diff --git a/shared/linked-dotfiles/opencode/llmemory/package.json b/shared/linked-dotfiles/opencode/llmemory/package.json deleted file mode 100644 index 5600464..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/package.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "llmemory", - "version": "0.1.0", - "description": "Persistent memory/journal system for AI agents with grep-like search", - "main": "src/cli.js", - "type": "module", - "bin": { - "llmemory": "./bin/llmemory" - }, - "scripts": { - "start": "node src/cli.js", - "test": "vitest run", - "test:watch": "vitest", - "test:coverage": "vitest --coverage", - "lint": "eslint src/", - "format": "prettier --write src/ test/" - }, - "keywords": [ - "ai", - "agent", - "memory", - "journal", - "search", - "sqlite", - "knowledge-base" - ], - "author": "", - "license": "MIT", - "engines": { - "node": ">=18.0.0" - }, - "dependencies": { - "better-sqlite3": "^12.4.1", - "chalk": "^5.3.0", - "commander": "^11.1.0", - "date-fns": "^3.0.0" - }, - "devDependencies": { - "vitest": "^1.0.0" - }, - "comments": { - "better-sqlite3": "Removed temporarily due to build issues - will add back when implementing database layer", - "optional-deps": "Removed optional dependencies for now - can add later for enhanced UX" - } -} diff --git a/shared/linked-dotfiles/opencode/llmemory/src/cli.js b/shared/linked-dotfiles/opencode/llmemory/src/cli.js deleted file mode 100644 index 8dcb6e0..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/src/cli.js +++ /dev/null @@ -1,459 +0,0 @@ -#!/usr/bin/env node - -import { Command } from 'commander'; -import chalk from 'chalk'; -import { formatDistanceToNow } from 'date-fns'; -import { initDb, getDb } from './db/connection.js'; -import { storeMemory, ValidationError } from './commands/store.js'; -import { searchMemories } from './commands/search.js'; -import { listMemories } from './commands/list.js'; -import { pruneMemories } from './commands/prune.js'; -import { deleteMemories } from './commands/delete.js'; 
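-// parseTags normalizes comma-separated input into trimmed, lowercase tag arrays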
-import { parseTags } from './utils/tags.js'; - -const program = new Command(); - -function formatMemory(memory, options = {}) { - const { json = false, markdown = false } = options; - - if (json) { - return JSON.stringify(memory, null, 2); - } - - const createdDate = new Date(memory.created_at * 1000); - const createdStr = formatDistanceToNow(createdDate, { addSuffix: true }); - - let expiresStr = ''; - if (memory.expires_at) { - const expiresDate = new Date(memory.expires_at * 1000); - expiresStr = formatDistanceToNow(expiresDate, { addSuffix: true }); - } - - if (markdown) { - let md = `## Memory #${memory.id}\n\n`; - md += `${memory.content}\n\n`; - md += `**Created**: ${createdStr} by ${memory.entered_by}\n`; - if (memory.tags) md += `**Tags**: ${memory.tags}\n`; - if (expiresStr) md += `**Expires**: ${expiresStr}\n`; - return md; - } - - let output = ''; - output += chalk.blue.bold(`#${memory.id}`) + chalk.gray(` • ${createdStr} • ${memory.entered_by}\n`); - output += `${memory.content}\n`; - if (memory.tags) { - const tagList = memory.tags.split(','); - output += chalk.yellow(tagList.map(t => `#${t}`).join(' ')) + '\n'; - } - if (expiresStr) { - output += chalk.red(`⏱ Expires ${expiresStr}\n`); - } - return output; -} - -function formatMemoryList(memories, options = {}) { - if (options.json) { - return JSON.stringify(memories, null, 2); - } - - if (memories.length === 0) { - return chalk.gray('No memories found.'); - } - - return memories.map(m => formatMemory(m, options)).join('\n' + chalk.gray('─'.repeat(60)) + '\n'); -} - -function parseDate(dateStr) { - if (!dateStr) return null; - const date = new Date(dateStr); - return Math.floor(date.getTime() / 1000); -} - -program - .name('llmemory') - .description('LLMemory - AI Agent Memory System') - .version('0.1.0'); - -program - .command('store ') - .description('Store a new memory') - .option('-t, --tags ', 'Comma-separated tags') - .option('-e, --expires ', 'Expiration date') - .option('--by ', 'Agent/user identifier', 'manual') - .action((content, options) => { - try { - initDb(); - const db = getDb(); - - const memory = storeMemory(db, { - content, - tags: options.tags ? parseTags(options.tags) : null, - expires_at: parseDate(options.expires), - entered_by: options.by - }); - - console.log(chalk.green('✓ Memory stored successfully')); - console.log(formatMemory(memory)); - } catch (error) { - if (error instanceof ValidationError) { - console.error(chalk.red('✗ Validation error:'), error.message); - process.exit(1); - } - console.error(chalk.red('✗ Error:'), error.message); - process.exit(1); - } - }); - -program - .command('search ') - .description('Search memories') - .option('-t, --tags ', 'Filter by tags (AND)') - .option('--any-tag ', 'Filter by tags (OR)') - .option('--after ', 'Created after date') - .option('--before ', 'Created before date') - .option('--entered-by ', 'Filter by creator') - .option('-l, --limit ', 'Max results', '10') - .option('--offset ', 'Pagination offset', '0') - .option('--json', 'Output as JSON') - .option('--markdown', 'Output as Markdown') - .action((query, options) => { - try { - initDb(); - const db = getDb(); - - const searchOptions = { - tags: options.tags ? 
parseTags(options.tags) : [], - anyTag: !!options.anyTag, - after: parseDate(options.after), - before: parseDate(options.before), - entered_by: options.enteredBy, - limit: parseInt(options.limit), - offset: parseInt(options.offset) - }; - - if (options.anyTag) { - searchOptions.tags = parseTags(options.anyTag); - } - - const results = searchMemories(db, query, searchOptions); - - if (results.length === 0) { - console.log(chalk.gray('No memories found matching your query.')); - return; - } - - console.log(chalk.green(`Found ${results.length} ${results.length === 1 ? 'memory' : 'memories'}\n`)); - console.log(formatMemoryList(results, { json: options.json, markdown: options.markdown })); - } catch (error) { - console.error(chalk.red('✗ Error:'), error.message); - process.exit(1); - } - }); - -program - .command('list') - .description('List recent memories') - .option('-l, --limit ', 'Max results', '20') - .option('--offset ', 'Pagination offset', '0') - .option('-t, --tags ', 'Filter by tags') - .option('--sort ', 'Sort by field (created, expires, content)', 'created') - .option('--order ', 'Sort order (asc, desc)', 'desc') - .option('--json', 'Output as JSON') - .option('--markdown', 'Output as Markdown') - .action((options) => { - try { - initDb(); - const db = getDb(); - - const listOptions = { - limit: parseInt(options.limit), - offset: parseInt(options.offset), - tags: options.tags ? parseTags(options.tags) : [], - sort: options.sort, - order: options.order - }; - - const results = listMemories(db, listOptions); - - if (results.length === 0) { - console.log(chalk.gray('No memories found.')); - return; - } - - console.log(chalk.green(`Listing ${results.length} ${results.length === 1 ? 'memory' : 'memories'}\n`)); - console.log(formatMemoryList(results, { json: options.json, markdown: options.markdown })); - } catch (error) { - console.error(chalk.red('✗ Error:'), error.message); - process.exit(1); - } - }); - -program - .command('prune') - .description('Remove expired memories') - .option('--dry-run', 'Show what would be deleted without deleting') - .option('--force', 'Skip confirmation prompt') - .option('--before ', 'Delete memories before date (even if not expired)') - .action(async (options) => { - try { - initDb(); - const db = getDb(); - - const pruneOptions = { - dryRun: options.dryRun || false, - before: parseDate(options.before) - }; - - const result = pruneMemories(db, pruneOptions); - - if (result.count === 0) { - console.log(chalk.green('✓ No expired memories to prune.')); - return; - } - - if (pruneOptions.dryRun) { - console.log(chalk.yellow(`Would delete ${result.count} ${result.count === 1 ? 'memory' : 'memories'}:\n`)); - result.memories.forEach(m => { - console.log(chalk.gray(` #${m.id}: ${m.content.substring(0, 60)}...`)); - }); - console.log(chalk.yellow('\nRun without --dry-run to actually delete.')); - } else { - if (!options.force) { - console.log(chalk.yellow(`⚠ About to delete ${result.count} ${result.count === 1 ? 'memory' : 'memories'}.`)); - console.log(chalk.gray('Run with --dry-run to preview first, or --force to skip this check.')); - process.exit(0); - } - console.log(chalk.green(`✓ Pruned ${result.count} expired ${result.count === 1 ? 
'memory' : 'memories'}.`)); - } - } catch (error) { - console.error(chalk.red('✗ Error:'), error.message); - process.exit(1); - } - }); - -program - .command('delete') - .description('Delete memories by various criteria') - .option('--ids ', 'Comma-separated memory IDs to delete') - .option('-t, --tags ', 'Filter by tags (AND logic)') - .option('--any-tag ', 'Filter by tags (OR logic)') - .option('-q, --query ', 'Delete memories matching text (LIKE search)') - .option('--after ', 'Delete memories created after date') - .option('--before ', 'Delete memories created before date') - .option('--entered-by ', 'Delete memories by specific agent') - .option('--include-expired', 'Include expired memories in deletion') - .option('--expired-only', 'Delete only expired memories') - .option('--dry-run', 'Show what would be deleted without deleting') - .option('--json', 'Output as JSON') - .option('--markdown', 'Output as Markdown') - .action(async (options) => { - try { - initDb(); - const db = getDb(); - - // Parse options - const deleteOptions = { - ids: options.ids ? options.ids.split(',').map(id => parseInt(id.trim())).filter(id => !isNaN(id)) : [], - tags: options.tags ? parseTags(options.tags) : [], - anyTag: !!options.anyTag, - query: options.query || null, - after: parseDate(options.after), - before: parseDate(options.before), - entered_by: options.enteredBy, - includeExpired: options.includeExpired || false, - expiredOnly: options.expiredOnly || false, - dryRun: options.dryRun || false - }; - - if (options.anyTag) { - deleteOptions.tags = parseTags(options.anyTag); - } - - // Execute deletion - const result = deleteMemories(db, deleteOptions); - - if (result.count === 0) { - console.log(chalk.gray('No memories match the specified criteria.')); - return; - } - - if (deleteOptions.dryRun) { - console.log(chalk.yellow(`Would delete ${result.count} ${result.count === 1 ? 'memory' : 'memories'}:\n`)); - console.log(formatMemoryList(result.memories, { json: options.json, markdown: options.markdown })); - console.log(chalk.yellow('\nRun without --dry-run to actually delete.')); - } else { - console.log(chalk.green(`✓ Deleted ${result.count} ${result.count === 1 ? 
'memory' : 'memories'}.`)); - } - } catch (error) { - if (error.message.includes('At least one filter')) { - console.error(chalk.red('✗ Safety check:'), error.message); - console.error(chalk.gray('\nAvailable filters: --ids, --tags, --query, --after, --before, --entered-by, --expired-only')); - process.exit(1); - } - console.error(chalk.red('✗ Error:'), error.message); - process.exit(1); - } - }); - - -program - .command('stats') - .description('Show memory statistics') - .option('--tags', 'Show tag frequency distribution') - .option('--agents', 'Show memories per agent') - .action((options) => { - try { - initDb(); - const db = getDb(); - - const totalMemories = db.prepare('SELECT COUNT(*) as count FROM memories WHERE expires_at IS NULL OR expires_at > strftime(\'%s\', \'now\')').get(); - const expiredMemories = db.prepare('SELECT COUNT(*) as count FROM memories WHERE expires_at IS NOT NULL AND expires_at <= strftime(\'%s\', \'now\')').get(); - - console.log(chalk.blue.bold('Memory Statistics\n')); - console.log(`${chalk.green('Active memories:')} ${totalMemories.count}`); - console.log(`${chalk.red('Expired memories:')} ${expiredMemories.count}`); - - if (options.tags) { - console.log(chalk.blue.bold('\nTag Distribution:')); - const tagStats = db.prepare(` - SELECT t.name, COUNT(*) as count - FROM tags t - JOIN memory_tags mt ON t.id = mt.tag_id - JOIN memories m ON mt.memory_id = m.id - WHERE m.expires_at IS NULL OR m.expires_at > strftime('%s', 'now') - GROUP BY t.name - ORDER BY count DESC - `).all(); - - if (tagStats.length === 0) { - console.log(chalk.gray(' No tags found.')); - } else { - tagStats.forEach(({ name, count }) => { - console.log(` ${chalk.yellow(`#${name}`)}: ${count}`); - }); - } - } - - if (options.agents) { - console.log(chalk.blue.bold('\nMemories by Agent:')); - const agentStats = db.prepare(` - SELECT entered_by, COUNT(*) as count - FROM memories - WHERE expires_at IS NULL OR expires_at > strftime('%s', 'now') - GROUP BY entered_by - ORDER BY count DESC - `).all(); - - if (agentStats.length === 0) { - console.log(chalk.gray(' No agents found.')); - } else { - agentStats.forEach(({ entered_by, count }) => { - console.log(` ${chalk.cyan(entered_by)}: ${count}`); - }); - } - } - } catch (error) { - console.error(chalk.red('✗ Error:'), error.message); - process.exit(1); - } - }); - -program - .command('export ') - .description('Export memories to JSON file') - .action((file) => { - console.log(chalk.yellow('Export command - Phase 2 feature')); - console.log('File:', file); - }); - -program - .command('import ') - .description('Import memories from JSON file') - .action((file) => { - console.log(chalk.yellow('Import command - Phase 2 feature')); - console.log('File:', file); - }); - -// Global options -program - .option('--agent-context', 'Display comprehensive agent documentation') - .option('--db ', 'Custom database location') - .option('--verbose', 'Detailed logging') - .option('--quiet', 'Suppress non-error output'); - -if (process.argv.includes('--agent-context')) { - console.log(chalk.blue.bold('='.repeat(80))); - console.log(chalk.blue.bold('LLMemory - Agent Context Documentation')); - console.log(chalk.blue.bold('='.repeat(80))); - console.log(chalk.white('\n📚 LLMemory is a persistent memory/journal system for AI agents.\n')); - - console.log(chalk.green.bold('QUICK START:')); - console.log(chalk.white(' Store a memory:')); - console.log(chalk.gray(' $ llmemory store "Completed authentication refactor" --tags backend,auth')); - console.log(chalk.white('\n 
Search memories:')); - console.log(chalk.gray(' $ llmemory search "authentication" --tags backend --limit 5')); - console.log(chalk.white('\n List recent work:')); - console.log(chalk.gray(' $ llmemory list --limit 10')); - console.log(chalk.white('\n Remove old memories:')); - console.log(chalk.gray(' $ llmemory prune --dry-run')); - - console.log(chalk.green.bold('\n\nCOMMAND REFERENCE:')); - console.log(chalk.yellow(' store') + chalk.white(' Store a new memory')); - console.log(chalk.gray(' -t, --tags Comma-separated tags')); - console.log(chalk.gray(' -e, --expires Expiration date')); - console.log(chalk.gray(' --by Agent/user identifier (default: manual)')); - - console.log(chalk.yellow('\n search') + chalk.white(' Search memories (case-insensitive)')); - console.log(chalk.gray(' -t, --tags Filter by tags (AND)')); - console.log(chalk.gray(' --any-tag Filter by tags (OR)')); - console.log(chalk.gray(' --after Created after date')); - console.log(chalk.gray(' --before Created before date')); - console.log(chalk.gray(' --entered-by Filter by creator')); - console.log(chalk.gray(' -l, --limit Max results (default: 10)')); - console.log(chalk.gray(' --json Output as JSON')); - console.log(chalk.gray(' --markdown Output as Markdown')); - - console.log(chalk.yellow('\n list') + chalk.white(' List recent memories')); - console.log(chalk.gray(' -l, --limit Max results (default: 20)')); - console.log(chalk.gray(' -t, --tags Filter by tags')); - console.log(chalk.gray(' --sort Sort by: created, expires, content')); - console.log(chalk.gray(' --order Sort order: asc, desc')); - - console.log(chalk.yellow('\n prune') + chalk.white(' Remove expired memories')); - console.log(chalk.gray(' --dry-run Preview without deleting')); - console.log(chalk.gray(' --force Skip confirmation')); - console.log(chalk.gray(' --before Delete memories before date')); - - console.log(chalk.yellow('\n delete') + chalk.white(' Delete memories by criteria')); - console.log(chalk.gray(' --ids Comma-separated memory IDs')); - console.log(chalk.gray(' -t, --tags Filter by tags (AND logic)')); - console.log(chalk.gray(' --any-tag Filter by tags (OR logic)')); - console.log(chalk.gray(' -q, --query LIKE search on content')); - console.log(chalk.gray(' --after Created after date')); - console.log(chalk.gray(' --before Created before date')); - console.log(chalk.gray(' --entered-by Filter by creator')); - console.log(chalk.gray(' --include-expired Include expired memories')); - console.log(chalk.gray(' --expired-only Delete only expired')); - console.log(chalk.gray(' --dry-run Preview without deleting')); - console.log(chalk.gray(' --force Skip confirmation')); - - console.log(chalk.yellow('\n stats') + chalk.white(' Show memory statistics')); - console.log(chalk.gray(' --tags Show tag distribution')); - console.log(chalk.gray(' --agents Show memories per agent')); - - console.log(chalk.green.bold('\n\nDESIGN PRINCIPLES:')); - console.log(chalk.white(' • ') + chalk.gray('Sparse token usage - only returns relevant results')); - console.log(chalk.white(' • ') + chalk.gray('Fast search - optimized LIKE queries, FTS5 ready')); - console.log(chalk.white(' • ') + chalk.gray('Flexible tagging - organize with multiple tags')); - console.log(chalk.white(' • ') + chalk.gray('Automatic cleanup - expire old memories')); - console.log(chalk.white(' • ') + chalk.gray('Agent-agnostic - works across sessions')); - - console.log(chalk.blue('\n📖 For detailed docs, see:')); - console.log(chalk.gray(' SPECIFICATION.md - Complete technical 
specification')); - console.log(chalk.gray(' ARCHITECTURE.md - System design and algorithms')); - console.log(chalk.gray(' docs/TESTING.md - TDD approach and test philosophy')); - console.log(chalk.blue.bold('\n' + '='.repeat(80) + '\n')); - process.exit(0); -} - -program.parse(); diff --git a/shared/linked-dotfiles/opencode/llmemory/src/commands/delete.js b/shared/linked-dotfiles/opencode/llmemory/src/commands/delete.js deleted file mode 100644 index aab1b25..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/src/commands/delete.js +++ /dev/null @@ -1,122 +0,0 @@ -// Delete command - remove memories by various criteria -import { parseTags } from '../utils/tags.js'; - -export function deleteMemories(db, options = {}) { - const { - ids = [], - tags = [], - anyTag = false, - query = null, - after = null, - before = null, - entered_by = null, - includeExpired = false, - expiredOnly = false, - dryRun = false - } = options; - - // Safety check: require at least one filter criterion - if (ids.length === 0 && tags.length === 0 && !query && !after && !before && !entered_by && !expiredOnly) { - throw new Error('At least one filter criterion is required (ids, tags, query, date range, agent, or expiredOnly)'); - } - - // Build base query to find matching memories - let sql = ` - SELECT DISTINCT - m.id, - m.content, - m.created_at, - m.entered_by, - m.expires_at, - GROUP_CONCAT(t.name, ',') as tags - FROM memories m - LEFT JOIN memory_tags mt ON m.id = mt.memory_id - LEFT JOIN tags t ON mt.tag_id = t.id - WHERE 1=1 - `; - - const params = []; - - // Filter by IDs - if (ids.length > 0) { - const placeholders = ids.map(() => '?').join(','); - sql += ` AND m.id IN (${placeholders})`; - params.push(...ids); - } - - // Content search (case-insensitive LIKE) - if (query && query.trim().length > 0) { - sql += ` AND LOWER(m.content) LIKE LOWER(?)`; - params.push(`%${query}%`); - } - - // Handle expired memories - if (expiredOnly) { - // Only delete expired memories - sql += ` AND m.expires_at IS NOT NULL AND m.expires_at <= strftime('%s', 'now')`; - } else if (!includeExpired) { - // Exclude expired memories by default - sql += ` AND (m.expires_at IS NULL OR m.expires_at > strftime('%s', 'now'))`; - } - // If includeExpired is true, don't add any expiration filter - - // Date filters - if (after) { - const afterTimestamp = typeof after === 'number' ? after : Math.floor(new Date(after).getTime() / 1000); - sql += ` AND m.created_at >= ?`; - params.push(afterTimestamp); - } - - if (before) { - const beforeTimestamp = typeof before === 'number' ? 
before : Math.floor(new Date(before).getTime() / 1000); - sql += ` AND m.created_at <= ?`; - params.push(beforeTimestamp); - } - - // Agent filter - if (entered_by) { - sql += ` AND m.entered_by = ?`; - params.push(entered_by); - } - - // Group by memory ID to aggregate tags - sql += ` GROUP BY m.id`; - - // Tag filters (applied after grouping) - if (tags.length > 0) { - const tagList = parseTags(tags.join(',')); - - if (anyTag) { - // OR logic - memory must have at least one of the tags - sql += ` HAVING (${tagList.map(() => 'tags LIKE ?').join(' OR ')})`; - params.push(...tagList.map(tag => `%${tag}%`)); - } else { - // AND logic - memory must have all tags - sql += ` HAVING (${tagList.map(() => 'tags LIKE ?').join(' AND ')})`; - params.push(...tagList.map(tag => `%${tag}%`)); - } - } - - // Execute query to find matching memories - const toDelete = db.prepare(sql).all(...params); - - if (dryRun) { - return { - count: toDelete.length, - memories: toDelete, - deleted: false - }; - } - - // Actually delete - if (toDelete.length > 0) { - const memoryIds = toDelete.map(m => m.id); - const placeholders = memoryIds.map(() => '?').join(','); - db.prepare(`DELETE FROM memories WHERE id IN (${placeholders})`).run(...memoryIds); - } - - return { - count: toDelete.length, - deleted: true - }; -} diff --git a/shared/linked-dotfiles/opencode/llmemory/src/commands/list.js b/shared/linked-dotfiles/opencode/llmemory/src/commands/list.js deleted file mode 100644 index 3ac9f34..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/src/commands/list.js +++ /dev/null @@ -1,54 +0,0 @@ -// List command - show recent memories -export function listMemories(db, options = {}) { - const { - limit = 20, - offset = 0, - tags = [], - sort = 'created', - order = 'desc' - } = options; - - // Validate sort field - const validSortFields = ['created', 'expires', 'content']; - const sortField = validSortFields.includes(sort) ? sort : 'created'; - - // Map to actual column name - const columnMap = { - 'created': 'created_at', - 'expires': 'expires_at', - 'content': 'content' - }; - - const sortColumn = columnMap[sortField]; - const sortOrder = order.toLowerCase() === 'asc' ? 'ASC' : 'DESC'; - - let sql = ` - SELECT DISTINCT - m.id, - m.content, - m.created_at, - m.entered_by, - m.expires_at, - GROUP_CONCAT(t.name, ',') as tags - FROM memories m - LEFT JOIN memory_tags mt ON m.id = mt.memory_id - LEFT JOIN tags t ON mt.tag_id = t.id - WHERE (m.expires_at IS NULL OR m.expires_at > strftime('%s', 'now')) - `; - - const params = []; - - sql += ` GROUP BY m.id`; - - // Tag filter - if (tags.length > 0) { - sql += ` HAVING (${tags.map(() => 'tags LIKE ?').join(' AND ')})`; - params.push(...tags.map(tag => `%${tag}%`)); - } - - sql += ` ORDER BY m.${sortColumn} ${sortOrder}`; - sql += ` LIMIT ? 
OFFSET ?`; - params.push(limit, offset); - - return db.prepare(sql).all(...params); -} diff --git a/shared/linked-dotfiles/opencode/llmemory/src/commands/prune.js b/shared/linked-dotfiles/opencode/llmemory/src/commands/prune.js deleted file mode 100644 index e49f8b1..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/src/commands/prune.js +++ /dev/null @@ -1,42 +0,0 @@ -// Prune command - remove expired memories -export function pruneMemories(db, options = {}) { - const { - dryRun = false, - before = null - } = options; - - let sql = 'SELECT id, content, expires_at FROM memories WHERE '; - const params = []; - - if (before) { - // Delete memories before this date (even if not expired) - const beforeTimestamp = typeof before === 'number' ? before : Math.floor(new Date(before).getTime() / 1000); - sql += 'created_at < ?'; - params.push(beforeTimestamp); - } else { - // Delete only expired memories - sql += 'expires_at IS NOT NULL AND expires_at <= strftime(\'%s\', \'now\')'; - } - - const toDelete = db.prepare(sql).all(...params); - - if (dryRun) { - return { - count: toDelete.length, - memories: toDelete, - deleted: false - }; - } - - // Actually delete - if (toDelete.length > 0) { - const ids = toDelete.map(m => m.id); - const placeholders = ids.map(() => '?').join(','); - db.prepare(`DELETE FROM memories WHERE id IN (${placeholders})`).run(...ids); - } - - return { - count: toDelete.length, - deleted: true - }; -} diff --git a/shared/linked-dotfiles/opencode/llmemory/src/commands/search.js b/shared/linked-dotfiles/opencode/llmemory/src/commands/search.js deleted file mode 100644 index 57d551c..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/src/commands/search.js +++ /dev/null @@ -1,86 +0,0 @@ -// Search command - find memories with filters -import { parseTags } from '../utils/tags.js'; - -export function searchMemories(db, query, options = {}) { - const { - tags = [], - anyTag = false, - after = null, - before = null, - entered_by = null, - limit = 10, - offset = 0 - } = options; - - // Build base query with LIKE search - let sql = ` - SELECT DISTINCT - m.id, - m.content, - m.created_at, - m.entered_by, - m.expires_at, - GROUP_CONCAT(t.name, ',') as tags - FROM memories m - LEFT JOIN memory_tags mt ON m.id = mt.memory_id - LEFT JOIN tags t ON mt.tag_id = t.id - WHERE 1=1 - `; - - const params = []; - - // Content search (case-insensitive LIKE) - if (query && query.trim().length > 0) { - sql += ` AND LOWER(m.content) LIKE LOWER(?)`; - params.push(`%${query}%`); - } - - // Exclude expired memories - sql += ` AND (m.expires_at IS NULL OR m.expires_at > strftime('%s', 'now'))`; - - // Date filters - if (after) { - const afterTimestamp = typeof after === 'number' ? after : Math.floor(new Date(after).getTime() / 1000); - sql += ` AND m.created_at >= ?`; - params.push(afterTimestamp); - } - - if (before) { - const beforeTimestamp = typeof before === 'number' ? 
before : Math.floor(new Date(before).getTime() / 1000); - sql += ` AND m.created_at <= ?`; - params.push(beforeTimestamp); - } - - // Agent filter - if (entered_by) { - sql += ` AND m.entered_by = ?`; - params.push(entered_by); - } - - // Group by memory ID to aggregate tags - sql += ` GROUP BY m.id`; - - // Tag filters (applied after grouping) - if (tags.length > 0) { - const tagList = parseTags(tags.join(',')); - - if (anyTag) { - // OR logic - memory must have at least one of the tags - sql += ` HAVING (${tagList.map(() => 'tags LIKE ?').join(' OR ')})`; - params.push(...tagList.map(tag => `%${tag}%`)); - } else { - // AND logic - memory must have all tags - sql += ` HAVING (${tagList.map(() => 'tags LIKE ?').join(' AND ')})`; - params.push(...tagList.map(tag => `%${tag}%`)); - } - } - - // Order by recency - sql += ` ORDER BY m.created_at DESC`; - - // Limit and offset - sql += ` LIMIT ? OFFSET ?`; - params.push(limit, offset); - - return db.prepare(sql).all(...params); -} diff --git a/shared/linked-dotfiles/opencode/llmemory/src/commands/store.js b/shared/linked-dotfiles/opencode/llmemory/src/commands/store.js deleted file mode 100644 index 7919634..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/src/commands/store.js +++ /dev/null @@ -1,44 +0,0 @@ -// Store command - save memory to database -import { validateContent, validateExpiresAt, ValidationError } from '../utils/validation.js'; -import { linkTags } from '../utils/tags.js'; - -export function storeMemory(db, { content, tags, expires_at, entered_by = 'manual' }) { - // Validate content - const validatedContent = validateContent(content); - - // Validate expiration - const validatedExpires = validateExpiresAt(expires_at); - - // Get current timestamp in seconds - const now = Math.floor(Date.now() / 1000); - - // Insert memory - const insertStmt = db.prepare(` - INSERT INTO memories (content, entered_by, created_at, expires_at) - VALUES (?, ?, ?, ?) 
- `); - - const result = insertStmt.run( - validatedContent, - entered_by, - now, - validatedExpires - ); - - const memoryId = result.lastInsertRowid; - - // Link tags if provided - if (tags) { - linkTags(db, memoryId, tags); - } - - return { - id: memoryId, - content: validatedContent, - created_at: now, - entered_by, - expires_at: validatedExpires - }; -} - -export { ValidationError }; diff --git a/shared/linked-dotfiles/opencode/llmemory/src/db/connection.js b/shared/linked-dotfiles/opencode/llmemory/src/db/connection.js deleted file mode 100644 index b749763..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/src/db/connection.js +++ /dev/null @@ -1,67 +0,0 @@ -// Database connection management -import Database from 'better-sqlite3'; -import { homedir } from 'os'; -import { join } from 'path'; -import { mkdirSync, existsSync } from 'fs'; -import { initSchema } from './schema.js'; - -const DEFAULT_DB_PATH = join(homedir(), '.config', 'opencode', 'memories.db'); - -let dbInstance = null; - -export function initDb(dbPath = DEFAULT_DB_PATH) { - if (dbInstance) { - return dbInstance; - } - - // Create directory if it doesn't exist - const dir = join(dbPath, '..'); - if (!existsSync(dir)) { - mkdirSync(dir, { recursive: true }); - } - - // Open database - dbInstance = new Database(dbPath); - - // Enable WAL mode for better concurrency - dbInstance.pragma('journal_mode = WAL'); - - // Initialize schema - initSchema(dbInstance); - - return dbInstance; -} - -export function getDb() { - if (!dbInstance) { - return initDb(); - } - return dbInstance; -} - -export function closeDb() { - if (dbInstance) { - dbInstance.close(); - dbInstance = null; - } -} - -export function openDatabase(dbPath = DEFAULT_DB_PATH) { - // For backwards compatibility with tests - const dir = join(dbPath, '..'); - if (!existsSync(dir)) { - mkdirSync(dir, { recursive: true }); - } - - const db = new Database(dbPath); - initSchema(db); - - return db; -} - -export function createMemoryDatabase() { - // For testing: in-memory database - const db = new Database(':memory:'); - initSchema(db); - return db; -} diff --git a/shared/linked-dotfiles/opencode/llmemory/src/db/schema.js b/shared/linked-dotfiles/opencode/llmemory/src/db/schema.js deleted file mode 100644 index 7ccee0b..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/src/db/schema.js +++ /dev/null @@ -1,86 +0,0 @@ -// Database schema initialization -import Database from 'better-sqlite3'; - -export function initSchema(db) { - // Enable WAL mode for better concurrency - db.pragma('journal_mode = WAL'); - db.pragma('synchronous = NORMAL'); - db.pragma('cache_size = -64000'); // 64MB cache - - // Create memories table - db.exec(` - CREATE TABLE IF NOT EXISTS memories ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - content TEXT NOT NULL CHECK(length(content) <= 10000), - created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')), - entered_by TEXT, - expires_at INTEGER, - CHECK(expires_at IS NULL OR expires_at > created_at) - ) - `); - - // Create tags table - db.exec(` - CREATE TABLE IF NOT EXISTS tags ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT NOT NULL UNIQUE COLLATE NOCASE, - created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')) - ) - `); - - // Create memory_tags junction table - db.exec(` - CREATE TABLE IF NOT EXISTS memory_tags ( - memory_id INTEGER NOT NULL, - tag_id INTEGER NOT NULL, - PRIMARY KEY (memory_id, tag_id), - FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE, - FOREIGN KEY (tag_id) REFERENCES tags(id) ON DELETE 
CASCADE - ) - `); - - // Create metadata table - db.exec(` - CREATE TABLE IF NOT EXISTS metadata ( - key TEXT PRIMARY KEY, - value TEXT NOT NULL - ) - `); - - // Create indexes - db.exec(` - CREATE INDEX IF NOT EXISTS idx_memories_created ON memories(created_at DESC) - `); - - db.exec(` - CREATE INDEX IF NOT EXISTS idx_memories_expires ON memories(expires_at) - WHERE expires_at IS NOT NULL - `); - - db.exec(` - CREATE INDEX IF NOT EXISTS idx_tags_name ON tags(name) - `); - - db.exec(` - CREATE INDEX IF NOT EXISTS idx_memory_tags_tag ON memory_tags(tag_id) - `); - - // Initialize metadata if needed - const metadataExists = db.prepare( - "SELECT COUNT(*) as count FROM metadata WHERE key = 'schema_version'" - ).get(); - - if (metadataExists.count === 0) { - db.prepare('INSERT INTO metadata (key, value) VALUES (?, ?)').run('schema_version', '1'); - db.prepare('INSERT INTO metadata (key, value) VALUES (?, ?)').run('created_at', Math.floor(Date.now() / 1000).toString()); - } -} - -export function getSchemaVersion(db) { - try { - const result = db.prepare('SELECT value FROM metadata WHERE key = ?').get('schema_version'); - return result ? parseInt(result.value) : 0; - } catch { - return 0; - } -} diff --git a/shared/linked-dotfiles/opencode/llmemory/src/utils/tags.js b/shared/linked-dotfiles/opencode/llmemory/src/utils/tags.js deleted file mode 100644 index c9365a2..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/src/utils/tags.js +++ /dev/null @@ -1,53 +0,0 @@ -// Utility functions for tag management -export function parseTags(tagString) { - if (!tagString || typeof tagString !== 'string') { - return []; - } - - return tagString - .split(',') - .map(tag => tag.trim().toLowerCase()) - .filter(tag => tag.length > 0) - .filter((tag, index, self) => self.indexOf(tag) === index); // Deduplicate -} - -export function normalizeTags(tags) { - if (Array.isArray(tags)) { - return tags.map(tag => tag.toLowerCase().trim()).filter(tag => tag.length > 0); - } - return parseTags(tags); -} - -export function getOrCreateTag(db, tagName) { - const normalized = tagName.toLowerCase().trim(); - - // Try to get existing tag - const existing = db.prepare('SELECT id FROM tags WHERE name = ?').get(normalized); - - if (existing) { - return existing.id; - } - - // Create new tag - const result = db.prepare('INSERT INTO tags (name) VALUES (?)').run(normalized); - return result.lastInsertRowid; -} - -export function linkTags(db, memoryId, tags) { - const tagList = normalizeTags(tags); - - if (tagList.length === 0) { - return; - } - - const linkStmt = db.prepare('INSERT INTO memory_tags (memory_id, tag_id) VALUES (?, ?)'); - - const linkAll = db.transaction((memoryId, tags) => { - for (const tag of tags) { - const tagId = getOrCreateTag(db, tag); - linkStmt.run(memoryId, tagId); - } - }); - - linkAll(memoryId, tagList); -} diff --git a/shared/linked-dotfiles/opencode/llmemory/src/utils/validation.js b/shared/linked-dotfiles/opencode/llmemory/src/utils/validation.js deleted file mode 100644 index 1596b87..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/src/utils/validation.js +++ /dev/null @@ -1,54 +0,0 @@ -// Validation utilities -export class ValidationError extends Error { - constructor(message) { - super(message); - this.name = 'ValidationError'; - } -} - -export function validateContent(content) { - if (!content || typeof content !== 'string') { - throw new ValidationError('Content is required and must be a string'); - } - - if (content.trim().length === 0) { - throw new ValidationError('Content cannot be 
empty'); - } - - if (content.length > 10000) { - throw new ValidationError('Content exceeds 10KB limit'); - } - - return content.trim(); -} - -export function validateExpiresAt(expiresAt) { - if (expiresAt === null || expiresAt === undefined) { - return null; - } - - let timestamp; - - if (typeof expiresAt === 'number') { - timestamp = expiresAt; - } else if (typeof expiresAt === 'string') { - // Try parsing as ISO date - const date = new Date(expiresAt); - if (isNaN(date.getTime())) { - throw new ValidationError('Invalid expiration date format'); - } - timestamp = Math.floor(date.getTime() / 1000); - } else if (expiresAt instanceof Date) { - timestamp = Math.floor(expiresAt.getTime() / 1000); - } else { - throw new ValidationError('Invalid expiration date type'); - } - - // Check if in the past - const now = Math.floor(Date.now() / 1000); - if (timestamp <= now) { - throw new ValidationError('Expiration date must be in the future'); - } - - return timestamp; -} diff --git a/shared/linked-dotfiles/opencode/llmemory/test/integration.test.js b/shared/linked-dotfiles/opencode/llmemory/test/integration.test.js deleted file mode 100644 index 932df2b..0000000 --- a/shared/linked-dotfiles/opencode/llmemory/test/integration.test.js +++ /dev/null @@ -1,969 +0,0 @@ -import { describe, test, expect, beforeEach, afterEach } from 'vitest'; -import Database from 'better-sqlite3'; -import { initSchema, getSchemaVersion } from '../src/db/schema.js'; -import { createMemoryDatabase } from '../src/db/connection.js'; -import { storeMemory } from '../src/commands/store.js'; -import { searchMemories } from '../src/commands/search.js'; -import { listMemories } from '../src/commands/list.js'; -import { pruneMemories } from '../src/commands/prune.js'; -import { deleteMemories } from '../src/commands/delete.js'; - -describe('Database Layer', () => { - let db; - - beforeEach(() => { - // Use in-memory database for speed - db = new Database(':memory:'); - }); - - afterEach(() => { - if (db) { - db.close(); - } - }); - - describe('Schema Initialization', () => { - test('creates memories table with correct schema', () => { - initSchema(db); - - const tables = db.prepare( - "SELECT name FROM sqlite_master WHERE type='table' AND name='memories'" - ).all(); - - expect(tables).toHaveLength(1); - expect(tables[0].name).toBe('memories'); - - // Check columns - const columns = db.prepare('PRAGMA table_info(memories)').all(); - const columnNames = columns.map(c => c.name); - - expect(columnNames).toContain('id'); - expect(columnNames).toContain('content'); - expect(columnNames).toContain('created_at'); - expect(columnNames).toContain('entered_by'); - expect(columnNames).toContain('expires_at'); - }); - - test('creates tags table with correct schema', () => { - initSchema(db); - - const tables = db.prepare( - "SELECT name FROM sqlite_master WHERE type='table' AND name='tags'" - ).all(); - - expect(tables).toHaveLength(1); - - const columns = db.prepare('PRAGMA table_info(tags)').all(); - const columnNames = columns.map(c => c.name); - - expect(columnNames).toContain('id'); - expect(columnNames).toContain('name'); - expect(columnNames).toContain('created_at'); - }); - - test('creates memory_tags junction table', () => { - initSchema(db); - - const tables = db.prepare( - "SELECT name FROM sqlite_master WHERE type='table' AND name='memory_tags'" - ).all(); - - expect(tables).toHaveLength(1); - - const columns = db.prepare('PRAGMA table_info(memory_tags)').all(); - const columnNames = columns.map(c => c.name); - - 
expect(columnNames).toContain('memory_id'); - expect(columnNames).toContain('tag_id'); - }); - - test('creates metadata table with schema_version', () => { - initSchema(db); - - const version = db.prepare( - "SELECT value FROM metadata WHERE key = 'schema_version'" - ).get(); - - expect(version).toBeDefined(); - expect(version.value).toBe('1'); - }); - - test('creates indexes on memories(created_at, expires_at)', () => { - initSchema(db); - - const indexes = db.prepare( - "SELECT name FROM sqlite_master WHERE type='index' AND tbl_name='memories'" - ).all(); - - const indexNames = indexes.map(i => i.name); - expect(indexNames).toContain('idx_memories_created'); - expect(indexNames).toContain('idx_memories_expires'); - }); - - test('creates indexes on tags(name) and memory_tags(tag_id)', () => { - initSchema(db); - - const tagIndexes = db.prepare( - "SELECT name FROM sqlite_master WHERE type='index' AND tbl_name='tags'" - ).all(); - expect(tagIndexes.some(i => i.name === 'idx_tags_name')).toBe(true); - - const junctionIndexes = db.prepare( - "SELECT name FROM sqlite_master WHERE type='index' AND tbl_name='memory_tags'" - ).all(); - expect(junctionIndexes.some(i => i.name === 'idx_memory_tags_tag')).toBe(true); - }); - - test('enables WAL mode for better concurrency', () => { - initSchema(db); - - const journalMode = db.pragma('journal_mode', { simple: true }); - // In-memory databases return 'memory' instead of 'wal' - // This is expected behavior for :memory: databases - expect(['wal', 'memory']).toContain(journalMode); - }); - }); - - describe('Connection Management', () => { - test('opens database connection', () => { - const testDb = createMemoryDatabase(); - expect(testDb).toBeDefined(); - - // Should be able to query - const result = testDb.prepare('SELECT 1 as test').get(); - expect(result.test).toBe(1); - - testDb.close(); - }); - - test('initializes schema on first run', () => { - const testDb = createMemoryDatabase(); - - // Check that tables exist - const tables = testDb.prepare( - "SELECT name FROM sqlite_master WHERE type='table'" - ).all(); - - const tableNames = tables.map(t => t.name); - expect(tableNames).toContain('memories'); - expect(tableNames).toContain('tags'); - expect(tableNames).toContain('memory_tags'); - expect(tableNames).toContain('metadata'); - - testDb.close(); - }); - - test('skips schema creation if already initialized', () => { - const testDb = new Database(':memory:'); - - // Initialize twice - initSchema(testDb); - initSchema(testDb); - - // Should still have correct schema version - const version = getSchemaVersion(testDb); - expect(version).toBe(1); - - testDb.close(); - }); - - test('sets pragmas (WAL, cache_size, synchronous)', () => { - const testDb = createMemoryDatabase(); - - const journalMode = testDb.pragma('journal_mode', { simple: true }); - // In-memory databases return 'memory' instead of 'wal' - expect(['wal', 'memory']).toContain(journalMode); - - const synchronous = testDb.pragma('synchronous', { simple: true }); - expect(synchronous).toBe(1); // NORMAL - - testDb.close(); - }); - - test('closes connection properly', () => { - const testDb = createMemoryDatabase(); - - expect(() => testDb.close()).not.toThrow(); - expect(testDb.open).toBe(false); - }); - }); -}); - -describe('Store Command', () => { - let db; - - beforeEach(() => { - db = createMemoryDatabase(); - }); - - afterEach(() => { - if (db) { - db.close(); - } - }); - - test('stores memory with tags', () => { - const result = storeMemory(db, { - content: 'Docker uses bridge 
networks by default', - tags: 'docker,networking', - entered_by: 'test' - }); - - expect(result.id).toBeDefined(); - expect(result.content).toBe('Docker uses bridge networks by default'); - - // Verify in database - const memory = db.prepare('SELECT * FROM memories WHERE id = ?').get(result.id); - expect(memory.content).toBe('Docker uses bridge networks by default'); - expect(memory.entered_by).toBe('test'); - - // Verify tags - const tags = db.prepare(` - SELECT t.name FROM tags t - JOIN memory_tags mt ON t.id = mt.tag_id - WHERE mt.memory_id = ? - ORDER BY t.name - `).all(result.id); - - expect(tags.map(t => t.name)).toEqual(['docker', 'networking']); - }); - - test('rejects content over 10KB', () => { - const longContent = 'x'.repeat(10001); - - expect(() => { - storeMemory(db, { content: longContent }); - }).toThrow('Content exceeds 10KB limit'); - }); - - test('normalizes tags to lowercase', () => { - storeMemory(db, { - content: 'Test memory', - tags: 'Docker,NETWORKING,KuberNeteS' - }); - - const tags = db.prepare('SELECT name FROM tags ORDER BY name').all(); - expect(tags.map(t => t.name)).toEqual(['docker', 'kubernetes', 'networking']); - }); - - test('handles missing tags gracefully', () => { - const result = storeMemory(db, { - content: 'Memory without tags' - }); - - expect(result.id).toBeDefined(); - - const tags = db.prepare(` - SELECT t.name FROM tags t - JOIN memory_tags mt ON t.id = mt.tag_id - WHERE mt.memory_id = ? - `).all(result.id); - - expect(tags).toHaveLength(0); - }); - - test('handles expiration date parsing', () => { - const futureDate = new Date(Date.now() + 86400000); // Tomorrow - - const result = storeMemory(db, { - content: 'Temporary memory', - expires_at: futureDate.toISOString() - }); - - const memory = db.prepare('SELECT expires_at FROM memories WHERE id = ?').get(result.id); - expect(memory.expires_at).toBeGreaterThan(Math.floor(Date.now() / 1000)); - }); - - test('deduplicates tags across memories', () => { - storeMemory(db, { content: 'Memory 1', tags: 'docker,networking' }); - storeMemory(db, { content: 'Memory 2', tags: 'docker,kubernetes' }); - - const tags = db.prepare('SELECT name FROM tags ORDER BY name').all(); - expect(tags.map(t => t.name)).toEqual(['docker', 'kubernetes', 'networking']); - }); - - test('rejects empty content', () => { - expect(() => { - storeMemory(db, { content: '' }); - }).toThrow(); // Just check that it throws, message might vary - }); - - test('rejects expiration in the past', () => { - const pastDate = new Date(Date.now() - 86400000); // Yesterday - - expect(() => { - storeMemory(db, { - content: 'Test', - expires_at: pastDate.toISOString() - }); - }).toThrow('Expiration date must be in the future'); - }); -}); - -describe('Search Command', () => { - let db; - - beforeEach(() => { - db = createMemoryDatabase(); - - // Seed with test data - storeMemory(db, { - content: 'Docker uses bridge networks by default', - tags: 'docker,networking' - }); - storeMemory(db, { - content: 'Kubernetes pods share network namespace', - tags: 'kubernetes,networking' - }); - storeMemory(db, { - content: 'PostgreSQL requires explicit vacuum', - tags: 'postgresql,database' - }); - }); - - afterEach(() => { - if (db) { - db.close(); - } - }); - - test('finds memories by content (case-insensitive)', () => { - const results = searchMemories(db, 'docker'); - - expect(results).toHaveLength(1); - expect(results[0].content).toContain('Docker'); - }); - - test('filters by tags (AND logic)', () => { - const results = searchMemories(db, '', { tags: 
['networking'] }); - - expect(results).toHaveLength(2); - const contents = results.map(r => r.content); - expect(contents).toContain('Docker uses bridge networks by default'); - expect(contents).toContain('Kubernetes pods share network namespace'); - }); - - test('filters by tags (OR logic with anyTag)', () => { - const results = searchMemories(db, '', { tags: ['docker', 'postgresql'], anyTag: true }); - - expect(results).toHaveLength(2); - const contents = results.map(r => r.content); - expect(contents).toContain('Docker uses bridge networks by default'); - expect(contents).toContain('PostgreSQL requires explicit vacuum'); - }); - - test('filters by date range (after/before)', () => { - const now = Date.now(); - - // Add a memory from "yesterday" - db.prepare('UPDATE memories SET created_at = ? WHERE id = 1').run( - Math.floor((now - 86400000) / 1000) - ); - - // Search for memories after yesterday - const results = searchMemories(db, '', { - after: Math.floor((now - 43200000) / 1000) // 12 hours ago - }); - - expect(results.length).toBeGreaterThanOrEqual(2); - }); - - test('filters by entered_by (agent)', () => { - storeMemory(db, { - content: 'Memory from investigate agent', - entered_by: 'investigate-agent' - }); - - const results = searchMemories(db, '', { entered_by: 'investigate-agent' }); - - expect(results).toHaveLength(1); - expect(results[0].entered_by).toBe('investigate-agent'); - }); - - test('excludes expired memories automatically', () => { - // Add expired memory (bypass CHECK constraint by inserting with created_at in past) - const pastTimestamp = Math.floor((Date.now() - 86400000) / 1000); // Yesterday - db.prepare('INSERT INTO memories (content, created_at, expires_at) VALUES (?, ?, ?)').run( - 'Expired memory', - pastTimestamp - 86400, // created_at even earlier - pastTimestamp // expires_at in past but after created_at - ); - - const results = searchMemories(db, 'expired'); - - expect(results).toHaveLength(0); - }); - - test('respects limit option', () => { - // Add more memories - for (let i = 0; i < 10; i++) { - storeMemory(db, { content: `Memory ${i}`, tags: 'test' }); - } - - const results = searchMemories(db, '', { limit: 5 }); - - expect(results).toHaveLength(5); - }); - - test('orders by created_at DESC', () => { - const results = searchMemories(db, ''); - - // Results should be in descending order (newest first) - for (let i = 1; i < results.length; i++) { - expect(results[i - 1].created_at).toBeGreaterThanOrEqual(results[i].created_at); - } - }); - - test('returns memory with tags joined', () => { - const results = searchMemories(db, 'docker'); - - expect(results).toHaveLength(1); - expect(results[0].tags).toBeTruthy(); - expect(results[0].tags).toContain('docker'); - expect(results[0].tags).toContain('networking'); - }); -}); - -describe('Integration Tests', () => { - let db; - - beforeEach(() => { - db = createMemoryDatabase(); - }); - - afterEach(() => { - if (db) { - db.close(); - } - }); - - describe('Full Workflow', () => { - test('store → search → retrieve workflow', () => { - // Store - const stored = storeMemory(db, { - content: 'Docker uses bridge networks', - tags: 'docker,networking' - }); - - expect(stored.id).toBeDefined(); - - // Search - const results = searchMemories(db, 'docker'); - expect(results).toHaveLength(1); - expect(results[0].content).toBe('Docker uses bridge networks'); - - // List - const all = listMemories(db); - expect(all).toHaveLength(1); - expect(all[0].tags).toContain('docker'); - }); - - test('store multiple → list → filter 
by tags', () => { - storeMemory(db, { content: 'Memory 1', tags: 'docker,networking' }); - storeMemory(db, { content: 'Memory 2', tags: 'kubernetes,networking' }); - storeMemory(db, { content: 'Memory 3', tags: 'postgresql,database' }); - - const all = listMemories(db); - expect(all).toHaveLength(3); - - const networkingOnly = listMemories(db, { tags: ['networking'] }); - expect(networkingOnly).toHaveLength(2); - }); - - test('store with expiration → prune → verify removed', () => { - // Store non-expired - storeMemory(db, { content: 'Active memory' }); - - // Store expired (manually set to past by updating both timestamps) - const expired = storeMemory(db, { content: 'Expired memory' }); - const pastCreated = Math.floor((Date.now() - 172800000) / 1000); // 2 days ago - const pastExpired = Math.floor((Date.now() - 86400000) / 1000); // 1 day ago - db.prepare('UPDATE memories SET created_at = ?, expires_at = ? WHERE id = ?').run( - pastCreated, - pastExpired, - expired.id - ); - - // Verify both exist - const before = listMemories(db); - expect(before).toHaveLength(1); // Expired is filtered out - - // Prune - const result = pruneMemories(db); - expect(result.count).toBe(1); - expect(result.deleted).toBe(true); - - // Verify expired removed - const all = db.prepare('SELECT * FROM memories').all(); - expect(all).toHaveLength(1); - expect(all[0].content).toBe('Active memory'); - }); - }); - - describe('Performance', () => { - test('searches 100 memories in <50ms (Phase 1 target)', () => { - // Insert 100 memories - for (let i = 0; i < 100; i++) { - storeMemory(db, { - content: `Memory ${i} about docker and networking`, - tags: i % 2 === 0 ? 'docker' : 'networking' - }); - } - - const start = Date.now(); - const results = searchMemories(db, 'docker'); - const duration = Date.now() - start; - - expect(results.length).toBeGreaterThan(0); - expect(duration).toBeLessThan(50); - }); - - test('stores 100 memories in <1 second', () => { - const start = Date.now(); - - for (let i = 0; i < 100; i++) { - storeMemory(db, { - content: `Memory ${i}`, - tags: 'test' - }); - } - - const duration = Date.now() - start; - expect(duration).toBeLessThan(1000); - }); - }); - - describe('Edge Cases', () => { - test('handles empty search query', () => { - storeMemory(db, { content: 'Test memory' }); - - const results = searchMemories(db, ''); - expect(results).toHaveLength(1); - }); - - test('handles no results found', () => { - storeMemory(db, { content: 'Test memory' }); - - const results = searchMemories(db, 'nonexistent'); - expect(results).toHaveLength(0); - }); - - test('handles special characters in content', () => { - const specialContent = 'Test with special chars: @#$%^&*()'; - storeMemory(db, { content: specialContent }); - - const results = searchMemories(db, 'special chars'); - expect(results).toHaveLength(1); - expect(results[0].content).toBe(specialContent); - }); - - test('handles unicode in content and tags', () => { - storeMemory(db, { - content: 'Unicode test: café, 日本語, emoji 🚀', - tags: 'café,日本語' - }); - - const results = searchMemories(db, 'café'); - expect(results).toHaveLength(1); - }); - - test('handles very long tag lists', () => { - const manyTags = Array.from({ length: 20 }, (_, i) => `tag${i}`).join(','); - - const stored = storeMemory(db, { - content: 'Memory with many tags', - tags: manyTags - }); - - const results = searchMemories(db, '', { tags: ['tag5'] }); - expect(results).toHaveLength(1); - }); - }); -}); -describe('Delete Command', () => { - let db; - - beforeEach(() => { - db 
= createMemoryDatabase(); - - // Seed with test data - storeMemory(db, { - content: 'Test memory 1', - tags: 'test,demo', - entered_by: 'test-agent' - }); - storeMemory(db, { - content: 'Test memory 2', - tags: 'test,sample', - entered_by: 'test-agent' - }); - storeMemory(db, { - content: 'Production memory', - tags: 'prod,important', - entered_by: 'prod-agent' - }); - storeMemory(db, { - content: 'Docker networking notes', - tags: 'docker,networking', - entered_by: 'manual' - }); - }); - - afterEach(() => { - if (db) { - db.close(); - } - }); - - describe('Delete by IDs', () => { - test('deletes memories by single ID', () => { - - - const result = deleteMemories(db, { ids: [1] }); - - expect(result.count).toBe(1); - expect(result.deleted).toBe(true); - - const remaining = db.prepare('SELECT * FROM memories').all(); - expect(remaining).toHaveLength(3); - expect(remaining.find(m => m.id === 1)).toBeUndefined(); - }); - - test('deletes memories by comma-separated IDs', () => { - - - const result = deleteMemories(db, { ids: [1, 2] }); - - expect(result.count).toBe(2); - expect(result.deleted).toBe(true); - - const remaining = db.prepare('SELECT * FROM memories').all(); - expect(remaining).toHaveLength(2); - }); - - test('handles non-existent IDs gracefully', () => { - - - const result = deleteMemories(db, { ids: [999, 1000] }); - - expect(result.count).toBe(0); - expect(result.deleted).toBe(true); - }); - - test('handles mix of valid and invalid IDs', () => { - - - const result = deleteMemories(db, { ids: [1, 999, 2] }); - - expect(result.count).toBe(2); - }); - }); - - describe('Delete by Tags', () => { - test('deletes memories by single tag', () => { - - - const result = deleteMemories(db, { tags: ['test'] }); - - expect(result.count).toBe(2); - expect(result.deleted).toBe(true); - - const remaining = db.prepare('SELECT * FROM memories').all(); - expect(remaining).toHaveLength(2); - expect(remaining.find(m => m.content.includes('Test'))).toBeUndefined(); - }); - - test('deletes memories by multiple tags (AND logic)', () => { - - - const result = deleteMemories(db, { tags: ['test', 'demo'] }); - - expect(result.count).toBe(1); - expect(result.deleted).toBe(true); - - const memory = db.prepare('SELECT * FROM memories WHERE id = 1').get(); - expect(memory).toBeUndefined(); - }); - - test('deletes memories by tags with OR logic (anyTag)', () => { - - - const result = deleteMemories(db, { - tags: ['demo', 'docker'], - anyTag: true - }); - - expect(result.count).toBe(2); // Memory 1 (demo) and Memory 4 (docker) - expect(result.deleted).toBe(true); - }); - - test('returns zero count when no tags match', () => { - - - const result = deleteMemories(db, { tags: ['nonexistent'] }); - - expect(result.count).toBe(0); - }); - }); - - describe('Delete by Content (LIKE query)', () => { - test('deletes memories matching LIKE query', () => { - - - const result = deleteMemories(db, { query: 'Test' }); - - expect(result.count).toBe(2); - expect(result.deleted).toBe(true); - }); - - test('case-insensitive LIKE matching', () => { - - - const result = deleteMemories(db, { query: 'DOCKER' }); - - expect(result.count).toBe(1); - expect(result.deleted).toBe(true); - }); - - test('handles partial matches', () => { - - - const result = deleteMemories(db, { query: 'memory' }); - - expect(result.count).toBe(3); // Matches "Test memory 1", "Test memory 2", "Production memory" - }); - }); - - describe('Delete by Date Range', () => { - test('deletes memories before date', () => { - - - const now = Date.now(); - - // Update 
memory 1 to be from yesterday - db.prepare('UPDATE memories SET created_at = ? WHERE id = 1').run( - Math.floor((now - 86400000) / 1000) - ); - - const result = deleteMemories(db, { - before: Math.floor(now / 1000) - }); - - expect(result.count).toBeGreaterThanOrEqual(1); - }); - - test('deletes memories after date', () => { - - - const yesterday = Math.floor((Date.now() - 86400000) / 1000); - - const result = deleteMemories(db, { - after: yesterday - }); - - expect(result.count).toBeGreaterThanOrEqual(3); - }); - - test('deletes memories in date range (after + before)', () => { - - - const now = Date.now(); - const yesterday = Math.floor((now - 86400000) / 1000); - const tomorrow = Math.floor((now + 86400000) / 1000); - - // Set specific timestamps - db.prepare('UPDATE memories SET created_at = ? WHERE id = 1').run(yesterday - 86400); - db.prepare('UPDATE memories SET created_at = ? WHERE id = 2').run(yesterday); - db.prepare('UPDATE memories SET created_at = ? WHERE id = 3').run(Math.floor(now / 1000)); - - const result = deleteMemories(db, { - after: yesterday - 3600, // After memory 1 - before: Math.floor(now / 1000) - 3600 // Before memory 3 - }); - - expect(result.count).toBe(1); // Only memory 2 - }); - }); - - describe('Delete by Agent', () => { - test('deletes memories by entered_by agent', () => { - - - const result = deleteMemories(db, { entered_by: 'test-agent' }); - - expect(result.count).toBe(2); - expect(result.deleted).toBe(true); - - const remaining = db.prepare('SELECT * FROM memories').all(); - expect(remaining.every(m => m.entered_by !== 'test-agent')).toBe(true); - }); - - test('combination: agent + tags', () => { - - - const result = deleteMemories(db, { - entered_by: 'test-agent', - tags: ['demo'] - }); - - expect(result.count).toBe(1); // Only memory 1 - }); - }); - - describe('Expired Memory Handling', () => { - test('excludes expired memories by default', () => { - - - // Create expired memory - const pastCreated = Math.floor((Date.now() - 172800000) / 1000); - const pastExpired = Math.floor((Date.now() - 86400000) / 1000); - db.prepare('INSERT INTO memories (content, created_at, expires_at, entered_by) VALUES (?, ?, ?, ?)').run( - 'Expired test memory', - pastCreated, - pastExpired, - 'test-agent' - ); - - const result = deleteMemories(db, { entered_by: 'test-agent' }); - - expect(result.count).toBe(2); // Only non-expired test-agent memories - }); - - test('includes expired with includeExpired flag', () => { - - - // Create expired memory - const pastCreated = Math.floor((Date.now() - 172800000) / 1000); - const pastExpired = Math.floor((Date.now() - 86400000) / 1000); - db.prepare('INSERT INTO memories (content, created_at, expires_at, entered_by) VALUES (?, ?, ?, ?)').run( - 'Expired test memory', - pastCreated, - pastExpired, - 'test-agent' - ); - - const result = deleteMemories(db, { - entered_by: 'test-agent', - includeExpired: true - }); - - expect(result.count).toBe(3); // All test-agent memories including expired - }); - - test('deletes only expired with expiredOnly flag', () => { - - - // Create expired memory - const pastCreated = Math.floor((Date.now() - 172800000) / 1000); - const pastExpired = Math.floor((Date.now() - 86400000) / 1000); - db.prepare('INSERT INTO memories (content, created_at, expires_at, entered_by) VALUES (?, ?, ?, ?)').run( - 'Expired memory', - pastCreated, - pastExpired, - 'test-agent' - ); - - const result = deleteMemories(db, { expiredOnly: true }); - - expect(result.count).toBe(1); - - // Verify non-expired still exist - 
const remaining = db.prepare('SELECT * FROM memories').all(); - expect(remaining).toHaveLength(4); - }); - }); - - describe('Dry Run Mode', () => { - test('dry-run returns memories without deleting', () => { - - - const result = deleteMemories(db, { - tags: ['test'], - dryRun: true - }); - - expect(result.count).toBe(2); - expect(result.deleted).toBe(false); - expect(result.memories).toHaveLength(2); - - // Verify nothing was deleted - const all = db.prepare('SELECT * FROM memories').all(); - expect(all).toHaveLength(4); - }); - - test('dry-run includes memory details', () => { - - - const result = deleteMemories(db, { - ids: [1], - dryRun: true - }); - - expect(result.memories[0]).toHaveProperty('id'); - expect(result.memories[0]).toHaveProperty('content'); - expect(result.memories[0]).toHaveProperty('created_at'); - }); - }); - - describe('Safety Features', () => { - test('requires at least one filter criterion', () => { - - - expect(() => { - deleteMemories(db, {}); - }).toThrow('At least one filter criterion is required'); - }); - - test('handles empty result set gracefully', () => { - - - const result = deleteMemories(db, { tags: ['nonexistent'] }); - - expect(result.count).toBe(0); - expect(result.deleted).toBe(true); - }); - }); - - describe('Combination Filters', () => { - test('combines tags + query', () => { - - - const result = deleteMemories(db, { - tags: ['test'], - query: 'memory 1' - }); - - expect(result.count).toBe(1); // Only "Test memory 1" - }); - - test('combines agent + date range', () => { - - - const now = Date.now(); - const yesterday = Math.floor((now - 86400000) / 1000); - - db.prepare('UPDATE memories SET created_at = ? WHERE id = 1').run(yesterday); - - const result = deleteMemories(db, { - entered_by: 'test-agent', - after: yesterday - 3600 - }); - - expect(result.count).toBeGreaterThanOrEqual(1); - }); - - test('combines all filter types', () => { - - - const result = deleteMemories(db, { - tags: ['test'], - query: 'memory', - entered_by: 'test-agent', - dryRun: true - }); - - expect(result.count).toBe(2); - expect(result.deleted).toBe(false); - }); - }); -}); diff --git a/shared/linked-dotfiles/opencode/opencode.jsonc b/shared/linked-dotfiles/opencode/opencode.jsonc deleted file mode 100644 index 61b540f..0000000 --- a/shared/linked-dotfiles/opencode/opencode.jsonc +++ /dev/null @@ -1,20 +0,0 @@ -{ - "$schema": "https://opencode.ai/config.json", - "theme": "catppuccin", - "model": "anthropic/claude-sonnet-4-5", - "autoupdate": false, - "plugin": [], // local plugins do not need to be added here - "agent": {}, - "mcp": { - "atlassian-mcp-server": { - "type": "local", - "command": [ - "npx", - "-y", - "mcp-remote", - "https://mcp.atlassian.com/v1/sse" - ], - "enabled": false - } - } -} \ No newline at end of file diff --git a/shared/linked-dotfiles/opencode/opencode.png b/shared/linked-dotfiles/opencode/opencode.png deleted file mode 100644 index d7f1c9edb63f8a6e7dd490600bdae6ca9d689349..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1269 zcmeAS@N?(olHy`uVBq!ia0vp^w}7~xg9%6q+XS6pU|`wf>EaktG3V`F-wc^lhGQRR zmTW({?W)g`HFa3VMC)MJ^y>@ZC|3#~E ztD{=~%PO|WU`I=ShN;b$mpYMZZS-_qwBufza6_37d+s?Ob-MknWtT}b=d04gjmquE zg=7tEEHdmaWSE>`ck61kTfe=uTPgbM^=IN@v*U8keSGq<^LpI8jO$ATuD*J@_`F@d z#fK-47suCDZOpB9lC6H%x^(HPhX<|)$8X6zRbLQ$z2?E&&G|OQb!%*so&J8h?ep?U z%H{5w2xksUC#=Zh^V*)TMVWPp7r$Q@y8T#Z{hAs4H`uIBZ;jeorFyPyV$wn5xCN2_ zr=4l4+G~@&i(~7n^QoVIDjsHheaO08hy7IXR0V_BmRYGrw*EbKVxN}lnYyWI)Un^hAAb(B3sSi66`uVeu|IIzq3Abk-nX)_Y{=eU^wqCV(Kf&*9 
z$Ic6-!vDjiiWAeGpPRwF=K36UFa5Q@{%yOz|NQ4|h6+V}{9o-V5)p9HHK%Xg>tiuHs>9X3v7K&QQoq2Y1fL5IO+RpV`I_l1_iKLNJ$B5ki22{2 z)w{mB7VYtm)aP8jeEHwk*C$CA)V@7-P5kGEHi4oRtZ2fCeremdSvv|przKU-nHu%> zdvH1r`x$FhOcF-?Wey!|1`=>qsDp7@!qtms$=u& zR)r=z9j(9f;>F_rEqQ(&7q|=U)?iP;z?6N&cj<29>UXx1Mv$az@ggih|NP8->(76# z;uq!4y)FFeQjY41+56VNKk;zmwqDMyA$vE)td`{K5#g>q_CD6<)~fwx`#1b}AKp~D z-_m_Sa8v1g{%j6KcY343;Fs_G@)=~VmrC~@v-eo?;_C?xqxZn#jKR~@&t;ucLK6UC Cpf$1p diff --git a/shared/linked-dotfiles/opencode/package-lock.json b/shared/linked-dotfiles/opencode/package-lock.json deleted file mode 100644 index 206220a..0000000 --- a/shared/linked-dotfiles/opencode/package-lock.json +++ /dev/null @@ -1,138 +0,0 @@ -{ - "name": "opencode", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "dependencies": { - "@opencode-ai/plugin": "0.15.2", - "gray-matter": "^4.0.3" - } - }, - "node_modules/@opencode-ai/plugin": { - "version": "0.15.2", - "dependencies": { - "@opencode-ai/sdk": "0.15.2", - "zod": "4.1.8" - } - }, - "node_modules/@opencode-ai/sdk": { - "version": "0.15.2" - }, - "node_modules/argparse": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", - "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", - "license": "MIT", - "dependencies": { - "sprintf-js": "~1.0.2" - } - }, - "node_modules/esprima": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", - "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", - "license": "BSD-2-Clause", - "bin": { - "esparse": "bin/esparse.js", - "esvalidate": "bin/esvalidate.js" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/extend-shallow": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", - "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==", - "license": "MIT", - "dependencies": { - "is-extendable": "^0.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/gray-matter": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz", - "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==", - "license": "MIT", - "dependencies": { - "js-yaml": "^3.13.1", - "kind-of": "^6.0.2", - "section-matter": "^1.0.0", - "strip-bom-string": "^1.0.0" - }, - "engines": { - "node": ">=6.0" - } - }, - "node_modules/is-extendable": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", - "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/js-yaml": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", - "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", - "license": "MIT", - "dependencies": { - "argparse": "^1.0.7", - "esprima": "^4.0.0" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/kind-of": { - "version": "6.0.3", - "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz", - "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", - "license": "MIT", - "engines": 
{ - "node": ">=0.10.0" - } - }, - "node_modules/section-matter": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz", - "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==", - "license": "MIT", - "dependencies": { - "extend-shallow": "^2.0.1", - "kind-of": "^6.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/sprintf-js": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", - "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", - "license": "BSD-3-Clause" - }, - "node_modules/strip-bom-string": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz", - "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/zod": { - "version": "4.1.8", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/colinhacks" - } - } - } -} diff --git a/shared/linked-dotfiles/opencode/package.json b/shared/linked-dotfiles/opencode/package.json deleted file mode 100644 index 3683e05..0000000 --- a/shared/linked-dotfiles/opencode/package.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "dependencies": { - "@opencode-ai/plugin": "0.15.2", - "gray-matter": "^4.0.3" - } -} diff --git a/shared/linked-dotfiles/opencode/plugin/file-proxy.js b/shared/linked-dotfiles/opencode/plugin/file-proxy.js deleted file mode 100644 index f339dca..0000000 --- a/shared/linked-dotfiles/opencode/plugin/file-proxy.js +++ /dev/null @@ -1,176 +0,0 @@ -import { tool } from "@opencode-ai/plugin"; -import { readFile, writeFile, mkdir } from "fs/promises"; -import { resolve, normalize, join, dirname } from "path"; -import { homedir } from "os"; - -const CONFIG_BASE = resolve(homedir(), ".config/opencode"); - -function resolvePath(filePath) { - if (!filePath.startsWith('/') && !filePath.startsWith('~')) { - filePath = join(CONFIG_BASE, filePath); - } - - if (filePath.startsWith('~/')) { - filePath = filePath.replace('~/', homedir() + '/'); - } - - const normalized = normalize(resolve(filePath)); - - if (!normalized.startsWith(CONFIG_BASE)) { - throw new Error( - `Access denied: Path must be within ~/.config/opencode\n` + - `Attempted: ${normalized}\n` + - `Use config_read/config_write/config_edit ONLY for opencode config files.` - ); - } - - return normalized; -} - -export const FileProxyPlugin = async () => { - return { - tool: { - config_read: tool({ - description: `Read files from OpenCode config directory (~/.config/opencode). - -**REQUIRED when reading config files from outside ~/.config/opencode directory.** - -Use this tool when: -- Reading agent definitions (agent/*.md) -- Reading skills (skills/*/SKILL.md) -- Reading workflows (OPTIMIZATION_WORKFLOW.md, etc.) 
-- Current working directory is NOT ~/.config/opencode - -Do NOT use if already in ~/.config/opencode - use standard 'read' tool instead.`, - - args: { - filePath: tool.schema.string() - .describe("Path to file (e.g., 'agent/optimize.md' or '~/.config/opencode/skills/skill-name/SKILL.md')") - }, - - async execute(args) { - try { - const validPath = resolvePath(args.filePath); - const content = await readFile(validPath, "utf-8"); - return content; - } catch (error) { - if (error.code === 'ENOENT') { - return `❌ File not found: ${args.filePath}\nCheck path and try again.`; - } - if (error.code === 'EACCES') { - return `❌ Permission denied: ${args.filePath}`; - } - if (error.message.includes('Access denied')) { - return `❌ ${error.message}`; - } - return `❌ Error reading file: ${error.message}`; - } - } - }), - - config_write: tool({ - description: `Write/create files in OpenCode config directory (~/.config/opencode). - -**REQUIRED when creating/writing config files from outside ~/.config/opencode directory.** - -Use this tool when: -- Creating new skills (skills/new-skill/SKILL.md) -- Creating new agent definitions -- Writing workflow documentation -- Current working directory is NOT ~/.config/opencode - -Common use case: Optimize agent creating skills or updating workflows from project directories. - -Do NOT use if already in ~/.config/opencode - use standard 'write' tool instead.`, - - args: { - filePath: tool.schema.string() - .describe("Path to file (e.g., 'skills/my-skill/SKILL.md')"), - content: tool.schema.string() - .describe("Complete file content to write") - }, - - async execute(args) { - try { - const validPath = resolvePath(args.filePath); - await mkdir(dirname(validPath), { recursive: true }); - await writeFile(validPath, args.content, "utf-8"); - return `✅ Successfully wrote to ${args.filePath}`; - } catch (error) { - if (error.code === 'EACCES') { - return `❌ Permission denied: ${args.filePath}`; - } - if (error.message.includes('Access denied')) { - return `❌ ${error.message}`; - } - return `❌ Error writing file: ${error.message}`; - } - } - }), - - config_edit: tool({ - description: `Edit existing files in OpenCode config directory (~/.config/opencode). - -**REQUIRED when editing config files from outside ~/.config/opencode directory.** - -Use this tool when: -- Updating agent definitions (adding sections to optimize.md) -- Enhancing existing skills -- Modifying workflow docs -- Current working directory is NOT ~/.config/opencode - -Operations: append (add to end), prepend (add to beginning), replace (find and replace text). 
- -Do NOT use if already in ~/.config/opencode - use standard 'edit' tool instead.`, - - args: { - filePath: tool.schema.string() - .describe("Path to file to edit"), - operation: tool.schema.enum(["append", "prepend", "replace"]) - .describe("Edit operation to perform"), - content: tool.schema.string() - .describe("Content to add or replacement text"), - searchPattern: tool.schema.string() - .optional() - .describe("Regex pattern to find (required for 'replace' operation)") - }, - - async execute(args) { - try { - const validPath = resolvePath(args.filePath); - let fileContent = await readFile(validPath, "utf-8"); - - switch (args.operation) { - case "append": - fileContent += "\n" + args.content; - break; - case "prepend": - fileContent = args.content + "\n" + fileContent; - break; - case "replace": - if (!args.searchPattern) { - throw new Error("searchPattern required for replace operation"); - } - fileContent = fileContent.replace(new RegExp(args.searchPattern, "g"), args.content); - break; - } - - await writeFile(validPath, fileContent, "utf-8"); - return `✅ Successfully edited ${args.filePath} (${args.operation})`; - } catch (error) { - if (error.code === 'ENOENT') { - return `❌ File not found: ${args.filePath}\nUse config_write to create new files.`; - } - if (error.code === 'EACCES') { - return `❌ Permission denied: ${args.filePath}`; - } - if (error.message.includes('Access denied')) { - return `❌ ${error.message}`; - } - return `❌ Error editing file: ${error.message}`; - } - } - }) - } - }; -}; diff --git a/shared/linked-dotfiles/opencode/plugin/llmemory.js b/shared/linked-dotfiles/opencode/plugin/llmemory.js deleted file mode 100644 index 7bb93c8..0000000 --- a/shared/linked-dotfiles/opencode/plugin/llmemory.js +++ /dev/null @@ -1,147 +0,0 @@ -/** - * LLMemory Plugin for OpenCode - * - * Provides a persistent memory/journal system for AI agents. - * Memories are stored in SQLite and searchable across sessions. - */ -import { tool } from "@opencode-ai/plugin"; -import { spawn } from "child_process"; -import { fileURLToPath } from 'url'; -import { dirname, join } from 'path'; - -const __filename = fileURLToPath(import.meta.url); -const __dirname = dirname(__filename); -const MEMORY_CLI = join(__dirname, '../llmemory/bin/llmemory'); - -function runMemoryCommand(args) { - return new Promise((resolve, reject) => { - const child = spawn('node', [MEMORY_CLI, ...args], { - env: { ...process.env } - }); - - let stdout = ''; - let stderr = ''; - - child.stdout.on('data', (data) => { - stdout += data.toString(); - }); - - child.stderr.on('data', (data) => { - stderr += data.toString(); - }); - - child.on('close', (code) => { - if (code !== 0) { - reject(new Error(stderr || `Command failed with code ${code}`)); - } else { - resolve(stdout); - } - }); - }); -} - -export const LLMemoryPlugin = async (ctx) => { - const tools = { - memory_store: tool({ - description: `Store a memory for future reference. Use this to remember important information across sessions. 
- -Examples: -- Store implementation decisions: "Decided to use JWT for auth instead of sessions" -- Record completed work: "Implemented user authentication with email/password" -- Save debugging insights: "Bug was caused by race condition in async handler" -- Document project context: "Client prefers Material-UI over Tailwind" - -Memories are searchable by content and tags.`, - args: { - content: tool.schema.string() - .describe("The memory content to store (required)"), - tags: tool.schema.string() - .optional() - .describe("Comma-separated tags for categorization (e.g., 'backend,auth,security')"), - expires: tool.schema.string() - .optional() - .describe("Optional expiration date (ISO format, e.g., '2026-12-31')"), - by: tool.schema.string() - .optional() - .describe("Agent/user identifier (defaults to 'agent')") - }, - async execute(args) { - const cmdArgs = ['store', args.content]; - if (args.tags) cmdArgs.push('--tags', args.tags); - if (args.expires) cmdArgs.push('--expires', args.expires); - if (args.by) cmdArgs.push('--by', args.by); - - try { - const result = await runMemoryCommand(cmdArgs); - return result; - } catch (error) { - return `Error storing memory: ${error.message}`; - } - } - }), - - memory_search: tool({ - description: `Search stored memories by content and/or tags. Returns relevant memories from past sessions. - -Use cases: -- Find past decisions: "authentication" -- Recall debugging insights: "race condition" -- Look up project context: "client preferences" -- Review completed work: "implemented" - -Supports filtering by tags, date ranges, and limiting results.`, - args: { - query: tool.schema.string() - .describe("Search query (case-insensitive substring match)"), - tags: tool.schema.string() - .optional() - .describe("Filter by tags (AND logic, comma-separated)"), - any_tag: tool.schema.string() - .optional() - .describe("Filter by tags (OR logic, comma-separated)"), - limit: tool.schema.number() - .optional() - .describe("Maximum results to return (default: 10)") - }, - async execute(args) { - const cmdArgs = ['search', args.query, '--json']; - if (args.tags) cmdArgs.push('--tags', args.tags); - if (args.any_tag) cmdArgs.push('--any-tag', args.any_tag); - if (args.limit) cmdArgs.push('--limit', String(args.limit)); - - try { - const result = await runMemoryCommand(cmdArgs); - return result; - } catch (error) { - return `Error searching memories: ${error.message}`; - } - } - }), - - memory_list: tool({ - description: `List recent memories, optionally filtered by tags. 
Useful for reviewing recent work or exploring stored context.`, - args: { - limit: tool.schema.number() - .optional() - .describe("Maximum results to return (default: 20)"), - tags: tool.schema.string() - .optional() - .describe("Filter by tags (comma-separated)") - }, - async execute(args) { - const cmdArgs = ['list', '--json']; - if (args.limit) cmdArgs.push('--limit', String(args.limit)); - if (args.tags) cmdArgs.push('--tags', args.tags); - - try { - const result = await runMemoryCommand(cmdArgs); - return result; - } catch (error) { - return `Error listing memories: ${error.message}`; - } - } - }) - }; - - return { tool: tools }; -}; diff --git a/shared/linked-dotfiles/opencode/plugin/skills.js b/shared/linked-dotfiles/opencode/plugin/skills.js deleted file mode 100644 index c31677e..0000000 --- a/shared/linked-dotfiles/opencode/plugin/skills.js +++ /dev/null @@ -1,185 +0,0 @@ -/** - * OpenCode Skills Plugin (Local Version) - * - * Implements Anthropic's Agent Skills Specification (v1.0) for OpenCode. - * - * Modified to: - * - Only scan ~/.config/opencode/skills/ directory - * - Provide minimal logging (one-line summary) - * - * Original: https://github.com/malhashemi/opencode-skills - */ -import { tool } from "@opencode-ai/plugin"; -import matter from "gray-matter"; -import { Glob } from "bun"; -import { join, dirname, basename } from "path"; -import { z } from "zod"; -import os from "os"; - -const SkillFrontmatterSchema = z.object({ - name: z.string() - .regex(/^[a-z0-9-]+$/, "Name must be lowercase alphanumeric with hyphens") - .min(1, "Name cannot be empty"), - description: z.string() - .min(20, "Description must be at least 20 characters for discoverability"), - license: z.string().optional(), - "allowed-tools": z.array(z.string()).optional(), - metadata: z.record(z.string()).optional() -}); - -async function parseSkill(skillPath, baseDir) { - try { - const content = await Bun.file(skillPath).text(); - const { data, content: markdown } = matter(content); - - let frontmatter; - try { - frontmatter = SkillFrontmatterSchema.parse(data); - } - catch (error) { - return null; - } - - const skillDir = basename(dirname(skillPath)); - if (frontmatter.name !== skillDir) { - return null; - } - - return { - name: frontmatter.name, - fullPath: dirname(skillPath), - description: frontmatter.description, - allowedTools: frontmatter["allowed-tools"], - metadata: frontmatter.metadata, - license: frontmatter.license, - content: markdown.trim(), - path: skillPath - }; - } - catch (error) { - return null; - } -} - -async function discoverSkills(basePath) { - const skills = []; - - try { - const glob = new Glob("**/SKILL.md"); - for await (const match of glob.scan({ - cwd: basePath, - absolute: true - })) { - const skill = await parseSkill(match, basePath); - if (skill) { - skills.push(skill); - } - } - } - catch (error) { - // Directory doesn't exist, return empty array - } - - return skills; -} - -export const SkillsPlugin = async (ctx) => { - const xdgConfigHome = process.env.XDG_CONFIG_HOME; - const configSkillsPath = xdgConfigHome - ? join(xdgConfigHome, "opencode/skills") - : join(os.homedir(), ".config/opencode/skills"); - - const skills = await discoverSkills(configSkillsPath); - - if (skills.length > 0) { - console.log(`Skills loaded: ${skills.map(s => s.name).join(", ")}`); - } - - // Build skill catalog for tool description - const skillCatalog = skills.length > 0 - ? 
skills.map(s => `- **${s.name}**: ${s.description}`).join('\n') - : 'No skills available.'; - - // Create single learn_skill tool - const tools = { - learn_skill: tool({ - description: `Load and execute a skill on demand. Skills provide specialized knowledge and workflows for specific tasks. - -Available skills: - -${skillCatalog} - -Use this tool when you need guidance on these specialized workflows.`, - args: { - skill_name: tool.schema.string() - .describe("The name of the skill to learn (e.g., 'do-job', 'reflect', 'go-pr-review', 'create-skill')") - }, - async execute(args, toolCtx) { - const skill = skills.find(s => s.name === args.skill_name); - - if (!skill) { - const availableSkills = skills.map(s => s.name).join(', '); - return `❌ Error: Skill '${args.skill_name}' not found. - -Available skills: ${availableSkills} - -Use one of the available skill names exactly as shown above.`; - } - - return `# ⚠️ SKILL EXECUTION INSTRUCTIONS ⚠️ - -**SKILL NAME:** ${skill.name} -**SKILL DIRECTORY:** ${skill.fullPath}/ - -## EXECUTION WORKFLOW: - -**STEP 1: PLAN THE WORK** -Before executing this skill, use the \`todowrite\` tool to create a todo list of the main tasks described in the skill content below. -- Parse the skill instructions carefully -- Identify the key tasks and steps required -- Create todos with status "pending" and appropriate priority levels -- This helps track progress and ensures nothing is missed - -**STEP 2: EXECUTE THE SKILL** -Follow the skill instructions below, marking todos as "in_progress" when starting a task and "completed" when done. -Use \`todowrite\` to update task statuses as you work through them. - -## PATH RESOLUTION RULES (READ CAREFULLY): - -All file paths mentioned below are relative to the SKILL DIRECTORY shown above. - -**Examples:** -- If the skill mentions \`scripts/init_skill.py\`, the full path is: \`${skill.fullPath}/scripts/init_skill.py\` -- If the skill mentions \`references/docs.md\`, the full path is: \`${skill.fullPath}/references/docs.md\` -- If the skill mentions \`assets/template.html\`, the full path is: \`${skill.fullPath}/assets/template.html\` - -**IMPORTANT:** Always prepend \`${skill.fullPath}/\` to any relative path mentioned in the skill content below. - ---- - -# SKILL CONTENT: - -${skill.content} - ---- - -**Remember:** -1. All relative paths in the skill content above are relative to: \`${skill.fullPath}/\` -2. 
Update your todo list as you progress through the skill tasks -`; - } - }) - }; - - return { tool: tools }; -}; - -export const SkillLogger = async () => { - return { - "tool.execute.before": async (input, output) => { - if (input.tool === "learn_skill") { - console.log(`Learning skill ${output}`) - } - }, - } -} diff --git a/shared/linked-dotfiles/opencode/plugin/swaync-notifications.js b/shared/linked-dotfiles/opencode/plugin/swaync-notifications.js deleted file mode 100644 index 13a3c2c..0000000 --- a/shared/linked-dotfiles/opencode/plugin/swaync-notifications.js +++ /dev/null @@ -1,130 +0,0 @@ -const detectWindowManager = () => { - if (process.env.NIRI_SOCKET) return 'niri'; - if (process.env.HYPRLAND_INSTANCE_SIGNATURE) return 'hyprland'; - if (process.env.SWAYSOCK) return 'sway'; - return null; -}; - -const findWindow = async ($, wm, pid) => { - try { - switch (wm) { - case 'niri': { - const output = await $`niri msg --json windows`.text(); - const windows = JSON.parse(output); - const window = windows.find(w => w.app_id?.toLowerCase().includes('opencode') || w.title?.toLowerCase().includes('opencode')); - return window ? { id: window.id.toString(), type: 'niri' } : null; - } - case 'hyprland': { - const output = await $`hyprctl clients -j`.text(); - const clients = JSON.parse(output); - const window = clients.find(c => c.pid === pid || c.title?.toLowerCase().includes('opencode')); - return window ? { id: window.address, type: 'hyprland' } : null; - } - case 'sway': { - const output = await $`swaymsg -t get_tree`.text(); - const tree = JSON.parse(output); - const findNode = (node) => { - if (node.pid === pid || node.name?.toLowerCase().includes('opencode')) { - return node; - } - if (node.nodes) { - for (const child of node.nodes) { - const found = findNode(child); - if (found) return found; - } - } - if (node.floating_nodes) { - for (const child of node.floating_nodes) { - const found = findNode(child); - if (found) return found; - } - } - return null; - }; - const window = findNode(tree); - return window ? { id: window.id.toString(), type: 'sway' } : null; - } - default: - return null; - } - } catch (error) { - console.error(`Failed to find window for ${wm}:`, error); - return null; - } -}; - -const focusWindow = async ($, windowInfo) => { - try { - switch (windowInfo.type) { - case 'niri': - await $`niri msg action focus-window --id ${windowInfo.id}`; - break; - case 'hyprland': - await $`hyprctl dispatch focuswindow address:${windowInfo.id}`; - break; - case 'sway': - await $`swaymsg [con_id=${windowInfo.id}] focus`; - break; - } - } catch (error) { - console.error(`Failed to focus window:`, error); - } -}; - -export const SwayNotificationCenter = async ({ project, client, $, directory, worktree }) => { - const wm = detectWindowManager(); - console.log(`SwayNC notification plugin initialized (WM: ${wm || 'unknown'})`); - - return { - event: async ({ event }) => { - if (event.type === "session.idle") { - const pid = process.pid; - const dir = directory || worktree || "unknown"; - const iconPath = `${process.env.HOME}/.config/opencode/opencode.png`; - - try { - const windowInfo = wm ? await findWindow($, wm, pid) : null; - - const notifyCmd = [ - "notify-send", - "-a", "OpenCode", - "-u", "normal", - "-i", iconPath, - "-h", `string:x-opencode-dir:${dir}`, - ...(windowInfo ? 
["-A", `focus=Focus Window`] : []), - "OpenCode Ready", - `Waiting for input\nDirectory: ${dir}` - ]; - - if (windowInfo) { - import("child_process").then(({ spawn }) => { - const child = spawn(notifyCmd[0], notifyCmd.slice(1), { - detached: true, - stdio: 'ignore' - }); - child.unref(); - - let output = ''; - if (child.stdout) { - child.stdout.on('data', (data) => { - output += data.toString(); - }); - child.on('close', () => { - if (output.trim() === "focus") { - focusWindow($, windowInfo).catch(() => {}); - } - }); - } - }).catch(() => {}); - } else { - $`${notifyCmd}`.catch(() => {}); - } - } catch (error) { - console.error("Notification error:", error); - - await $`notify-send -a "OpenCode" -u normal -i ${iconPath} "OpenCode Ready" "Waiting for input in ${dir}"`; - } - } - }, - }; -}; diff --git a/shared/linked-dotfiles/opencode/skills/create-skill/SKILL.md b/shared/linked-dotfiles/opencode/skills/create-skill/SKILL.md deleted file mode 100644 index 12cada8..0000000 --- a/shared/linked-dotfiles/opencode/skills/create-skill/SKILL.md +++ /dev/null @@ -1,869 +0,0 @@ ---- -name: create-skill -description: Use when creating new skills, editing existing skills, or planning skill architecture - provides comprehensive guidance on skill structure, discoverability, token efficiency, and best practices for writing skills that AI can find and use effectively ---- - -# Creating Skills - -## Overview - -Skills are reference guides for proven techniques, patterns, or tools. Good skills are concise, well-structured, discoverable, and help AI instances find and apply effective approaches. - -**Core principle:** Only add context AI doesn't already have. Challenge every token - assume Claude is smart and knows standard practices. - -## When to Create a Skill - -**Create when:** -- Technique wasn't intuitively obvious to you -- You'd reference this again across projects -- Pattern applies broadly (not project-specific) -- Others would benefit from this knowledge - -**Don't create for:** -- One-off solutions -- Standard practices well-documented elsewhere -- Project-specific conventions (put in CLAUDE.md instead) -- Obvious or trivial information - -## Skill Types - -### Technique -Concrete method with steps to follow - -**Examples:** condition-based-waiting, root-cause-tracing, defensive-programming - -**Test with:** Application scenarios, variation scenarios, missing information tests - -### Pattern -Way of thinking about problems - -**Examples:** flatten-with-flags, reducing-complexity, information-hiding - -**Test with:** Recognition scenarios, application scenarios, counter-examples - -### Reference -API docs, syntax guides, tool documentation - -**Examples:** API documentation, command references, library guides - -**Test with:** Retrieval scenarios, application scenarios, gap testing - -## Skill Structure Requirements - -### Directory Layout -``` -skill-name/ -├── SKILL.md # Required: Main skill file with frontmatter -├── scripts/ # Optional: Executable code -├── references/ # Optional: Documentation to load as needed -└── assets/ # Optional: Files used in output -``` - -### Naming Conventions -- **Directory name**: lowercase with hyphens only (e.g., `my-skill`) -- **Frontmatter name**: must exactly match directory name -- **Skill access**: Skills are loaded via the `learn_skill` tool with the skill name as an argument -- **Use gerund form (verb + -ing)**: `processing-pdfs`, `analyzing-data`, `creating-skills` -- **Avoid vague names**: "Helper", "Utils", "Tools" - -## Frontmatter Requirements - 
-### Required Fields -```yaml ---- -name: skill-name -description: Use when [specific triggers/symptoms] - [what it does and how it helps] ---- -``` - -### Constraints -- **Max 1024 characters total** for frontmatter -- **Only `name` and `description`** fields supported -- **Name**: letters, numbers, hyphens only (no parentheses, special chars) -- **Description target**: under 500 characters if possible - -### Writing Effective Descriptions - -**Critical for discovery:** AI reads description to decide which skills to load. - -**Format:** Start with "Use when..." to focus on triggering conditions - -**Include:** -- Concrete triggers, symptoms, and situations that signal this skill applies -- Describe the *problem* not *language-specific symptoms* (unless skill is tech-specific) -- Technology-agnostic triggers unless skill is technology-specific -- What the skill does and how it helps - -**Always write in third person** (injected into system prompt): - -**Good examples:** -```yaml -description: Use when tests have race conditions, timing dependencies, or pass/fail inconsistently - replaces arbitrary timeouts with condition polling for reliable async tests - -description: Use when using React Router and handling authentication redirects - provides patterns for protected routes and auth state management - -description: Extract text and tables from PDF files, fill forms, merge documents. Use when working with PDF files or when the user mentions PDFs, forms, or document extraction. -``` - -**Bad examples:** -```yaml -# Too abstract, no triggers -description: For async testing - -# First person -description: I can help you with async tests when they're flaky - -# Vague, no specifics -description: Helps with documents -``` - -## Core Principles - -### Concise is Key - -Context window is shared with everything else. Only add what AI doesn't already know. - -**Challenge each piece of information:** -- "Does Claude really need this explanation?" -- "Can I assume Claude knows this?" -- "Does this paragraph justify its token cost?" - -**Good (concise - ~50 tokens):** -````markdown -## Extract PDF text - -Use pdfplumber for text extraction: - -```python -import pdfplumber - -with pdfplumber.open("file.pdf") as pdf: - text = pdf.pages[0].extract_text() -``` -```` - -**Bad (verbose - ~150 tokens):** -```markdown -## Extract PDF text - -PDF (Portable Document Format) files are a common file format that contains text, images, and other content. To extract text from a PDF, you'll need to use a library. There are many libraries available for PDF processing, but we recommend pdfplumber because it's easy to use and handles most cases well. First, you'll need to install it using pip. Then you can use the code below... -``` - -### Set Appropriate Degrees of Freedom - -Match specificity to task fragility and variability. - -**Analogy:** Think of AI as a robot exploring a path: -- **Narrow bridge with cliffs**: Provide specific guardrails and exact instructions (low freedom) -- **Open field with no hazards**: Give general direction and trust AI to find best route (high freedom) - -**High freedom** (text-based instructions): - -Use when multiple approaches are valid, decisions depend on context, heuristics guide approach. - -```markdown -## Code review process - -1. Analyze the code structure and organization -2. Check for potential bugs or edge cases -3. Suggest improvements for readability and maintainability -4. 
Verify adherence to project conventions -``` - -**Medium freedom** (pseudocode or scripts with parameters): - -Use when a preferred pattern exists, some variation is acceptable, configuration affects behavior. - -````markdown -## Generate report - -Use this template and customize as needed: - -```python -def generate_report(data, format="markdown", include_charts=True): - # Process data - # Generate output in specified format - # Optionally include visualizations -``` -```` - -**Low freedom** (specific scripts, few or no parameters): - -Use when operations are fragile and error-prone, consistency is critical, specific sequence must be followed. - -````markdown -## Database migration - -Run exactly this script: - -```bash -python scripts/migrate.py --verify --backup -``` - -Do not modify the command or add additional flags. -```` - -## Content Structure - -### Recommended Template - -```markdown -# Skill Title - -Brief overview of the skill's purpose (1-2 sentences with core principle). - -## When to Use This Skill - -List specific symptoms and use cases: -- Use case 1 -- Use case 2 - -**When NOT to use:** -- Counter-example 1 -- Counter-example 2 - -## Core Pattern (for techniques/patterns) - -Before/after code comparison OR quick reference table for scanning - -## Quick Reference - -Table or bullets for common operations - -## Implementation - -Step-by-step guidance (inline for simple, link to files for complex) - -## Common Mistakes - -What goes wrong + fixes - -## Real-World Impact (optional) - -Concrete results showing effectiveness -``` - -### Progressive Disclosure - -SKILL.md serves as overview that points to detailed materials as needed. - -**Keep SKILL.md under 500 lines for optimal performance** - -**Pattern 1: High-level guide with references** - -````markdown ---- -name: pdf-processing -description: Extracts text and tables from PDF files, fills forms, merges documents. Use when working with PDF files or when the user mentions PDFs, forms, or document extraction. ---- - -# PDF Processing - -## Quick start - -Extract text with pdfplumber: -```python -import pdfplumber -with pdfplumber.open("file.pdf") as pdf: - text = pdf.pages[0].extract_text() -``` - -## Advanced features - -**Form filling**: See `references/forms.md` for complete guide -**API reference**: See `references/api.md` for all methods -**Examples**: See `references/examples.md` for common patterns -```` - -**Pattern 2: Domain-specific organization** - -Keep token usage low by organizing content so AI loads only relevant domains. - -``` -bigquery-skill/ -├── SKILL.md (overview and navigation) -└── references/ - ├── finance.md (revenue, billing metrics) - ├── sales.md (opportunities, pipeline) - ├── product.md (API usage, features) - └── marketing.md (campaigns, attribution) -``` - -**Pattern 3: Conditional details** - -Show basic content inline, link to advanced content: - -```markdown -# DOCX Processing - -## Creating documents - -Use docx-js for new documents. See `references/docx-js.md`. - -## Editing documents - -For simple edits, modify the XML directly. - -**For tracked changes**: See `references/redlining.md` -**For OOXML details**: See `references/ooxml.md` -``` - -**Avoid deeply nested references** - keep all reference files one level deep from SKILL.md. AI may partially read nested files, resulting in incomplete information. - -**Table of contents for long references** - Files >100 lines need TOC at top to enable previewing scope. - -## Skill Discovery Optimization - -Future AI needs to FIND your skill. 
Optimize for discovery. - -### Keyword Coverage - -Use words AI would search for: -- **Error messages**: "Hook timed out", "ENOTEMPTY", "race condition" -- **Symptoms**: "flaky", "hanging", "zombie", "pollution" -- **Synonyms**: "timeout/hang/freeze", "cleanup/teardown/afterEach" -- **Tools**: Actual commands, library names, file types - -### Descriptive Naming - -**Use active voice, verb-first (gerund form):** -- ✅ `creating-skills` not `skill-creation` -- ✅ `testing-async-code` not `async-test-helpers` -- ✅ `processing-pdfs` not `pdf-processor` - -### Token Efficiency - -**Target word counts:** -- Getting-started workflows: <150 words each -- Frequently-loaded skills: <200 words total -- Other skills: <500 words (still be concise) - -**Techniques:** - -**Move details to tool help:** -```bash -# ❌ BAD: Document all flags in SKILL.md -search-conversations supports --text, --both, --after DATE, --before DATE, --limit N - -# ✅ GOOD: Reference --help -search-conversations supports multiple modes and filters. Run --help for details. -``` - -**Use cross-references:** -```markdown -# ❌ BAD: Repeat workflow details -When searching, dispatch subagent with template... -[20 lines of repeated instructions] - -# ✅ GOOD: Reference other skill -Always use subagents (50-100x context savings). Use skill-name for workflow. -``` - -**Compress examples:** -```markdown -# ❌ BAD: Verbose (42 words) -your human partner: "How did we handle authentication errors in React Router before?" -You: I'll search past conversations for React Router authentication patterns. -[Dispatch subagent with search query: "React Router authentication error handling 401"] - -# ✅ GOOD: Minimal (20 words) -Partner: "How did we handle auth errors in React Router?" -You: Searching... -[Dispatch subagent → synthesis] -``` - -**Verification:** -```bash -wc -w skills/path/SKILL.md -``` - -### Cross-Referencing Other Skills - -Use skill name only, with explicit requirement markers: -- ✅ Good: `**REQUIRED:** Use skill-name-here` -- ✅ Good: `**REQUIRED BACKGROUND:** You MUST understand skill-name-here` -- ❌ Bad: `See skills/testing/test-driven-development` (unclear if required) - -**Why no @ links:** Force-loads files immediately, consuming context before needed. - -### Discovery Workflow - -How AI finds and uses your skill: - -1. **Encounters problem** ("tests are flaky") -2. **Searches descriptions** (keyword matching) -3. **Finds SKILL** (description matches symptoms) -4. **Scans overview** (is this relevant?) -5. **Reads patterns** (quick reference table) -6. **Loads example** (only when implementing) - -**Optimize for this flow** - put searchable terms early and often. - -## Workflows and Feedback Loops - -### Use Workflows for Complex Tasks - -Break complex operations into clear, sequential steps. Provide checklists AI can copy and check off. - -**Example 1: Research synthesis workflow** (no code): - -````markdown -## Research synthesis workflow - -Copy this checklist and track your progress: - -``` -Research Progress: -- [ ] Step 1: Read all source documents -- [ ] Step 2: Identify key themes -- [ ] Step 3: Cross-reference claims -- [ ] Step 4: Create structured summary -- [ ] Step 5: Verify citations -``` - -**Step 1: Read all source documents** - -Review each document in the `sources/` directory. Note the main arguments and supporting evidence. - -**Step 2: Identify key themes** - -Look for patterns across sources. What themes appear repeatedly? Where do sources agree or disagree? - -[Continue with detailed steps...] 
-```` - -**Example 2: PDF form filling workflow** (with code): - -````markdown -## PDF form filling workflow - -Copy this checklist and check off items as you complete them: - -``` -Task Progress: -- [ ] Step 1: Analyze the form (run analyze_form.py) -- [ ] Step 2: Create field mapping (edit fields.json) -- [ ] Step 3: Validate mapping (run validate_fields.py) -- [ ] Step 4: Fill the form (run fill_form.py) -- [ ] Step 5: Verify output (run verify_output.py) -``` - -**Step 1: Analyze the form** - -Run: `python scripts/analyze_form.py input.pdf` - -[Continue with detailed steps...] -```` - -### Implement Feedback Loops - -**Common pattern:** Run validator → fix errors → repeat - -**Example: Document editing process** - -```markdown -## Document editing process - -1. Make your edits to `word/document.xml` -2. **Validate immediately**: `python ooxml/scripts/validate.py unpacked_dir/` -3. If validation fails: - - Review the error message carefully - - Fix the issues in the XML - - Run validation again -4. **Only proceed when validation passes** -5. Rebuild: `python ooxml/scripts/pack.py unpacked_dir/ output.docx` -6. Test the output document -``` - -## Code Examples - -**One excellent example beats many mediocre ones** - -Choose most relevant language: -- Testing techniques → TypeScript/JavaScript -- System debugging → Shell/Python -- Data processing → Python - -**Good example characteristics:** -- Complete and runnable -- Well-commented explaining WHY -- From real scenario -- Shows pattern clearly -- Ready to adapt (not generic template) - -**Don't:** -- Implement in 5+ languages -- Create fill-in-the-blank templates -- Write contrived examples - -## Common Patterns - -### Template Pattern - -Provide templates for output format. Match strictness to needs. - -**For strict requirements:** - -````markdown -## Report structure - -ALWAYS use this exact template structure: - -```markdown -# [Analysis Title] - -## Executive summary -[One-paragraph overview of key findings] - -## Key findings -- Finding 1 with supporting data -- Finding 2 with supporting data - -## Recommendations -1. Specific actionable recommendation -2. Specific actionable recommendation -``` -```` - -**For flexible guidance:** - -````markdown -## Report structure - -Here is a sensible default format, but use your best judgment: - -```markdown -# [Analysis Title] - -## Executive summary -[Overview] - -## Key findings -[Adapt sections based on what you discover] -``` - -Adjust sections as needed for the specific analysis type. -```` - -### Examples Pattern - -For skills where output quality depends on seeing examples: - -````markdown -## Commit message format - -Generate commit messages following these examples: - -**Example 1:** -Input: Added user authentication with JWT tokens -Output: -``` -feat(auth): implement JWT-based authentication - -Add login endpoint and token validation middleware -``` - -**Example 2:** -Input: Fixed bug where dates displayed incorrectly in reports -Output: -``` -fix(reports): correct date formatting in timezone conversion - -Use UTC timestamps consistently across report generation -``` - -Follow this style: type(scope): brief description, then detailed explanation. -```` - -### Conditional Workflow Pattern - -Guide through decision points: - -```markdown -## Document modification workflow - -1. Determine the modification type: - - **Creating new content?** → Follow "Creation workflow" below - **Editing existing content?** → Follow "Editing workflow" below - -2. 
Creation workflow: - - Use docx-js library - - Build document from scratch - - Export to .docx format - -3. Editing workflow: - - Unpack existing document - - Modify XML directly - - Validate after each change - - Repack when complete -``` - -## Flowchart Usage - -**Use flowcharts ONLY for:** -- Non-obvious decision points -- Process loops where you might stop too early -- "When to use A vs B" decisions - -**Never use flowcharts for:** -- Reference material → Use tables, lists -- Code examples → Use markdown blocks -- Linear instructions → Use numbered lists - -See `references/graphviz-conventions.dot` for graphviz style rules. - -## Content Guidelines - -### Avoid Time-Sensitive Information - -Don't include information that will become outdated. - -**Bad (time-sensitive):** -```markdown -If you're doing this before August 2025, use the old API. -After August 2025, use the new API. -``` - -**Good (old patterns section):** -```markdown -## Current method - -Use the v2 API endpoint: `api.example.com/v2/messages` - -## Old patterns - -
-Legacy v1 API (deprecated 2025-08) - -The v1 API used: `api.example.com/v1/messages` - -This endpoint is no longer supported. -
-``` - -### Use Consistent Terminology - -Choose one term and use it throughout: - -**Good - Consistent:** -- Always "API endpoint" -- Always "field" -- Always "extract" - -**Bad - Inconsistent:** -- Mix "API endpoint", "URL", "API route", "path" -- Mix "field", "box", "element", "control" - -## File Organization - -### Self-Contained Skill -``` -defense-in-depth/ - SKILL.md # Everything inline -``` -When: All content fits, no heavy reference needed - -### Skill with Reusable Tool -``` -condition-based-waiting/ - SKILL.md # Overview + patterns - example.ts # Working helpers to adapt -``` -When: Tool is reusable code, not just narrative - -### Skill with Heavy Reference -``` -pptx/ - SKILL.md # Overview + workflows - references/ - pptxgenjs.md # 600 lines API reference - ooxml.md # 500 lines XML structure - scripts/ # Executable tools -``` -When: Reference material too large for inline - -## Anti-Patterns - -### ❌ Narrative Example -"In session 2025-10-03, we found empty projectDir caused..." - -**Why bad:** Too specific, not reusable - -### ❌ Multi-Language Dilution -example-js.js, example-py.py, example-go.go - -**Why bad:** Mediocre quality, maintenance burden - -### ❌ Code in Flowcharts -```dot -step1 [label="import fs"]; -step2 [label="read file"]; -``` - -**Why bad:** Can't copy-paste, hard to read - -### ❌ Generic Labels -helper1, helper2, step3, pattern4 - -**Why bad:** Labels should have semantic meaning - -## Evaluation and Iteration - -### Build Evaluations First - -Create evaluations BEFORE writing extensive documentation. - -**Evaluation-driven development:** - -1. **Identify gaps**: Run tasks without skill, document failures -2. **Create evaluations**: Build 3+ scenarios testing these gaps -3. **Establish baseline**: Measure performance without skill -4. **Write minimal instructions**: Create just enough to pass evaluations -5. **Iterate**: Execute evaluations, compare baseline, refine - -### Develop Skills Iteratively - -**Creating new skill:** - -1. **Complete task without skill**: Work through problem, notice what context you repeatedly provide -2. **Identify reusable pattern**: What context would be useful for similar tasks? -3. **Ask AI to create skill**: "Create a skill that captures this pattern we just used" -4. **Review for conciseness**: Challenge unnecessary explanations -5. **Improve information architecture**: Organize content effectively -6. **Test on similar tasks**: Use skill with fresh AI instance -7. **Iterate based on observation**: Refine based on what worked/didn't - -**Iterating on existing skill:** - -1. **Use skill in real workflows**: Give AI actual tasks -2. **Observe behavior**: Note struggles, successes, unexpected choices -3. **Request improvements**: Share observations with AI helper -4. **Review suggestions**: Consider reorganization, stronger language, restructuring -5. **Apply and test**: Update skill, test again -6. **Repeat based on usage**: Continue observe → refine cycle - -## Creating a New Skill - -### Step 1: Choose Location -- **Project-specific**: `.opencode/skills/skill-name/` -- **Global**: `~/.opencode/skills/skill-name/` - -### Step 2: Create Directory Structure -```bash -mkdir -p .opencode/skills/skill-name -mkdir -p .opencode/skills/skill-name/references # if needed -mkdir -p .opencode/skills/skill-name/scripts # if needed -``` - -### Step 3: Create SKILL.md with Frontmatter - -Follow requirements in Frontmatter Requirements section above. 
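
For illustration only (reusing the `processing-pdfs` sample mentioned earlier; the exact wording here is an assumption, not a required template), a freshly created SKILL.md could begin:

```yaml
---
# Lives at .opencode/skills/processing-pdfs/SKILL.md
# name must exactly match the directory name: lowercase letters, numbers, hyphens
name: processing-pdfs
# Third person, trigger-first, ideally under 500 characters
description: Use when extracting text or tables from PDF files, filling forms, or merging documents - provides pdfplumber-based extraction patterns and form-filling workflows
---
```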
- -### Step 4: Write Skill Content - -Structure content following Content Structure section above. - -### Step 5: Add Supporting Files - -Organize by type: -- `scripts/`: Executable code the skill might run -- `references/`: Documentation to reference -- `assets/`: Templates, configs, or output files - -### Step 6: Validate - -Check that: -- Directory name matches frontmatter `name` field -- Description is at least 20 characters -- Name uses only lowercase letters, numbers, and hyphens -- YAML frontmatter is valid -- Supporting file paths are relative, not absolute -- Word count appropriate for skill type - -**One Shot test** - -Insert the `skill_name` to verify the skill loads. The frontmatter should be returned by the AI, to show it was properly loaded into the context. - -```bash -opencode run "Use learn_skill with skill_name='' - load skill and give the frontmatter as the only output and abort, do not give any other output, this is a single run for testing." -``` - -This is the $skill_check_prompt - -### Step 7: Restart OpenCode - -Skills are loaded at startup. Restart OpenCode to register your new skill (the skill catalog in the `learn_skill` tool description will be updated). - -## Path Resolution - -When referencing files in SKILL.md, use relative paths: - -```markdown -Read the API docs in `references/api.md` -Run `scripts/deploy.sh` for deployment -``` - -The Agent will resolve these relative to the skill directory automatically. - -## Skill Creation Checklist - -**Planning:** -- [ ] Identified gaps or patterns worth capturing -- [ ] Determined skill type (Technique, Pattern, or Reference) -- [ ] Created evaluation scenarios -- [ ] Established baseline without skill - -**Structure:** -- [ ] Directory created in correct location -- [ ] Directory name is lowercase with hyphens -- [ ] Name uses gerund form (verb + -ing) if applicable -- [ ] SKILL.md file created -- [ ] Frontmatter includes required fields (name, description) -- [ ] Name in frontmatter matches directory name exactly -- [ ] Description starts with "Use when..." 
and includes triggers -- [ ] Description written in third person -- [ ] Description under 500 characters - -**Content:** -- [ ] Overview with core principle (1-2 sentences) -- [ ] "When to Use" section with symptoms and counter-examples -- [ ] Quick reference table or bullets -- [ ] Clear, actionable steps -- [ ] Common mistakes section -- [ ] One excellent code example (not multi-language) -- [ ] Keywords throughout for search -- [ ] Consistent terminology -- [ ] No time-sensitive information -- [ ] Appropriate degree of freedom - -**Progressive Disclosure:** -- [ ] SKILL.md under 500 lines -- [ ] Supporting files in subdirectories if needed -- [ ] References one level deep (not nested) -- [ ] Table of contents for files >100 lines -- [ ] File references use relative paths - -**Token Efficiency:** -- [ ] Challenged every paragraph for necessity -- [ ] Word count appropriate for skill type -- [ ] Compressed examples where possible -- [ ] Cross-references instead of repetition -- [ ] No generic or obvious explanations - -**Testing:** -- [ ] Skill appears in `opencode run "$skill_check_prompt"` output -- [ ] Evaluations pass with skill present -- [ ] Tested on similar tasks with fresh AI instance -- [ ] Observed and refined based on usage -- [ ] Skill appears in `learn_skill` tool's skill catalog - -**Deployment:** -- [ ] OpenCode restarted to load new skill -- [ ] Verified skill is discoverable via one-shot test with `learn_skill` -- [ ] Documented in project if applicable - -## Reference Files - -- `references/graphviz-conventions.dot`: Flowchart style guide and conventions -- `references/persuasion-principles.md`: Psychology for effective skill design diff --git a/shared/linked-dotfiles/opencode/skills/create-skill/references/graphviz-conventions.dot b/shared/linked-dotfiles/opencode/skills/create-skill/references/graphviz-conventions.dot deleted file mode 100644 index f02d8d9..0000000 --- a/shared/linked-dotfiles/opencode/skills/create-skill/references/graphviz-conventions.dot +++ /dev/null @@ -1,172 +0,0 @@ -digraph STYLE_GUIDE { - // The style guide for our process DSL, written in the DSL itself - - // Node type examples with their shapes - subgraph cluster_node_types { - label="NODE TYPES AND SHAPES"; - - // Questions are diamonds - "Is this a question?" [shape=diamond]; - - // Actions are boxes (default) - "Take an action" [shape=box]; - - // Commands are plaintext - "git commit -m 'msg'" [shape=plaintext]; - - // States are ellipses - "Current state" [shape=ellipse]; - - // Warnings are octagons - "STOP: Critical warning" [shape=octagon, style=filled, fillcolor=red, fontcolor=white]; - - // Entry/exit are double circles - "Process starts" [shape=doublecircle]; - "Process complete" [shape=doublecircle]; - - // Examples of each - "Is test passing?" [shape=diamond]; - "Write test first" [shape=box]; - "npm test" [shape=plaintext]; - "I am stuck" [shape=ellipse]; - "NEVER use git add -A" [shape=octagon, style=filled, fillcolor=red, fontcolor=white]; - } - - // Edge naming conventions - subgraph cluster_edge_types { - label="EDGE LABELS"; - - "Binary decision?" [shape=diamond]; - "Yes path" [shape=box]; - "No path" [shape=box]; - - "Binary decision?" -> "Yes path" [label="yes"]; - "Binary decision?" -> "No path" [label="no"]; - - "Multiple choice?" [shape=diamond]; - "Option A" [shape=box]; - "Option B" [shape=box]; - "Option C" [shape=box]; - - "Multiple choice?" -> "Option A" [label="condition A"]; - "Multiple choice?" -> "Option B" [label="condition B"]; - "Multiple choice?" 
-> "Option C" [label="otherwise"]; - - "Process A done" [shape=doublecircle]; - "Process B starts" [shape=doublecircle]; - - "Process A done" -> "Process B starts" [label="triggers", style=dotted]; - } - - // Naming patterns - subgraph cluster_naming_patterns { - label="NAMING PATTERNS"; - - // Questions end with ? - "Should I do X?"; - "Can this be Y?"; - "Is Z true?"; - "Have I done W?"; - - // Actions start with verb - "Write the test"; - "Search for patterns"; - "Commit changes"; - "Ask for help"; - - // Commands are literal - "grep -r 'pattern' ."; - "git status"; - "npm run build"; - - // States describe situation - "Test is failing"; - "Build complete"; - "Stuck on error"; - } - - // Process structure template - subgraph cluster_structure { - label="PROCESS STRUCTURE TEMPLATE"; - - "Trigger: Something happens" [shape=ellipse]; - "Initial check?" [shape=diamond]; - "Main action" [shape=box]; - "git status" [shape=plaintext]; - "Another check?" [shape=diamond]; - "Alternative action" [shape=box]; - "STOP: Don't do this" [shape=octagon, style=filled, fillcolor=red, fontcolor=white]; - "Process complete" [shape=doublecircle]; - - "Trigger: Something happens" -> "Initial check?"; - "Initial check?" -> "Main action" [label="yes"]; - "Initial check?" -> "Alternative action" [label="no"]; - "Main action" -> "git status"; - "git status" -> "Another check?"; - "Another check?" -> "Process complete" [label="ok"]; - "Another check?" -> "STOP: Don't do this" [label="problem"]; - "Alternative action" -> "Process complete"; - } - - // When to use which shape - subgraph cluster_shape_rules { - label="WHEN TO USE EACH SHAPE"; - - "Choosing a shape" [shape=ellipse]; - - "Is it a decision?" [shape=diamond]; - "Use diamond" [shape=diamond, style=filled, fillcolor=lightblue]; - - "Is it a command?" [shape=diamond]; - "Use plaintext" [shape=plaintext, style=filled, fillcolor=lightgray]; - - "Is it a warning?" [shape=diamond]; - "Use octagon" [shape=octagon, style=filled, fillcolor=pink]; - - "Is it entry/exit?" [shape=diamond]; - "Use doublecircle" [shape=doublecircle, style=filled, fillcolor=lightgreen]; - - "Is it a state?" [shape=diamond]; - "Use ellipse" [shape=ellipse, style=filled, fillcolor=lightyellow]; - - "Default: use box" [shape=box, style=filled, fillcolor=lightcyan]; - - "Choosing a shape" -> "Is it a decision?"; - "Is it a decision?" -> "Use diamond" [label="yes"]; - "Is it a decision?" -> "Is it a command?" [label="no"]; - "Is it a command?" -> "Use plaintext" [label="yes"]; - "Is it a command?" -> "Is it a warning?" [label="no"]; - "Is it a warning?" -> "Use octagon" [label="yes"]; - "Is it a warning?" -> "Is it entry/exit?" [label="no"]; - "Is it entry/exit?" -> "Use doublecircle" [label="yes"]; - "Is it entry/exit?" -> "Is it a state?" [label="no"]; - "Is it a state?" -> "Use ellipse" [label="yes"]; - "Is it a state?" -> "Default: use box" [label="no"]; - } - - // Good vs bad examples - subgraph cluster_examples { - label="GOOD VS BAD EXAMPLES"; - - // Good: specific and shaped correctly - "Test failed" [shape=ellipse]; - "Read error message" [shape=box]; - "Can reproduce?" [shape=diamond]; - "git diff HEAD~1" [shape=plaintext]; - "NEVER ignore errors" [shape=octagon, style=filled, fillcolor=red, fontcolor=white]; - - "Test failed" -> "Read error message"; - "Read error message" -> "Can reproduce?"; - "Can reproduce?" 
-> "git diff HEAD~1" [label="yes"]; - - // Bad: vague and wrong shapes - bad_1 [label="Something wrong", shape=box]; // Should be ellipse (state) - bad_2 [label="Fix it", shape=box]; // Too vague - bad_3 [label="Check", shape=box]; // Should be diamond - bad_4 [label="Run command", shape=box]; // Should be plaintext with actual command - - bad_1 -> bad_2; - bad_2 -> bad_3; - bad_3 -> bad_4; - } -} diff --git a/shared/linked-dotfiles/opencode/skills/create-skill/references/persuasion-principles.md b/shared/linked-dotfiles/opencode/skills/create-skill/references/persuasion-principles.md deleted file mode 100644 index 5950d95..0000000 --- a/shared/linked-dotfiles/opencode/skills/create-skill/references/persuasion-principles.md +++ /dev/null @@ -1,187 +0,0 @@ -# Persuasion Principles for Skill Design - -## Overview - -AI models respond to the same persuasion principles as humans. Understanding this psychology helps you design more effective skills - not to manipulate, but to ensure critical practices are followed even under pressure. - -**Research foundation:** Meincke et al. (2025) tested 7 persuasion principles with N=28,000 AI conversations. Persuasion techniques more than doubled compliance rates (33% → 72%, p < .001). - -## The Seven Principles - -### 1. Authority -**What it is:** Deference to expertise, credentials, or official sources. - -**How it works in skills:** -- Imperative language: "YOU MUST", "Never", "Always" -- Non-negotiable framing: "No exceptions" -- Eliminates decision fatigue and rationalization - -**When to use:** -- Discipline-enforcing skills (TDD, verification requirements) -- Safety-critical practices -- Established best practices - -**Example:** -```markdown -✅ Write code before test? Delete it. Start over. No exceptions. -❌ Consider writing tests first when feasible. -``` - -### 2. Commitment -**What it is:** Consistency with prior actions, statements, or public declarations. - -**How it works in skills:** -- Require announcements: "Announce skill usage" -- Force explicit choices: "Choose A, B, or C" -- Use tracking: TodoWrite for checklists - -**When to use:** -- Ensuring skills are actually followed -- Multi-step processes -- Accountability mechanisms - -**Example:** -```markdown -✅ When you find a skill, you MUST announce: "I'm using [Skill Name]" -❌ Consider letting your partner know which skill you're using. -``` - -### 3. Scarcity -**What it is:** Urgency from time limits or limited availability. - -**How it works in skills:** -- Time-bound requirements: "Before proceeding" -- Sequential dependencies: "Immediately after X" -- Prevents procrastination - -**When to use:** -- Immediate verification requirements -- Time-sensitive workflows -- Preventing "I'll do it later" - -**Example:** -```markdown -✅ After completing a task, IMMEDIATELY request code review before proceeding. -❌ You can review code when convenient. -``` - -### 4. Social Proof -**What it is:** Conformity to what others do or what's considered normal. - -**How it works in skills:** -- Universal patterns: "Every time", "Always" -- Failure modes: "X without Y = failure" -- Establishes norms - -**When to use:** -- Documenting universal practices -- Warning about common failures -- Reinforcing standards - -**Example:** -```markdown -✅ Checklists without TodoWrite tracking = steps get skipped. Every time. -❌ Some people find TodoWrite helpful for checklists. -``` - -### 5. Unity -**What it is:** Shared identity, "we-ness", in-group belonging. 
- -**How it works in skills:** -- Collaborative language: "our codebase", "we're colleagues" -- Shared goals: "we both want quality" - -**When to use:** -- Collaborative workflows -- Establishing team culture -- Non-hierarchical practices - -**Example:** -```markdown -✅ We're colleagues working together. I need your honest technical judgment. -❌ You should probably tell me if I'm wrong. -``` - -### 6. Reciprocity -**What it is:** Obligation to return benefits received. - -**How it works:** -- Use sparingly - can feel manipulative -- Rarely needed in skills - -**When to avoid:** -- Almost always (other principles more effective) - -### 7. Liking -**What it is:** Preference for cooperating with those we like. - -**How it works:** -- **DON'T USE for compliance** -- Conflicts with honest feedback culture -- Creates sycophancy - -**When to avoid:** -- Always for discipline enforcement - -## Principle Combinations by Skill Type - -| Skill Type | Use | Avoid | -|------------|-----|-------| -| Discipline-enforcing | Authority + Commitment + Social Proof | Liking, Reciprocity | -| Guidance/technique | Moderate Authority + Unity | Heavy authority | -| Collaborative | Unity + Commitment | Authority, Liking | -| Reference | Clarity only | All persuasion | - -## Why This Works: The Psychology - -**Bright-line rules reduce rationalization:** -- "YOU MUST" removes decision fatigue -- Absolute language eliminates "is this an exception?" questions -- Explicit anti-rationalization counters close specific loopholes - -**Implementation intentions create automatic behavior:** -- Clear triggers + required actions = automatic execution -- "When X, do Y" more effective than "generally do Y" -- Reduces cognitive load on compliance - -**AI models are parahuman:** -- Trained on human text containing these patterns -- Authority language precedes compliance in training data -- Commitment sequences (statement → action) frequently modeled -- Social proof patterns (everyone does X) establish norms - -## Ethical Use - -**Legitimate:** -- Ensuring critical practices are followed -- Creating effective documentation -- Preventing predictable failures - -**Illegitimate:** -- Manipulating for personal gain -- Creating false urgency -- Guilt-based compliance - -**The test:** Would this technique serve the user's genuine interests if they fully understood it? - -## Research Citations - -**Cialdini, R. B. (2021).** *Influence: The Psychology of Persuasion (New and Expanded).* Harper Business. -- Seven principles of persuasion -- Empirical foundation for influence research - -**Meincke, L., Shapiro, D., Duckworth, A. L., Mollick, E., Mollick, L., & Cialdini, R. (2025).** Call Me A Jerk: Persuading AI to Comply with Objectionable Requests. University of Pennsylvania. -- Tested 7 principles with N=28,000 AI conversations -- Compliance increased 33% → 72% with persuasion techniques -- Authority, commitment, scarcity most effective -- Validates parahuman model of AI behavior - -## Quick Reference - -When designing a skill, ask: - -1. **What type is it?** (Discipline vs. guidance vs. reference) -2. **What behavior am I trying to change?** -3. **Which principle(s) apply?** (Usually authority + commitment for discipline) -4. **Am I combining too many?** (Don't use all seven) -5. **Is this ethical?** (Serves user's genuine interests?) 
diff --git a/shared/linked-dotfiles/opencode/skills/do-job/SKILL.md b/shared/linked-dotfiles/opencode/skills/do-job/SKILL.md deleted file mode 100644 index b6dfca6..0000000 --- a/shared/linked-dotfiles/opencode/skills/do-job/SKILL.md +++ /dev/null @@ -1,670 +0,0 @@ ---- -name: do-job -description: Use when starting work on Jira tickets - fetches To Do tickets, creates git worktrees with proper PI-XXXXX naming, implements features using TDD (test-first), commits with ticket references, creates draft PRs, validates work with PR review, and transitions tickets through workflow to Testing ---- - -# Do Job - -Complete developer workflow from ticket selection to validated draft PR using TDD and git worktrees. - -## When to Use This Skill - -Use when: -- Starting work on a new Jira ticket -- Implementing features using test-driven development -- Creating PRs for Jira-tracked work - -## Workflow Selection - -**CRITICAL: Choose workflow based on ticket type** - -- **Regular tickets (Story, Task, Bug)**: Use Standard Implementation Workflow below -- **SPIKE tickets (Investigation/Research)**: Use SPIKE Investigation Workflow - -To determine ticket type, check the `issueTypeName` field when fetching the ticket. - -## Standard Implementation Workflow - -Copy and track progress: - -``` -Ticket Workflow Progress: -- [ ] Step 1: Fetch and select To Do ticket -- [ ] Step 2: Move ticket to In Progress -- [ ] Step 3: Set up git worktree -- [ ] Step 4: Write failing tests (TDD) -- [ ] Step 5: Implement feature/fix -- [ ] Step 6: Verify tests pass -- [ ] Step 7: Review work with developer -- [ ] Step 8: Commit with PI-XXXXX reference -- [ ] Step 9: Push branch -- [ ] Step 10: Create draft PR -- [ ] Step 11: Review work with PR reviewer -- [ ] Step 12: Link PR to ticket -- [ ] Step 13: Session reflection -``` - -## SPIKE Investigation Workflow - -**Use this workflow when ticket type is SPIKE** - -SPIKE tickets are for investigation and research only. No code changes, no PRs. - -``` -SPIKE Workflow Progress: -- [ ] Step 1: Fetch and select SPIKE ticket -- [ ] Step 2: Move ticket to In Progress -- [ ] Step 3: Add investigation start comment -- [ ] Step 4: Invoke investigate agent for research -- [ ] Step 5: Review findings with developer -- [ ] Step 6: Document findings in ticket -- [ ] Step 7: Create follow-up tickets (with approval) -- [ ] Step 8: Link follow-up tickets to SPIKE -- [ ] Step 9: Move SPIKE to Done -- [ ] Step 10: Session reflection -``` - -**Jump to SPIKE Workflow Steps section below for SPIKE-specific instructions.** - -## Prerequisites - -Verify environment: - -```bash -# Check Jira access -atlassian-mcp-server_getAccessibleAtlassianResources - -# Check GitHub CLI -gh auth status - -# Verify in repos root directory -ls -d */ -``` - -## Step 1: Fetch and Select Ticket - -```bash -# Get To Do tickets -atlassian-mcp-server_searchJiraIssuesUsingJql \ - cloudId="" \ - jql="assignee = currentUser() AND status = 'To Do' ORDER BY priority DESC, updated DESC" \ - fields='["summary", "description", "status", "priority"]' -``` - -Review tickets and select one based on priority and description. 
- -## Step 2: Move Ticket to In Progress - -```bash -atlassian-mcp-server_transitionJiraIssue \ - cloudId="" \ - issueIdOrKey="PI-XXXXX" \ - transition='{"id": "41"}' # In Progress transition ID -``` - -Add comment explaining work start: - -```bash -atlassian-mcp-server_addCommentToJiraIssue \ - cloudId="" \ - issueIdOrKey="PI-XXXXX" \ - commentBody="Starting work on this ticket using TDD approach" -``` - -## Step 3: Set Up Git Worktree - -**CRITICAL: All branches and commits MUST include PI-XXXXX ticket number** - -### Identify Repository - -From repos root directory, determine which repo based on ticket description and summary. - -### Create Worktree - -```bash -# Navigate to develop branch of target repo -cd /develop - -# Pull latest changes -git pull - -# Create worktree with proper naming -# Pattern: ../ with branch nate/PI-XXXXX_descriptive-name -git worktree add ../ -b nate/PI-XXXXX_descriptive-name - -# Navigate to new worktree -cd ../ -``` - -**Naming conventions:** -- Directory: Short descriptive name (e.g., `rename-folder-fix`) -- Branch: `nate/PI-XXXXX_descriptive-name` (e.g., `nate/PI-70535_rename-folder-fix`) -- **MUST include PI-XXXXX** in branch name - -## Step 4: Build Implementation Plan - -Analyze ticket requirements and create TDD plan: - -1. **Understand requirements** from ticket description -2. **Identify affected code** areas -3. **Plan test cases** covering: - - Happy path - - Edge cases - - Error conditions -4. **Plan implementation** approach - -## Step 5: TDD Implementation - -### Write Failing Tests First - -**CRITICAL: Write tests BEFORE implementation** - -```bash -# Identify test framework from repo -ls *_test.* test/ tests/ __tests__/ - -# Create or modify test file -# Write test that validates missing functionality -``` - -Test should: -- Clearly describe expected behavior -- Cover the specific bug/feature from ticket -- **Fail initially** (validates test is working) - -### Run Failing Tests - -```bash -# Verify test fails (proves test is valid) - -``` - -Expected: Test fails with clear error showing missing functionality. - -### Implement Feature/Fix - -Write minimal code to make test pass: - -```bash -# Implement the feature or fix -# Focus on making test pass, not perfection -``` - -### Run Tests Again - -```bash -# Verify test now passes - -``` - -Expected: All tests pass. - -### Refactor (if needed) - -Clean up implementation while keeping tests passing: - -- Improve code clarity -- Remove duplication -- Follow project conventions -- **Keep tests passing** - -## Step 6: Verify Complete Solution - -```bash -# Run full test suite - - -# Run linting (if available) - - -# Run type checking (if available) - -``` - -All checks must pass before proceeding. - -## Step 7: Commit Changes - -**CRITICAL: Commit message MUST include PI-XXXXX** - -```bash -# Stage changes -git add . 
- -# Commit with ticket reference -git commit -m "PI-XXXXX: - - -- What was changed -- Why it was changed -- How it addresses the ticket" -``` - -**Commit message format:** -- First line: `PI-XXXXX: ` (50 chars max) -- Blank line -- Detailed description explaining the why and how -- Reference ticket number (PI-XXXXX) in first line - -## Step 8: Push Branch - -```bash -# Push to remote -git push -u origin nate/PI-XXXXX_descriptive-name -``` - -## Step 9: Create Draft PR - -```bash -# Create draft PR with gh CLI -gh pr create \ - --draft \ - --title "PI-XXXXX: " \ - --body "$(cat <<'PRBODY' -## Summary -- Fixes issue described in PI-XXXXX -- Implements using TDD approach - -## Changes -- Added tests for -- Implemented -- Verified all tests pass - -## Testing -- [x] Unit tests added and passing -- [x] Linting passes -- [x] Manual testing completed - -## Jira -Related ticket: PI-XXXXX - -## Notes -Ready for review. Once approved, will move ticket to Pull Request status. -PRBODY -)" -``` - -**Save PR URL** returned by command for next steps. - -## Step 10: Review Work with PR Reviewer - -**CRITICAL: Invoke @pr-reviewer subagent to validate work before linking to ticket** - -```bash -# Invoke the pr-reviewer subagent -@pr-reviewer please review the PR I just created -``` - -The pr-reviewer will: -- Verify the repository is compatible (Go with Makefile) -- Run all validation commands (tests, linting, type checking) -- Review code quality against project standards -- Check for security issues and best practices -- Provide verdict: Ready for review OR needs work - -**If pr-reviewer finds issues:** -1. Address the critical issues identified -2. Re-run tests and validations -3. Commit fixes with `PI-XXXXX: Address PR review feedback` -4. Push updates -5. Invoke @pr-reviewer again to re-validate - -**Only proceed to Step 11 when pr-reviewer gives approval.** - -## Step 11: Link PR to Ticket - -```bash -# Add PR link to Jira ticket -atlassian-mcp-server_addCommentToJiraIssue \ - cloudId="" \ - issueIdOrKey="PI-XXXXX" \ - commentBody="Draft PR created: - -Implementation complete using TDD approach. Ready for code review." -``` - -## Step 12: Session Reflection and Optimization - -**CRITICAL: After completing the ticket workflow, reflect and optimize the system** - -### Two-Stage Process - -**Stage 1: Analysis** - Use `learn_skill(reflect)` for session analysis -- Reviews conversation history and workflow -- Identifies preventable friction (auth issues, environment setup, missing docs) -- Distinguishes from expected development work (debugging, testing) -- Provides structured findings with 1-3 high-impact improvements - -**Stage 2: Implementation** - Invoke `@optimize` agent to take action -- Takes reflection findings and implements changes automatically -- Updates CLAUDE.md/AGENTS.md with missing commands/docs -- Creates or updates skills based on patterns identified -- Adds shell alias recommendations for repeated commands -- Commits all changes with clear messages - -**Only proceed with reflection after:** -- PR is created and validated -- PR review subagent gives go ahead - -**No Improvements is OK** - -Do not reach for fixing things that are already solved. If there are systemic problems, then address them, otherwise, continue on. - -**Example workflow**: -``` -1. learn_skill(reflect) → produces analysis -2. Review findings -3. @optimize → implements improvements automatically -4. 
System is now better for next session -``` - ---- - -## SPIKE Workflow Steps - -**These steps apply only to SPIKE tickets (investigation/research)** - -### Step 1: Fetch and Select SPIKE Ticket - -Same as standard workflow Step 1 - fetch To Do tickets and select one. - -Verify it's a SPIKE by checking `issueTypeName` field. - -### Step 2: Move Ticket to In Progress - -Same as standard workflow Step 2. - -```bash -atlassian-mcp-server_transitionJiraIssue \ - cloudId="" \ - issueIdOrKey="PI-XXXXX" \ - transition='{"id": "41"}' -``` - -### Step 3: Add Investigation Start Comment - -```bash -atlassian-mcp-server_addCommentToJiraIssue \ - cloudId="" \ - issueIdOrKey="PI-XXXXX" \ - commentBody="Starting SPIKE investigation - will explore multiple solution approaches and document findings" -``` - -### Step 4: Invoke Investigate Agent - -**CRITICAL: Use @investigate subagent for creative exploration** - -The investigate agent has higher temperature (0.8) for creative thinking. - -```bash -@investigate - -Context: SPIKE ticket PI-XXXXX -Summary: -Description: - -Please investigate this problem and: -1. Explore the current codebase to understand the problem space -2. Research 3-5 different solution approaches -3. Evaluate trade-offs for each approach -4. Document findings with specific code references (file:line) -5. Recommend the best approach with justification -6. Break down into actionable implementation task(s) - typically just 1 ticket -``` - -The investigate agent will: -- Explore codebase thoroughly -- Research multiple solution paths (3-5 approaches) -- Consider creative/unconventional approaches -- Evaluate trade-offs objectively -- Document specific code references -- Recommend best approach with justification -- Propose implementation plan (typically 1 follow-up ticket) - -### Step 5: Document Findings - -Create comprehensive investigation summary in Jira ticket: - -```bash -atlassian-mcp-server_addCommentToJiraIssue \ - cloudId="" \ - issueIdOrKey="PI-XXXXX" \ - commentBody="## Investigation Findings - -### Problem Analysis - - -### Approaches Considered - -1. **Approach A**: - - Pros: - - Cons: - - Effort: - - Code: - -2. **Approach B**: - - Pros: - - Cons: - - Effort: - - Code: - -3. **Approach C**: - - Pros: - - Cons: - - Effort: - - Code: - -[Continue for all 3-5 approaches] - -### Recommendation -**Recommended Approach**: - -**Justification**: - -**Risks**: - -**Confidence**: - -### Proposed Implementation - -Typically breaking this down into **1 follow-up ticket**: - -**Summary**: - -**Description**: - -**Acceptance Criteria**: -- [ ] -- [ ] -- [ ] - -**Effort Estimate**: - -### References -- -- -- -- " -``` - -### Step 6: Review Findings with Developer - -**CRITICAL: Get developer approval before creating tickets** - -Present investigation findings and proposed follow-up ticket(s) to developer: - -``` -Investigation complete for PI-XXXXX. - -Summary: -- Explored solution approaches -- Recommend: -- Propose: follow-up ticket(s) (typically 1) - -Proposed ticket: -- Summary: -- Effort: - -Would you like me to create this ticket, or would you like to adjust the plan? 
-``` - -**Wait for developer confirmation before proceeding.** - -Developer may: -- Approve ticket creation as-is -- Request modifications to task breakdown -- Request different approach be pursued -- Decide no follow-up tickets needed -- Decide to handle implementation differently - -### Step 7: Create Follow-Up Tickets (With Approval) - -**Only proceed after developer approves in Step 6** - -Typically create just **1 follow-up ticket**. Occasionally more if investigation reveals multiple independent tasks. - -```bash -atlassian-mcp-server_createJiraIssue \ - cloudId="" \ - projectKey="" \ - issueTypeName="Story" \ - summary="" \ - description="## Context -From SPIKE PI-XXXXX investigation - -## Problem - - -## Recommended Approach - - -## Implementation Plan - - -## Acceptance Criteria -- [ ] -- [ ] -- [ ] - -## Code References -- -- - -## Related Tickets -- SPIKE: PI-XXXXX - -## Effort Estimate - - -## Additional Notes -" -``` - -**Note the returned ticket number (e.g., PI-XXXXX) for linking in Step 8.** - -If creating multiple tickets (rare), repeat for each task. - -### Step 8: Link Follow-Up Tickets to SPIKE - -```bash -atlassian-mcp-server_addCommentToJiraIssue \ - cloudId="" \ - issueIdOrKey="PI-XXXXX" \ - commentBody="## Follow-Up Ticket(s) Created - -Implementation task created: -- PI-XXXXX: - -This ticket is ready for implementation using the recommended approach." -``` - -If multiple tickets created, list all with their ticket numbers. - -### Step 9: Move SPIKE to Done - -```bash -# Get available transitions -atlassian-mcp-server_getTransitionsForJiraIssue \ - cloudId="" \ - issueIdOrKey="PI-XXXXX" - -# Transition to Done (find correct transition ID from above) -atlassian-mcp-server_transitionJiraIssue \ - cloudId="" \ - issueIdOrKey="PI-XXXXX" \ - transition='{"id": ""}' -``` - -### Step 10: Session Reflection - -Same as standard workflow - use `learn_skill` tool with `skill_name='reflect'` to identify preventable friction. - ---- - -## Post-Workflow Steps (Manual) - -**After automated pr-reviewer approval and manual developer review:** - -1. Remove draft status from PR -2. Request code review from team -3. Address any additional review comments -4. Get approval -5. **Manual:** Transition ticket to "Pull Request" status (transition ID: 381) -6. 
**Manual:** After merge, SDET moves to "Testing (Service Stack)" (transition ID: 201) - -## Common Mistakes - -### Standard Workflow Mistakes - -#### Branch Naming -- ❌ `fix-bug` (missing ticket number) -- ❌ `PI70535-fix` (missing hyphen, no username) -- ✅ `nate/PI-70535_rename-folder-fix` - -#### Commit Messages -- ❌ `fixed bug` (no ticket reference) -- ❌ `Updated code for PI-70535` (vague) -- ✅ `PI-70535: Fix shared folder rename permission check` - -#### TDD Order -- ❌ Write code first, then tests -- ❌ Skip tests entirely -- ✅ Write failing test → Implement → Verify passing → Refactor - -#### Worktree Location -- ❌ `git worktree add ./feature` (wrong location) -- ❌ `git worktree add ~/feature` (absolute path) -- ✅ `git worktree add ../feature-name` (parallel to develop) - -### SPIKE Workflow Mistakes - -#### Investigation Depth -- ❌ Only exploring 1 obvious solution -- ❌ Vague code references like "the auth module" -- ✅ Explore 3-5 distinct approaches with specific file:line references - -#### Ticket Creation -- ❌ Creating tickets without developer approval -- ❌ Creating many vague tickets automatically -- ✅ Propose plan, get approval, then create (typically 1 ticket) - -#### Code Changes -- ❌ Implementing solution during SPIKE -- ❌ Creating git worktree for SPIKE -- ✅ SPIKE is investigation only - no code, no worktree, no PR - -## Jira Transition IDs - -Reference for manual transitions: - -- To Do: 11 -- In Progress: 41 -- Pull Request: 381 -- Testing (Service Stack): 201 -- Done: (varies by project) - -## Reference Materials - -See references/tdd-workflow.md for detailed TDD best practices. -See references/git-worktree.md for git worktree patterns and troubleshooting. -See references/spike-workflow.md for SPIKE investigation patterns and examples. diff --git a/shared/linked-dotfiles/opencode/skills/do-job/references/git-worktree.md b/shared/linked-dotfiles/opencode/skills/do-job/references/git-worktree.md deleted file mode 100644 index 2b41997..0000000 --- a/shared/linked-dotfiles/opencode/skills/do-job/references/git-worktree.md +++ /dev/null @@ -1,236 +0,0 @@ -# Git Worktree Patterns - -## Overview - -Git worktrees allow multiple working directories from single repository, enabling parallel work on different branches without stashing or switching. - -## Basic Commands - -### Create Worktree - -```bash -# From main/develop branch -cd /develop -git pull - -# Create new worktree -git worktree add -b -``` - -### List Worktrees - -```bash -git worktree list -``` - -### Remove Worktree - -```bash -# From any location in repo -git worktree remove - -# Or manually -rm -rf -git worktree prune -``` - -## Naming Patterns - -### Directory Structure - -``` -repos/ -├── document-api/ -│ ├── develop/ # Main worktree -│ ├── fix-permissions/ # Feature worktree -│ └── add-tags/ # Another feature worktree -``` - -### Branch Naming - -Format: `/PI-XXXXX_` - -Examples: -- `nate/PI-70535_rename-folder-fix` -- `nate/PI-70361_upload-permissions` -- `nate/PI-69924_delete-access-level` - -## Workflow Patterns - -### Starting Work - -```bash -# Navigate to develop -cd document-api/develop - -# Update develop -git pull - -# Create worktree for ticket -git worktree add ../pi-70535-rename-fix -b nate/PI-70535_rename-folder-fix - -# Move to worktree -cd ../pi-70535-rename-fix -``` - -### During Work - -```bash -# Normal git operations work in worktree -git status -git add . 
-git commit -m "PI-70535: Fix folder rename permissions" -git push -u origin nate/PI-70535_rename-folder-fix -``` - -### After PR Merge - -```bash -# From anywhere in repo -git worktree remove ../pi-70535-rename-fix - -# Clean up -git worktree prune -git branch -d nate/PI-70535_rename-folder-fix - -# Update develop -cd develop -git pull -``` - -## Common Issues - -### Worktree Already Exists - -```bash -# Error: worktree already exists -# Solution: Remove old worktree first -git worktree remove -git worktree prune -``` - -### Branch Already Exists - -```bash -# Error: branch already exists -# Solution: Use existing branch or delete old one -git worktree add - -# Or delete old branch -git branch -D -git worktree add -b -``` - -### Locked Worktree - -```bash -# If worktree shows as locked -git worktree unlock -git worktree remove -``` - -### Orphaned Worktrees - -```bash -# Clean up references to deleted worktrees -git worktree prune -``` - -## Best Practices - -### Keep Worktrees Short-Lived - -- Create for specific ticket -- Remove after PR merged -- Don't accumulate many worktrees - -### Use Descriptive Names - -```bash -# ❌ Bad -git worktree add ../work -b fix - -# ✅ Good -git worktree add ../rename-folder-fix -b nate/PI-70535_rename-folder-fix -``` - -### Always Branch from Latest - -```bash -# Update base branch before creating worktree -cd develop -git pull -git worktree add ../feature -b username/PI-XXXXX_feature -``` - -### Clean Up Regularly - -```bash -# List all worktrees -git worktree list - -# Remove merged/abandoned worktrees -git worktree remove -git worktree prune -``` - -## Advantages Over Branch Switching - -### Parallel Work - -Work on multiple tickets simultaneously without switching contexts: - -``` -Terminal 1: cd document-api/develop # Review PRs -Terminal 2: cd document-api/feature-1 # Active development -Terminal 3: cd document-api/hotfix # Emergency fix -``` - -### Preserve State - -Each worktree maintains its own: -- Working directory state -- Staged changes -- Build artifacts -- IDE configuration - -### Avoid Stashing - -No need to stash changes when switching tasks: - -```bash -# Traditional flow (with stashing) -git stash -git checkout other-branch -# ... do work ... -git checkout original-branch -git stash pop - -# Worktree flow (no stashing) -cd ../other-worktree -# ... do work ... -cd ../original-worktree -# Everything still there -``` - -## Limitations - -### Shared References - -All worktrees share: -- Git objects -- Remote configuration -- Git hooks -- Submodules - -### Disk Space - -Each worktree requires full working directory (but shares `.git` objects). - -### Cannot Checkout Same Branch - -```bash -# Error: branch already checked out -# One branch can only be checked out in one worktree at a time -git worktree add ../dup -b existing-branch # Fails if already checked out -``` diff --git a/shared/linked-dotfiles/opencode/skills/do-job/references/spike-workflow.md b/shared/linked-dotfiles/opencode/skills/do-job/references/spike-workflow.md deleted file mode 100644 index 40a0f8d..0000000 --- a/shared/linked-dotfiles/opencode/skills/do-job/references/spike-workflow.md +++ /dev/null @@ -1,371 +0,0 @@ -# SPIKE Investigation Workflow - -## What is a SPIKE? - -A SPIKE ticket is a time-boxed research and investigation task. The goal is to explore a problem space, evaluate solution approaches, and create an actionable plan for implementation. - -**SPIKE = Investigation only. No code changes.** - -## Key Principles - -### 1. 
Exploration Over Implementation -- Focus on understanding the problem deeply -- Consider multiple solution approaches (3-5) -- Don't commit to first idea -- Think creatively about alternatives - -### 2. Documentation Over Code -- Document findings thoroughly -- Provide specific code references (file:line) -- Explain trade-offs objectively -- Create actionable implementation plan - -### 3. Developer Approval Required -- Always review findings with developer before creating tickets -- Developer has final say on implementation approach -- Get explicit approval before creating follow-up tickets -- Typically results in just 1 follow-up ticket - -### 4. No Code Changes -- ✅ Read and explore codebase -- ✅ Document findings -- ✅ Create implementation plan -- ❌ Write implementation code -- ❌ Create git worktree -- ❌ Create PR - -## Investigation Process - -### Phase 1: Problem Understanding - -**Understand current state:** -- Read ticket description thoroughly -- Explore relevant codebase areas -- Identify constraints and dependencies -- Document current implementation - -**Ask questions:** -- What problem are we solving? -- Who is affected? -- What are the constraints? -- What's the desired outcome? - -### Phase 2: Approach Exploration - -**Explore 3-5 different approaches:** - -For each approach, document: -- **Name**: Brief descriptive name -- **Description**: How it works -- **Pros**: Benefits and advantages -- **Cons**: Drawbacks and challenges -- **Effort**: Relative complexity (S/M/L/XL) -- **Code locations**: Specific file:line references - -**Think broadly:** -- Conventional approaches -- Creative/unconventional approaches -- Simple vs. complex solutions -- Short-term vs. long-term solutions - -### Phase 3: Trade-off Analysis - -**Evaluate objectively:** -- Implementation complexity -- Performance implications -- Maintenance burden -- Testing requirements -- Migration/rollout complexity -- Team familiarity with approach -- Long-term sustainability - -**Be honest about cons:** -- Every approach has trade-offs -- Document them clearly -- Don't hide problems - -### Phase 4: Recommendation - -**Make clear recommendation:** -- Which approach is best -- Why it's superior to alternatives -- Key risks and mitigations -- Confidence level (Low/Medium/High) - -**Justify recommendation:** -- Reference specific trade-offs -- Explain why pros outweigh cons -- Consider team context - -### Phase 5: Implementation Planning - -**Create actionable plan:** -- Typically breaks down into **1 follow-up ticket** -- Occasionally 2-3 if clearly independent tasks -- Never many vague tickets - -**For each ticket, include:** -- Clear summary -- Detailed description -- Recommended approach -- Acceptance criteria -- Code references from investigation -- Effort estimate (S/M/L/XL) - -## Investigation Output Template - -```markdown -## Investigation Findings - PI-XXXXX - -### Problem Analysis -[Current state description with file:line references] -[Problem statement] -[Constraints and requirements] - -### Approaches Considered - -#### 1. [Approach Name] -- **Description**: [How it works] -- **Pros**: - - [Benefit 1] - - [Benefit 2] -- **Cons**: - - [Drawback 1] - - [Drawback 2] -- **Effort**: [S/M/L/XL] -- **Code**: [file.ext:123, file.ext:456] - -#### 2. 
[Approach Name] -[Repeat structure for each approach] - -[Continue for 3-5 approaches] - -### Recommendation - -**Recommended Approach**: [Approach Name] - -**Justification**: [Why this is best, referencing specific trade-offs] - -**Risks**: -- [Risk 1]: [Mitigation] -- [Risk 2]: [Mitigation] - -**Confidence**: [Low/Medium/High] - -### Proposed Implementation - -Typically **1 follow-up ticket**: - -**Summary**: [Concise task description] - -**Description**: -[Detailed implementation plan] -[Step-by-step approach] -[Key considerations] - -**Acceptance Criteria**: -- [ ] [Criterion 1] -- [ ] [Criterion 2] -- [ ] [Criterion 3] - -**Effort Estimate**: [S/M/L/XL] - -**Code References**: -- [file.ext:123 - Description] -- [file.ext:456 - Description] - -### References -- [Documentation link] -- [Related ticket] -- [External resource] -``` - -## Example SPIKE Investigation - -### Problem -Performance degradation in user search with large datasets (10k+ users) - -### Approaches Considered - -#### 1. Database Query Optimization -- **Description**: Add indexes, optimize JOIN queries, use query caching -- **Pros**: - - Minimal code changes - - Works with existing architecture - - Can be implemented incrementally -- **Cons**: - - Limited scalability (still hits DB for each search) - - Query complexity increases with features - - Cache invalidation complexity -- **Effort**: M -- **Code**: user_service.go:245, user_repository.go:89 - -#### 2. Elasticsearch Integration -- **Description**: Index users in Elasticsearch, use for all search operations -- **Pros**: - - Excellent search performance at scale - - Full-text search capabilities - - Faceted search support -- **Cons**: - - New infrastructure to maintain - - Data sync complexity - - Team learning curve - - Higher operational cost -- **Effort**: XL -- **Code**: Would be new service, interfaces at user_service.go:200 - -#### 3. In-Memory Cache with Background Sync -- **Description**: Maintain searchable user cache in memory, sync periodically -- **Pros**: - - Very fast search performance - - No additional infrastructure - - Simple implementation -- **Cons**: - - Memory usage on app servers - - Eventual consistency issues - - Cache warming on deploy - - Doesn't scale past single-server memory -- **Effort**: L -- **Code**: New cache_service.go, integrate at user_service.go:245 - -#### 4. 
Materialized View with Triggers -- **Description**: Database materialized view optimized for search, auto-updated via triggers -- **Pros**: - - Good performance - - Consistent data - - Minimal app code changes -- **Cons**: - - Database-specific (PostgreSQL only) - - Trigger complexity - - Harder to debug issues - - Lock contention on high write volume -- **Effort**: M -- **Code**: Migration needed, user_repository.go:89 - -### Recommendation - -**Recommended Approach**: Database Query Optimization (#1) - -**Justification**: -Given our current scale (8k users, growing ~20%/year) and team context: -- Elasticsearch is over-engineering for current needs - reaches 50k users in ~5 years -- In-memory cache has consistency issues that would affect UX -- Materialized views add database complexity our team hasn't worked with -- Query optimization addresses immediate pain point with minimal risk -- Can revisit Elasticsearch if we hit 20k+ users or need full-text features - -**Risks**: -- May need to revisit in 2-3 years if growth accelerates: Monitor performance metrics, set alert at 15k users -- Won't support advanced search features: Document limitation, plan for future if needed - -**Confidence**: High - -### Proposed Implementation - -**1 follow-up ticket**: - -**Summary**: Optimize user search queries with indexes and caching - -**Description**: -1. Add composite index on (last_name, first_name, email) -2. Implement Redis query cache with 5-min TTL -3. Optimize JOIN query in getUsersForSearch -4. Add performance monitoring - -**Acceptance Criteria**: -- [ ] Search response time < 200ms for 95th percentile -- [ ] Database query count reduced from 3 to 1 per search -- [ ] Monitoring dashboard shows performance metrics -- [ ] Load testing validates 10k concurrent users - -**Effort Estimate**: M (1-2 days) - -**Code References**: -- user_service.go:245 - Main search function to optimize -- user_repository.go:89 - Database query to modify -- schema.sql:34 - Add index here - -### References -- PostgreSQL index documentation: https://... -- Existing Redis cache pattern: cache_service.go:12 -- Related performance ticket: PI-65432 - -## Common Pitfalls - -### ❌ Shallow Investigation -**Bad**: -- Only considers 1 obvious solution -- Vague references like "the user module" -- No trade-off analysis - -**Good**: -- Explores 3-5 distinct approaches -- Specific file:line references -- Honest pros/cons for each - -### ❌ Analysis Paralysis -**Bad**: -- Explores 15 different approaches -- Gets lost in theoretical possibilities -- Never makes clear recommendation - -**Good**: -- Focus on 3-5 viable approaches -- Make decision based on team context -- Acknowledge uncertainty but recommend path - -### ❌ Premature Implementation -**Bad**: -- Starts writing code during SPIKE -- Creates git worktree -- Implements "prototype" - -**Good**: -- Investigation only -- Code reading and references -- Plan for implementation ticket - -### ❌ Automatic Ticket Creation -**Bad**: -- Creates 5 tickets without developer review -- Breaks work into too many pieces -- Doesn't get approval first - -**Good**: -- Proposes implementation plan -- Waits for developer approval -- Typically creates just 1 ticket - -## Time-Boxing - -SPIKEs should be time-boxed to prevent over-analysis: - -- **Small SPIKE**: 2-4 hours -- **Medium SPIKE**: 1 day -- **Large SPIKE**: 2-3 days - -If hitting time limit: -1. Document what you've learned so far -2. Document what's still unknown -3. 
Recommend either: - - Proceeding with current knowledge - - Extending SPIKE with specific questions - - Creating prototype SPIKE to validate approach - -## Success Criteria - -A successful SPIKE: -- ✅ Thoroughly explores problem space -- ✅ Considers multiple approaches (3-5) -- ✅ Provides specific code references -- ✅ Makes clear recommendation with justification -- ✅ Creates actionable plan (typically 1 ticket) -- ✅ Gets developer approval before creating tickets -- ✅ Enables confident implementation - -A successful SPIKE does NOT: -- ❌ Implement the solution -- ❌ Create code changes -- ❌ Create tickets without approval -- ❌ Leave implementation plan vague -- ❌ Only explore 1 obvious solution diff --git a/shared/linked-dotfiles/opencode/skills/do-job/references/tdd-workflow.md b/shared/linked-dotfiles/opencode/skills/do-job/references/tdd-workflow.md deleted file mode 100644 index 7b3ca55..0000000 --- a/shared/linked-dotfiles/opencode/skills/do-job/references/tdd-workflow.md +++ /dev/null @@ -1,173 +0,0 @@ -# Test-Driven Development Workflow - -## Core TDD Cycle - -1. **Red** - Write failing test -2. **Green** - Make test pass -3. **Refactor** - Improve code while keeping tests passing - -## Writing Effective Tests - -### Test Structure - -``` -# Arrange - Set up test data and conditions -# Act - Execute the code being tested -# Assert - Verify expected behavior -``` - -### Good Test Characteristics - -- **Isolated** - Tests don't depend on each other -- **Repeatable** - Same input always produces same output -- **Fast** - Tests run quickly -- **Clear** - Test name describes what's being tested -- **Focused** - One concept per test - -### Test Naming - -``` -test___ -``` - -Examples: -- `test_rename_folder_as_owner_succeeds` -- `test_rename_folder_without_permission_returns_403` -- `test_rename_folder_with_empty_name_returns_400` - -## Common Patterns - -### Testing Error Conditions - -```javascript -// Test expected errors -test('rename_folder_without_permission_returns_403', async () => { - // Arrange: Set up user without permissions - const user = createUserWithoutPermissions(); - - // Act: Attempt rename - const response = await renameFolder(user, folderId, newName); - - // Assert: Verify 403 error - expect(response.status).toBe(403); - expect(response.error).toContain('forbidden'); -}); -``` - -### Testing Happy Path - -```javascript -test('rename_folder_as_owner_succeeds', async () => { - // Arrange: Set up folder with owner - const owner = createOwner(); - const folder = createFolder(owner); - - // Act: Rename folder - const response = await renameFolder(owner, folder.id, 'NewName'); - - // Assert: Verify success - expect(response.status).toBe(200); - expect(response.data.name).toBe('NewName'); -}); -``` - -### Testing Edge Cases - -```javascript -test('rename_folder_with_special_characters_sanitizes_name', async () => { - const owner = createOwner(); - const folder = createFolder(owner); - - const response = await renameFolder(owner, folder.id, ''); - - expect(response.status).toBe(200); - expect(response.data.name).not.toContain('