From ba9e6f3316984a475cbc1b99363f35f4e1c1c059 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Sat, 28 Mar 2026 22:28:32 -0700 Subject: [PATCH 1/7] =?UTF-8?q?feat:=20add=20Kit=20system=20=E2=80=94=20sh?= =?UTF-8?q?areable=20bundles=20of=20skills,=20MCP=20servers,=20and=20instr?= =?UTF-8?q?uctions?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduces the Kit extension system that enables anyone — vendors, solution architects, team leads, individual engineers — to create and distribute shareable development setups. ## What's included **Core runtime** (`packages/opencode/src/kit/`): - `Kit` namespace with Zod schemas, state management, YAML loading - Trust tiers (`built-in`, `verified`, `community`) - Skill packs with activation modes (`always`, `detect`, `manual`) - Activate/deactivate lifecycle with full cleanup **11 CLI commands** (`packages/opencode/src/cli/cmd/kit.ts`): - `kit list`, `kit create`, `kit show`, `kit install`, `kit remove` - `kit activate` — one command: installs skills, configures MCP, enables - `kit deactivate` — clean removal (instructions + MCP config + active-kits) - `kit detect`, `kit search`, `kit status`, `kit validate` **TUI startup nudge** (`packages/opencode/src/cli/cmd/tui/thread.ts`): - Non-blocking detection on TUI startup - Shows one-line suggestion when matching kits found **JSONC-preserving config writes**: - Uses `jsonc-parser` `modify`/`applyEdits` to preserve user comments - MCP servers added on activate, removed on deactivate **Documentation** (`docs/`): - User guide: `docs/docs/configure/kits.md` (CLI reference, locations, tiers) - Author guide: `docs/docs/develop/kits.md` (full schema, tutorial, examples) - Ecosystem plan: `docs/PARTNER_ECOSYSTEM_PLAN.md` (strategy + simulation results) - Roadmap with planned features (`kit switch`, inheritance, `kit enforce`) ## Testing - 60/60 automated E2E tests passing (name validation, activate/deactivate lifecycle, MCP merge, JSONC 
preservation, detect, validate, install) - 10 stakeholder simulations across 5 scenarios (Snowflake, Dagster, dbt Labs, Airbyte, Healthcare, MSP consulting, OSS contributor, self-serve, enterprise) - 29 bugs found and fixed across 3 review rounds ## External - Kit content lives in `AltimateAI/data-engineering-skills` (merged PR #9) - Registry at `data-engineering-skills/registry.json` with 1 real entry - `dbt-snowflake` kit: 9 skills + dbt MCP server Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/PARTNER_ECOSYSTEM_PLAN.md | 939 +++++++++++ docs/docs/configure/index.md | 8 + docs/docs/configure/kits.md | 192 +++ docs/docs/develop/ecosystem.md | 18 + docs/docs/develop/kits.md | 422 +++++ docs/mkdocs.yml | 2 + .../opencode/src/altimate/telemetry/index.ts | 36 + packages/opencode/src/cli/cmd/kit.ts | 1366 +++++++++++++++++ packages/opencode/src/cli/cmd/tui/thread.ts | 25 + packages/opencode/src/config/config.ts | 3 + packages/opencode/src/index.ts | 6 + packages/opencode/src/kit/index.ts | 3 + packages/opencode/src/kit/kit.ts | 430 ++++++ 13 files changed, 3450 insertions(+) create mode 100644 docs/PARTNER_ECOSYSTEM_PLAN.md create mode 100644 docs/docs/configure/kits.md create mode 100644 docs/docs/develop/kits.md create mode 100644 packages/opencode/src/cli/cmd/kit.ts create mode 100644 packages/opencode/src/kit/index.ts create mode 100644 packages/opencode/src/kit/kit.ts diff --git a/docs/PARTNER_ECOSYSTEM_PLAN.md b/docs/PARTNER_ECOSYSTEM_PLAN.md new file mode 100644 index 0000000000..73546710b2 --- /dev/null +++ b/docs/PARTNER_ECOSYSTEM_PLAN.md @@ -0,0 +1,939 @@ +# Altimate Code — Extension Ecosystem Plan + +> **Purpose:** Enable anyone — vendors, solution architects, team leads, individual engineers — to extend Altimate Code with kits that bundle skills, MCP servers, and instructions. 
+> +> **Date:** 2026-03-28 | **Status:** Validated through 5 scenario simulations (12 personas) +> +> **Key rename:** "Recipe" → "Kit" (differentiation from Goose, clearer mental model) + +### Simulation Results (2026-03-28) +| Scenario | Score | Key Finding | +|----------|-------|-------------| +| Snowflake (Large Enterprise) | 5/10 | Demo-ready core, 5 deal blockers | +| Dagster (Growth Startup) | 6/10 | Would partner conditionally | +| Fortune 500 Bank (Enterprise) | 3/10 | Missing enforcement, use AGENTS.md today | +| Solo Consultant (SA) | 5/10 | Best natural fit, needs `kit switch` + cleanup | +| Series A Self-Serve | 3/10 | Nobody discovers kit without being told | + +**Universal finding:** Authoring experience is good. Single-developer workflow works. Discovery and multi-person story are broken. Auto-detect on startup is the #1 priority. + +--- + +## Table of Contents + +1. [Executive Summary](#1-executive-summary) +2. [Industry Landscape — How Others Do It](#2-industry-landscape) +3. [Our Extensibility Architecture](#3-our-extensibility-architecture) +4. [The Three-Layer Partner Model](#4-the-three-layer-partner-model) +5. [Layer 1: Agent Skills (SKILL.md)](#5-layer-1-agent-skills) +6. [Layer 2: MCP Servers](#6-layer-2-mcp-servers) +7. [Layer 3: Plugins (Deep Integration)](#7-layer-3-plugins) +8. [Kits: The Distribution Unit](#8-kits-the-distribution-unit) +9. [data-engineering-skills: The Open-Source Foundation](#9-data-engineering-skills-the-open-source-foundation) +10. [Onboarding Playbook](#10-onboarding-playbook) +11. [What We Need to Build](#11-what-we-need-to-build) +12. [Competitive Positioning](#12-competitive-positioning) +13. [Appendix: Research Sources](#13-appendix) + +--- + +## 1. 
Executive Summary + +The data engineering agent space is converging on **three complementary extension layers**: + +| Layer | What It Does | Portability | Effort to Build | Example | +|-------|-------------|-------------|-----------------|---------| +| **Agent Skills** | Teaches the AI *how to think* about tasks | Universal (30+ products) | Low (markdown) | "How to debug a dbt model" | +| **MCP Servers** | Gives the AI *tools to execute* tasks | Universal (any MCP client) | Medium (code) | `dbt build`, `dagster materialize` | +| **Plugins** | Deep platform integration (auth, UI, hooks) | Altimate-specific | High (TypeScript) | Custom auth flow, tool interception | + +**Our strategy:** Make Altimate Code the best host for data engineering extensions by providing all three layers, with `AltimateAI/data-engineering-skills` as the open-source foundation that any vendor can contribute to. + +**Why partners should care:** +- Skills authored once work across Claude Code, Cursor, VS Code Copilot, Gemini CLI, OpenCode, and 25+ other agents (via the [agentskills.io](https://agentskills.io) open standard) +- MCP servers work across Goose, Claude Desktop, Continue.dev, Cline, and every MCP-compatible client +- Partners get distribution to every data engineer using AI coding agents, not just Altimate Code users + +--- + +## 2. Industry Landscape + +### 2.1 How Goose (Block) Does It + +Goose made the boldest architectural decision: **Extensions ARE MCP servers.** No proprietary format. 
+ +**Key patterns worth adopting:** + +| Pattern | How Goose Does It | Our Equivalent | +|---------|-------------------|----------------| +| Extension = MCP server | Any MCP server is auto-discovered | We support this via `config.mcp` | +| **Recipes** | YAML bundles: extensions + prompts + settings + parameters | **Kits** (KIT.yaml) — our equivalent | +| Deep links | `goose://extension?cmd=...` one-click install | Not yet | +| Extension directory | Curated browse page (70+ servers) | Not yet | +| Custom distros | Full white-label with bundled extensions | Possible via our config system | +| Subagent composition | Recipes spawn parallel sub-agents | We have agents but no kit system yet | +| Malware scanning | Auto-scan before extension activation | Not yet | + +**Goose's real partner integrations:** +- **DataHub + Block:** DataHub MCP server for metadata intelligence +- **OpenMetadata:** Published a Goose Recipe (not just extension) +- **Dagster:** Ships `dagster-mcp` that works with any MCP client including Goose +- **Docker:** Containerized extension execution + +**Goose's gaps (our opportunity):** +- No formal partner program or certification +- No marketplace economics (no paid extensions) +- No extension quality metrics or ratings +- No automated testing framework for extensions +- Extension discovery relies on external directories + +### 2.2 How OpenCode Upstream Does It + +OpenCode (our upstream fork) has a mature plugin system with 50+ community plugins: + +**Plugin hooks (20+ interception points):** +``` +auth, event, config, chat.message, chat.params, chat.headers, +permission.ask, command.execute.before, tool.execute.before, +tool.execute.after, tool.definition, shell.env, +experimental.chat.system.transform, experimental.session.compacting +``` + +**Plugin distribution:** npm packages (prefix `opencode-`) or local files in `.opencode/plugins/` + +**Skill loading hierarchy (8 sources):** +1. Built-in (embedded at build time) +2. 
Filesystem builtin (`~/.altimate/builtin/`) +3. External directories (`.claude/skills/`, `.agents/skills/`) +4. Global home-directory skills +5. Project-level skills (walked up directory tree) +6. `.opencode/skill/` directories +7. Config `skills.paths` (additional directories) +8. Config `skills.urls` (remote — fetches `index.json` then downloads files) + +**Key insight:** We already inherit all of this. The question is what we build ON TOP of it. + +### 2.3 Industry-Wide Convergence + +| Product | Skills | MCP | Plugins | Marketplace | +|---------|--------|-----|---------|-------------| +| Claude Code | SKILL.md | Yes | Yes (.claude-plugin) | Yes (official) | +| Goose | No | Yes (primary) | No (MCP only) | Browse page | +| Continue.dev | Rules | Yes (primary) | Config-based | Continue Hub | +| Cline | SKILL.md | Yes | VS Code ext | VS Code marketplace | +| Cursor | Rules | Yes | No | No | +| Codex CLI | SKILL.md | Planned | No | No | +| Gemini CLI | SKILL.md | Yes | No | No | +| **Altimate Code** | SKILL.md | Yes | Yes (hooks) | **Not yet** | + +**The market signal is clear:** MCP for tools, Skills for knowledge, Plugins for deep integration. All three matter. + +### 2.4 Data Vendor MCP Servers (Already Shipping) + +| Vendor | MCP Server | Tools | Maturity | +|--------|-----------|-------|----------| +| **dbt** | `dbt-mcp` | 58 tools (SQL, Semantic Layer, Discovery, Admin, CLI, codegen, docs) | Production | +| **Dagster** | `dg[mcp]` | CLI wrapper, scaffold, YAML config, code quality | Production | +| **Airbyte** | 3 servers: PyAirbyte MCP, Knowledge MCP, Connector Builder MCP | Pipeline generation, docs search, 600+ connectors | Production | +| **Snowflake** | Cortex MCP | Query, schema, governance | Beta | +| **DataHub** | DataHub MCP | Metadata, lineage, governance | Production | +| **OpenMetadata** | OpenMetadata MCP | Governance, quality, profiling | Production | + +**Critical realization:** These vendors already ship MCP servers. 
Our job is to make Altimate Code the BEST host for these servers by adding data-engineering-specific skills on top. + +--- + +## 3. Our Extensibility Architecture + +### 3.1 What We Already Have + +``` +┌─────────────────────────────────────────────────────────┐ +│ Altimate Code │ +│ │ +│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌─────────┐ │ +│ │ Skills │ │ MCP │ │ Plugins │ │ Tools │ │ +│ │ (SKILL.md)│ │ Servers │ │ (Hooks) │ │ (Zod) │ │ +│ └────┬─────┘ └────┬─────┘ └────┬─────┘ └────┬────┘ │ +│ │ │ │ │ │ +│ ┌────┴──────────────┴──────────────┴──────────────┴───┐ │ +│ │ Agent Runtime (LLM Loop) │ │ +│ └─────────────────────┬───────────────────────────────┘ │ +│ │ │ +│ ┌─────────────────────┴───────────────────────────────┐ │ +│ │ SDK (@altimate/cli-sdk) — REST API + Types │ │ +│ └─────────────────────────────────────────────────────┘ │ +└──────────────────────────────────────────────────────────┘ +``` + +**Current extension points:** + +| Extension Point | Location | Partner Access | Gap | +|----------------|----------|---------------|-----| +| Skills (SKILL.md) | `packages/opencode/src/skill/` | Git repos, URLs, local dirs | No registry, no versioning | +| MCP Servers | `packages/opencode/src/mcp/` | Config YAML, auto-discovery | No bundled data-eng servers | +| Plugins (npm) | `packages/plugin/` | npm packages, local files | No marketplace | +| Tools (Zod) | `packages/opencode/src/tool/` | Config dirs, plugins | No external tool packaging | +| Providers | `packages/opencode/src/provider/` | Config, custom loaders | No plugin-based registration | +| SDK | `packages/sdk/js/` | REST API, OpenAPI types | No WebSocket, subprocess only | + +### 3.2 Config-Level Extension + +Partners can configure extensions via `opencode.jsonc` or `.altimate-code/`: + +```jsonc +{ + // Skills from partner repos + "skills": { + "paths": ["./vendor-skills/dagster/"], + "urls": ["https://raw.githubusercontent.com/DagsterHQ/dagster-skills/main/"] + }, + + // Partner MCP servers + "mcp": { 
+ "dagster": { + "type": "stdio", + "command": ["uvx", "dg", "mcp", "serve"], + "env": { "DAGSTER_HOME": "/path/to/dagster" } + }, + "dbt": { + "type": "stdio", + "command": ["uvx", "dbt-mcp"], + "env": { "DBT_PROJECT_DIR": "./", "DBT_PROFILES_DIR": "~/.dbt" } + } + }, + + // Partner plugins + "plugin": ["@dagster/altimate-plugin@latest"] +} +``` + +--- + +## 4. The Three-Layer Partner Model + +We propose a **progressive complexity** model where partners choose their integration depth: + +``` + Effort ──────────────────────► + + ┌─────────────────────────────────────────────────────┐ + │ │ + │ Layer 1 Layer 2 Layer 3 │ + │ ──────── ──────── ──────── │ + │ │ + │ SKILL.md ──► MCP Server ──► Plugin │ + │ (Markdown) (Python/TS) (TypeScript) │ + │ │ + │ Teaches HOW Provides TOOLS Deep platform │ + │ to approach to execute integration │ + │ tasks tasks (auth, UI, hooks)│ + │ │ + │ ~1 day ~1 week ~2-4 weeks │ + │ │ + │ Works in 30+ Works in any Altimate-specific│ + │ AI agents MCP client but most powerful│ + │ │ + └─────────────────────────────────────────────────────┘ +``` + +Most partners start at Layer 1, add Layer 2 if they have an API/CLI, and only reach Layer 3 for deep integrations. + +--- + +## 5. Layer 1: Agent Skills (SKILL.md) + +### 5.1 Why Skills Matter + +Skills are the **highest-leverage, lowest-effort** extension point. They encode expert knowledge about how to use a vendor's tool. 
+ +**Without a skill:** "Hey Claude, create a Dagster asset" → generic, possibly wrong output +**With a skill:** "Hey Claude, create a Dagster asset" → follows Dagster's opinionated patterns, uses `dg` CLI, validates with type checking + +### 5.2 Skill Authoring Guide for Partners + +**File structure:** +``` +dagster-skills/ +├── skills/ +│ ├── dagster/ +│ │ ├── creating-dagster-assets/ +│ │ │ └── SKILL.md +│ │ ├── debugging-dagster-runs/ +│ │ │ └── SKILL.md +│ │ ├── scheduling-dagster-jobs/ +│ │ │ └── SKILL.md +│ │ └── testing-dagster-assets/ +│ │ └── SKILL.md +│ └── index.json # For remote discovery +├── .claude-plugin/ +│ └── marketplace.json # For Claude Code marketplace +├── CONTRIBUTING.md +└── README.md +``` + +**SKILL.md format:** +```yaml +--- +name: creating-dagster-assets +description: | + Creates Dagster assets following project conventions. Use when: + (1) Creating new software-defined assets + (2) Task mentions "create", "build", "add" a Dagster asset + (3) Working with Dagster's asset-based orchestration +--- + +# Creating Dagster Assets + +**Read project structure before writing. Validate after creation.** + +## Critical Rules +1. ALWAYS use `@asset` decorator, never raw `@op` for new work +2. ALWAYS define `AssetSpec` with proper metadata +3. ALWAYS add asset checks for data quality +4. Use `dg` CLI for scaffolding when available + +## Workflow +1. **Explore** — Read existing assets in the project for conventions +2. **Scaffold** — Use `dg scaffold asset` if `dg` CLI available +3. **Implement** — Write the asset following project patterns +4. **Test** — Run `dagster asset materialize` to verify +5. 
**Validate** — Check asset appears in Dagster UI lineage graph + +## Anti-Patterns +- Do NOT use `@op` + `@job` for new data assets (legacy pattern) +- Do NOT hardcode partition definitions (use config) +- Do NOT skip `@asset_check` for critical data assets +``` + +**`index.json` format (for remote discovery via `skills.urls`):** +```json +{ + "skills": [ + { + "name": "creating-dagster-assets", + "description": "Creates Dagster assets following best practices", + "files": [ + "skills/dagster/creating-dagster-assets/SKILL.md" + ] + } + ] +} +``` + +**`marketplace.json` format (for Claude Code plugin marketplace):** +```json +{ + "name": "dagster-skills", + "owner": { "name": "Dagster Labs", "email": "oss@dagster.io" }, + "metadata": { + "description": "Expert skills for Dagster asset orchestration", + "version": "1.0.0" + }, + "plugins": [ + { + "name": "dagster-core-skills", + "description": "Core Dagster development skills", + "source": "./", + "skills": [ + "./skills/dagster/creating-dagster-assets", + "./skills/dagster/debugging-dagster-runs", + "./skills/dagster/scheduling-dagster-jobs", + "./skills/dagster/testing-dagster-assets" + ] + } + ] +} +``` + +### 5.3 Skill Quality Checklist + +| Criterion | Required | Description | +|-----------|----------|-------------| +| Actionable workflow | Yes | Step-by-step, not reference docs | +| Read-before-write | Yes | Always explore existing patterns first | +| Verification step | Yes | How to confirm the work is correct | +| Anti-patterns section | Recommended | Common mistakes to avoid | +| Tool references | Recommended | Which MCP tools to use if available | +| Benchmark tested | Recommended | Measured improvement on real tasks | + +### 5.4 Portability + +Skills authored for Altimate Code automatically work in: +- Claude Code (native SKILL.md support) +- Cursor (via rules import) +- VS Code Copilot (via agent skills) +- Gemini CLI (SKILL.md compatible) +- Codex CLI (SKILL.md compatible) +- Any product supporting the 
[agentskills.io](https://agentskills.io) standard + +This is the **key selling point for partners**: write once, distribute everywhere. + +--- + +## 6. Layer 2: MCP Servers + +### 6.1 Why MCP Servers + +MCP servers give the AI actual tools to call. While skills teach *how to think*, MCP servers provide *ability to act*. + +**The combination is powerful:** +- Skill says: "Run `dbt build --select model_name` to verify your changes" +- MCP server provides: the `dbt_build` tool that actually executes it + +### 6.2 What Partners Already Have + +Most data vendors already ship MCP servers: + +**dbt (58 tools):** +``` +dbt_build, dbt_run, dbt_test, dbt_compile, dbt_parse, +semantic_layer_query, discovery_api_query, admin_api_*, +code_generate_model, docs_search, ... +``` + +**Dagster:** +``` +dg scaffold, dg asset materialize, dg check, +pipeline status, run logs, sensor management, ... +``` + +**Airbyte:** +``` +create_pipeline, list_connectors, sync_connection, +search_docs, build_connector, ... +``` + +### 6.3 MCP Server Integration Guide for Partners + +**Option A: Partner publishes MCP server, we document the config** + +The partner publishes their MCP server to PyPI/npm. 
We add documentation and a recommended configuration: + +```jsonc +// Recommended config for Altimate Code users +{ + "mcp": { + "dagster": { + "type": "stdio", + "command": ["uvx", "dg", "mcp", "serve"], + "env": { + "DAGSTER_HOME": "${DAGSTER_HOME}" + } + } + } +} +``` + +**Option B: Bundle as part of a plugin (recommended for deep integration)** + +The partner's plugin includes `.mcp.json` that auto-configures their MCP server: + +```json +// .mcp.json inside the plugin package +{ + "mcpServers": { + "dagster": { + "type": "stdio", + "command": ["uvx", "dg", "mcp", "serve"], + "description": "Dagster asset orchestration" + } + } +} +``` + +**Option C: Altimate Code ships pre-configured connections** + +For strategic partners, we bundle MCP server configs that auto-detect the tool: +- Detect `dbt_project.yml` → suggest enabling dbt MCP +- Detect `dagster.yaml` → suggest enabling Dagster MCP +- Detect `airbyte/` directory → suggest enabling Airbyte MCP + +### 6.4 MCP Server Quality Requirements + +| Criterion | Required | Description | +|-----------|----------|-------------| +| Tool descriptions | Yes | Clear, actionable descriptions for each tool | +| Error messages | Yes | Structured errors the LLM can reason about | +| Timeout handling | Yes | Graceful handling of long-running operations | +| Auth documentation | Yes | Clear setup instructions for API keys/tokens | +| < 20 tools exposed | Recommended | Semantic Kernel research shows LLMs degrade above 20 | +| Tool filtering | Recommended | Support `available_tools` to limit exposed surface | + +--- + +## 7. 
Layer 3: Plugins (Deep Integration) + +### 7.1 When Partners Need Plugins + +Plugins are for partners who need to: +- Add custom authentication flows (OAuth with their cloud service) +- Intercept and modify tool execution (add warehouse-specific context) +- Inject system prompts (add vendor-specific instructions) +- Modify chat parameters (adjust for their use case) +- Add custom tools with complex logic + +### 7.2 Plugin Interface + +```typescript +import type { Plugin, PluginInput, Hooks, ToolDefinition } from "@altimate/cli-plugin" + +const dagsterPlugin: Plugin = async (input: PluginInput): Promise => { + const { client, project, directory, $ } = input + + return { + // Add custom tools + tool: { + "dagster.materialize": { + description: "Materialize a Dagster asset", + parameters: z.object({ + asset_key: z.string().describe("The asset key to materialize"), + partition: z.string().optional(), + }), + execute: async (args) => { + const result = await $`dg asset materialize ${args.asset_key}` + return { title: "Materialized", output: result.stdout, metadata: {} } + } + } + }, + + // Custom auth flow + auth: { + match: (provider) => provider.id === "dagster-cloud", + login: async () => { /* OAuth flow */ }, + logout: async () => { /* Cleanup */ }, + }, + + // Intercept tool execution + "tool.execute.before": async (input, output) => { + // Add Dagster context to SQL tools + if (input.toolID.startsWith("sql.")) { + output.args = { ...output.args, context: "dagster-managed" } + } + }, + + // Inject system prompt + "experimental.chat.system.transform": async (input, output) => { + output.system += "\nThis project uses Dagster for orchestration. Prefer asset-based patterns." 
+ }, + + // React to events + event: async ({ event }) => { + if (event.type === "session.start") { + // Detect Dagster project and auto-configure + } + } + } +} + +export default dagsterPlugin +``` + +### 7.3 Available Hook Points + +| Hook | When It Fires | Partner Use Case | +|------|--------------|-----------------| +| `auth` | Authentication needed | OAuth with vendor cloud | +| `event` | Any system event | Project detection, telemetry | +| `config` | Config loaded | Inject vendor-specific defaults | +| `chat.message` | Message received | Message preprocessing | +| `chat.params` | Before LLM call | Adjust temperature, model | +| `chat.headers` | Before LLM call | Add custom headers | +| `permission.ask` | Permission requested | Auto-approve vendor tools | +| `command.execute.before` | Before command runs | Modify command | +| `tool.execute.before` | Before tool runs | Modify tool arguments | +| `tool.execute.after` | After tool runs | Process/enrich output | +| `tool.definition` | Tool registered | Modify tool descriptions | +| `shell.env` | Shell command runs | Inject env vars | +| `experimental.chat.system.transform` | System prompt built | Add vendor context | +| `experimental.session.compacting` | Context compaction | Preserve vendor state | + +### 7.4 Plugin Distribution + +```bash +# Published to npm +npm publish @dagster/altimate-plugin + +# Users install via config +# opencode.jsonc: +{ + "plugin": ["@dagster/altimate-plugin@latest"] +} + +# Or via CLI +altimate-code plugin install @dagster/altimate-plugin +``` + +### 7.5 Plugin Package Structure + +``` +@dagster/altimate-plugin/ +├── package.json +│ { +│ "name": "@dagster/altimate-plugin", +│ "version": "1.0.0", +│ "main": "./dist/index.js", +│ "peerDependencies": { +│ "@altimate/cli-plugin": "^1.2.0" +│ } +│ } +├── src/ +│ └── index.ts # Default export: Plugin function +├── skills/ # Bundled skills (optional) +│ └── dagster/ +│ └── creating-assets/SKILL.md +├── .mcp.json # Bundled MCP config 
(optional) +└── README.md +``` + +--- + +## 8. Kits: The Distribution Unit + +### 8.1 The Missing Piece + +Goose's most innovative pattern is **Recipes** — YAML files that bundle extensions + prompts + settings into shareable workflows. We should adopt this concept (renamed to **Kits** for differentiation). + +**Why kits matter for partners:** +- A Dagster skill alone is useful. A Dagster skill + Dagster MCP server + curated prompt + recommended settings = a **complete workflow**. +- Kits are the unit of distribution that partners can share with their community. + +### 8.2 Proposed Kit Format + +```yaml +# dagster-asset-development/KIT.yaml +name: dagster-asset-development +version: "1.0" +description: "Complete workflow for building Dagster assets with AI assistance" + +# Skills to activate +skills: + - source: "github:DagsterHQ/dagster-skills" + select: ["creating-dagster-assets", "testing-dagster-assets"] + +# MCP servers to enable +mcp: + dagster: + type: stdio + command: ["uvx", "dg", "mcp", "serve"] + env_keys: ["DAGSTER_HOME"] + +# Plugin to install (optional) +plugins: + - "@dagster/altimate-plugin@^1.0" + +# System instructions added to every conversation +instructions: | + This project uses Dagster for data orchestration. + Always prefer asset-based patterns over op/job patterns. + Use the `dg` CLI for scaffolding and validation. 
+ +# Parameters the user must provide +parameters: + - key: dagster_home + description: "Path to your Dagster project" + required: true + env: DAGSTER_HOME + +# Recommended settings +settings: + tools: + dagster.materialize: true + dagster.check: true +``` + +### 8.3 Kit Installation + +```bash +# From URL +altimate-code kit install https://dagster.io/kits/asset-development + +# From GitHub +altimate-code kit install DagsterHQ/dagster-kits/asset-development + +# One-liner deep link (for docs/blog posts) +altimate-code://kit?url=https://dagster.io/kits/asset-development +``` + +### 8.4 Kit as the Partner Onboarding Unit + +When a partner says "I want my tool to work with Altimate Code," the deliverable is a kit: +1. Partner writes skills (Layer 1) — 1 day +2. Partner already has MCP server (Layer 2) — 0 days (usually exists) +3. Partner bundles into kit — 1 hour +4. Kit goes into their docs: "Use Dagster with AI → install this kit" + +--- + +## 9. data-engineering-skills: The Open-Source Foundation + +### 9.1 Current State + +**Repo:** [AltimateAI/data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) (73 stars, MIT licensed) + +**Current skills (10):** +| Vendor | Skills | Benchmark Impact | +|--------|--------|-----------------| +| dbt | 7 (create, debug, test, document, migrate, refactor, incremental) | +7% on ADE-bench (46.5% → 53%) | +| Snowflake | 3 (find expensive queries, optimize by ID, optimize by text) | 84% pass on TPC-H 1TB | + +**Already uses Claude Code plugin format** (`.claude-plugin/marketplace.json`) + +### 9.2 Strategy: Make It the Central Hub + +Transform `data-engineering-skills` from "our skills repo" to "the community skills repo for data engineering": + +``` +data-engineering-skills/ +├── skills/ +│ ├── dbt/ # ✅ Exists (7 skills) +│ ├── snowflake/ # ✅ Exists (3 skills) +│ ├── dagster/ # 🆕 Partner-contributed +│ ├── airbyte/ # 🆕 Partner-contributed +│ ├── fivetran/ # 🆕 Partner-contributed +│ ├── airflow/ # 🆕 
Community-contributed +│ ├── spark/ # 🆕 Community-contributed +│ ├── bigquery/ # 🆕 Community-contributed +│ ├── databricks/ # 🆕 Community-contributed +│ └── great-expectations/ # 🆕 Community-contributed +├── kits/ # 🆕 Bundled kits +│ ├── dagster-development/KIT.yaml +│ ├── dbt-snowflake-pipeline/KIT.yaml +│ └── airbyte-ingestion/KIT.yaml +├── .claude-plugin/ +│ └── marketplace.json +├── benchmarks/ # 🆕 Benchmark results per skill +│ ├── ade-bench/ +│ └── spider2-dbt/ +├── CONTRIBUTING.md # Enhanced partner guide +├── PARTNER_GUIDE.md # 🆕 Detailed partner onboarding +└── README.md +``` + +### 9.3 Why This Works for Partners + +1. **Low barrier:** Partner writes 3-5 SKILL.md files in a PR — no SDK, no build system +2. **Credibility:** Published benchmarks prove skills improve AI performance +3. **Distribution:** Every Altimate Code user gets the skills; Claude Code users can install via marketplace +4. **Cross-promotion:** Partner's name appears in the repo, README, and marketplace listing +5. **Portable:** Skills work across 30+ AI agent products (not locked to Altimate Code) + +### 9.4 Partner Contribution Template + +```markdown + + +## Vendor: [Dagster] + +### Skills Added +- [ ] `creating-dagster-assets` — Asset creation workflow +- [ ] `debugging-dagster-runs` — Run failure diagnosis +- [ ] `testing-dagster-assets` — Asset testing patterns + +### Quality Checklist +- [ ] Each skill has actionable workflow steps (not reference docs) +- [ ] Each skill has a verification step +- [ ] Each skill has an anti-patterns section +- [ ] Skills reference MCP tools where applicable +- [ ] Skills tested with Claude/GPT-4 on real tasks +- [ ] Benchmark results included (if available) + +### MCP Server (optional) +- Package: `dg[mcp]` +- Install: `pip install "dg[mcp]"` +- Docs: https://dagster.io/docs/mcp + +### Kit (optional) +- [ ] KIT.yaml included in `kits/` +``` + +--- + +## 10. 
Onboarding Playbook + +### 10.1 Timeline + +``` +Week 0 (Kickoff) +├── Partner intro call +├── Share this document + CONTRIBUTING.md +└── Partner identifies 3-5 initial skills + +Week 1 (Skills) +├── Partner writes SKILL.md files +├── We review for quality (checklist above) +└── PR merged to data-engineering-skills + +Week 2 (MCP — if applicable) +├── Partner confirms their MCP server works with Altimate Code +├── We add recommended config to our docs +└── Test skill + MCP combination + +Week 3 (Kit + Launch) +├── Bundle into KIT.yaml +├── Co-authored blog post / announcement +├── Listed in our extension directory +└── Partner adds "Works with Altimate Code" badge to their docs +``` + +### 10.2 Support We Provide + +| Support | Description | +|---------|-------------| +| Skill review | Code review of SKILL.md files for quality | +| MCP testing | Verify their MCP server works in our runtime | +| Benchmark run | Run their skills through ADE-bench or Spider2 | +| Co-marketing | Blog post, social, newsletter mention | +| Badge/logo | "Works with Altimate Code" badge for their docs | +| Direct Slack channel | Shared Slack channel for partner support | + +### 10.3 What Partners Deliver + +| Deliverable | Required? | Format | +|-------------|-----------|--------| +| 3-5 SKILL.md files | Yes | Markdown (PR to data-engineering-skills) | +| MCP server config | If they have one | JSON snippet for our docs | +| KIT.yaml | Recommended | YAML file | +| Plugin package | Optional | npm package | +| Blog post draft | Recommended | Markdown (co-authored) | + +--- + +## 11. What We Need to Build + +### 11.1 Priority 1: Kit System (Weeks 1-3) + +The single biggest gap vs. Goose. Kits bundle skills + MCP + plugins + instructions into one installable unit.
+ +**Implementation:** +- KIT.yaml schema and parser +- `altimate-code kit install ` CLI command +- Kit auto-detection (suggest kit when project type detected) +- Kit storage in `~/.altimate/kits/` + +**Files to modify:** +- New: `packages/opencode/src/kit/` (schema, loader, installer) +- New: `packages/opencode/src/cli/cmd/kit.ts` (CLI command) +- Modify: `packages/opencode/src/config/` (kit config integration) + +### 11.2 Priority 2: Extension Directory (Weeks 2-4) + +A browseable catalog of skills, MCP servers, and kits. + +**Options:** +- **Minimal:** Curated page on docs site (like Goose's browse page) +- **Medium:** GitHub-based registry (index.json in a repo, auto-generated site) +- **Full:** API-backed marketplace with search, ratings, install counts + +**Recommendation:** Start with a GitHub-based registry. The `data-engineering-skills` repo already has `index.json` support via our `Discovery.pull()` mechanism. + +### 11.3 Priority 3: Auto-Detection & Suggestion (Weeks 3-5) + +When a user opens Altimate Code in a Dagster project, automatically suggest: +- "Detected Dagster project. Install Dagster skills + MCP server?" + +**Implementation:** +- Project type detection (look for `dagster.yaml`, `dbt_project.yml`, `airbyte/`, etc.) +- Suggestion UI in TUI +- One-command install of recommended kit + +### 11.4 Priority 4: Partner SDK Documentation (Week 1) + +Publish clear documentation for each layer: +- Skill Authoring Guide (from Section 5 above) +- MCP Integration Guide (from Section 6 above) +- Plugin Development Guide (from Section 7 above) +- Kit Bundling Guide (from Section 8 above) + +### 11.5 Priority 5: Skill Versioning (Weeks 4-6) + +Current gap: no way to pin skill versions or handle updates. + +**Proposed:** Use git tags/releases in skill repos. 
`skills.urls` entries become: +```json +{ + "skills": { + "urls": ["https://github.com/DagsterHQ/dagster-skills/releases/download/v1.2.0/"] + } +} +``` + +### 11.6 Engineering Work Summary + +| Item | Effort | Priority | Dependency | +|------|--------|----------|------------| +| KIT.yaml schema + parser | 3 days | P0 | None | +| `kit install` CLI command | 2 days | P0 | Schema | +| Kit auto-detection | 2 days | P1 | Kit system | +| Extension directory (GitHub-based) | 3 days | P1 | None | +| Partner SDK documentation site | 3 days | P1 | None | +| Skill versioning (git tags) | 2 days | P2 | None | +| Deep links (`altimate-code://`) | 2 days | P2 | Kit system | +| Extension malware scanning | 3 days | P3 | None | +| Install count telemetry | 1 day | P3 | None | + +--- + +## 12. Competitive Positioning + +### 12.1 Our Advantages vs. Goose + +| Dimension | Goose | Altimate Code | Winner | +|-----------|-------|---------------|--------| +| Data engineering focus | Generic | Purpose-built (99+ DE tools) | **Altimate** | +| Skills system | No skills | SKILL.md + benchmark-proven | **Altimate** | +| MCP support | Primary interface | Full support + auto-detect | Tie | +| Plugin hooks | None (MCP only) | 20+ hooks for deep integration | **Altimate** | +| Recipes / Kits | Yes (mature) | Kits (planned) | **Goose** | +| Extension directory | 70+ servers listed | Not yet (planned) | **Goose** | +| Deep links | Yes | Not yet (planned) | **Goose** | +| Warehouse integrations | None built-in | 10 warehouses native | **Altimate** | +| SQL/dbt tools | Via MCP only | 99+ native tools | **Altimate** | +| Custom distros | Documented | Possible but undocumented | **Goose** | + +### 12.2 Our Advantages vs. 
Generic AI Agents + +- **Vertical expertise:** 11 data engineering skills + 99 specialized tools +- **Benchmark-proven:** ADE-bench, Spider2-dbt results published +- **Warehouse-native:** Direct connections to 10 data warehouses +- **dbt-native:** Deep dbt integration (not just MCP proxy) +- **Python bridge:** Full Python analysis engine (altimate-engine) + +### 12.3 Positioning Statement + +> **Altimate Code is the AI data engineering agent that works with your entire data stack.** Install skills and MCP servers from your favorite tools — dbt, Dagster, Airbyte, Snowflake, and more — and get an AI assistant that truly understands your data platform. + +--- + +## 13. Appendix + +### 13.1 Research Sources + +**Goose (Block):** +- [GitHub](https://github.com/block/goose) | [Architecture](https://block.github.io/goose/docs/goose-architecture/) | [Extensions](https://block.github.io/goose/docs/getting-started/using-extensions/) | [Custom Extensions Tutorial](https://block.github.io/goose/docs/tutorials/custom-extensions/) | [Recipes](https://block.github.io/goose/docs/guides/recipes/) | [Custom Distros](https://github.com/block/goose/blob/main/CUSTOM_DISTROS.md) | [Browse Extensions](https://block.github.io/goose/extensions/) + +**Data Vendor MCP Servers:** +- [dbt MCP](https://docs.getdbt.com/docs/cloud/mcp-server) (58 tools) | [Dagster MCP](https://dagster.io/blog/dagsters-mcp-server) | [Airbyte MCP](https://airbyte.com/blog/how-we-built-an-mcp-server-to-create-data-pipelines) | [DataHub MCP](https://datahub.com/blog/datahub-mcp-server-block-ai-agents-use-case/) | [OpenMetadata Recipe](https://blog.open-metadata.org/announcing-our-first-openmetadata-goose-recipe-67d9249c2fd3) + +**Extension Ecosystems:** +- [agentskills.io](https://agentskills.io) (open standard, 30+ adopters) | [MCP Registry](https://registry.modelcontextprotocol.io) | [awesome-opencode](https://github.com/awesome-opencode/awesome-opencode) (50+ plugins) | [SkillsMP](https://skillsmp.com/) (2,300+ 
skills) | [awesome-agent-skills](https://github.com/heilcheng/awesome-agent-skills) (1,300+ skills) + +**Framework Patterns:** +- [Composio](https://github.com/ComposioHQ/composio) (hub-and-spoke providers) | [LangChain](https://github.com/langchain-ai/langchain) (separate packages) | [CrewAI](https://github.com/crewai/crewai) (decorator + class tools) | [Semantic Kernel](https://learn.microsoft.com/semantic-kernel/) (DI plugins, <20 tool recommendation) + +**Partner Ecosystem Benchmarks:** +- Marketplace review processes: 24 hours (Zoho) to 10 business days (HubSpot) +- Recertification: every 2 years +- VS Code pattern (5-day domain verification, automated checks) = lightest weight + +### 13.2 Glossary + +| Term | Definition | +|------|-----------| +| **SKILL.md** | Markdown file with YAML frontmatter teaching an AI how to approach a task | +| **MCP** | Model Context Protocol — standard for AI tools (Anthropic-led, adopted by industry) | +| **MCP Server** | A process that exposes tools/resources via the MCP protocol | +| **Plugin** | npm package that hooks into Altimate Code's runtime (auth, tools, chat) | +| **Kit** | YAML bundle of skills + MCP + plugins + instructions (KIT.yaml) | +| **Hook** | Interception point in plugin system (e.g., `tool.execute.before`) | +| **Agent Skills Standard** | Open standard at agentskills.io for portable AI skills | diff --git a/docs/docs/configure/index.md b/docs/docs/configure/index.md index d2df2d3ed8..aeefb15ca0 100644 --- a/docs/docs/configure/index.md +++ b/docs/docs/configure/index.md @@ -38,6 +38,14 @@ Set up your warehouses, LLM providers, and preferences. For agents, tools, skill [:octicons-arrow-right-24: MCP Servers](mcp-servers.md) · [:octicons-arrow-right-24: ACP Support](acp.md) +- :material-package-variant:{ .lg .middle } **Kits** + + --- + + Bundles of skills, MCP servers, and instructions. Activate a kit to get a complete development setup for dbt, Snowflake, Dagster, and more. 
+ + [:octicons-arrow-right-24: Kits](kits.md) + - :material-palette:{ .lg .middle } **Appearance** --- diff --git a/docs/docs/configure/kits.md b/docs/docs/configure/kits.md new file mode 100644 index 0000000000..32ffb2ab6d --- /dev/null +++ b/docs/docs/configure/kits.md @@ -0,0 +1,192 @@ +# Kits + +Kits bundle skills, MCP servers, and instructions into a single activatable unit. Instead of configuring each piece separately, activate a kit to get a complete development setup. + +## Quick Start + +```bash +# List available kits +altimate-code kit list + +# Auto-detect kits for your project +altimate-code kit detect + +# Activate a kit +altimate-code kit activate dbt-snowflake + +# Check active kits +altimate-code kit status + +# Deactivate +altimate-code kit deactivate dbt-snowflake +``` + +## Installing Kits + +Install kits from GitHub repositories or local paths: + +```bash +# From GitHub +altimate-code kit install AltimateAI/data-engineering-skills + +# From local path +altimate-code kit install ./my-kits + +# Install globally (available in all projects) +altimate-code kit install AltimateAI/data-engineering-skills --global +``` + +## KIT.yaml Format + +Kits are defined in `KIT.yaml` files: + +```yaml +name: my-kit +description: What this kit configures +version: 1.0.0 + +# Skills to install +skills: + - source: "owner/repo" + select: ["skill-a", "skill-b"] + +# MCP servers to configure +mcp: + server-name: + type: stdio + command: ["uvx", "my-mcp-server"] + env_keys: ["API_KEY"] + description: "Server description" + +# Instructions for every conversation +instructions: | + Project-specific conventions and rules. + +# Auto-detection rules +detect: + - files: ["config.yaml"] + message: "Detected my-tool — activate kit?" +``` + +## What `kit activate` Does + +When you activate a kit, it: + +1. **Installs skills** from referenced repositories into `.opencode/skills/` +2. **Configures MCP servers** by merging entries into your project's config file +3. 
**Creates instruction files** at `.opencode/instructions/kit-<name>.md`
+4. **Registers the kit** as active in `.opencode/active-kits`
+
+All changes are reversible with `kit deactivate`.
+
+## Creating Your Own Kit
+
+```bash
+altimate-code kit create my-team-standards
+```
+
+This scaffolds `.opencode/kits/my-team-standards/KIT.yaml` with a template. Edit it, then activate:
+
+```bash
+altimate-code kit activate my-team-standards
+```
+
+### Validating
+
+Check your kit for issues before sharing:
+
+```bash
+altimate-code kit validate my-team-standards
+```
+
+## Multiple Active Kits
+
+You can activate multiple kits simultaneously. Their MCP servers are merged and instruction files coexist:
+
+```bash
+altimate-code kit activate dbt-snowflake
+altimate-code kit activate my-team-standards
+altimate-code kit status # shows both
+```
+
+## Trust Tiers
+
+| Tier | Description |
+|------|-------------|
+| `built-in` | Ships with Altimate Code, maintained by the team |
+| `verified` | Published by official vendors, reviewed |
+| `community` | Created by anyone, use at your discretion |
+
+## Kit Locations
+
+Kits are discovered from:
+
+1. **Project**: `.opencode/kits/` and `.altimate-code/kits/`
+2. **Global**: `~/.config/altimate-code/kits/`
+3. **Config paths**: `kits.paths` in your config file
+4. 
**Installed**: `~/.local/share/altimate-code/kits/`
+
+## CLI Reference
+
+| Command | Description |
+|---------|-------------|
+| `kit list` | List all available kits |
+| `kit list --json` | JSON output for scripting |
+| `kit list --detect` | Show only project-matching kits |
+| `kit create <name>` | Scaffold a new kit |
+| `kit show <name>` | Display full kit details |
+| `kit install <source>` | Install from GitHub or local path |
+| `kit activate <name>` | Install skills, configure MCP, enable |
+| `kit activate <name> --yes` | Skip confirmation prompt |
+| `kit deactivate <name>` | Remove from active kits, clean up |
+| `kit remove <name>` | Delete an installed kit |
+| `kit detect` | Find kits matching current project |
+| `kit search [query]` | Search the kit registry |
+| `kit status` | Show active kits |
+| `kit validate [name]` | Validate kit format and references |
+
+## Sharing Kits
+
+Share kits via Git repositories. The recommended structure:
+
+```
+my-kits/
+  kits/
+    kit-a/KIT.yaml
+    kit-b/KIT.yaml
+  README.md
+```
+
+Others install with: `altimate-code kit install owner/my-kits`
+
+## Available Kits
+
+See [data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) for the official kit registry.
+
+## Roadmap
+
+The kit system is actively evolving based on community feedback. 
Here's what's planned: + +### Coming Soon + +| Feature | Description | Status | +|---------|-------------|--------| +| **`kit switch`** | Switch between kits in one command (deactivate all, activate one) | Planned | +| **Kit inheritance** | `extends: base-kit` to share conventions across kits | Planned | +| **`kit update`** | Pull newer versions of installed kits from source | Planned | +| **Registry expansion** | More built-in kits for BigQuery, Databricks, Airflow, Dagster | In progress | +| **`kit enforce`** | CI command that fails if required kits are not active | Planned | + +### Future + +| Feature | Description | +|---------|-------------| +| **Auto-activation** | Automatically suggest or activate kits when detection rules match on project open | +| **Kit locking** | Prevent deactivation of compliance-critical kits without admin override | +| **Conflict detection** | Warn when two active kits have contradictory instructions | +| **Kit analytics** | Activation counts and skill usage metrics for kit authors | +| **MCP tool filtering** | Allow kits to expose only specific tools from an MCP server | + +### Contributing to the Roadmap + +Have a feature request? [Open an issue](https://github.com/AltimateAI/altimate-code/issues) with the `kit` label, or contribute directly to the [data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) repo. diff --git a/docs/docs/develop/ecosystem.md b/docs/docs/develop/ecosystem.md index 3f847d5d41..92b789dd8f 100644 --- a/docs/docs/develop/ecosystem.md +++ b/docs/docs/develop/ecosystem.md @@ -18,6 +18,24 @@ altimate has a growing ecosystem of plugins, tools, and integrations. - **MCP**: Model Context Protocol servers - **ACP**: Agent Communication Protocol for editors +## Kits + +Kits bundle skills, MCP servers, and instructions into shareable development setups. Anyone can create and distribute kits. 
+ +| Kit | Description | +|-----|-------------| +| [dbt-snowflake](https://github.com/AltimateAI/data-engineering-skills/tree/main/kits/dbt-snowflake) | Complete dbt + Snowflake setup | + +Browse the [kit registry](https://github.com/AltimateAI/data-engineering-skills/blob/main/registry.json) for more. + +### Creating Kits + +See the [Kit documentation](../configure/kits.md) for the full guide, or run: + +```bash +altimate-code kit create my-kit +``` + ## Community - [GitHub Repository](https://github.com/AltimateAI/altimate-code): Source code, issues, discussions diff --git a/docs/docs/develop/kits.md b/docs/docs/develop/kits.md new file mode 100644 index 0000000000..d450298bbe --- /dev/null +++ b/docs/docs/develop/kits.md @@ -0,0 +1,422 @@ +# Building Kits + +This guide is for anyone who wants to **create and distribute kits** — vendors, solution architects, team leads, or community contributors. For using kits, see [Configure > Kits](../configure/kits.md). + +## What's in a Kit? + +A kit is a `KIT.yaml` file that bundles: + +- **Skills** — teach the AI how to approach tasks (from any Git repo) +- **MCP servers** — give the AI tools to execute tasks (standard MCP protocol) +- **Instructions** — project-specific rules injected into every conversation +- **Detection rules** — auto-suggest the kit when matching files exist + +## Tutorial: Build Your First Kit in 5 Minutes + +### Step 1: Scaffold + +```bash +altimate-code kit create my-first-kit +``` + +This creates `.opencode/kits/my-first-kit/KIT.yaml`: + +```yaml +name: my-first-kit +description: TODO — describe what this kit configures +version: 1.0.0 + +skills: + # - source: "owner/repo" + # select: ["skill-a", "skill-b"] + +mcp: + # my-server: + # command: ["uvx", "my-mcp-server"] + # env_keys: ["MY_API_KEY"] + +detect: + # - files: ["config.yaml"] + # message: "Detected my-tool — activate kit?" + +instructions: | + TODO — add project-specific instructions here. +``` + +### Step 2: Edit + +Fill in real content. 
Here's a complete example for an internal team: + +```yaml +name: acme-data-team +description: ACME Corp data engineering standards and conventions +version: 1.0.0 + +skills: + - source: "AltimateAI/data-engineering-skills" + select: + - creating-dbt-models + - testing-dbt-models + - debugging-dbt-errors + +mcp: + dbt: + type: stdio + command: ["uvx", "dbt-mcp"] + env: + DBT_PROJECT_DIR: "./" + env_keys: ["DBT_PROJECT_DIR"] + description: "dbt MCP server for model development" + +detect: + - files: ["dbt_project.yml"] + message: "Detected dbt project — activate ACME data team kit?" + +instructions: | + ## ACME Data Team Conventions + + - Table naming: dim_*, fct_*, stg_*, int_* + - All models must have unique + not_null tests on primary keys + - Use ref() for all model references + - Warehouse sizing: XS for dev, M for staging, L for prod + - Code review required for any model touching PII columns +``` + +### Step 3: Validate + +```bash +altimate-code kit validate my-first-kit +``` + +Output: +``` +Validating: my-first-kit + + ✓ Name "my-first-kit" is valid + ✓ Description present + ✓ Version "1.0.0" is valid semver + ✓ 1 skill source(s) defined + ✓ MCP "dbt": command defined + ⚠ MCP "dbt": env var DBT_PROJECT_DIR is NOT set + ✓ 1 detection rule(s) defined + ✓ Instructions present (10 lines) + +Validation: PASS +``` + +### Step 4: Activate + +```bash +altimate-code kit activate my-first-kit +``` + +### Step 5: Share + +Commit the kit to your repo. Others install with: + +```bash +altimate-code kit install owner/repo +``` + +## KIT.yaml Schema Reference + +### Required Fields + +| Field | Type | Description | +|-------|------|-------------| +| `name` | string | Lowercase, hyphens, 2-64 chars. 
Must match `^[a-z][a-z0-9]*(-[a-z0-9]+)*$` | +| `description` | string | One-line summary of what the kit configures | + +### Optional Fields + +| Field | Type | Default | Description | +|-------|------|---------|-------------| +| `version` | string | `"1.0.0"` | Semver version | +| `author` | string | — | Author name or organization | +| `tier` | string | `"community"` | Trust tier: `built-in`, `verified`, `community`, `archived` | +| `skills` | array | `[]` | Skills to install (see below) | +| `skill_packs` | object | `{}` | Grouped skills with activation modes (see below) | +| `mcp` | object | `{}` | MCP servers to configure (see below) | +| `plugins` | array | `[]` | npm packages to install | +| `instructions` | string | — | Text injected into every AI conversation | +| `detect` | array | `[]` | File patterns that trigger kit suggestion | + +### Skills + +Skills reference external repositories containing `SKILL.md` files: + +```yaml +skills: + # Install specific skills from a repo + - source: "AltimateAI/data-engineering-skills" + select: + - creating-dbt-models + - testing-dbt-models + + # Install all skills from a repo (omit select) + - source: "owner/skills-repo" + + # Reference an already-installed skill by name + - "my-existing-skill" +``` + +The `source` field accepts: +- GitHub shorthand: `owner/repo` +- Full URL: `https://github.com/owner/repo` +- Local path: `./my-skills` + +### Skill Packs + +For kits with many skills, organize them into packs with activation modes: + +```yaml +skill_packs: + core: + description: "Essential skills loaded every session" + activation: always + skills: + - source: "owner/repo" + select: ["skill-a", "skill-b"] + + advanced: + description: "Skills loaded when matching files exist" + activation: detect + detect: + - files: ["**/advanced/**"] + skills: + - source: "owner/repo" + select: ["skill-c"] + + specialized: + description: "Skills loaded only on explicit request" + activation: manual + skills: + - source: "owner/repo" 
+ select: ["skill-d"] +``` + +| Activation | Behavior | +|-----------|----------| +| `always` | Skills loaded every session when kit is active | +| `detect` | Skills loaded when matching files exist in the project | +| `manual` | Skills loaded only when the user explicitly requests them | + +!!! note + When `skill_packs` is present, it takes precedence over the flat `skills` array. Use one or the other, not both. + +### MCP Servers + +Configure MCP (Model Context Protocol) servers that give the AI tools to call: + +```yaml +mcp: + my-server: + type: stdio # "stdio" for local, "sse" or "remote" for HTTP + command: ["uvx", "my-server"] # Command to start the server + args: ["--port", "8080"] # Additional arguments (merged with command) + env: # Environment variables passed to the server + API_KEY: "default-value" + env_keys: ["API_KEY"] # Env vars the user must set (warns if missing) + description: "What this server provides" +``` + +**Type mapping:** The kit uses user-friendly names that are translated to the config format: + +| Kit type | Config type | Use case | +|----------|-----------|----------| +| `stdio` (default) | `local` | Local process via stdin/stdout | +| `sse` | `remote` | Server-sent events over HTTP | +| `streamable-http` | `remote` | Streamable HTTP | + +**Environment variables:** + +- `env`: Default values passed to the MCP server process +- `env_keys`: Names of variables the user must set. Kit activation warns if these are missing. Use this for API keys and secrets that shouldn't have defaults. + +### Detection Rules + +Auto-suggest the kit when certain files exist in the project: + +```yaml +detect: + - files: ["dbt_project.yml", "dbt_project.yaml"] + message: "Detected dbt project — activate this kit?" 
+ + - files: ["**/dagster/**", "workspace.yaml"] + message: "Detected Dagster project" +``` + +- `files`: Array of glob patterns matched against the project directory +- `message`: Optional suggestion text shown to the user + +Users discover matching kits via `kit detect` or `kit list --detect`. The TUI also shows a nudge on startup when matching kits are found. + +### Instructions + +Free-form text injected into the AI's system context for every conversation when the kit is active: + +```yaml +instructions: | + ## Team Conventions + + - Use snake_case for all column names + - All monetary values in cents (integer), not dollars + - Every model must have a primary key test + - Do NOT use SELECT * in production models +``` + +**Best practices for instructions:** + +- Keep them under 50 lines — longer instructions consume more context tokens +- Be specific and actionable — "use snake_case" is better than "follow naming conventions" +- Use markdown headers to organize sections +- Include "DO NOT" rules for common mistakes +- Avoid duplicating what skills already teach + +## Publishing to the Registry + +The kit registry is hosted at [AltimateAI/data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills). + +### For Community Contributors + +1. Create your kit in your own GitHub repo +2. Test with `kit validate` and `kit activate` +3. Submit a PR to [data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) adding an entry to `registry.json`: + +```json +{ + "name": "my-kit", + "description": "What it does", + "version": "1.0.0", + "author": "Your Name", + "tier": "community", + "repo": "your-org/your-repo", + "path": "kits/my-kit", + "tags": ["dbt", "bigquery"], + "detect": ["dbt_project.yml"] +} +``` + +### For Vendors (Verified Tier) + +To get your kit listed as `verified`: + +1. Create skills and a kit in your organization's GitHub repo +2. Test thoroughly with `kit validate` and real-world projects +3. 
Submit a PR to the registry with `"tier": "verified"` +4. The Altimate team reviews the kit for quality and correctness +5. Once approved, your kit appears with a `[verified]` badge + +**Verified tier requirements:** + +- Skills follow the [Agent Skills](https://agentskills.io) specification +- MCP server is published to PyPI or npm +- Detection rules are accurate (no false positives) +- Instructions are clear and well-structured +- Kit is actively maintained + +## Examples + +### Instructions-Only Kit (Team Standards) + +No skills, no MCP — just team conventions: + +```yaml +name: team-standards +description: Engineering standards for the analytics team +version: 1.0.0 + +instructions: | + - All SQL in lowercase + - CTEs over subqueries + - No SELECT * in production + - Every PR needs a dbt test + +detect: + - files: ["dbt_project.yml"] +``` + +### MCP-Only Kit (Tool Integration) + +No skills, no instructions — just MCP configuration: + +```yaml +name: airbyte-connector +description: Airbyte PyAirbyte MCP server for data pipeline development +version: 1.0.0 + +mcp: + airbyte: + type: stdio + command: ["uvx", "pyairbyte-mcp"] + env_keys: ["AIRBYTE_API_KEY"] + description: "PyAirbyte — generate pipelines with 600+ connectors" + +detect: + - files: ["**/airbyte_*.py", "airbyte.yaml"] +``` + +### Full Kit (Skills + MCP + Instructions) + +The complete package: + +```yaml +name: dbt-snowflake +description: Complete dbt + Snowflake development setup +version: 1.0.0 +author: Altimate AI +tier: built-in + +skills: + - source: "AltimateAI/data-engineering-skills" + select: + - creating-dbt-models + - testing-dbt-models + - debugging-dbt-errors + +mcp: + dbt: + type: stdio + command: ["uvx", "dbt-mcp"] + env: + DBT_PROJECT_DIR: "./" + DBT_PROFILES_DIR: "~/.dbt" + env_keys: ["DBT_PROJECT_DIR", "DBT_PROFILES_DIR"] + description: "dbt MCP server — SQL execution, semantic layer, discovery API" + +instructions: | + This project uses dbt with Snowflake. 
+ - Use ref() for all model references + - Follow staging → intermediate → marts layering + - Run dbt build (not just compile) to verify changes + +detect: + - files: ["dbt_project.yml"] + message: "Detected dbt project — activate dbt-snowflake kit?" +``` + +## Troubleshooting + +### Kit not showing in `kit list` + +- Check the `KIT.yaml` file is valid: `kit validate ` +- Ensure the file is named exactly `KIT.yaml` (case-sensitive) +- Check the kit directory is under `.opencode/kits/` or another scanned location + +### Skills fail to install during `kit activate` + +- The `source` repo must be accessible (public GitHub or reachable URL) +- Skills that already exist locally are skipped with a warning +- If a source fails, other components (MCP, instructions) still install + +### MCP server doesn't start after activation + +- Check `kit validate` for missing environment variables +- Set required env vars in your shell profile or `.env` file +- Verify the MCP command is installed: run the command manually (e.g., `uvx dbt-mcp --help`) + +### `kit deactivate` didn't clean up + +- `kit deactivate` removes: instruction files, active-kits entry, and MCP config entries +- Skills installed by the kit are NOT removed (they may be shared with other kits) +- To fully clean up skills, remove them from `.opencode/skills/` manually diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index aadb7db2f4..cae88d1793 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -121,6 +121,7 @@ nav: - MCPs & ACPs: - MCP Servers: configure/mcp-servers.md - ACP Support: configure/acp.md + - Kits: configure/kits.md - Appearance: - Themes: configure/themes.md - Keybinds: configure/keybinds.md @@ -147,4 +148,5 @@ nav: - SDK: develop/sdk.md - Server API: develop/server.md - Plugins: develop/plugins.md + - Building Kits: develop/kits.md - Ecosystem: develop/ecosystem.md diff --git a/packages/opencode/src/altimate/telemetry/index.ts b/packages/opencode/src/altimate/telemetry/index.ts index 
9e1564eae6..c1d560c44b 100644 --- a/packages/opencode/src/altimate/telemetry/index.ts +++ b/packages/opencode/src/altimate/telemetry/index.ts @@ -387,6 +387,42 @@ export namespace Telemetry { source: "cli" | "tui" } // altimate_change end + // altimate_change start — kit: telemetry events for kit management + | { + type: "kit_created" + timestamp: number + session_id: string + kit_name: string + source: "cli" | "tui" + } + | { + type: "kit_installed" + timestamp: number + session_id: string + install_source: string + kit_count: number + kit_names: string[] + source: "cli" | "tui" + } + | { + type: "kit_applied" + timestamp: number + session_id: string + kit_name: string + skill_count: number + mcp_count: number + plugin_count: number + has_instructions: boolean + source: "cli" | "tui" + } + | { + type: "kit_removed" + timestamp: number + session_id: string + kit_name: string + source: "cli" | "tui" + } + // altimate_change end | { type: "sql_execute_failure" timestamp: number diff --git a/packages/opencode/src/cli/cmd/kit.ts b/packages/opencode/src/cli/cmd/kit.ts new file mode 100644 index 0000000000..abe560b049 --- /dev/null +++ b/packages/opencode/src/cli/cmd/kit.ts @@ -0,0 +1,1366 @@ +// altimate_change start — kit: top-level `kit` command for managing kit bundles +import { EOL } from "os" +import path from "path" +import fs from "fs/promises" +import { Kit } from "../../kit" +import { Skill } from "../../skill" +import { bootstrap } from "../bootstrap" +import { cmd } from "./cmd" +import { Instance } from "../../project/instance" +import { Global } from "@/global" +import { Telemetry } from "@/altimate/telemetry" +// altimate_change start — kit: jsonc-parser for comment-preserving config writes +import { modify, applyEdits } from "jsonc-parser" +// altimate_change end + +// --------------------------------------------------------------------------- +// KIT.yaml template +// --------------------------------------------------------------------------- + +function 
kitTemplate(name: string): string {
+  return `name: ${name}
+description: TODO — describe what this kit configures
+version: 1.0.0
+
+# Skills to install (from external repos or already-installed names)
+skills:
+  # - source: "owner/repo"
+  #   select: ["skill-a", "skill-b"]
+
+# MCP servers to configure
+mcp:
+  # - my-server:
+  #   command: ["uvx", "my-mcp-server"]
+  #   env_keys: ["MY_API_KEY"]
+
+# Auto-detection rules
+detect:
+  # - files: ["config.yaml"]
+  #   message: "Detected my-tool — activate kit?"
+
+# Instructions added to every conversation
+instructions: |
+  TODO — add project-specific instructions here.
+`
+}
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+async function findConfigFile(rootDir: string): Promise<{ filePath: string; config: Record<string, any> }> {
+  const candidates = [
+    path.join(rootDir, ".opencode", "opencode.json"),
+    path.join(rootDir, ".opencode", "opencode.jsonc"),
+    path.join(rootDir, ".altimate-code", "altimate-code.json"),
+    path.join(rootDir, ".altimate-code", "altimate-code.jsonc"),
+    path.join(rootDir, "opencode.json"),
+    path.join(rootDir, "opencode.jsonc"),
+    path.join(rootDir, "altimate-code.json"),
+    path.join(rootDir, "altimate-code.jsonc"),
+  ]
+
+  for (const candidate of candidates) {
+    try {
+      const raw = await fs.readFile(candidate, "utf-8")
+      // Strip single-line comments for JSONC files
+      const cleaned = candidate.endsWith(".jsonc")
+        ? 
raw.replace(/^\s*\/\/.*$/gm, "").replace(/,(\s*[}\]])/g, "$1")
+        : raw
+      return { filePath: candidate, config: JSON.parse(cleaned) }
+    } catch {
+      // try next
+    }
+  }
+
+  // No config found — create one in .opencode/
+  const defaultPath = path.join(rootDir, ".opencode", "opencode.json")
+  await fs.mkdir(path.dirname(defaultPath), { recursive: true })
+  const defaultConfig: Record<string, any> = {}
+  await fs.writeFile(defaultPath, JSON.stringify(defaultConfig, null, 2) + EOL, "utf-8")
+  return { filePath: defaultPath, config: defaultConfig }
+}
+
+// altimate_change start — kit: JSONC-aware config writes that preserve comments
+async function writeConfigField(filePath: string, fieldPath: string[], value: unknown): Promise<void> {
+  let text = "{}"
+  try { text = await fs.readFile(filePath, "utf-8") } catch {}
+  const edits = modify(text, fieldPath, value, {
+    formattingOptions: { tabSize: 2, insertSpaces: true },
+  })
+  const result = applyEdits(text, edits)
+  await fs.writeFile(filePath, result, "utf-8")
+}
+
+async function removeConfigField(filePath: string, fieldPath: string[]): Promise<boolean> {
+  let text: string
+  try { text = await fs.readFile(filePath, "utf-8") } catch { return false }
+  const edits = modify(text, fieldPath, undefined, {
+    formattingOptions: { tabSize: 2, insertSpaces: true },
+  })
+  if (edits.length === 0) return false
+  const result = applyEdits(text, edits)
+  await fs.writeFile(filePath, result, "utf-8")
+  return true
+}
+// altimate_change end
+
+async function cloneSource(source: string): Promise<{ dir: string; cloned: boolean }> {
+  let url: string | undefined
+  let normalized = source.trim().replace(/\.git$/, "")
+
+  // Normalize GitHub web URLs (e.g.
  // /tree/main/path)
  const ghWebMatch = normalized.match(/^https?:\/\/github\.com\/([^/]+\/[^/]+?)(?:\/(?:tree|blob)\/.*)?$/)
  if (ghWebMatch) {
    url = `https://github.com/${ghWebMatch[1]}.git`
  } else if (normalized.startsWith("http://") || normalized.startsWith("https://")) {
    url = normalized
  } else if (normalized.match(/^[a-zA-Z0-9_-]+\/[a-zA-Z0-9._-]+$/)) {
    // Check if it's a local path first (e.g., "examples/kits" looks like "owner/repo")
    const resolvedLocal = path.isAbsolute(normalized) ? normalized : path.resolve(normalized)
    try {
      await fs.access(resolvedLocal)
      // It exists on disk — treat as local path, not GitHub shorthand
      return { dir: resolvedLocal, cloned: false }
    } catch {
      // Not a local path — treat as GitHub shorthand
      url = `https://github.com/${normalized}.git`
    }
  }

  if (url) {
    // Shallow clone into a uniquely-named cache dir; cleanupTmp() removes it later.
    // NOTE(review): Date.now() alone could collide for concurrent installs — confirm acceptable.
    const tmpDir = path.join(Global.Path.cache, "kit-install-" + Date.now())
    const proc = Bun.spawnSync(["git", "clone", "--depth", "1", "--", url, tmpDir], {
      stdout: "pipe",
      stderr: "pipe",
    })
    if (proc.exitCode !== 0) {
      throw new Error(`Failed to clone ${url}: ${proc.stderr.toString()}`)
    }
    return { dir: tmpDir, cloned: true }
  }

  // Local path
  const resolved = path.isAbsolute(normalized) ?
    normalized : path.resolve(normalized)
  try {
    await fs.access(resolved)
  } catch {
    throw new Error(`Path not found: ${resolved}`)
  }
  return { dir: resolved, cloned: false }
}

// Delete a temporary clone directory; no-op for local (non-cloned) sources.
// The startsWith guard ensures we never rm outside the cache dir.
async function cleanupTmp(dir: string, cloned: boolean) {
  if (cloned && dir.startsWith(Global.Path.cache)) {
    await fs.rm(dir, { recursive: true, force: true })
  }
}

// ---------------------------------------------------------------------------
// Subcommands
// ---------------------------------------------------------------------------

// `kit list` — enumerate all kits, optionally filtered to those whose
// detection rules match the current project, as a table or JSON.
const KitListCommand = cmd({
  command: "list",
  describe: "list all available kits",
  builder: (yargs) =>
    yargs
      .option("json", {
        type: "boolean",
        describe: "output as JSON",
        default: false,
      })
      .option("detect", {
        type: "boolean",
        describe: "show only kits matching the current project",
        default: false,
      }),
  async handler(args) {
    await bootstrap(process.cwd(), async () => {
      let kits = await Kit.all()

      if (args.detect) {
        const detected = await Kit.detect()
        const detectedNames = new Set(detected.map((d) => d.kit.name))
        kits = kits.filter((r) => detectedNames.has(r.name))
      }

      // Sort alphabetically
      kits.sort((a, b) => a.name.localeCompare(b.name))

      if (args.json) {
        // altimate_change start — kit: add tier + skill_packs to JSON output
        const enriched = kits.map((kit) => {
          // skill_packs, when present, take precedence over the flat skills array
          const hasPacks = kit.skill_packs && Object.keys(kit.skill_packs).length > 0
          return {
            name: kit.name,
            tier: kit.tier || "community",
            version: kit.version,
            author: kit.author,
            description: kit.description,
            components: {
              skills: hasPacks
                ? Object.values(kit.skill_packs!).reduce((sum, pack) => sum + (pack.skills?.length || 0), 0)
                : (Array.isArray(kit.skills) ? kit.skills.length : 0),
              skill_packs: hasPacks ? Object.keys(kit.skill_packs!).length : 0,
              mcp: kit.mcp ? Object.keys(kit.mcp).length : 0,
              plugins: Array.isArray(kit.plugins) ?
kit.plugins.length : 0, + }, + location: kit.location, + } + }) + // altimate_change end + process.stdout.write(JSON.stringify(enriched, null, 2) + EOL) + return + } + + // Human-readable table output + if (kits.length === 0) { + if (args.detect) { + process.stdout.write("No kits matched detection rules for this project." + EOL) + process.stdout.write(EOL + `See all kits: altimate-code kit list` + EOL) + } else { + process.stdout.write("No kits found." + EOL) + process.stdout.write(EOL + `Create one with: altimate-code kit create ` + EOL) + } + return + } + + // altimate_change start — kit: add tier column to table output + // Calculate column widths + const nameWidth = Math.max(6, ...kits.map((r) => r.name.length)) + const tierWidth = 12 + const versionWidth = Math.max(7, ...kits.map((r) => (r.version || "").length)) + + const header = `${"KIT".padEnd(nameWidth)} ${"TIER".padEnd(tierWidth)} ${"VERSION".padEnd(versionWidth)} ${"COMPONENTS".padEnd(20)} DESCRIPTION` + const separator = "─".repeat(header.length) + + process.stdout.write(EOL) + process.stdout.write(header + EOL) + process.stdout.write(separator + EOL) + + for (const kit of kits) { + // Count skills from skill_packs if present, otherwise flat skills array + const hasPacks = kit.skill_packs && Object.keys(kit.skill_packs).length > 0 + const skillCount = hasPacks + ? Object.values(kit.skill_packs!).reduce((sum, pack) => sum + (pack.skills?.length || 0), 0) + : (Array.isArray(kit.skills) ? kit.skills.length : 0) + const mcpCount = kit.mcp ? Object.keys(kit.mcp).length : 0 + const pluginCount = Array.isArray(kit.plugins) ? kit.plugins.length : 0 + const packCount = hasPacks ? Object.keys(kit.skill_packs!).length : 0 + const components = hasPacks + ? `${skillCount}sk ${packCount}pk ${mcpCount}mcp` + : `${skillCount}sk ${mcpCount}mcp ${pluginCount}pl` + + const tier = kit.tier || "community" + const tierBadge = tier !== "community" ? 
`[${tier}]` : "" + + let desc = kit.description || "" + if (desc.length > 50) { + desc = desc.slice(0, 50) + const lastSpace = desc.lastIndexOf(" ") + if (lastSpace > 30) desc = desc.slice(0, lastSpace) + desc += "..." + } + + process.stdout.write( + `${kit.name.padEnd(nameWidth)} ${tierBadge.padEnd(tierWidth)} ${(kit.version || "—").padEnd(versionWidth)} ${components.padEnd(20)} ${desc}` + EOL, + ) + } + // altimate_change end + + process.stdout.write(EOL) + process.stdout.write(`${kits.length} kit(s) found.` + EOL) + process.stdout.write(`Create a new kit: altimate-code kit create ` + EOL) + }) + }, +}) + +const KitCreateCommand = cmd({ + command: "create ", + describe: "scaffold a new kit", + builder: (yargs) => + yargs.positional("name", { + type: "string", + describe: "name of the kit to create", + demandOption: true, + }), + async handler(args) { + const name = args.name as string + + // Validate name before bootstrap (fast fail) + if (!/^[a-z][a-z0-9]*(-[a-z0-9]+)*$/.test(name) || name.length < 2) { + process.stderr.write( + `Error: Kit name must be lowercase alphanumeric with hyphens, at least 2 chars (e.g., "dbt-snowflake")` + EOL, + ) + process.exit(1) + } + if (name.length > 64) { + process.stderr.write(`Error: Kit name must be 64 characters or fewer` + EOL) + process.exit(1) + } + + await bootstrap(process.cwd(), async () => { + const rootDir = Instance.worktree !== "/" ? 
Instance.worktree : Instance.directory + + const kitDir = path.join(rootDir, ".opencode", "kits", name) + const kitFile = path.join(kitDir, "KIT.yaml") + + try { + await fs.access(kitFile) + process.stderr.write(`Error: Kit already exists at ${kitFile}` + EOL) + process.exit(1) + } catch { + // File doesn't exist, good + } + + await fs.mkdir(kitDir, { recursive: true }) + await fs.writeFile(kitFile, kitTemplate(name), "utf-8") + process.stdout.write(`✓ Created kit: ${path.relative(rootDir, kitFile)}` + EOL) + + // altimate_change start — telemetry + try { + Telemetry.track({ + type: "kit_created", + timestamp: Date.now(), + session_id: Telemetry.getContext().sessionId || "", + kit_name: name, + source: "cli", + }) + } catch {} + // altimate_change end + + process.stdout.write(EOL) + process.stdout.write(`Next steps:` + EOL) + process.stdout.write(` 1. Edit .opencode/kits/${name}/KIT.yaml — configure skills, MCP servers, and instructions` + EOL) + process.stdout.write(` 2. Activate it: altimate-code kit activate ${name}` + EOL) + }) + }, +}) + +const KitShowCommand = cmd({ + command: "show ", + describe: "display kit details", + builder: (yargs) => + yargs.positional("name", { + type: "string", + describe: "name of the kit to show", + demandOption: true, + }), + async handler(args) { + const name = args.name as string + await bootstrap(process.cwd(), async () => { + const kit = await Kit.get(name) + if (!kit) { + process.stderr.write(`Error: Kit "${name}" not found.` + EOL) + process.exit(1) + } + + const hasPacks = kit.skill_packs && Object.keys(kit.skill_packs).length > 0 + const skillCount = hasPacks + ? Object.values(kit.skill_packs!).reduce((sum, pack) => sum + (pack.skills?.length || 0), 0) + : (Array.isArray(kit.skills) ? kit.skills.length : 0) + const mcpCount = kit.mcp ? Object.keys(kit.mcp).length : 0 + const pluginCount = Array.isArray(kit.plugins) ? 
kit.plugins.length : 0 + + process.stdout.write(EOL) + process.stdout.write(` Name: ${kit.name}` + EOL) + process.stdout.write(` Description: ${kit.description || "—"}` + EOL) + process.stdout.write(` Version: ${kit.version || "—"}` + EOL) + process.stdout.write(` Author: ${kit.author || "—"}` + EOL) + process.stdout.write(` Tier: ${kit.tier || "community"}` + EOL) + process.stdout.write(` Location: ${kit.location}` + EOL) + process.stdout.write(EOL) + + // Skill packs (if present, takes precedence over flat skills) + if (hasPacks) { + const packs = Object.entries(kit.skill_packs!) + process.stdout.write(` Skill Packs (${packs.length}):` + EOL) + for (const [packName, pack] of packs) { + const badge = pack.activation === "always" ? "●" : pack.activation === "detect" ? "◐" : "○" + process.stdout.write(` ${badge} ${packName} (${pack.activation}, ${pack.skills.length} skills)` + EOL) + if (pack.description) { + process.stdout.write(` ${pack.description}` + EOL) + } + for (const skill of pack.skills) { + if (typeof skill === "string") { + process.stdout.write(` - ${skill}` + EOL) + } else { + const selected = skill.select ? ` [${skill.select.join(", ")}]` : "" + process.stdout.write(` - ${skill.source}${selected}` + EOL) + } + } + } + } else { + // Flat skills + process.stdout.write(` Skills (${skillCount}):` + EOL) + if (skillCount > 0) { + for (const skill of kit.skills!) { + if (typeof skill === "string") { + process.stdout.write(` - ${skill}` + EOL) + } else { + const selected = skill.select ? ` [${skill.select.join(", ")}]` : "" + process.stdout.write(` - ${skill.source}${selected}` + EOL) + } + } + } else { + process.stdout.write(` (none)` + EOL) + } + } + + // MCP servers + process.stdout.write(` MCP Servers (${mcpCount}):` + EOL) + if (mcpCount > 0) { + for (const [serverName, serverConfig] of Object.entries(kit.mcp!)) { + const desc = (serverConfig as Record).description || "" + process.stdout.write(` - ${serverName}${desc ? 
`: ${desc}` : ""}` + EOL) + } + } else { + process.stdout.write(` (none)` + EOL) + } + + // Plugins + process.stdout.write(` Plugins (${pluginCount}):` + EOL) + if (pluginCount > 0) { + for (const plugin of kit.plugins!) { + process.stdout.write(` - ${plugin}` + EOL) + } + } else { + process.stdout.write(` (none)` + EOL) + } + + // Detection rules + const detectCount = Array.isArray(kit.detect) ? kit.detect.length : 0 + if (detectCount > 0) { + process.stdout.write(EOL) + process.stdout.write(` Detection Rules (${detectCount}):` + EOL) + for (const rule of kit.detect!) { + const files = Array.isArray(rule.files) ? rule.files.join(", ") : "—" + process.stdout.write(` - files: [${files}]` + EOL) + if (rule.message) { + process.stdout.write(` message: ${rule.message}` + EOL) + } + } + } + + // Instructions + if (kit.instructions) { + process.stdout.write(EOL + "─".repeat(60) + EOL + EOL) + process.stdout.write(`Instructions:` + EOL + EOL) + process.stdout.write(kit.instructions + EOL) + } + }) + }, +}) + +const KitInstallCommand = cmd({ + command: "install ", + describe: "install a kit from GitHub or a local path", + builder: (yargs) => + yargs + .positional("source", { + type: "string", + describe: "GitHub repo (owner/repo), URL, or local path", + demandOption: true, + }) + .option("global", { + alias: "g", + type: "boolean", + describe: "install globally instead of per-project", + default: false, + }), + async handler(args) { + const source = (args.source as string).trim().replace(/\.git$/, "") + const isGlobal = args.global as boolean + + if (!source) { + process.stderr.write(`Error: Source is required. Use owner/repo, URL, or local path.` + EOL) + process.exit(1) + } + + await bootstrap(process.cwd(), async () => { + const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory + const targetDir = isGlobal + ? 
        path.join(Global.Path.config, "kits")
        : path.join(rootDir, ".opencode", "kits")

      let fetchDir: string
      let cloned = false

      try {
        const result = await cloneSource(source)
        fetchDir = result.dir
        cloned = result.cloned
        if (cloned) {
          process.stdout.write(`Fetching from ${source}...` + EOL)
        }
      } catch (err) {
        process.stderr.write(`Error: ${(err as Error).message}` + EOL)
        process.exit(1)
        return // unreachable but satisfies TS
      }

      // Find all KIT.yaml / KIT.yml / KIT.md files
      const { Glob: BunGlob } = globalThis.Bun
      const patterns = ["**/KIT.yaml", "**/KIT.yml", "**/KIT.md"]
      const matches: string[] = []
      for (const pattern of patterns) {
        const glob = new BunGlob(pattern)
        for await (const match of glob.scan({ cwd: fetchDir, absolute: true })) {
          if (!match.includes("/.git/")) matches.push(match)
        }
      }

      if (matches.length === 0) {
        process.stderr.write(`Error: No KIT.yaml/KIT.yml/KIT.md files found in ${source}` + EOL)
        await cleanupTmp(fetchDir, cloned)
        process.exit(1)
      }

      let installed = 0
      const installedNames: string[] = []

      for (const kitFile of matches) {
        const kitParent = path.dirname(kitFile)

        // Parse the YAML to get the kit name (don't rely on directory name)
        let kitName: string
        try {
          const matter = (await import("gray-matter")).default
          const raw = await fs.readFile(kitFile, "utf-8")
          const ext = path.extname(kitFile).toLowerCase()
          // KIT.md carries YAML front-matter natively; plain YAML files are
          // wrapped in synthetic front-matter fences so gray-matter can parse them.
          const parsed = ext === ".md" ?
            matter(raw) : matter("---\n" + raw + "\n---")
          kitName = (parsed.data.name as string) || path.basename(kitParent)
        } catch {
          kitName = path.basename(kitParent)
        }

        // Avoid using temp dir names as kit names
        if (kitName.startsWith("kit-install-")) {
          process.stdout.write(` ⚠ Skipping "${kitFile}" — could not determine kit name` + EOL)
          continue
        }

        const dest = path.join(targetDir, kitName)

        // Check if already installed
        try {
          await fs.access(dest)
          process.stdout.write(` ⚠ Skipping "${kitName}" — already exists` + EOL)
          continue
        } catch {
          // Not installed, proceed
        }

        // Copy only the kit directory (not repo root — skip .git, node_modules, etc.)
        await fs.mkdir(dest, { recursive: true })
        const files = await fs.readdir(kitParent)
        for (const file of files) {
          // Skip common non-kit files when copying from repo root
          if ([".git", "node_modules", ".github", "LICENSE", "README.md"].includes(file)) continue
          const src = path.join(kitParent, file)
          const dst = path.join(dest, file)
          const stat = await fs.lstat(src)
          // Symlinks are skipped to avoid copying links pointing outside the kit
          if (stat.isSymbolicLink()) continue
          if (stat.isFile()) {
            await fs.copyFile(src, dst)
          } else if (stat.isDirectory()) {
            await fs.cp(src, dst, { recursive: true, dereference: false })
          }
        }
        process.stdout.write(` ✓ Installed "${kitName}" → ${path.relative(rootDir, dest)}` + EOL)
        installedNames.push(kitName)
        installed++
      }

      await cleanupTmp(fetchDir, cloned)

      process.stdout.write(EOL)
      if (installed > 0) {
        process.stdout.write(`${installed} kit(s) installed${isGlobal ?
" globally" : ""}.` + EOL) + // altimate_change start — telemetry + try { + Telemetry.track({ + type: "kit_installed", + timestamp: Date.now(), + session_id: Telemetry.getContext().sessionId || "", + install_source: source, + kit_count: installed, + kit_names: installedNames, + source: "cli", + }) + } catch {} + // altimate_change end + } else { + process.stdout.write(`No new kits installed.` + EOL) + } + }) + }, +}) + +// altimate_change start — kit: KitApplyCommand removed, functionality merged into KitActivateCommand +// altimate_change end + +const KitRemoveCommand = cmd({ + command: "remove ", + describe: "remove an installed kit", + builder: (yargs) => + yargs.positional("name", { + type: "string", + describe: "name of the kit to remove", + demandOption: true, + }), + async handler(args) { + const name = args.name as string + await bootstrap(process.cwd(), async () => { + const kit = await Kit.get(name) + if (!kit) { + process.stderr.write(`Error: Kit "${name}" not found.` + EOL) + process.exit(1) + } + + // Check if kit is tracked by git (part of the repo, not user-installed) + const kitDir = path.dirname(kit.location) + const gitCheck = Bun.spawnSync(["git", "ls-files", "--error-unmatch", kit.location], { + cwd: path.dirname(kitDir), + stdout: "pipe", + stderr: "pipe", + }) + if (gitCheck.exitCode === 0) { + process.stderr.write(`Error: Cannot remove "${name}" — it is tracked by git.` + EOL) + process.stderr.write(`This kit is part of the repository, not user-installed.` + EOL) + process.exit(1) + } + + // Safety: only remove if the directory looks like a kit directory + // (contains the KIT file and is not a top-level scan directory) + const kitBasename = path.basename(kitDir) + if (kitBasename === "kits" || kitBasename === "kit" || kitDir === Instance.directory) { + // The KIT.yaml is at a scan root — only remove the file, not the directory + await fs.rm(kit.location, { force: true }) + process.stdout.write(` ✓ Removed kit file: ${kit.location}` + EOL) + 
} else { + await fs.rm(kitDir, { recursive: true, force: true }) + process.stdout.write(` ✓ Removed kit: ${kitDir}` + EOL) + } + + // Deactivate if active, then invalidate cache + await Kit.deactivate(name) + Kit.invalidate() + + // altimate_change start — kit: clean up instruction file on remove + const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory + const instructionsFile = path.join(rootDir, ".opencode", "instructions", `kit-${name}.md`) + try { + await fs.access(instructionsFile) + await fs.rm(instructionsFile, { force: true }) + process.stdout.write(` ✓ Removed instructions: ${path.relative(rootDir, instructionsFile)}` + EOL) + } catch { + // No instructions file, that's fine + } + // altimate_change end + + // altimate_change start — telemetry + try { + Telemetry.track({ + type: "kit_removed", + timestamp: Date.now(), + session_id: Telemetry.getContext().sessionId || "", + kit_name: name, + source: "cli", + }) + } catch {} + // altimate_change end + + process.stdout.write(EOL + `Kit "${name}" removed.` + EOL) + }) + }, +}) + +const KitDetectCommand = cmd({ + command: "detect", + describe: "auto-detect which kits match the current project", + builder: (yargs) => yargs, + async handler() { + await bootstrap(process.cwd(), async () => { + const detected = await Kit.detect() + + if (detected.length === 0) { + process.stdout.write("No matching kits detected for this project." 
+ EOL) + process.stdout.write(EOL + `Browse available kits: altimate-code kit list` + EOL) + return + } + + process.stdout.write(EOL) + process.stdout.write(`Detected ${detected.length} matching kit(s):` + EOL + EOL) + + for (const match of detected) { + process.stdout.write(` ${match.kit.name}` + EOL) + if (match.kit.description) { + process.stdout.write(` ${match.kit.description}` + EOL) + } + if (match.matched && match.matched.length > 0) { + process.stdout.write(` Matched files: ${match.matched.join(", ")}` + EOL) + } + // Show the first detection rule that has a message + const firstRuleWithMessage = match.kit.detect?.find((d) => d.message) + if (firstRuleWithMessage?.message) { + process.stdout.write(` ${firstRuleWithMessage.message}` + EOL) + } + process.stdout.write(EOL) + } + + process.stdout.write(`Activate a kit: altimate-code kit activate ` + EOL) + }) + }, +}) + +// altimate_change start — kit: activate subcommand (merged apply + activate into one command) +const KitActivateCommand = cmd({ + command: "activate ", + describe: "activate a kit — install skills, configure MCP, and enable for this project", + builder: (yargs) => + yargs + .positional("name", { + type: "string", + describe: "name of the kit to activate", + demandOption: true, + }) + .option("yes", { + alias: "y", + type: "boolean", + describe: "skip confirmation prompt", + default: false, + }), + async handler(args) { + const name = args.name as string + await bootstrap(process.cwd(), async () => { + const kit = await Kit.get(name) + if (!kit) { + process.stderr.write(`Error: Kit "${name}" not found. Install it first with: altimate-code kit install ` + EOL) + process.exit(1) + } + + const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory + const tier = kit.tier || "community" + const tierBadge = tier !== "community" ? 
` [${tier}]` : "" + + // Get all skills — from skill_packs if present, otherwise flat skills + const allSkills = (kit.skill_packs && Object.keys(kit.skill_packs).length > 0) + ? Kit.allSkillsFromPacks(kit) + : (kit.skills || []) + const skillCount = allSkills.length + const mcpCount = kit.mcp ? Object.keys(kit.mcp).length : 0 + const pluginCount = Array.isArray(kit.plugins) ? kit.plugins.length : 0 + const hasInstructions = !!kit.instructions && !kit.instructions.startsWith("TODO") + + // --- Preview --- + process.stdout.write(EOL) + process.stdout.write(`Kit: ${kit.name}${tierBadge} (v${kit.version || "0.0.0"})` + EOL) + process.stdout.write(`${kit.description || ""}` + EOL) + process.stdout.write(EOL + "The following changes will be applied:" + EOL + EOL) + + if (skillCount > 0) { + process.stdout.write(` Skills (${skillCount}):` + EOL) + for (const skill of allSkills) { + if (typeof skill === "string") { + process.stdout.write(` + ${skill} (reference existing)` + EOL) + } else { + const selected = skill.select ? skill.select.join(", ") : "all" + process.stdout.write(` + ${skill.source} [${selected}]` + EOL) + } + } + process.stdout.write(EOL) + } + + if (mcpCount > 0) { + process.stdout.write(` MCP Servers (${mcpCount}):` + EOL) + for (const [serverName, serverConfig] of Object.entries(kit.mcp!)) { + const desc = (serverConfig as Record).description || "" + process.stdout.write(` + ${serverName}${desc ? 
` — ${desc}` : ""}` + EOL) + } + process.stdout.write(EOL) + } + + if (hasInstructions) { + process.stdout.write(` Instructions:` + EOL) + process.stdout.write(` + .opencode/instructions/kit-${name}.md` + EOL) + process.stdout.write(EOL) + } + + if (skillCount === 0 && mcpCount === 0 && pluginCount === 0 && !hasInstructions) { + // Still activate (add to active-kits) even if empty — user explicitly asked + await Kit.activate(name) + Kit.invalidate() + process.stdout.write(`Kit "${name}" activated (no changes to apply — kit is empty).` + EOL) + return + } + + // --- Confirmation --- + if (!args.yes) { + process.stdout.write(`Activate this kit? [y/N] `) + const response = await new Promise((resolve) => { + let data = "" + const onData = (chunk: Buffer) => { + data += chunk.toString() + if (data.includes("\n")) { + process.stdin.removeListener("data", onData) + process.stdin.pause() + resolve(data.trim().toLowerCase()) + } + } + const onEnd = () => { + process.stdin.removeListener("data", onData) + resolve(data.trim().toLowerCase()) + } + process.stdin.resume() + process.stdin.on("data", onData) + process.stdin.on("end", onEnd) + }) + + if (response !== "y" && response !== "yes") { + process.stdout.write(`Cancelled.` + EOL) + return + } + } + + process.stdout.write(EOL) + + // altimate_change start — kit: track skill install failures for accurate status message + let skillFailures = 0 + // altimate_change end + + // --- 1. 
// Install skills ---
      if (skillCount > 0) {
        for (const skill of allSkills) {
          if (typeof skill === "string") {
            // Bare string = reference to an already-installed skill; never fetched here
            const existing = await Skill.get(skill)
            if (!existing) {
              process.stdout.write(` ⚠ Skill "${skill}" not found — install it separately` + EOL)
            } else {
              process.stdout.write(` ✓ Skill "${skill}" already available` + EOL)
            }
          } else {
            let fetchDir: string
            let cloned = false
            try {
              const result = await cloneSource(skill.source)
              fetchDir = result.dir
              cloned = result.cloned
            } catch (err) {
              // Non-fatal: count the failure and continue with remaining skills
              process.stdout.write(` ✗ Failed to fetch ${skill.source}: ${(err as Error).message}` + EOL)
              skillFailures++
              continue
            }

            const { Glob: BunGlob } = globalThis.Bun
            const glob = new BunGlob("**/SKILL.md")
            const skillMatches: string[] = []
            for await (const match of glob.scan({ cwd: fetchDir, absolute: true })) {
              if (!match.includes("/.git/")) skillMatches.push(match)
            }

            const targetSkillsDir = path.join(rootDir, ".opencode", "skills")

            for (const skillFile of skillMatches) {
              const skillParent = path.dirname(skillFile)
              const skillName = path.basename(skillParent)

              // Honor the kit's `select` allow-list, when present
              if (skill.select && !skill.select.includes(skillName)) continue

              const dest = path.join(targetSkillsDir, skillName)
              try {
                await fs.access(dest)
                process.stdout.write(` ⚠ Skill "${skillName}" already exists, skipping` + EOL)
                continue
              } catch { /* good */ }

              await fs.mkdir(dest, { recursive: true })
              const files = await fs.readdir(skillParent)
              for (const file of files) {
                const src = path.join(skillParent, file)
                const dst = path.join(dest, file)
                const stat = await fs.lstat(src)
                // Symlinks skipped so a kit can't smuggle links outside its tree
                if (stat.isSymbolicLink()) continue
                if (stat.isFile()) {
                  await fs.copyFile(src, dst)
                } else if (stat.isDirectory()) {
                  await fs.cp(src, dst, { recursive: true, dereference: false })
                }
              }
              process.stdout.write(` ✓ Installed skill "${skillName}"` + EOL)
            }

            await cleanupTmp(fetchDir, cloned)
          }
        }
      }

      // --- 2.
Configure MCP servers and plugins (JSONC-aware, preserves comments) --- + if (mcpCount > 0 || pluginCount > 0) { + const { filePath } = await findConfigFile(rootDir) + const missingEnvKeys: string[] = [] + + if (mcpCount > 0) { + for (const [serverName, serverDef] of Object.entries(kit.mcp!)) { + const def = serverDef as Record + const kitType = (def.type as string) || "stdio" + let configEntry: Record + + if (kitType === "sse" || kitType === "streamable-http" || kitType === "remote") { + configEntry = { type: "remote", url: def.url as string, ...(def.headers ? { headers: def.headers } : {}) } + } else { + const command = [...((def.command as string[]) || []), ...((def.args as string[]) || [])] + configEntry = { type: "local", command, ...(def.env ? { environment: def.env } : {}) } + } + + // Write each MCP server using JSONC-preserving modify + await writeConfigField(filePath, ["mcp", serverName], configEntry) + process.stdout.write(` ✓ Configured MCP server "${serverName}"` + EOL) + + const envKeys = def.env_keys + if (Array.isArray(envKeys)) { + for (const key of envKeys as string[]) { + if (!process.env[key]) missingEnvKeys.push(key) + } + } + } + } + + if (pluginCount > 0) { + // Read current plugins, add new ones, write back + const { config } = await findConfigFile(rootDir) + const plugins = (config.plugin ?? []) as string[] + let changed = false + for (const plugin of kit.plugins!) 
{ + if (!plugins.includes(plugin)) { + plugins.push(plugin) + changed = true + process.stdout.write(` ✓ Added plugin "${plugin}"` + EOL) + } + } + if (changed) { + await writeConfigField(filePath, ["plugin"], plugins) + } + } + + process.stdout.write(` ✓ Updated config: ${path.relative(rootDir, filePath)}` + EOL) + + if (missingEnvKeys.length > 0) { + process.stdout.write(EOL) + process.stdout.write(` ⚠ Missing environment variables:` + EOL) + for (const key of missingEnvKeys) { + process.stdout.write(` - ${key}` + EOL) + } + process.stdout.write(` Set them in your shell profile or .env file.` + EOL) + } + } + + // --- 3. Add instructions --- + if (hasInstructions) { + const instructionsDir = path.join(rootDir, ".opencode", "instructions") + const instructionsFile = path.join(instructionsDir, `kit-${name}.md`) + await fs.mkdir(instructionsDir, { recursive: true }) + await fs.writeFile(instructionsFile, kit.instructions!, "utf-8") + process.stdout.write(` ✓ Created instructions: ${path.relative(rootDir, instructionsFile)}` + EOL) + } + + // --- 4. 
Activate (add to active-kits) --- + await Kit.activate(name) + Kit.invalidate() + + process.stdout.write(EOL) + // altimate_change start — kit: report partial failures in activation message + if (skillFailures > 0) { + process.stdout.write(`Kit "${name}" activated with ${skillFailures} skill source(s) unavailable.` + EOL) + process.stdout.write(`Run 'altimate-code kit show ${name}' to see expected skills.` + EOL) + } else { + process.stdout.write(`Kit "${name}" activated successfully.` + EOL) + } + // altimate_change end + + try { + Telemetry.track({ + type: "kit_applied", + timestamp: Date.now(), + session_id: Telemetry.getContext().sessionId || "", + kit_name: name, + skill_count: skillCount, + mcp_count: mcpCount, + plugin_count: pluginCount, + has_instructions: hasInstructions, + source: "cli", + }) + } catch {} + }) + }, +}) +// altimate_change end + +// altimate_change start — kit: deactivate subcommand +const KitDeactivateCommand = cmd({ + command: "deactivate ", + describe: "deactivate a kit for the current project", + builder: (yargs) => + yargs.positional("name", { + type: "string", + describe: "name of the kit to deactivate", + demandOption: true, + }), + async handler(args) { + const name = args.name as string + await bootstrap(process.cwd(), async () => { + // Read kit BEFORE deactivating so we know what MCP servers to clean + const kit = await Kit.get(name) + + await Kit.deactivate(name) + process.stdout.write(`✓ Deactivated kit: ${name}` + EOL) + + const rootDir = Instance.worktree !== "/" ? 
Instance.worktree : Instance.directory + + // altimate_change start — kit: clean up instruction file on deactivate + const instructionsFile = path.join(rootDir, ".opencode", "instructions", `kit-${name}.md`) + try { + await fs.access(instructionsFile) + await fs.rm(instructionsFile, { force: true }) + process.stdout.write(` ✓ Removed instructions: ${path.relative(rootDir, instructionsFile)}` + EOL) + } catch {} + // altimate_change end + + // altimate_change start — kit: clean up MCP config entries added by this kit (JSONC-preserving) + if (kit?.mcp && Object.keys(kit.mcp).length > 0) { + try { + const { filePath } = await findConfigFile(rootDir) + let removed = 0 + for (const serverName of Object.keys(kit.mcp)) { + if (await removeConfigField(filePath, ["mcp", serverName])) { + removed++ + } + } + if (removed > 0) { + process.stdout.write(` ✓ Removed ${removed} MCP server(s) from config` + EOL) + } + } catch {} + } + // altimate_change end + }) + }, +}) +// altimate_change end + +// altimate_change start — kit: search subcommand +const REGISTRY_URL = "https://raw.githubusercontent.com/AltimateAI/data-engineering-skills/main/registry.json" + +const KitSearchCommand = cmd({ + command: "search [query]", + describe: "search the kit registry", + builder: (yargs) => + yargs + .positional("query", { + type: "string", + describe: "search query (matches name, description, tags)", + }) + .option("json", { + type: "boolean", + describe: "output as JSON", + default: false, + }), + async handler(args) { + const query = ((args.query as string) || "").toLowerCase().trim() + + await bootstrap(process.cwd(), async () => { + process.stdout.write(`Searching kit registry...` + EOL) + + // altimate_change start — kit: graceful 404 + timeout for registry fetch + let registry: any + const controller = new AbortController() + const timeout = setTimeout(() => controller.abort(), 5000) + try { + const response = await fetch(REGISTRY_URL, { signal: controller.signal }) + 
clearTimeout(timeout) + if (!response.ok) { + if (response.status === 404) { + process.stdout.write(`Kit registry not available yet.` + EOL) + process.stdout.write(EOL + `Browse local kits: altimate-code kit list` + EOL) + process.stdout.write(`Create your own: altimate-code kit create ` + EOL) + return + } + process.stderr.write(`Error: Failed to fetch registry (${response.status})` + EOL) + process.exit(1) + } + registry = await response.json() + } catch (err) { + clearTimeout(timeout) + if ((err as Error).name === "AbortError") { + process.stdout.write(`Kit registry unavailable (timeout).` + EOL) + } else { + process.stderr.write(`Error: Failed to fetch registry: ${(err as Error).message}` + EOL) + } + process.stdout.write(EOL + `Browse local kits: altimate-code kit list` + EOL) + process.exit(1) + } + // altimate_change end + + const kits = (registry.kits || []) as Array<{ + name: string + description: string + version: string + author: string + tier: string + repo: string + path: string + tags: string[] + detect: string[] + stats?: { installs?: number; last_updated?: string } + }> + + // Filter by query + const results = query + ? kits.filter((r) => { + const searchable = [r.name, r.description, ...(r.tags || []), r.author || ""].join(" ").toLowerCase() + return searchable.includes(query) + }) + : kits + + if (args.json) { + process.stdout.write(JSON.stringify(results, null, 2) + EOL) + return + } + + if (results.length === 0) { + process.stdout.write(`No kits found${query ? 
` matching "${query}"` : ""}.` + EOL) + return + } + + // Table output + const nameWidth = Math.max(6, ...results.map((r) => r.name.length)) + const tierWidth = 10 + + const header = `${"KIT".padEnd(nameWidth)} ${"TIER".padEnd(tierWidth)} DESCRIPTION` + const separator = "─".repeat(header.length) + + process.stdout.write(EOL) + process.stdout.write(header + EOL) + process.stdout.write(separator + EOL) + + for (const kit of results) { + let desc = kit.description || "" + if (desc.length > 50) { + desc = desc.slice(0, 50) + const lastSpace = desc.lastIndexOf(" ") + if (lastSpace > 30) desc = desc.slice(0, lastSpace) + desc += "..." + } + + const tier = kit.tier || "community" + process.stdout.write(`${kit.name.padEnd(nameWidth)} ${tier.padEnd(tierWidth)} ${desc}` + EOL) + } + + process.stdout.write(EOL) + process.stdout.write(`${results.length} kit(s) found in registry.` + EOL) + process.stdout.write(`Install with: altimate-code kit install ` + EOL) + }) + }, +}) +// altimate_change end + +// altimate_change start — kit: status subcommand +const KitStatusCommand = cmd({ + command: "status", + describe: "show active kits for the current project", + builder: (yargs) => yargs, + async handler() { + await bootstrap(process.cwd(), async () => { + const activeKits = await Kit.active() + + if (activeKits.length === 0) { + process.stdout.write("No active kits for this project." + EOL) + process.stdout.write(EOL + `Activate one: altimate-code kit activate ` + EOL) + process.stdout.write(`Auto-detect: altimate-code kit detect` + EOL) + return + } + + process.stdout.write(EOL) + process.stdout.write(`Active kits (${activeKits.length}):` + EOL + EOL) + + for (const kit of activeKits) { + const tier = kit.tier || "community" + const tierBadge = tier !== "community" ? 
` [${tier}]` : "" + process.stdout.write(` ${kit.name}${tierBadge}` + EOL) + if (kit.description) { + process.stdout.write(` ${kit.description}` + EOL) + } + + // Show skill packs if any + if (kit.skill_packs && Object.keys(kit.skill_packs).length > 0) { + for (const [packName, pack] of Object.entries(kit.skill_packs)) { + const badge = pack.activation === "always" ? "●" : pack.activation === "detect" ? "◐" : "○" + process.stdout.write(` ${badge} ${packName} (${pack.activation}, ${pack.skills.length} skills)` + EOL) + } + } + + process.stdout.write(EOL) + } + }) + }, +}) +// altimate_change end + +// altimate_change start — kit: validate subcommand +const KitValidateCommand = cmd({ + command: "validate [name]", + describe: "validate a kit's YAML format and references", + builder: (yargs) => + yargs.positional("name", { + type: "string", + describe: "name of the kit to validate (defaults to all)", + }), + async handler(args) { + const targetName = args.name as string | undefined + await bootstrap(process.cwd(), async () => { + const kits = targetName ? [await Kit.get(targetName)].filter(Boolean) : await Kit.all() + + if (kits.length === 0) { + if (targetName) { + process.stderr.write(`Error: Kit "${targetName}" not found.` + EOL) + process.exit(1) + } + process.stdout.write("No kits to validate." + EOL) + return + } + + let hasErrors = false + const pass = (msg: string) => process.stdout.write(` ✓ ${msg}` + EOL) + const fail = (msg: string) => { process.stdout.write(` ✗ ${msg}` + EOL); hasErrors = true } + const warn = (msg: string) => process.stdout.write(` ⚠ ${msg}` + EOL) + + for (const kit of kits as Kit.Info[]) { + process.stdout.write(EOL + `Validating: ${kit.name}` + EOL + EOL) + + // 1. Name format + if (/^[a-z][a-z0-9]*(-[a-z0-9]+)*$/.test(kit.name)) { + pass(`Name "${kit.name}" is valid`) + } else { + fail(`Name "${kit.name}" has invalid format (must be lowercase, hyphens, 2+ chars)`) + } + + // 2. 
Description + if (kit.description && !kit.description.startsWith("TODO")) { + pass(`Description present`) + } else { + warn(`Description is missing or starts with TODO`) + } + + // 3. Version + if (kit.version && /^\d+\.\d+\.\d+/.test(kit.version)) { + pass(`Version "${kit.version}" is valid semver`) + } else { + warn(`Version "${kit.version || "(none)"}" may not be valid semver`) + } + + // 4. Skills references + const allSkills = (kit.skill_packs && Object.keys(kit.skill_packs).length > 0) + ? Kit.allSkillsFromPacks(kit) + : (kit.skills || []) + if (allSkills.length > 0) { + pass(`${allSkills.length} skill source(s) defined`) + for (const skill of allSkills) { + if (typeof skill === "string") { + pass(` Skill reference: "${skill}"`) + } else { + if (!skill.source) { + fail(` Skill source is empty`) + } else { + pass(` Skill source: "${skill.source}"${skill.select ? ` [${skill.select.join(", ")}]` : ""}`) + } + } + } + } else { + warn(`No skills defined`) + } + + // 5. MCP servers + if (kit.mcp && Object.keys(kit.mcp).length > 0) { + for (const [name, config] of Object.entries(kit.mcp)) { + const cfg = config as Record + const type = (cfg.type as string) || "stdio" + if (type === "stdio" || type === "local") { + if (cfg.command && Array.isArray(cfg.command) && (cfg.command as string[]).length > 0) { + pass(`MCP "${name}": command defined`) + } else { + fail(`MCP "${name}": missing command for stdio server`) + } + } else if (type === "sse" || type === "streamable-http" || type === "remote") { + if (cfg.url) { + pass(`MCP "${name}": URL defined`) + } else { + fail(`MCP "${name}": missing url for remote server`) + } + } + + // Check env_keys + if (Array.isArray(cfg.env_keys)) { + for (const key of cfg.env_keys as string[]) { + if (process.env[key]) { + pass(`MCP "${name}": env var ${key} is set`) + } else { + warn(`MCP "${name}": env var ${key} is NOT set`) + } + } + } + } + } + + // 6. 
Detection rules + if (kit.detect && kit.detect.length > 0) { + pass(`${kit.detect.length} detection rule(s) defined`) + } else { + warn(`No detection rules — kit won't appear in 'kit detect'`) + } + + // 7. Instructions + if (kit.instructions && !kit.instructions.startsWith("TODO")) { + pass(`Instructions present (${kit.instructions.split("\n").length} lines)`) + } else { + warn(`Instructions missing or placeholder`) + } + } + + process.stdout.write(EOL) + if (hasErrors) { + process.stdout.write(`Validation: FAIL — fix the issues above` + EOL) + process.exitCode = 1 + } else { + process.stdout.write(`Validation: PASS` + EOL) + } + }) + }, +}) +// altimate_change end + +// --------------------------------------------------------------------------- +// Top-level kit command +// --------------------------------------------------------------------------- + +export const KitCommand = cmd({ + command: "kit", + describe: "manage kits — bundles of skills, MCP servers, and plugins", + builder: (yargs) => + yargs + .command(KitListCommand) + .command(KitCreateCommand) + .command(KitShowCommand) + .command(KitInstallCommand) + .command(KitRemoveCommand) + .command(KitDetectCommand) + // altimate_change start — kit: register new subcommands + .command(KitActivateCommand) + .command(KitDeactivateCommand) + .command(KitSearchCommand) + .command(KitStatusCommand) + .command(KitValidateCommand) + // altimate_change end + .demandCommand(), + async handler() {}, +}) +// altimate_change end diff --git a/packages/opencode/src/cli/cmd/tui/thread.ts b/packages/opencode/src/cli/cmd/tui/thread.ts index 1fa1540fd8..20efe06ec8 100644 --- a/packages/opencode/src/cli/cmd/tui/thread.ts +++ b/packages/opencode/src/cli/cmd/tui/thread.ts @@ -14,6 +14,10 @@ import type { EventSource } from "./context/sdk" import { win32DisableProcessedInput, win32InstallCtrlCGuard } from "./win32" import { TuiConfig } from "@/config/tui" import { Instance } from "@/project/instance" +// altimate_change start — 
kit: import Kit for startup detection nudge +import { Kit } from "@/kit/kit" +import { EOL } from "os" +// altimate_change end declare global { const OPENCODE_WORKER_PATH: string @@ -173,6 +177,27 @@ export const TuiThreadCommand = cmd({ fn: () => TuiConfig.get(), }) + // altimate_change start — kit: non-blocking kit detection nudge on TUI startup + Instance.provide({ + directory: cwd, + fn: async () => { + try { + const activeKits = await Kit.active() + if (activeKits.length > 0) return // already has active kits, no nudge needed + const detected = await Kit.detect() + if (detected.length > 0) { + const first = detected[0] + process.stderr.write( + `\x1b[2m\u{1F4A1} Kit available: ${first.kit.name} \u2014 run /kit activate ${first.kit.name}\x1b[0m` + EOL, + ) + } + } catch { + // Kit detection is best-effort; never block startup + } + }, + }).catch(() => {}) + // altimate_change end + const network = await resolveNetworkOptions(args) const external = process.argv.includes("--port") || diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index a19a18379c..52c3b81521 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -1076,6 +1076,9 @@ export namespace Config { .optional() .describe("Command configuration, see https://altimate.ai/docs/commands"), skills: Skills.optional().describe("Additional skill folder paths"), + // altimate_change start — kit: config schema for kit paths and URLs + kits: Skills.optional().describe("Additional kit folder paths and URLs (same shape as skills config)"), + // altimate_change end watcher: z .object({ ignore: z.array(z.string()).optional(), diff --git a/packages/opencode/src/index.ts b/packages/opencode/src/index.ts index 9a32dd9670..5772514d48 100644 --- a/packages/opencode/src/index.ts +++ b/packages/opencode/src/index.ts @@ -36,6 +36,9 @@ import { TraceCommand } from "./cli/cmd/trace" // altimate_change start — top-level skill command import { 
SkillCommand } from "./cli/cmd/skill" // altimate_change end +// altimate_change start — kit: top-level kit command +import { KitCommand } from "./cli/cmd/kit" +// altimate_change end // altimate_change start — check: deterministic SQL check command import { CheckCommand } from "./cli/cmd/check" // altimate_change end @@ -208,6 +211,9 @@ let cli = yargs(hideBin(process.argv)) // altimate_change start — top-level skill command .command(SkillCommand) // altimate_change end + // altimate_change start — kit: register kit command + .command(KitCommand) + // altimate_change end // altimate_change start — check: register deterministic SQL check command .command(CheckCommand) // altimate_change end diff --git a/packages/opencode/src/kit/index.ts b/packages/opencode/src/kit/index.ts new file mode 100644 index 0000000000..010ec61362 --- /dev/null +++ b/packages/opencode/src/kit/index.ts @@ -0,0 +1,3 @@ +// altimate_change start — kit: re-export kit module +export { Kit } from "./kit" +// altimate_change end diff --git a/packages/opencode/src/kit/kit.ts b/packages/opencode/src/kit/kit.ts new file mode 100644 index 0000000000..efd7cd9243 --- /dev/null +++ b/packages/opencode/src/kit/kit.ts @@ -0,0 +1,430 @@ +// altimate_change start — kit: core kit module for bundling skills + MCP + plugins + instructions +import z from "zod" +import path from "path" +import { mkdir, writeFile, unlink } from "fs/promises" +import matter from "gray-matter" +import { Config } from "../config/config" +import { Instance } from "../project/instance" +import { State } from "../project/state" +import { Log } from "../util/log" +import { Global } from "@/global" +import { Filesystem } from "@/util/filesystem" +import { Glob } from "../util/glob" + +export namespace Kit { + const log = Log.create({ service: "kit" }) + + // Kit YAML schema - this is what goes in KIT.yaml frontmatter or body + export const McpConfig = z.object({ + // Kit uses user-friendly names: "stdio" → mapped to "local", 
"sse"/"streamable-http" → mapped to "remote" + type: z.enum(["stdio", "sse", "streamable-http", "local", "remote"]).default("stdio"), + command: z.array(z.string()).optional(), + args: z.array(z.string()).optional(), + url: z.string().optional(), + env: z.record(z.string(), z.string()).optional(), + env_keys: z + .array(z.string()) + .optional() + .describe("Env var names that must be set by the user"), + description: z.string().optional(), + }) + + // altimate_change start — kit: trust tier enum for kit provenance + export const Tier = z + .string() + .transform((v) => v?.toLowerCase()) + .pipe(z.enum(["built-in", "verified", "community", "archived"])) + .default("community") + export type Tier = z.infer + // altimate_change end + + // altimate_change start — kit: skill pack schema for grouped skill activation + export const SkillPack = z.object({ + description: z.string().optional(), + skills: z + .array( + z.union([ + z.string(), + z.object({ + source: z.string(), + select: z.array(z.string()).optional(), + }), + ]), + ) + .default([]), + activation: z.enum(["always", "detect", "manual", "deferred"]).default("always"), + detect: z + .array( + z.object({ + files: z.array(z.string()), + }), + ) + .nullable() + .optional() + .transform((v) => v ?? []) + .default([]), + }) + export type SkillPack = z.infer + // altimate_change end + + export const Info = z.object({ + name: z.string(), + description: z.string(), + version: z.string().optional().default("1.0.0"), + author: z.string().optional(), + location: z.string(), // filesystem path where the kit was loaded from + + // altimate_change start — kit: trust tier field + // Trust tier + tier: Tier.nullable().optional().transform((v) => v ?? 
"community").default("community"), + // altimate_change end + + // altimate_change start — kit: skill packs with activation modes + // Skill packs — organized groups of skills with activation modes + // When present, takes precedence over flat `skills` array + skill_packs: z + .record(z.string(), SkillPack) + .nullable() + .optional() + .transform((v) => v ?? {}) + .default({}), + // altimate_change end + + // What the kit bundles + // Note: YAML parses `key: []` with trailing comments as null, so we accept nullable + skills: z + .array( + z.union([ + z.string(), // skill name (already installed) + z.object({ + source: z + .string() + .describe("GitHub repo (owner/repo) or URL to fetch skills from"), + select: z + .array(z.string()) + .optional() + .describe("Specific skill names to install from source"), + }), + ]), + ) + .nullable() + .optional() + .transform((v) => v ?? []) + .default([]), + + mcp: z + .record(z.string(), McpConfig) + .nullable() + .optional() + .transform((v) => v ?? {}) + .default({}), + + plugins: z + .array(z.string()) + .nullable() + .optional() + .transform((v) => v ?? []) + .default([]) + .describe("npm package specs, e.g. @dagster/altimate-plugin@^1.0"), + + instructions: z + .string() + .nullable() + .optional() + .transform((v) => v ?? undefined) + .describe("Additional system instructions added to every conversation"), + + // Auto-detection: when to suggest this kit + detect: z + .array( + z.object({ + files: z + .array(z.string()) + .describe("Glob patterns that indicate this kit is relevant"), + message: z + .string() + .optional() + .describe("Custom suggestion message"), + }), + ) + .nullable() + .optional() + .transform((v) => v ?? []) + .default([]), + + // The full markdown content (instructions, docs, etc.) + content: z.string().nullable().optional().transform((v) => v ?? 
"").default(""), + }) + export type Info = z.infer + + // --- State management (mirrors Skill.state pattern) --- + + const KIT_FILE_PATTERN = "KIT.{yaml,yml,md}" + + const stateInit: () => Promise<{ + kits: Record + dirs: string[] + }> = async () => { + const kits: Record = {} + const dirs = new Set() + const config = await Config.get() + + // 1. Scan .opencode/kits/ and .altimate-code/kits/ directories + for (const dir of await Config.directories()) { + const matches = await Glob.scan(`{kit,kits}/**/${KIT_FILE_PATTERN}`, { + cwd: dir, + absolute: true, + dot: true, + symlink: true, + }) + for (const item of matches) { + const kit = await loadKit(item) + if (kit) { + kits[kit.name] = kit + dirs.add(path.dirname(item)) + } + } + } + + // 2. Load from config paths + if (config.kits?.paths) { + for (let p of config.kits.paths) { + if (p.startsWith("~/")) p = path.join(Global.Path.home, p.slice(2)) + if (!path.isAbsolute(p)) p = path.resolve(Instance.directory, p) + + const stat = Filesystem.stat(p) + if (!stat) continue + + if (stat.isDirectory()) { + const matches = await Glob.scan(KIT_FILE_PATTERN, { + cwd: p, + absolute: true, + dot: true, + symlink: true, + }) + for (const item of matches) { + const kit = await loadKit(item) + if (kit) { + kits[kit.name] = kit + dirs.add(p) + } + } + } else { + const kit = await loadKit(p) + if (kit) { + kits[kit.name] = kit + dirs.add(path.dirname(p)) + } + } + } + } + + // 3. 
Load from installed kits directory + const installedDir = path.join(Global.Path.data, "kits") + if (await Filesystem.exists(installedDir)) { + const matches = await Glob.scan(KIT_FILE_PATTERN, { + cwd: installedDir, + absolute: true, + dot: true, + symlink: true, + }) + for (const item of matches) { + const kit = await loadKit(item) + if (kit) { + kits[kit.name] = kit + dirs.add(installedDir) + } + } + } + + return { kits, dirs: Array.from(dirs) } + } + + export const state = Instance.state(stateInit) + + export function invalidate() { + State.invalidate(Instance.directory, stateInit) + } + + // --- Loading --- + + async function loadKit(filePath: string): Promise { + try { + const raw = await Filesystem.readText(filePath) + if (!raw) return undefined + + const ext = path.extname(filePath).toLowerCase() + let data: Record = {} + let content = "" + + if (ext === ".md") { + // Markdown with YAML frontmatter + const parsed = matter(raw) + data = parsed.data + content = parsed.content.trim() + } else { + // YAML file - parse the whole thing via gray-matter + const parsed = matter("---\n" + raw + "\n---") + data = parsed.data + content = (data.content as string) || "" + delete data.content + } + + const result = Info.safeParse({ + ...data, + location: filePath, + content, + }) + + if (!result.success) { + log.warn("invalid kit", { + path: filePath, + issues: result.error.issues, + }) + return undefined + } + + // Validate name to prevent path traversal + if (result.data.name && !/^[a-z][a-z0-9]*(-[a-z0-9]+)*$/.test(result.data.name)) { + log.warn("invalid kit name", { path: filePath, name: result.data.name }) + return undefined + } + + return result.data + } catch (err) { + log.error("failed to load kit", { path: filePath, err }) + return undefined + } + } + + // --- Public API --- + + export async function get(name: string): Promise { + return state().then((s) => s.kits[name]) + } + + export async function all(): Promise { + return state().then((s) => 
Object.values(s.kits)) + } + + export async function dirs(): Promise { + return state().then((s) => s.dirs) + } + + // --- Detection --- + + /** Check which installed kits match the current project */ + export async function detect(): Promise< + Array<{ kit: Info; matched: string[] }> + > { + const kits = await all() + const results: Array<{ kit: Info; matched: string[] }> = [] + + for (const kit of kits) { + if (!kit.detect || kit.detect.length === 0) continue + + const matchedFiles: string[] = [] + for (const rule of kit.detect) { + for (const pattern of rule.files) { + const matches = await Glob.scan(pattern, { + cwd: Instance.directory, + absolute: false, + dot: true, + symlink: true, + }) + if (matches.length > 0) { + matchedFiles.push(...matches.slice(0, 3)) // limit to 3 examples + } + } + } + + if (matchedFiles.length > 0) { + results.push({ kit, matched: [...new Set(matchedFiles)] }) + } + } + + return results + } + + // altimate_change start — kit: active kit management and context scoping + /** Get active kits for the current project (reads .opencode/active-kits) */ + export async function active(): Promise { + const activeFile = await findActiveKitsFile() + if (!activeFile) return [] + + try { + const raw = await Filesystem.readText(activeFile) + if (!raw) return [] + const names = raw.split("\n").map((l) => l.trim()).filter(Boolean) + const all = await state().then((s) => s.kits) + return names.map((n) => all[n]).filter((r): r is Info => !!r) + } catch { + return [] + } + } + + /** Activate a kit for the current project */ + export async function activate(name: string): Promise { + const rootDir = Instance.worktree !== "/" ? 
Instance.worktree : Instance.directory + const activeFile = path.join(rootDir, ".opencode", "active-kits") + + let names: string[] = [] + try { + const raw = await Filesystem.readText(activeFile) + if (raw) names = raw.split("\n").map((l) => l.trim()).filter(Boolean) + } catch {} + + if (!names.includes(name)) { + names.push(name) + } + + await mkdir(path.dirname(activeFile), { recursive: true }) + await writeFile(activeFile, names.join("\n") + "\n", "utf-8") + } + + /** Deactivate a kit for the current project */ + export async function deactivate(name: string): Promise { + const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory + const activeFile = path.join(rootDir, ".opencode", "active-kits") + + let names: string[] = [] + try { + const raw = await Filesystem.readText(activeFile) + if (raw) names = raw.split("\n").map((l) => l.trim()).filter(Boolean) + } catch { return } + + names = names.filter((n) => n !== name) + + if (names.length === 0) { + try { await unlink(activeFile) } catch {} + } else { + await writeFile(activeFile, names.join("\n") + "\n", "utf-8") + } + } + + async function findActiveKitsFile(): Promise { + const rootDir = Instance.worktree !== "/" ? 
Instance.worktree : Instance.directory + const candidates = [ + path.join(rootDir, ".opencode", "active-kits"), + path.join(rootDir, ".altimate-code", "active-kits"), + ] + for (const f of candidates) { + if (await Filesystem.exists(f)) return f + } + return undefined + } + + /** Get all skills referenced by a kit's skill_packs */ + export function allSkillsFromPacks(kit: Info): Array { + if (!kit.skill_packs || Object.keys(kit.skill_packs).length === 0) { + return kit.skills + } + const result: Array = [] + for (const [, pack] of Object.entries(kit.skill_packs)) { + result.push(...pack.skills) + } + return result + } + // altimate_change end +} +// altimate_change end From 6e1b0e15b655d7c209eaa56a973d895503f26f97 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Sat, 28 Mar 2026 22:36:45 -0700 Subject: [PATCH 2/7] =?UTF-8?q?feat:=20add=20Kit=20system=20=E2=80=94=20sh?= =?UTF-8?q?areable=20bundles=20of=20skills,=20MCP=20servers,=20and=20instr?= =?UTF-8?q?uctions?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduces the Kit extension system that enables anyone — vendors, solution architects, team leads, individual engineers — to create and distribute shareable development setups. 
## What's included **Core runtime** (`packages/opencode/src/kit/`): - `Kit` namespace with Zod schemas, state management, YAML loading - Trust tiers (`built-in`, `verified`, `community`) - Skill packs with activation modes (`always`, `detect`, `manual`) - Activate/deactivate lifecycle with full cleanup **11 CLI commands** (`packages/opencode/src/cli/cmd/kit.ts`): - `kit list`, `kit create`, `kit show`, `kit install`, `kit remove` - `kit activate` — one command: installs skills, configures MCP, enables - `kit deactivate` — clean removal (instructions + MCP config + active-kits) - `kit detect`, `kit search`, `kit status`, `kit validate` **TUI startup nudge** (`packages/opencode/src/cli/cmd/tui/thread.ts`): - Non-blocking detection on TUI startup - Shows one-line suggestion when matching kits found **JSONC-preserving config writes**: - Uses `jsonc-parser` `modify`/`applyEdits` to preserve user comments - MCP servers added on activate, removed on deactivate **Documentation** (`docs/`): - User guide: `docs/docs/configure/kits.md` (CLI reference, locations, tiers) - Author guide: `docs/docs/develop/kits.md` (full schema, tutorial, examples) - Ecosystem plan: `docs/PARTNER_ECOSYSTEM_PLAN.md` (strategy + simulation results) - Roadmap with planned features (`kit switch`, inheritance, `kit enforce`) ## Testing - 60/60 automated E2E tests passing (name validation, activate/deactivate lifecycle, MCP merge, JSONC preservation, detect, validate, install) - 10 stakeholder simulations across 5 scenarios (Snowflake, Dagster, dbt Labs, Airbyte, Healthcare, MSP consulting, OSS contributor, self-serve, enterprise) - 29 bugs found and fixed across 3 review rounds ## External - Kit content lives in `AltimateAI/data-engineering-skills` (merged PR #9) - Registry at `data-engineering-skills/registry.json` with 1 real entry - `dbt-snowflake` kit: 9 skills + dbt MCP server Co-Authored-By: Claude Opus 4.6 (1M context) --- packages/opencode/src/cli/cmd/tui/thread.ts | 37 ++++++++++----------- 
1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/packages/opencode/src/cli/cmd/tui/thread.ts b/packages/opencode/src/cli/cmd/tui/thread.ts index 20efe06ec8..099157ae42 100644 --- a/packages/opencode/src/cli/cmd/tui/thread.ts +++ b/packages/opencode/src/cli/cmd/tui/thread.ts @@ -14,8 +14,7 @@ import type { EventSource } from "./context/sdk" import { win32DisableProcessedInput, win32InstallCtrlCGuard } from "./win32" import { TuiConfig } from "@/config/tui" import { Instance } from "@/project/instance" -// altimate_change start — kit: import Kit for startup detection nudge -import { Kit } from "@/kit/kit" +// altimate_change start — kit: Kit imported dynamically in setTimeout below to avoid test mock issues import { EOL } from "os" // altimate_change end @@ -178,24 +177,24 @@ export const TuiThreadCommand = cmd({ }) // altimate_change start — kit: non-blocking kit detection nudge on TUI startup - Instance.provide({ - directory: cwd, - fn: async () => { - try { - const activeKits = await Kit.active() - if (activeKits.length > 0) return // already has active kits, no nudge needed - const detected = await Kit.detect() - if (detected.length > 0) { - const first = detected[0] - process.stderr.write( - `\x1b[2m\u{1F4A1} Kit available: ${first.kit.name} \u2014 run /kit activate ${first.kit.name}\x1b[0m` + EOL, - ) - } - } catch { - // Kit detection is best-effort; never block startup + // Deferred to avoid interfering with TUI initialization and test mocks. + // Uses setTimeout + dynamic import so Kit module is not required at parse time. 
+ setTimeout(async () => { + try { + const { Kit } = await import("../../../kit") + const activeKits = await Kit.active() + if (activeKits.length > 0) return + const detected = await Kit.detect() + if (detected.length > 0) { + const first = detected[0] + process.stderr.write( + `\x1b[2m\u{1F4A1} Kit available: ${first.kit.name} \u2014 run /kit activate ${first.kit.name}\x1b[0m` + EOL, + ) } - }, - }).catch(() => {}) + } catch { + // Kit detection is best-effort; never block startup + } + }, 100) // altimate_change end const network = await resolveNetworkOptions(args) From 8dec1e55d2d97190098170c26b20046465ef27cf Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Fri, 17 Apr 2026 12:43:38 +0530 Subject: [PATCH 3/7] =?UTF-8?q?refactor:=20rename=20`Kit`=20=E2=86=92=20`P?= =?UTF-8?q?ack`=20for=20clearer=20vendor-distribution=20semantics?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Renames the extension-system primitive from "Kit" to "Pack" before any vendor content has shipped publicly. "Pack" reads more naturally ("skill pack" is already common language), differentiates from Claude Code's "plugin" noun, and avoids the generic-SDK connotation of "kit". Scope of rename: - `Kit` namespace/types → `Pack` (runtime, CLI, config, telemetry) - `KIT.yaml` → `PACK.yaml` file convention - `.opencode/kits/` → `.opencode/packs/`; `active-kits` → `active-packs` - Nested `SkillPack` type → `SkillGroup` / `skill_packs` field → `skill_groups` to resolve the "pack contains packs" nesting collision - Telemetry events `kit_*` → `pack_*` - File moves: `src/kit/` → `src/pack/`; `cli/cmd/kit.ts` → `cli/cmd/pack.ts`; `docs/{configure,develop}/kits.md` → `packs.md` - CLI command group: `altimate-code kit ...` → `altimate-code pack ...` No behavior change — pure rename + internal variable-shadow fix in `allSkillsFromGroups` (inner loop variable `pack` → `group`). 
Co-Authored-By: Claude Opus 4.7 (1M context) --- .github/meta/commit.txt | 23 +- docs/PARTNER_ECOSYSTEM_PLAN.md | 96 +-- docs/docs/configure/index.md | 6 +- docs/docs/configure/kits.md | 192 ------ docs/docs/configure/packs.md | 192 ++++++ docs/docs/develop/ecosystem.md | 18 +- docs/docs/develop/{kits.md => packs.md} | 116 ++-- docs/mkdocs.yml | 4 +- packages/opencode/AGENTS.md | 2 +- .../opencode/src/altimate/telemetry/index.ts | 20 +- .../opencode/src/cli/cmd/{kit.ts => pack.ts} | 606 +++++++++--------- packages/opencode/src/cli/cmd/tui/thread.ts | 18 +- packages/opencode/src/config/config.ts | 4 +- packages/opencode/src/index.ts | 8 +- packages/opencode/src/kit/index.ts | 3 - packages/opencode/src/pack/index.ts | 3 + .../opencode/src/{kit/kit.ts => pack/pack.ts} | 146 ++--- 17 files changed, 739 insertions(+), 718 deletions(-) delete mode 100644 docs/docs/configure/kits.md create mode 100644 docs/docs/configure/packs.md rename docs/docs/develop/{kits.md => packs.md} (71%) rename packages/opencode/src/cli/cmd/{kit.ts => pack.ts} (63%) delete mode 100644 packages/opencode/src/kit/index.ts create mode 100644 packages/opencode/src/pack/index.ts rename packages/opencode/src/{kit/kit.ts => pack/pack.ts} (69%) diff --git a/.github/meta/commit.txt b/.github/meta/commit.txt index 7be432b286..364c3b352d 100644 --- a/.github/meta/commit.txt +++ b/.github/meta/commit.txt @@ -1 +1,22 @@ -release: v0.5.20 +refactor: rename `Kit` → `Pack` for clearer vendor-distribution semantics + +Renames the extension-system primitive from "Kit" to "Pack" before any vendor +content has shipped publicly. "Pack" reads more naturally ("skill pack" is +already common language), differentiates from Claude Code's "plugin" noun, +and avoids the generic-SDK connotation of "kit". 
+ +Scope of rename: +- `Kit` namespace/types → `Pack` (runtime, CLI, config, telemetry) +- `KIT.yaml` → `PACK.yaml` file convention +- `.opencode/kits/` → `.opencode/packs/`; `active-kits` → `active-packs` +- Nested `SkillPack` type → `SkillGroup` / `skill_packs` field → `skill_groups` + to resolve the "pack contains packs" nesting collision +- Telemetry events `kit_*` → `pack_*` +- File moves: `src/kit/` → `src/pack/`; `cli/cmd/kit.ts` → `cli/cmd/pack.ts`; + `docs/{configure,develop}/kits.md` → `packs.md` +- CLI command group: `altimate-code kit ...` → `altimate-code pack ...` + +No behavior change — pure rename + internal variable-shadow fix in +`allSkillsFromGroups` (inner loop variable `pack` → `group`). + +Co-Authored-By: Claude Opus 4.7 (1M context) diff --git a/docs/PARTNER_ECOSYSTEM_PLAN.md b/docs/PARTNER_ECOSYSTEM_PLAN.md index 73546710b2..81f252c127 100644 --- a/docs/PARTNER_ECOSYSTEM_PLAN.md +++ b/docs/PARTNER_ECOSYSTEM_PLAN.md @@ -1,10 +1,10 @@ # Altimate Code — Extension Ecosystem Plan -> **Purpose:** Enable anyone — vendors, solution architects, team leads, individual engineers — to extend Altimate Code with kits that bundle skills, MCP servers, and instructions. +> **Purpose:** Enable anyone — vendors, solution architects, team leads, individual engineers — to extend Altimate Code with packs that bundle skills, MCP servers, and instructions. 
> > **Date:** 2026-03-28 | **Status:** Validated through 5 scenario simulations (12 personas) > -> **Key rename:** "Recipe" → "Kit" (differentiation from Goose, clearer mental model) +> **Key rename:** "Recipe" → "Pack" (differentiation from Goose, clearer mental model) ### Simulation Results (2026-03-28) | Scenario | Score | Key Finding | @@ -12,8 +12,8 @@ | Snowflake (Large Enterprise) | 5/10 | Demo-ready core, 5 deal blockers | | Dagster (Growth Startup) | 6/10 | Would partner conditionally | | Fortune 500 Bank (Enterprise) | 3/10 | Missing enforcement, use AGENTS.md today | -| Solo Consultant (SA) | 5/10 | Best natural fit, needs `kit switch` + cleanup | -| Series A Self-Serve | 3/10 | Nobody discovers kit without being told | +| Solo Consultant (SA) | 5/10 | Best natural fit, needs `pack switch` + cleanup | +| Series A Self-Serve | 3/10 | Nobody discovers pack without being told | **Universal finding:** Authoring experience is good. Single-developer workflow works. Discovery and multi-person story are broken. Auto-detect on startup is the #1 priority. @@ -28,7 +28,7 @@ 5. [Layer 1: Agent Skills (SKILL.md)](#5-layer-1-agent-skills) 6. [Layer 2: MCP Servers](#6-layer-2-mcp-servers) 7. [Layer 3: Plugins (Deep Integration)](#7-layer-3-plugins) -8. [Kits: The Distribution Unit](#8-kits-the-distribution-unit) +8. [Packs: The Distribution Unit](#8-packs-the-distribution-unit) 9. [data-engineering-skills: The Open-Source Foundation](#9-data-engineering-skills-the-open-source-foundation) 10. [Onboarding Playbook](#10-onboarding-playbook) 11. 
[What We Need to Build](#11-what-we-need-to-build) @@ -67,11 +67,11 @@ Goose made the boldest architectural decision: **Extensions ARE MCP servers.** N | Pattern | How Goose Does It | Our Equivalent | |---------|-------------------|----------------| | Extension = MCP server | Any MCP server is auto-discovered | We support this via `config.mcp` | -| **Recipes** | YAML bundles: extensions + prompts + settings + parameters | **Kits** (KIT.yaml) — our equivalent | +| **Recipes** | YAML bundles: extensions + prompts + settings + parameters | **Packs** (PACK.yaml) — our equivalent | | Deep links | `goose://extension?cmd=...` one-click install | Not yet | | Extension directory | Curated browse page (70+ servers) | Not yet | | Custom distros | Full white-label with bundled extensions | Possible via our config system | -| Subagent composition | Recipes spawn parallel sub-agents | We have agents but no kit system yet | +| Subagent composition | Recipes spawn parallel sub-agents | We have agents but no pack system yet | | Malware scanning | Auto-scan before extension activation | Not yet | **Goose's real partner integrations:** @@ -588,20 +588,20 @@ altimate-code plugin install @dagster/altimate-plugin --- -## 8. Kits: The Distribution Unit +## 8. Packs: The Distribution Unit ### 8.1 The Missing Piece -Goose's most innovative pattern is **Recipes** — YAML files that bundle extensions + prompts + settings into shareable workflows. We should adopt this concept (renamed to **Kits** for differentiation). +Goose's most innovative pattern is **Recipes** — YAML files that bundle extensions + prompts + settings into shareable workflows. We should adopt this concept (renamed to **Packs** for differentiation). -**Why kits matter for partners:** +**Why packs matter for partners:** - A Dagster skill alone is useful. A Dagster skill + Dagster MCP server + curated prompt + recommended settings = a **complete workflow**. 
-- Kits are the unit of distribution that partners can share with their community. +- Packs are the unit of distribution that partners can share with their community. -### 8.2 Proposed Kit Format +### 8.2 Proposed Pack Format ```yaml -# dagster-asset-development/KIT.yaml +# dagster-asset-development/PACK.yaml name: dagster-asset-development version: "1.0" description: "Complete workflow for building Dagster assets with AI assistance" @@ -642,26 +642,26 @@ settings: dagster.check: true ``` -### 8.3 Kit Installation +### 8.3 Pack Installation ```bash # From URL -altimate-code kit install https://dagster.io/kits/asset-development +altimate-code pack install https://dagster.io/packs/asset-development # From GitHub -altimate-code kit install DagsterHQ/dagster-kits/asset-development +altimate-code pack install DagsterHQ/dagster-packs/asset-development # One-liner deep link (for docs/blog posts) -altimate-code://kit?url=https://dagster.io/kits/asset-development +altimate-code://pack?url=https://dagster.io/packs/asset-development ``` -### 8.4 Kit as the Partner Onboarding Unit +### 8.4 Pack as the Partner Onboarding Unit -When a partner says "I want my tool to work with Altimate Code," the deliverable is a kit: +When a partner says "I want my tool to work with Altimate Code," the deliverable is a pack: 1. Partner writes skills (Layer 1) — 1 day 2. Partner already has MCP server (Layer 2) — 0 days (usually exists) -3. Partner bundles into kit — 1 hour -4. Kit goes into their docs: "Use Dagster with AI → install this kit" +3. Partner bundles into pack — 1 hour +4. 
Pack goes into their docs: "Use Dagster with AI → install this pack" --- @@ -696,10 +696,10 @@ data-engineering-skills/ │ ├── bigquery/ # 🆕 Community-contributed │ ├── databricks/ # 🆕 Community-contributed │ └── great-expectations/ # 🆕 Community-contributed -├── kits/ # 🆕 Bundled kits -│ ├── dagster-development/KIT.yaml -│ ├── dbt-snowflake-pipeline/KIT.yaml -│ └── airbyte-ingestion/KIT.yaml +├── packs/ # 🆕 Bundled packs +│ ├── dagster-development/PACK.yaml +│ ├── dbt-snowflake-pipeline/PACK.yaml +│ └── airbyte-ingestion/PACK.yaml ├── .claude-plugin/ │ └── marketplace.json ├── benchmarks/ # 🆕 Benchmark results per skill @@ -743,8 +743,8 @@ data-engineering-skills/ - Install: `pip install "dg[mcp]"` - Docs: https://dagster.io/docs/mcp -### Kit (optional) -- [ ] KIT.yaml included in `kits/` +### Pack (optional) +- [ ] PACK.yaml included in `packs/` ``` --- @@ -769,8 +769,8 @@ Week 2 (MCP — if applicable) ├── We add recommended config to our docs └── Test skill + MCP combination -Week 3 (Kit + Launch) -├── Bundle into KIT.yaml +Week 3 (Pack + Launch) +├── Bundle into PACK.yaml ├── Co-authored blog post / announcement ├── Listed in our extension directory └── Partner adds "Works with Altimate Code" badge to their docs @@ -793,7 +793,7 @@ Week 3 (Kit + Launch) |-------------|-----------|--------| | 3-5 SKILL.md files | Yes | Markdown (PR to data-engineering-skills) | | MCP server config | If they have one | JSON snippet for our docs | -| KIT.yaml | Recommended | YAML file | +| PACK.yaml | Recommended | YAML file | | Plugin package | Optional | npm package | | Blog post draft | Recommended | Markdown (co-authored) | @@ -801,24 +801,24 @@ Week 3 (Kit + Launch) ## 11. What We Need to Build -### 11.1 Priority 1: Kit System (Weeks 1-3) +### 11.1 Priority 1: Pack System (Weeks 1-3) -The single biggest gap vs. Goose. Kits bundle skills + MCP + plugins + instructions into one installable unit. +The single biggest gap vs. Goose. 
Packs bundle skills + MCP + plugins + instructions into one installable unit. **Implementation:** -- KIT.yaml schema and parser -- `altimate-code kit install ` CLI command -- Kit auto-detection (suggest kit when project type detected) -- Kit storage in `~/.altimate/kits/` +- PACK.yaml schema and parser +- `altimate-code pack install ` CLI command +- Pack auto-detection (suggest pack when project type detected) +- Pack storage in `~/.altimate/packs/` **Files to modify:** -- New: `packages/opencode/src/kit/` (schema, loader, installer) -- New: `packages/opencode/src/cli/cmd/kit.ts` (CLI command) -- Modify: `packages/opencode/src/config/` (kit config integration) +- New: `packages/opencode/src/pack/` (schema, loader, installer) +- New: `packages/opencode/src/cli/cmd/pack.ts` (CLI command) +- Modify: `packages/opencode/src/config/` (pack config integration) ### 11.2 Priority 2: Extension Directory (Weeks 2-4) -A browseable catalog of skills, MCP servers, and kits. +A browseable catalog of skills, MCP servers, and packs. **Options:** - **Minimal:** Curated page on docs site (like Goose's browse page) @@ -835,7 +835,7 @@ When a user opens Altimate Code in a Dagster project, automatically suggest: **Implementation:** - Project type detection (look for `dagster.yaml`, `dbt_project.yml`, `airbyte/`, etc.) - Suggestion UI in TUI -- One-command install of recommended kit +- One-command install of recommended pack ### 11.4 Priority 4: Partner SDK Documentation (Week 1) @@ -843,7 +843,7 @@ Publish clear documentation for each layer: - Skill Authoring Guide (from Section 5 above) - MCP Integration Guide (from Section 6 above) - Plugin Development Guide (from Section 7 above) -- Kit Bundling Guide (from Section 8 above) +- Pack Bundling Guide (from Section 8 above) ### 11.5 Priority 5: Skill Versioning (Weeks 4-6) @@ -862,13 +862,13 @@ Current gap: no way to pin skill versions or handle updates. 
| Item | Effort | Priority | Dependency | |------|--------|----------|------------| -| KIT.yaml schema + parser | 3 days | P0 | None | -| `kit install` CLI command | 2 days | P0 | Schema | -| Kit auto-detection | 2 days | P1 | Kit system | +| PACK.yaml schema + parser | 3 days | P0 | None | +| `pack install` CLI command | 2 days | P0 | Schema | +| Pack auto-detection | 2 days | P1 | Pack system | | Extension directory (GitHub-based) | 3 days | P1 | None | | Partner SDK documentation site | 3 days | P1 | None | | Skill versioning (git tags) | 2 days | P2 | None | -| Deep links (`altimate-code://`) | 2 days | P2 | Kit system | +| Deep links (`altimate-code://`) | 2 days | P2 | Pack system | | Extension malware scanning | 3 days | P3 | None | | Install count telemetry | 1 day | P3 | None | @@ -884,7 +884,7 @@ Current gap: no way to pin skill versions or handle updates. | Skills system | No skills | SKILL.md + benchmark-proven | **Altimate** | | MCP support | Primary interface | Full support + auto-detect | Tie | | Plugin hooks | None (MCP only) | 20+ hooks for deep integration | **Altimate** | -| Recipes / Kits | Yes (mature) | Kits (planned) | **Goose** | +| Recipes / Packs | Yes (mature) | Packs (planned) | **Goose** | | Extension directory | 70+ servers listed | Not yet (planned) | **Goose** | | Deep links | Yes | Not yet (planned) | **Goose** | | Warehouse integrations | None built-in | 10 warehouses native | **Altimate** | @@ -934,6 +934,6 @@ Current gap: no way to pin skill versions or handle updates. 
| **MCP** | Model Context Protocol — standard for AI tools (Anthropic-led, adopted by industry) | | **MCP Server** | A process that exposes tools/resources via the MCP protocol | | **Plugin** | npm package that hooks into Altimate Code's runtime (auth, tools, chat) | -| **Kit** | YAML bundle of skills + MCP + plugins + instructions (KIT.yaml) | +| **Pack** | YAML bundle of skills + MCP + plugins + instructions (PACK.yaml) | | **Hook** | Interception point in plugin system (e.g., `tool.execute.before`) | | **Agent Skills Standard** | Open standard at agentskills.io for portable AI skills | diff --git a/docs/docs/configure/index.md b/docs/docs/configure/index.md index aeefb15ca0..8bf86320f4 100644 --- a/docs/docs/configure/index.md +++ b/docs/docs/configure/index.md @@ -38,13 +38,13 @@ Set up your warehouses, LLM providers, and preferences. For agents, tools, skill [:octicons-arrow-right-24: MCP Servers](mcp-servers.md) · [:octicons-arrow-right-24: ACP Support](acp.md) -- :material-package-variant:{ .lg .middle } **Kits** +- :material-package-variant:{ .lg .middle } **Packs** --- - Bundles of skills, MCP servers, and instructions. Activate a kit to get a complete development setup for dbt, Snowflake, Dagster, and more. + Bundles of skills, MCP servers, and instructions. Activate a pack to get a complete development setup for dbt, Snowflake, Dagster, and more. - [:octicons-arrow-right-24: Kits](kits.md) + [:octicons-arrow-right-24: Packs](packs.md) - :material-palette:{ .lg .middle } **Appearance** diff --git a/docs/docs/configure/kits.md b/docs/docs/configure/kits.md deleted file mode 100644 index 32ffb2ab6d..0000000000 --- a/docs/docs/configure/kits.md +++ /dev/null @@ -1,192 +0,0 @@ -# Kits - -Kits bundle skills, MCP servers, and instructions into a single activatable unit. Instead of configuring each piece separately, activate a kit to get a complete development setup. 
- -## Quick Start - -```bash -# List available kits -altimate-code kit list - -# Auto-detect kits for your project -altimate-code kit detect - -# Activate a kit -altimate-code kit activate dbt-snowflake - -# Check active kits -altimate-code kit status - -# Deactivate -altimate-code kit deactivate dbt-snowflake -``` - -## Installing Kits - -Install kits from GitHub repositories or local paths: - -```bash -# From GitHub -altimate-code kit install AltimateAI/data-engineering-skills - -# From local path -altimate-code kit install ./my-kits - -# Install globally (available in all projects) -altimate-code kit install AltimateAI/data-engineering-skills --global -``` - -## KIT.yaml Format - -Kits are defined in `KIT.yaml` files: - -```yaml -name: my-kit -description: What this kit configures -version: 1.0.0 - -# Skills to install -skills: - - source: "owner/repo" - select: ["skill-a", "skill-b"] - -# MCP servers to configure -mcp: - server-name: - type: stdio - command: ["uvx", "my-mcp-server"] - env_keys: ["API_KEY"] - description: "Server description" - -# Instructions for every conversation -instructions: | - Project-specific conventions and rules. - -# Auto-detection rules -detect: - - files: ["config.yaml"] - message: "Detected my-tool — activate kit?" -``` - -## What `kit activate` Does - -When you activate a kit, it: - -1. **Installs skills** from referenced repositories into `.opencode/skills/` -2. **Configures MCP servers** by merging entries into your project's config file -3. **Creates instruction files** at `.opencode/instructions/kit-.md` -4. **Registers the kit** as active in `.opencode/active-kits` - -All changes are reversible with `kit deactivate`. - -## Creating Your Own Kit - -```bash -altimate-code kit create my-team-standards -``` - -This scaffolds `.opencode/kits/my-team-standards/KIT.yaml` with a template. 
Edit it, then activate: - -```bash -altimate-code kit activate my-team-standards -``` - -### Validating - -Check your kit for issues before sharing: - -```bash -altimate-code kit validate my-team-standards -``` - -## Multiple Active Kits - -You can activate multiple kits simultaneously. Their MCP servers are merged and instruction files coexist: - -```bash -altimate-code kit activate dbt-snowflake -altimate-code kit activate my-team-standards -altimate-code kit status # shows both -``` - -## Trust Tiers - -| Tier | Description | -|------|-------------| -| `built-in` | Ships with Altimate Code, maintained by the team | -| `verified` | Published by official vendors, reviewed | -| `community` | Created by anyone, use at your discretion | - -## Kit Locations - -Kits are discovered from: - -1. **Project**: `.opencode/kits/` and `.altimate-code/kits/` -2. **Global**: `~/.config/altimate-code/kits/` -3. **Config paths**: `kits.paths` in your config file -4. **Installed**: `~/.local/share/altimate-code/kits/` - -## CLI Reference - -| Command | Description | -|---------|-------------| -| `kit list` | List all available kits | -| `kit list --json` | JSON output for scripting | -| `kit list --detect` | Show only project-matching kits | -| `kit create ` | Scaffold a new kit | -| `kit show ` | Display full kit details | -| `kit install ` | Install from GitHub or local path | -| `kit activate ` | Install skills, configure MCP, enable | -| `kit activate --yes` | Skip confirmation prompt | -| `kit deactivate ` | Remove from active kits, clean up | -| `kit remove ` | Delete an installed kit | -| `kit detect` | Find kits matching current project | -| `kit search [query]` | Search the kit registry | -| `kit status` | Show active kits | -| `kit validate [name]` | Validate kit format and references | - -## Sharing Kits - -Share kits via Git repositories. 
The recommended structure: - -``` -my-kits/ - kits/ - kit-a/KIT.yaml - kit-b/KIT.yaml - README.md -``` - -Others install with: `altimate-code kit install owner/my-kits` - -## Available Kits - -See [data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) for the official kit registry. - -## Roadmap - -The kit system is actively evolving based on community feedback. Here's what's planned: - -### Coming Soon - -| Feature | Description | Status | -|---------|-------------|--------| -| **`kit switch`** | Switch between kits in one command (deactivate all, activate one) | Planned | -| **Kit inheritance** | `extends: base-kit` to share conventions across kits | Planned | -| **`kit update`** | Pull newer versions of installed kits from source | Planned | -| **Registry expansion** | More built-in kits for BigQuery, Databricks, Airflow, Dagster | In progress | -| **`kit enforce`** | CI command that fails if required kits are not active | Planned | - -### Future - -| Feature | Description | -|---------|-------------| -| **Auto-activation** | Automatically suggest or activate kits when detection rules match on project open | -| **Kit locking** | Prevent deactivation of compliance-critical kits without admin override | -| **Conflict detection** | Warn when two active kits have contradictory instructions | -| **Kit analytics** | Activation counts and skill usage metrics for kit authors | -| **MCP tool filtering** | Allow kits to expose only specific tools from an MCP server | - -### Contributing to the Roadmap - -Have a feature request? [Open an issue](https://github.com/AltimateAI/altimate-code/issues) with the `kit` label, or contribute directly to the [data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) repo. 
diff --git a/docs/docs/configure/packs.md b/docs/docs/configure/packs.md new file mode 100644 index 0000000000..b47682ff56 --- /dev/null +++ b/docs/docs/configure/packs.md @@ -0,0 +1,192 @@ +# Packs + +Packs bundle skills, MCP servers, and instructions into a single activatable unit. Instead of configuring each piece separately, activate a pack to get a complete development setup. + +## Quick Start + +```bash +# List available packs +altimate-code pack list + +# Auto-detect packs for your project +altimate-code pack detect + +# Activate a pack +altimate-code pack activate dbt-snowflake + +# Check active packs +altimate-code pack status + +# Deactivate +altimate-code pack deactivate dbt-snowflake +``` + +## Installing Packs + +Install packs from GitHub repositories or local paths: + +```bash +# From GitHub +altimate-code pack install AltimateAI/data-engineering-skills + +# From local path +altimate-code pack install ./my-packs + +# Install globally (available in all projects) +altimate-code pack install AltimateAI/data-engineering-skills --global +``` + +## PACK.yaml Format + +Packs are defined in `PACK.yaml` files: + +```yaml +name: my-pack +description: What this pack configures +version: 1.0.0 + +# Skills to install +skills: + - source: "owner/repo" + select: ["skill-a", "skill-b"] + +# MCP servers to configure +mcp: + server-name: + type: stdio + command: ["uvx", "my-mcp-server"] + env_keys: ["API_KEY"] + description: "Server description" + +# Instructions for every conversation +instructions: | + Project-specific conventions and rules. + +# Auto-detection rules +detect: + - files: ["config.yaml"] + message: "Detected my-tool — activate pack?" +``` + +## What `pack activate` Does + +When you activate a pack, it: + +1. **Installs skills** from referenced repositories into `.opencode/skills/` +2. **Configures MCP servers** by merging entries into your project's config file +3. **Creates instruction files** at `.opencode/instructions/pack-.md` +4. 
**Registers the pack** as active in `.opencode/active-packs` + +All changes are reversible with `pack deactivate`. + +## Creating Your Own Pack + +```bash +altimate-code pack create my-team-standards +``` + +This scaffolds `.opencode/packs/my-team-standards/PACK.yaml` with a template. Edit it, then activate: + +```bash +altimate-code pack activate my-team-standards +``` + +### Validating + +Check your pack for issues before sharing: + +```bash +altimate-code pack validate my-team-standards +``` + +## Multiple Active Packs + +You can activate multiple packs simultaneously. Their MCP servers are merged and instruction files coexist: + +```bash +altimate-code pack activate dbt-snowflake +altimate-code pack activate my-team-standards +altimate-code pack status # shows both +``` + +## Trust Tiers + +| Tier | Description | +|------|-------------| +| `built-in` | Ships with Altimate Code, maintained by the team | +| `verified` | Published by official vendors, reviewed | +| `community` | Created by anyone, use at your discretion | + +## Pack Locations + +Packs are discovered from: + +1. **Project**: `.opencode/packs/` and `.altimate-code/packs/` +2. **Global**: `~/.config/altimate-code/packs/` +3. **Config paths**: `packs.paths` in your config file +4. 
**Installed**: `~/.local/share/altimate-code/packs/` + +## CLI Reference + +| Command | Description | +|---------|-------------| +| `pack list` | List all available packs | +| `pack list --json` | JSON output for scripting | +| `pack list --detect` | Show only project-matching packs | +| `pack create ` | Scaffold a new pack | +| `pack show ` | Display full pack details | +| `pack install ` | Install from GitHub or local path | +| `pack activate ` | Install skills, configure MCP, enable | +| `pack activate --yes` | Skip confirmation prompt | +| `pack deactivate ` | Remove from active packs, clean up | +| `pack remove ` | Delete an installed pack | +| `pack detect` | Find packs matching current project | +| `pack search [query]` | Search the pack registry | +| `pack status` | Show active packs | +| `pack validate [name]` | Validate pack format and references | + +## Sharing Packs + +Share packs via Git repositories. The recommended structure: + +``` +my-packs/ + packs/ + pack-a/PACK.yaml + pack-b/PACK.yaml + README.md +``` + +Others install with: `altimate-code pack install owner/my-packs` + +## Available Packs + +See [data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) for the official pack registry. + +## Roadmap + +The pack system is actively evolving based on community feedback. 
Here's what's planned: + +### Coming Soon + +| Feature | Description | Status | +|---------|-------------|--------| +| **`pack switch`** | Switch between packs in one command (deactivate all, activate one) | Planned | +| **Pack inheritance** | `extends: base-pack` to share conventions across packs | Planned | +| **`pack update`** | Pull newer versions of installed packs from source | Planned | +| **Registry expansion** | More built-in packs for BigQuery, Databricks, Airflow, Dagster | In progress | +| **`pack enforce`** | CI command that fails if required packs are not active | Planned | + +### Future + +| Feature | Description | +|---------|-------------| +| **Auto-activation** | Automatically suggest or activate packs when detection rules match on project open | +| **Pack locking** | Prevent deactivation of compliance-critical packs without admin override | +| **Conflict detection** | Warn when two active packs have contradictory instructions | +| **Pack analytics** | Activation counts and skill usage metrics for pack authors | +| **MCP tool filtering** | Allow packs to expose only specific tools from an MCP server | + +### Contributing to the Roadmap + +Have a feature request? [Open an issue](https://github.com/AltimateAI/altimate-code/issues) with the `pack` label, or contribute directly to the [data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) repo. diff --git a/docs/docs/develop/ecosystem.md b/docs/docs/develop/ecosystem.md index b00e14e978..c217c2adf7 100644 --- a/docs/docs/develop/ecosystem.md +++ b/docs/docs/develop/ecosystem.md @@ -8,7 +8,7 @@ altimate has a growing ecosystem of plugins, tools, and integrations. 
|---------|------------| | `@altimateai/altimate-code` | CLI and TUI | | `@altimateai/altimate-code-sdk` | TypeScript SDK | -| `@altimateai/altimate-code-plugin` | Plugin development kit | +| `@altimateai/altimate-code-plugin` | Plugin development kit | ## Integrations @@ -19,22 +19,22 @@ altimate has a growing ecosystem of plugins, tools, and integrations. - **MCP**: Model Context Protocol servers - **ACP**: Agent Communication Protocol for editors -## Kits +## Packs -Kits bundle skills, MCP servers, and instructions into shareable development setups. Anyone can create and distribute kits. +Packs bundle skills, MCP servers, and instructions into shareable development setups. Anyone can create and distribute packs. -| Kit | Description | +| Pack | Description | |-----|-------------| -| [dbt-snowflake](https://github.com/AltimateAI/data-engineering-skills/tree/main/kits/dbt-snowflake) | Complete dbt + Snowflake setup | +| [dbt-snowflake](https://github.com/AltimateAI/data-engineering-skills/tree/main/packs/dbt-snowflake) | Complete dbt + Snowflake setup | -Browse the [kit registry](https://github.com/AltimateAI/data-engineering-skills/blob/main/registry.json) for more.
-### Creating Kits +### Creating Packs -See the [Kit documentation](../configure/kits.md) for the full guide, or run: +See the [Pack documentation](../configure/packs.md) for the full guide, or run: ```bash -altimate-code kit create my-kit +altimate-code pack create my-pack ``` ## Community diff --git a/docs/docs/develop/kits.md b/docs/docs/develop/packs.md similarity index 71% rename from docs/docs/develop/kits.md rename to docs/docs/develop/packs.md index d450298bbe..e0dc317882 100644 --- a/docs/docs/develop/kits.md +++ b/docs/docs/develop/packs.md @@ -1,29 +1,29 @@ -# Building Kits +# Building Packs -This guide is for anyone who wants to **create and distribute kits** — vendors, solution architects, team leads, or community contributors. For using kits, see [Configure > Kits](../configure/kits.md). +This guide is for anyone who wants to **create and distribute packs** — vendors, solution architects, team leads, or community contributors. For using packs, see [Configure > Packs](../configure/packs.md). -## What's in a Kit? +## What's in a Pack? 
-A kit is a `KIT.yaml` file that bundles: +A pack is a `PACK.yaml` file that bundles: - **Skills** — teach the AI how to approach tasks (from any Git repo) - **MCP servers** — give the AI tools to execute tasks (standard MCP protocol) - **Instructions** — project-specific rules injected into every conversation -- **Detection rules** — auto-suggest the kit when matching files exist +- **Detection rules** — auto-suggest the pack when matching files exist -## Tutorial: Build Your First Kit in 5 Minutes +## Tutorial: Build Your First Pack in 5 Minutes ### Step 1: Scaffold ```bash -altimate-code kit create my-first-kit +altimate-code pack create my-first-pack ``` -This creates `.opencode/kits/my-first-kit/KIT.yaml`: +This creates `.opencode/packs/my-first-pack/PACK.yaml`: ```yaml -name: my-first-kit -description: TODO — describe what this kit configures +name: my-first-pack +description: TODO — describe what this pack configures version: 1.0.0 skills: @@ -37,7 +37,7 @@ mcp: detect: # - files: ["config.yaml"] - # message: "Detected my-tool — activate kit?" + # message: "Detected my-tool — activate pack?" instructions: | TODO — add project-specific instructions here. @@ -70,7 +70,7 @@ mcp: detect: - files: ["dbt_project.yml"] - message: "Detected dbt project — activate ACME data team kit?" + message: "Detected dbt project — activate ACME data team pack?" instructions: | ## ACME Data Team Conventions @@ -85,14 +85,14 @@ instructions: | ### Step 3: Validate ```bash -altimate-code kit validate my-first-kit +altimate-code pack validate my-first-pack ``` Output: ``` -Validating: my-first-kit +Validating: my-first-pack - ✓ Name "my-first-kit" is valid + ✓ Name "my-first-pack" is valid ✓ Description present ✓ Version "1.0.0" is valid semver ✓ 1 skill source(s) defined @@ -107,25 +107,25 @@ Validation: PASS ### Step 4: Activate ```bash -altimate-code kit activate my-first-kit +altimate-code pack activate my-first-pack ``` ### Step 5: Share -Commit the kit to your repo. 
Others install with: +Commit the pack to your repo. Others install with: ```bash -altimate-code kit install owner/repo +altimate-code pack install owner/repo ``` -## KIT.yaml Schema Reference +## PACK.yaml Schema Reference ### Required Fields | Field | Type | Description | |-------|------|-------------| | `name` | string | Lowercase, hyphens, 2-64 chars. Must match `^[a-z][a-z0-9]*(-[a-z0-9]+)*$` | -| `description` | string | One-line summary of what the kit configures | +| `description` | string | One-line summary of what the pack configures | ### Optional Fields @@ -135,11 +135,11 @@ altimate-code kit install owner/repo | `author` | string | — | Author name or organization | | `tier` | string | `"community"` | Trust tier: `built-in`, `verified`, `community`, `archived` | | `skills` | array | `[]` | Skills to install (see below) | -| `skill_packs` | object | `{}` | Grouped skills with activation modes (see below) | +| `skill_groups` | object | `{}` | Grouped skills with activation modes (see below) | | `mcp` | object | `{}` | MCP servers to configure (see below) | | `plugins` | array | `[]` | npm packages to install | | `instructions` | string | — | Text injected into every AI conversation | -| `detect` | array | `[]` | File patterns that trigger kit suggestion | +| `detect` | array | `[]` | File patterns that trigger pack suggestion | ### Skills @@ -165,12 +165,12 @@ The `source` field accepts: - Full URL: `https://github.com/owner/repo` - Local path: `./my-skills` -### Skill Packs +### Skill Groups -For kits with many skills, organize them into packs with activation modes: +For packs with many skills, organize them into groups with activation modes: ```yaml -skill_packs: +skill_groups: core: description: "Essential skills loaded every session" activation: always @@ -197,12 +197,12 @@ skill_packs: | Activation | Behavior | |-----------|----------| -| `always` | Skills loaded every session when kit is active | +| `always` | Skills loaded every session when pack is
active | | `detect` | Skills loaded when matching files exist in the project | | `manual` | Skills loaded only when the user explicitly requests them | !!! note - When `skill_packs` is present, it takes precedence over the flat `skills` array. Use one or the other, not both. + When `skill_groups` is present, it takes precedence over the flat `skills` array. Use one or the other, not both. ### MCP Servers @@ -220,9 +220,9 @@ mcp: description: "What this server provides" ``` -**Type mapping:** The kit uses user-friendly names that are translated to the config format: +**Type mapping:** The pack uses user-friendly names that are translated to the config format: -| Kit type | Config type | Use case | +| Pack type | Config type | Use case | |----------|-----------|----------| | `stdio` (default) | `local` | Local process via stdin/stdout | | `sse` | `remote` | Server-sent events over HTTP | @@ -231,16 +231,16 @@ mcp: **Environment variables:** - `env`: Default values passed to the MCP server process -- `env_keys`: Names of variables the user must set. Kit activation warns if these are missing. Use this for API keys and secrets that shouldn't have defaults. +- `env_keys`: Names of variables the user must set. Pack activation warns if these are missing. Use this for API keys and secrets that shouldn't have defaults. ### Detection Rules -Auto-suggest the kit when certain files exist in the project: +Auto-suggest the pack when certain files exist in the project: ```yaml detect: - files: ["dbt_project.yml", "dbt_project.yaml"] - message: "Detected dbt project — activate this kit?" + message: "Detected dbt project — activate this pack?" - files: ["**/dagster/**", "workspace.yaml"] message: "Detected Dagster project" @@ -249,11 +249,11 @@ detect: - `files`: Array of glob patterns matched against the project directory - `message`: Optional suggestion text shown to the user -Users discover matching kits via `kit detect` or `kit list --detect`. 
The TUI also shows a nudge on startup when matching kits are found. +Users discover matching packs via `pack detect` or `pack list --detect`. The TUI also shows a nudge on startup when matching packs are found. ### Instructions -Free-form text injected into the AI's system context for every conversation when the kit is active: +Free-form text injected into the AI's system context for every conversation when the pack is active: ```yaml instructions: | @@ -275,23 +275,23 @@ instructions: | ## Publishing to the Registry -The kit registry is hosted at [AltimateAI/data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills). +The pack registry is hosted at [AltimateAI/data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills). ### For Community Contributors -1. Create your kit in your own GitHub repo -2. Test with `kit validate` and `kit activate` +1. Create your pack in your own GitHub repo +2. Test with `pack validate` and `pack activate` 3. Submit a PR to [data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) adding an entry to `registry.json`: ```json { - "name": "my-kit", + "name": "my-pack", "description": "What it does", "version": "1.0.0", "author": "Your Name", "tier": "community", "repo": "your-org/your-repo", - "path": "kits/my-kit", + "path": "packs/my-pack", "tags": ["dbt", "bigquery"], "detect": ["dbt_project.yml"] } @@ -299,13 +299,13 @@ The kit registry is hosted at [AltimateAI/data-engineering-skills](https://githu ### For Vendors (Verified Tier) -To get your kit listed as `verified`: +To get your pack listed as `verified`: -1. Create skills and a kit in your organization's GitHub repo -2. Test thoroughly with `kit validate` and real-world projects +1. Create skills and a pack in your organization's GitHub repo +2. Test thoroughly with `pack validate` and real-world projects 3. Submit a PR to the registry with `"tier": "verified"` -4. 
The Altimate team reviews the kit for quality and correctness -5. Once approved, your kit appears with a `[verified]` badge +4. The Altimate team reviews the pack for quality and correctness +5. Once approved, your pack appears with a `[verified]` badge **Verified tier requirements:** @@ -313,11 +313,11 @@ To get your kit listed as `verified`: - MCP server is published to PyPI or npm - Detection rules are accurate (no false positives) - Instructions are clear and well-structured -- Kit is actively maintained +- Pack is actively maintained ## Examples -### Instructions-Only Kit (Team Standards) +### Instructions-Only Pack (Team Standards) No skills, no MCP — just team conventions: @@ -336,7 +336,7 @@ detect: - files: ["dbt_project.yml"] ``` -### MCP-Only Kit (Tool Integration) +### MCP-Only Pack (Tool Integration) No skills, no instructions — just MCP configuration: @@ -356,7 +356,7 @@ detect: - files: ["**/airbyte_*.py", "airbyte.yaml"] ``` -### Full Kit (Skills + MCP + Instructions) +### Full Pack (Skills + MCP + Instructions) The complete package: @@ -392,18 +392,18 @@ instructions: | detect: - files: ["dbt_project.yml"] - message: "Detected dbt project — activate dbt-snowflake kit?" + message: "Detected dbt project — activate dbt-snowflake pack?" 
``` ## Troubleshooting -### Kit not showing in `kit list` +### Pack not showing in `pack list` -- Check the `KIT.yaml` file is valid: `kit validate ` -- Ensure the file is named exactly `KIT.yaml` (case-sensitive) -- Check the kit directory is under `.opencode/kits/` or another scanned location +- Check the `PACK.yaml` file is valid: `pack validate ` +- Ensure the file is named exactly `PACK.yaml` (case-sensitive) +- Check the pack directory is under `.opencode/packs/` or another scanned location -### Skills fail to install during `kit activate` +### Skills fail to install during `pack activate` - The `source` repo must be accessible (public GitHub or reachable URL) - Skills that already exist locally are skipped with a warning @@ -411,12 +411,12 @@ detect: ### MCP server doesn't start after activation -- Check `kit validate` for missing environment variables +- Check `pack validate` for missing environment variables - Set required env vars in your shell profile or `.env` file - Verify the MCP command is installed: run the command manually (e.g., `uvx dbt-mcp --help`) -### `kit deactivate` didn't clean up +### `pack deactivate` didn't clean up -- `kit deactivate` removes: instruction files, active-kits entry, and MCP config entries -- Skills installed by the kit are NOT removed (they may be shared with other kits) +- `pack deactivate` removes: instruction files, active-packs entry, and MCP config entries +- Skills installed by the pack are NOT removed (they may be shared with other packs) - To fully clean up skills, remove them from `.opencode/skills/` manually diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index dfaebf67bf..f8ab92622c 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -122,7 +122,7 @@ nav: - MCPs & ACPs: - MCP Servers: configure/mcp-servers.md - ACP Support: configure/acp.md - - Kits: configure/kits.md + - Packs: configure/packs.md - Appearance: - Themes: configure/themes.md - Keybinds: configure/keybinds.md @@ -149,5 +149,5 @@ nav: - SDK: 
develop/sdk.md - Server API: develop/server.md - Plugins: develop/plugins.md - - Building Kits: develop/kits.md + - Building Packs: develop/packs.md - Ecosystem: develop/ecosystem.md diff --git a/packages/opencode/AGENTS.md b/packages/opencode/AGENTS.md index 930297baa9..21a8951acc 100644 --- a/packages/opencode/AGENTS.md +++ b/packages/opencode/AGENTS.md @@ -4,7 +4,7 @@ - **Schema**: Drizzle schema lives in `src/**/*.sql.ts`. - **Naming**: tables and columns use snake*case; join columns are `_id`; indexes are `*\_idx`. -- **Migrations**: generated by Drizzle Kit using `drizzle.config.ts` (schema: `./src/**/*.sql.ts`, output: `./migration`). +- **Migrations**: generated by Drizzle Kit using `drizzle.config.ts` (schema: `./src/**/*.sql.ts`, output: `./migration`). - **Command**: `bun run db generate --name `. - **Output**: creates `migration/_/migration.sql` and `snapshot.json`. - **Tests**: migration tests should read the per-folder layout (no `_journal.json`). diff --git a/packages/opencode/src/altimate/telemetry/index.ts b/packages/opencode/src/altimate/telemetry/index.ts index 7e01dcbd13..59aca3f87c 100644 --- a/packages/opencode/src/altimate/telemetry/index.ts +++ b/packages/opencode/src/altimate/telemetry/index.ts @@ -440,28 +440,28 @@ export namespace Telemetry { source: "cli" | "tui" } // altimate_change end - // altimate_change start — kit: telemetry events for kit management + // altimate_change start — pack: telemetry events for pack management | { - type: "kit_created" + type: "pack_created" timestamp: number session_id: string - kit_name: string + pack_name: string source: "cli" | "tui" } | { - type: "kit_installed" + type: "pack_installed" timestamp: number session_id: string install_source: string - kit_count: number - kit_names: string[] + pack_count: number + pack_names: string[] source: "cli" | "tui" } | { - type: "kit_applied" + type: "pack_applied" timestamp: number session_id: string - kit_name: string + pack_name: string skill_count: number 
mcp_count: number plugin_count: number @@ -469,10 +469,10 @@ export namespace Telemetry { source: "cli" | "tui" } | { - type: "kit_removed" + type: "pack_removed" timestamp: number session_id: string - kit_name: string + pack_name: string source: "cli" | "tui" } // altimate_change end diff --git a/packages/opencode/src/cli/cmd/kit.ts b/packages/opencode/src/cli/cmd/pack.ts similarity index 63% rename from packages/opencode/src/cli/cmd/kit.ts rename to packages/opencode/src/cli/cmd/pack.ts index abe560b049..3f2e686226 100644 --- a/packages/opencode/src/cli/cmd/kit.ts +++ b/packages/opencode/src/cli/cmd/pack.ts @@ -1,25 +1,25 @@ -// altimate_change start — kit: top-level `kit` command for managing kit bundles +// altimate_change start — pack: top-level `pack` command for managing pack bundles import { EOL } from "os" import path from "path" import fs from "fs/promises" -import { Kit } from "../../kit" +import { Pack } from "../../pack" import { Skill } from "../../skill" import { bootstrap } from "../bootstrap" import { cmd } from "./cmd" import { Instance } from "../../project/instance" import { Global } from "@/global" import { Telemetry } from "@/altimate/telemetry" -// altimate_change start — kit: jsonc-parser for comment-preserving config writes +// altimate_change start — pack: jsonc-parser for comment-preserving config writes import { modify, applyEdits } from "jsonc-parser" // altimate_change end // --------------------------------------------------------------------------- -// KIT.yaml template +// PACK.yaml template // --------------------------------------------------------------------------- -function kitTemplate(name: string): string { +function packTemplate(name: string): string { return `name: ${name} -description: TODO — describe what this kit configures +description: TODO — describe what this pack configures version: 1.0.0 # Skills to install (from external repos or already-installed names) @@ -36,7 +36,7 @@ mcp: # Auto-detection rules detect: # - 
files: ["config.yaml"] - # message: "Detected my-tool — activate kit?" + # message: "Detected my-tool — activate pack?" # Instructions added to every conversation instructions: | @@ -81,7 +81,7 @@ async function findConfigFile(rootDir: string): Promise<{ filePath: string; conf return { filePath: defaultPath, config: defaultConfig } } -// altimate_change start — kit: JSONC-aware config writes that preserve comments +// altimate_change start — pack: JSONC-aware config writes that preserve comments async function writeConfigField(filePath: string, fieldPath: string[], value: unknown): Promise { let text = "{}" try { text = await fs.readFile(filePath, "utf-8") } catch {} @@ -116,7 +116,7 @@ async function cloneSource(source: string): Promise<{ dir: string; cloned: boole } else if (normalized.startsWith("http://") || normalized.startsWith("https://")) { url = normalized } else if (normalized.match(/^[a-zA-Z0-9_-]+\/[a-zA-Z0-9._-]+$/)) { - // Check if it's a local path first (e.g., "examples/kits" looks like "owner/repo") + // Check if it's a local path first (e.g., "examples/packs" looks like "owner/repo") const resolvedLocal = path.isAbsolute(normalized) ? 
normalized : path.resolve(normalized) try { await fs.access(resolvedLocal) @@ -129,7 +129,7 @@ async function cloneSource(source: string): Promise<{ dir: string; cloned: boole } if (url) { - const tmpDir = path.join(Global.Path.cache, "kit-install-" + Date.now()) + const tmpDir = path.join(Global.Path.cache, "pack-install-" + Date.now()) const proc = Bun.spawnSync(["git", "clone", "--depth", "1", "--", url, tmpDir], { stdout: "pipe", stderr: "pipe", @@ -160,9 +160,9 @@ async function cleanupTmp(dir: string, cloned: boolean) { // Subcommands // --------------------------------------------------------------------------- -const KitListCommand = cmd({ +const PackListCommand = cmd({ command: "list", - describe: "list all available kits", + describe: "list all available packs", builder: (yargs) => yargs .option("json", { @@ -172,41 +172,41 @@ const KitListCommand = cmd({ }) .option("detect", { type: "boolean", - describe: "show only kits matching the current project", + describe: "show only packs matching the current project", default: false, }), async handler(args) { await bootstrap(process.cwd(), async () => { - let kits = await Kit.all() + let packs = await Pack.all() if (args.detect) { - const detected = await Kit.detect() - const detectedNames = new Set(detected.map((d) => d.kit.name)) - kits = kits.filter((r) => detectedNames.has(r.name)) + const detected = await Pack.detect() + const detectedNames = new Set(detected.map((d) => d.pack.name)) + packs = packs.filter((r) => detectedNames.has(r.name)) } // Sort alphabetically - kits.sort((a, b) => a.name.localeCompare(b.name)) + packs.sort((a, b) => a.name.localeCompare(b.name)) if (args.json) { - // altimate_change start — kit: add tier + skill_packs to JSON output - const enriched = kits.map((kit) => { - const hasPacks = kit.skill_packs && Object.keys(kit.skill_packs).length > 0 + // altimate_change start — pack: add tier + skill_groups to JSON output + const enriched = packs.map((pack) => { + const hasPacks = 
pack.skill_groups && Object.keys(pack.skill_groups).length > 0 return { - name: kit.name, - tier: kit.tier || "community", - version: kit.version, - author: kit.author, - description: kit.description, + name: pack.name, + tier: pack.tier || "community", + version: pack.version, + author: pack.author, + description: pack.description, components: { skills: hasPacks - ? Object.values(kit.skill_packs!).reduce((sum, pack) => sum + (pack.skills?.length || 0), 0) - : (Array.isArray(kit.skills) ? kit.skills.length : 0), - skill_packs: hasPacks ? Object.keys(kit.skill_packs!).length : 0, - mcp: kit.mcp ? Object.keys(kit.mcp).length : 0, - plugins: Array.isArray(kit.plugins) ? kit.plugins.length : 0, + ? Object.values(pack.skill_groups!).reduce((sum, pack) => sum + (pack.skills?.length || 0), 0) + : (Array.isArray(pack.skills) ? pack.skills.length : 0), + skill_groups: hasPacks ? Object.keys(pack.skill_groups!).length : 0, + mcp: pack.mcp ? Object.keys(pack.mcp).length : 0, + plugins: Array.isArray(pack.plugins) ? pack.plugins.length : 0, }, - location: kit.location, + location: pack.location, } }) // altimate_change end @@ -215,47 +215,47 @@ const KitListCommand = cmd({ } // Human-readable table output - if (kits.length === 0) { + if (packs.length === 0) { if (args.detect) { - process.stdout.write("No kits matched detection rules for this project." + EOL) - process.stdout.write(EOL + `See all kits: altimate-code kit list` + EOL) + process.stdout.write("No packs matched detection rules for this project." + EOL) + process.stdout.write(EOL + `See all packs: altimate-code pack list` + EOL) } else { - process.stdout.write("No kits found." + EOL) - process.stdout.write(EOL + `Create one with: altimate-code kit create ` + EOL) + process.stdout.write("No packs found." 
+ EOL) + process.stdout.write(EOL + `Create one with: altimate-code pack create ` + EOL) } return } - // altimate_change start — kit: add tier column to table output + // altimate_change start — pack: add tier column to table output // Calculate column widths - const nameWidth = Math.max(6, ...kits.map((r) => r.name.length)) + const nameWidth = Math.max(6, ...packs.map((r) => r.name.length)) const tierWidth = 12 - const versionWidth = Math.max(7, ...kits.map((r) => (r.version || "").length)) + const versionWidth = Math.max(7, ...packs.map((r) => (r.version || "").length)) - const header = `${"KIT".padEnd(nameWidth)} ${"TIER".padEnd(tierWidth)} ${"VERSION".padEnd(versionWidth)} ${"COMPONENTS".padEnd(20)} DESCRIPTION` + const header = `${"PACK".padEnd(nameWidth)} ${"TIER".padEnd(tierWidth)} ${"VERSION".padEnd(versionWidth)} ${"COMPONENTS".padEnd(20)} DESCRIPTION` const separator = "─".repeat(header.length) process.stdout.write(EOL) process.stdout.write(header + EOL) process.stdout.write(separator + EOL) - for (const kit of kits) { - // Count skills from skill_packs if present, otherwise flat skills array - const hasPacks = kit.skill_packs && Object.keys(kit.skill_packs).length > 0 + for (const pack of packs) { + // Count skills from skill_groups if present, otherwise flat skills array + const hasPacks = pack.skill_groups && Object.keys(pack.skill_groups).length > 0 const skillCount = hasPacks - ? Object.values(kit.skill_packs!).reduce((sum, pack) => sum + (pack.skills?.length || 0), 0) - : (Array.isArray(kit.skills) ? kit.skills.length : 0) - const mcpCount = kit.mcp ? Object.keys(kit.mcp).length : 0 - const pluginCount = Array.isArray(kit.plugins) ? kit.plugins.length : 0 - const packCount = hasPacks ? Object.keys(kit.skill_packs!).length : 0 + ? Object.values(pack.skill_groups!).reduce((sum, pack) => sum + (pack.skills?.length || 0), 0) + : (Array.isArray(pack.skills) ? pack.skills.length : 0) + const mcpCount = pack.mcp ? 
Object.keys(pack.mcp).length : 0 + const pluginCount = Array.isArray(pack.plugins) ? pack.plugins.length : 0 + const packCount = hasPacks ? Object.keys(pack.skill_groups!).length : 0 const components = hasPacks ? `${skillCount}sk ${packCount}pk ${mcpCount}mcp` : `${skillCount}sk ${mcpCount}mcp ${pluginCount}pl` - const tier = kit.tier || "community" + const tier = pack.tier || "community" const tierBadge = tier !== "community" ? `[${tier}]` : "" - let desc = kit.description || "" + let desc = pack.description || "" if (desc.length > 50) { desc = desc.slice(0, 50) const lastSpace = desc.lastIndexOf(" ") @@ -264,25 +264,25 @@ const KitListCommand = cmd({ } process.stdout.write( - `${kit.name.padEnd(nameWidth)} ${tierBadge.padEnd(tierWidth)} ${(kit.version || "—").padEnd(versionWidth)} ${components.padEnd(20)} ${desc}` + EOL, + `${pack.name.padEnd(nameWidth)} ${tierBadge.padEnd(tierWidth)} ${(pack.version || "—").padEnd(versionWidth)} ${components.padEnd(20)} ${desc}` + EOL, ) } // altimate_change end process.stdout.write(EOL) - process.stdout.write(`${kits.length} kit(s) found.` + EOL) - process.stdout.write(`Create a new kit: altimate-code kit create ` + EOL) + process.stdout.write(`${packs.length} pack(s) found.` + EOL) + process.stdout.write(`Create a new pack: altimate-code pack create ` + EOL) }) }, }) -const KitCreateCommand = cmd({ +const PackCreateCommand = cmd({ command: "create ", - describe: "scaffold a new kit", + describe: "scaffold a new pack", builder: (yargs) => yargs.positional("name", { type: "string", - describe: "name of the kit to create", + describe: "name of the pack to create", demandOption: true, }), async handler(args) { @@ -291,40 +291,40 @@ const KitCreateCommand = cmd({ // Validate name before bootstrap (fast fail) if (!/^[a-z][a-z0-9]*(-[a-z0-9]+)*$/.test(name) || name.length < 2) { process.stderr.write( - `Error: Kit name must be lowercase alphanumeric with hyphens, at least 2 chars (e.g., "dbt-snowflake")` + EOL, + `Error: Pack name 
must be lowercase alphanumeric with hyphens, at least 2 chars (e.g., "dbt-snowflake")` + EOL, ) process.exit(1) } if (name.length > 64) { - process.stderr.write(`Error: Kit name must be 64 characters or fewer` + EOL) + process.stderr.write(`Error: Pack name must be 64 characters or fewer` + EOL) process.exit(1) } await bootstrap(process.cwd(), async () => { const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory - const kitDir = path.join(rootDir, ".opencode", "kits", name) - const kitFile = path.join(kitDir, "KIT.yaml") + const packDir = path.join(rootDir, ".opencode", "packs", name) + const packFile = path.join(packDir, "PACK.yaml") try { - await fs.access(kitFile) - process.stderr.write(`Error: Kit already exists at ${kitFile}` + EOL) + await fs.access(packFile) + process.stderr.write(`Error: Pack already exists at ${packFile}` + EOL) process.exit(1) } catch { // File doesn't exist, good } - await fs.mkdir(kitDir, { recursive: true }) - await fs.writeFile(kitFile, kitTemplate(name), "utf-8") - process.stdout.write(`✓ Created kit: ${path.relative(rootDir, kitFile)}` + EOL) + await fs.mkdir(packDir, { recursive: true }) + await fs.writeFile(packFile, packTemplate(name), "utf-8") + process.stdout.write(`✓ Created pack: ${path.relative(rootDir, packFile)}` + EOL) // altimate_change start — telemetry try { Telemetry.track({ - type: "kit_created", + type: "pack_created", timestamp: Date.now(), session_id: Telemetry.getContext().sessionId || "", - kit_name: name, + pack_name: name, source: "cli", }) } catch {} @@ -332,50 +332,50 @@ const KitCreateCommand = cmd({ process.stdout.write(EOL) process.stdout.write(`Next steps:` + EOL) - process.stdout.write(` 1. Edit .opencode/kits/${name}/KIT.yaml — configure skills, MCP servers, and instructions` + EOL) - process.stdout.write(` 2. Activate it: altimate-code kit activate ${name}` + EOL) + process.stdout.write(` 1. 
Edit .opencode/packs/${name}/PACK.yaml — configure skills, MCP servers, and instructions` + EOL) + process.stdout.write(` 2. Activate it: altimate-code pack activate ${name}` + EOL) }) }, }) -const KitShowCommand = cmd({ +const PackShowCommand = cmd({ command: "show ", - describe: "display kit details", + describe: "display pack details", builder: (yargs) => yargs.positional("name", { type: "string", - describe: "name of the kit to show", + describe: "name of the pack to show", demandOption: true, }), async handler(args) { const name = args.name as string await bootstrap(process.cwd(), async () => { - const kit = await Kit.get(name) - if (!kit) { - process.stderr.write(`Error: Kit "${name}" not found.` + EOL) + const pack = await Pack.get(name) + if (!pack) { + process.stderr.write(`Error: Pack "${name}" not found.` + EOL) process.exit(1) } - const hasPacks = kit.skill_packs && Object.keys(kit.skill_packs).length > 0 + const hasPacks = pack.skill_groups && Object.keys(pack.skill_groups).length > 0 const skillCount = hasPacks - ? Object.values(kit.skill_packs!).reduce((sum, pack) => sum + (pack.skills?.length || 0), 0) - : (Array.isArray(kit.skills) ? kit.skills.length : 0) - const mcpCount = kit.mcp ? Object.keys(kit.mcp).length : 0 - const pluginCount = Array.isArray(kit.plugins) ? kit.plugins.length : 0 + ? Object.values(pack.skill_groups!).reduce((sum, pack) => sum + (pack.skills?.length || 0), 0) + : (Array.isArray(pack.skills) ? pack.skills.length : 0) + const mcpCount = pack.mcp ? Object.keys(pack.mcp).length : 0 + const pluginCount = Array.isArray(pack.plugins) ? 
pack.plugins.length : 0 process.stdout.write(EOL) - process.stdout.write(` Name: ${kit.name}` + EOL) - process.stdout.write(` Description: ${kit.description || "—"}` + EOL) - process.stdout.write(` Version: ${kit.version || "—"}` + EOL) - process.stdout.write(` Author: ${kit.author || "—"}` + EOL) - process.stdout.write(` Tier: ${kit.tier || "community"}` + EOL) - process.stdout.write(` Location: ${kit.location}` + EOL) + process.stdout.write(` Name: ${pack.name}` + EOL) + process.stdout.write(` Description: ${pack.description || "—"}` + EOL) + process.stdout.write(` Version: ${pack.version || "—"}` + EOL) + process.stdout.write(` Author: ${pack.author || "—"}` + EOL) + process.stdout.write(` Tier: ${pack.tier || "community"}` + EOL) + process.stdout.write(` Location: ${pack.location}` + EOL) process.stdout.write(EOL) - // Skill packs (if present, takes precedence over flat skills) + // Skill groups (if present, takes precedence over flat skills) if (hasPacks) { - const packs = Object.entries(kit.skill_packs!) - process.stdout.write(` Skill Packs (${packs.length}):` + EOL) + const packs = Object.entries(pack.skill_groups!) + process.stdout.write(` Skill Groups (${packs.length}):` + EOL) for (const [packName, pack] of packs) { const badge = pack.activation === "always" ? "●" : pack.activation === "detect" ? "◐" : "○" process.stdout.write(` ${badge} ${packName} (${pack.activation}, ${pack.skills.length} skills)` + EOL) @@ -395,7 +395,7 @@ const KitShowCommand = cmd({ // Flat skills process.stdout.write(` Skills (${skillCount}):` + EOL) if (skillCount > 0) { - for (const skill of kit.skills!) { + for (const skill of pack.skills!) 
{ if (typeof skill === "string") { process.stdout.write(` - ${skill}` + EOL) } else { @@ -411,7 +411,7 @@ const KitShowCommand = cmd({ // MCP servers process.stdout.write(` MCP Servers (${mcpCount}):` + EOL) if (mcpCount > 0) { - for (const [serverName, serverConfig] of Object.entries(kit.mcp!)) { + for (const [serverName, serverConfig] of Object.entries(pack.mcp!)) { const desc = (serverConfig as Record).description || "" process.stdout.write(` - ${serverName}${desc ? `: ${desc}` : ""}` + EOL) } @@ -422,7 +422,7 @@ const KitShowCommand = cmd({ // Plugins process.stdout.write(` Plugins (${pluginCount}):` + EOL) if (pluginCount > 0) { - for (const plugin of kit.plugins!) { + for (const plugin of pack.plugins!) { process.stdout.write(` - ${plugin}` + EOL) } } else { @@ -430,11 +430,11 @@ const KitShowCommand = cmd({ } // Detection rules - const detectCount = Array.isArray(kit.detect) ? kit.detect.length : 0 + const detectCount = Array.isArray(pack.detect) ? pack.detect.length : 0 if (detectCount > 0) { process.stdout.write(EOL) process.stdout.write(` Detection Rules (${detectCount}):` + EOL) - for (const rule of kit.detect!) { + for (const rule of pack.detect!) { const files = Array.isArray(rule.files) ? 
rule.files.join(", ") : "—" process.stdout.write(` - files: [${files}]` + EOL) if (rule.message) { @@ -444,18 +444,18 @@ const KitShowCommand = cmd({ } // Instructions - if (kit.instructions) { + if (pack.instructions) { process.stdout.write(EOL + "─".repeat(60) + EOL + EOL) process.stdout.write(`Instructions:` + EOL + EOL) - process.stdout.write(kit.instructions + EOL) + process.stdout.write(pack.instructions + EOL) } }) }, }) -const KitInstallCommand = cmd({ +const PackInstallCommand = cmd({ command: "install ", - describe: "install a kit from GitHub or a local path", + describe: "install a pack from GitHub or a local path", builder: (yargs) => yargs .positional("source", { @@ -481,8 +481,8 @@ const KitInstallCommand = cmd({ await bootstrap(process.cwd(), async () => { const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory const targetDir = isGlobal - ? path.join(Global.Path.config, "kits") - : path.join(rootDir, ".opencode", "kits") + ? path.join(Global.Path.config, "packs") + : path.join(rootDir, ".opencode", "packs") let fetchDir: string let cloned = false @@ -500,9 +500,9 @@ const KitInstallCommand = cmd({ return // unreachable but satisfies TS } - // Find all KIT.yaml / KIT.yml / KIT.md files + // Find all PACK.yaml / PACK.yml / PACK.md files const { Glob: BunGlob } = globalThis.Bun - const patterns = ["**/KIT.yaml", "**/KIT.yml", "**/KIT.md"] + const patterns = ["**/PACK.yaml", "**/PACK.yml", "**/PACK.md"] const matches: string[] = [] for (const pattern of patterns) { const glob = new BunGlob(pattern) @@ -512,7 +512,7 @@ const KitInstallCommand = cmd({ } if (matches.length === 0) { - process.stderr.write(`Error: No KIT.yaml/KIT.yml/KIT.md files found in ${source}` + EOL) + process.stderr.write(`Error: No PACK.yaml/PACK.yml/PACK.md files found in ${source}` + EOL) await cleanupTmp(fetchDir, cloned) process.exit(1) } @@ -520,45 +520,45 @@ const KitInstallCommand = cmd({ let installed = 0 const installedNames: string[] = [] - for 
(const kitFile of matches) { - const kitParent = path.dirname(kitFile) + for (const packFile of matches) { + const packParent = path.dirname(packFile) - // Parse the YAML to get the kit name (don't rely on directory name) - let kitName: string + // Parse the YAML to get the pack name (don't rely on directory name) + let packName: string try { const matter = (await import("gray-matter")).default - const raw = await fs.readFile(kitFile, "utf-8") - const ext = path.extname(kitFile).toLowerCase() + const raw = await fs.readFile(packFile, "utf-8") + const ext = path.extname(packFile).toLowerCase() const parsed = ext === ".md" ? matter(raw) : matter("---\n" + raw + "\n---") - kitName = (parsed.data.name as string) || path.basename(kitParent) + packName = (parsed.data.name as string) || path.basename(packParent) } catch { - kitName = path.basename(kitParent) + packName = path.basename(packParent) } - // Avoid using temp dir names as kit names - if (kitName.startsWith("kit-install-")) { - process.stdout.write(` ⚠ Skipping "${kitFile}" — could not determine kit name` + EOL) + // Avoid using temp dir names as pack names + if (packName.startsWith("pack-install-")) { + process.stdout.write(` ⚠ Skipping "${packFile}" — could not determine pack name` + EOL) continue } - const dest = path.join(targetDir, kitName) + const dest = path.join(targetDir, packName) // Check if already installed try { await fs.access(dest) - process.stdout.write(` ⚠ Skipping "${kitName}" — already exists` + EOL) + process.stdout.write(` ⚠ Skipping "${packName}" — already exists` + EOL) continue } catch { // Not installed, proceed } - // Copy only the kit directory (not repo root — skip .git, node_modules, etc.) + // Copy only the pack directory (not repo root — skip .git, node_modules, etc.) 
await fs.mkdir(dest, { recursive: true }) - const files = await fs.readdir(kitParent) + const files = await fs.readdir(packParent) for (const file of files) { - // Skip common non-kit files when copying from repo root + // Skip common non-pack files when copying from repo root if ([".git", "node_modules", ".github", "LICENSE", "README.md"].includes(file)) continue - const src = path.join(kitParent, file) + const src = path.join(packParent, file) const dst = path.join(dest, file) const stat = await fs.lstat(src) if (stat.isSymbolicLink()) continue @@ -568,8 +568,8 @@ const KitInstallCommand = cmd({ await fs.cp(src, dst, { recursive: true, dereference: false }) } } - process.stdout.write(` ✓ Installed "${kitName}" → ${path.relative(rootDir, dest)}` + EOL) - installedNames.push(kitName) + process.stdout.write(` ✓ Installed "${packName}" → ${path.relative(rootDir, dest)}` + EOL) + installedNames.push(packName) installed++ } @@ -577,80 +577,80 @@ const KitInstallCommand = cmd({ process.stdout.write(EOL) if (installed > 0) { - process.stdout.write(`${installed} kit(s) installed${isGlobal ? " globally" : ""}.` + EOL) + process.stdout.write(`${installed} pack(s) installed${isGlobal ? 
" globally" : ""}.` + EOL) // altimate_change start — telemetry try { Telemetry.track({ - type: "kit_installed", + type: "pack_installed", timestamp: Date.now(), session_id: Telemetry.getContext().sessionId || "", install_source: source, - kit_count: installed, - kit_names: installedNames, + pack_count: installed, + pack_names: installedNames, source: "cli", }) } catch {} // altimate_change end } else { - process.stdout.write(`No new kits installed.` + EOL) + process.stdout.write(`No new packs installed.` + EOL) } }) }, }) -// altimate_change start — kit: KitApplyCommand removed, functionality merged into KitActivateCommand +// altimate_change start — pack: PackApplyCommand removed, functionality merged into PackActivateCommand // altimate_change end -const KitRemoveCommand = cmd({ +const PackRemoveCommand = cmd({ command: "remove ", - describe: "remove an installed kit", + describe: "remove an installed pack", builder: (yargs) => yargs.positional("name", { type: "string", - describe: "name of the kit to remove", + describe: "name of the pack to remove", demandOption: true, }), async handler(args) { const name = args.name as string await bootstrap(process.cwd(), async () => { - const kit = await Kit.get(name) - if (!kit) { - process.stderr.write(`Error: Kit "${name}" not found.` + EOL) + const pack = await Pack.get(name) + if (!pack) { + process.stderr.write(`Error: Pack "${name}" not found.` + EOL) process.exit(1) } - // Check if kit is tracked by git (part of the repo, not user-installed) - const kitDir = path.dirname(kit.location) - const gitCheck = Bun.spawnSync(["git", "ls-files", "--error-unmatch", kit.location], { - cwd: path.dirname(kitDir), + // Check if pack is tracked by git (part of the repo, not user-installed) + const packDir = path.dirname(pack.location) + const gitCheck = Bun.spawnSync(["git", "ls-files", "--error-unmatch", pack.location], { + cwd: path.dirname(packDir), stdout: "pipe", stderr: "pipe", }) if (gitCheck.exitCode === 0) { 
process.stderr.write(`Error: Cannot remove "${name}" — it is tracked by git.` + EOL) - process.stderr.write(`This kit is part of the repository, not user-installed.` + EOL) + process.stderr.write(`This pack is part of the repository, not user-installed.` + EOL) process.exit(1) } - // Safety: only remove if the directory looks like a kit directory - // (contains the KIT file and is not a top-level scan directory) - const kitBasename = path.basename(kitDir) - if (kitBasename === "kits" || kitBasename === "kit" || kitDir === Instance.directory) { - // The KIT.yaml is at a scan root — only remove the file, not the directory - await fs.rm(kit.location, { force: true }) - process.stdout.write(` ✓ Removed kit file: ${kit.location}` + EOL) + // Safety: only remove if the directory looks like a pack directory + // (contains the PACK file and is not a top-level scan directory) + const packBasename = path.basename(packDir) + if (packBasename === "packs" || packBasename === "pack" || packDir === Instance.directory) { + // The PACK.yaml is at a scan root — only remove the file, not the directory + await fs.rm(pack.location, { force: true }) + process.stdout.write(` ✓ Removed pack file: ${pack.location}` + EOL) } else { - await fs.rm(kitDir, { recursive: true, force: true }) - process.stdout.write(` ✓ Removed kit: ${kitDir}` + EOL) + await fs.rm(packDir, { recursive: true, force: true }) + process.stdout.write(` ✓ Removed pack: ${packDir}` + EOL) } // Deactivate if active, then invalidate cache - await Kit.deactivate(name) - Kit.invalidate() + await Pack.deactivate(name) + Pack.invalidate() - // altimate_change start — kit: clean up instruction file on remove + // altimate_change start — pack: clean up instruction file on remove const rootDir = Instance.worktree !== "/" ? 
Instance.worktree : Instance.directory - const instructionsFile = path.join(rootDir, ".opencode", "instructions", `kit-${name}.md`) + const instructionsFile = path.join(rootDir, ".opencode", "instructions", `pack-${name}.md`) try { await fs.access(instructionsFile) await fs.rm(instructionsFile, { force: true }) @@ -663,67 +663,67 @@ const KitRemoveCommand = cmd({ // altimate_change start — telemetry try { Telemetry.track({ - type: "kit_removed", + type: "pack_removed", timestamp: Date.now(), session_id: Telemetry.getContext().sessionId || "", - kit_name: name, + pack_name: name, source: "cli", }) } catch {} // altimate_change end - process.stdout.write(EOL + `Kit "${name}" removed.` + EOL) + process.stdout.write(EOL + `Pack "${name}" removed.` + EOL) }) }, }) -const KitDetectCommand = cmd({ +const PackDetectCommand = cmd({ command: "detect", - describe: "auto-detect which kits match the current project", + describe: "auto-detect which packs match the current project", builder: (yargs) => yargs, async handler() { await bootstrap(process.cwd(), async () => { - const detected = await Kit.detect() + const detected = await Pack.detect() if (detected.length === 0) { - process.stdout.write("No matching kits detected for this project." + EOL) - process.stdout.write(EOL + `Browse available kits: altimate-code kit list` + EOL) + process.stdout.write("No matching packs detected for this project." 
+ EOL) + process.stdout.write(EOL + `Browse available packs: altimate-code pack list` + EOL) return } process.stdout.write(EOL) - process.stdout.write(`Detected ${detected.length} matching kit(s):` + EOL + EOL) + process.stdout.write(`Detected ${detected.length} matching pack(s):` + EOL + EOL) for (const match of detected) { - process.stdout.write(` ${match.kit.name}` + EOL) - if (match.kit.description) { - process.stdout.write(` ${match.kit.description}` + EOL) + process.stdout.write(` ${match.pack.name}` + EOL) + if (match.pack.description) { + process.stdout.write(` ${match.pack.description}` + EOL) } if (match.matched && match.matched.length > 0) { process.stdout.write(` Matched files: ${match.matched.join(", ")}` + EOL) } // Show the first detection rule that has a message - const firstRuleWithMessage = match.kit.detect?.find((d) => d.message) + const firstRuleWithMessage = match.pack.detect?.find((d) => d.message) if (firstRuleWithMessage?.message) { process.stdout.write(` ${firstRuleWithMessage.message}` + EOL) } process.stdout.write(EOL) } - process.stdout.write(`Activate a kit: altimate-code kit activate ` + EOL) + process.stdout.write(`Activate a pack: altimate-code pack activate ` + EOL) }) }, }) -// altimate_change start — kit: activate subcommand (merged apply + activate into one command) -const KitActivateCommand = cmd({ +// altimate_change start — pack: activate subcommand (merged apply + activate into one command) +const PackActivateCommand = cmd({ command: "activate ", - describe: "activate a kit — install skills, configure MCP, and enable for this project", + describe: "activate a pack — install skills, configure MCP, and enable for this project", builder: (yargs) => yargs .positional("name", { type: "string", - describe: "name of the kit to activate", + describe: "name of the pack to activate", demandOption: true, }) .option("yes", { @@ -735,29 +735,29 @@ const KitActivateCommand = cmd({ async handler(args) { const name = args.name as string 
await bootstrap(process.cwd(), async () => { - const kit = await Kit.get(name) - if (!kit) { - process.stderr.write(`Error: Kit "${name}" not found. Install it first with: altimate-code kit install ` + EOL) + const pack = await Pack.get(name) + if (!pack) { + process.stderr.write(`Error: Pack "${name}" not found. Install it first with: altimate-code pack install ` + EOL) process.exit(1) } const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory - const tier = kit.tier || "community" + const tier = pack.tier || "community" const tierBadge = tier !== "community" ? ` [${tier}]` : "" - // Get all skills — from skill_packs if present, otherwise flat skills - const allSkills = (kit.skill_packs && Object.keys(kit.skill_packs).length > 0) - ? Kit.allSkillsFromPacks(kit) - : (kit.skills || []) + // Get all skills — from skill_groups if present, otherwise flat skills + const allSkills = (pack.skill_groups && Object.keys(pack.skill_groups).length > 0) + ? Pack.allSkillsFromGroups(pack) + : (pack.skills || []) const skillCount = allSkills.length - const mcpCount = kit.mcp ? Object.keys(kit.mcp).length : 0 - const pluginCount = Array.isArray(kit.plugins) ? kit.plugins.length : 0 - const hasInstructions = !!kit.instructions && !kit.instructions.startsWith("TODO") + const mcpCount = pack.mcp ? Object.keys(pack.mcp).length : 0 + const pluginCount = Array.isArray(pack.plugins) ? 
pack.plugins.length : 0 + const hasInstructions = !!pack.instructions && !pack.instructions.startsWith("TODO") // --- Preview --- process.stdout.write(EOL) - process.stdout.write(`Kit: ${kit.name}${tierBadge} (v${kit.version || "0.0.0"})` + EOL) - process.stdout.write(`${kit.description || ""}` + EOL) + process.stdout.write(`Pack: ${pack.name}${tierBadge} (v${pack.version || "0.0.0"})` + EOL) + process.stdout.write(`${pack.description || ""}` + EOL) process.stdout.write(EOL + "The following changes will be applied:" + EOL + EOL) if (skillCount > 0) { @@ -775,7 +775,7 @@ const KitActivateCommand = cmd({ if (mcpCount > 0) { process.stdout.write(` MCP Servers (${mcpCount}):` + EOL) - for (const [serverName, serverConfig] of Object.entries(kit.mcp!)) { + for (const [serverName, serverConfig] of Object.entries(pack.mcp!)) { const desc = (serverConfig as Record).description || "" process.stdout.write(` + ${serverName}${desc ? ` — ${desc}` : ""}` + EOL) } @@ -784,21 +784,21 @@ const KitActivateCommand = cmd({ if (hasInstructions) { process.stdout.write(` Instructions:` + EOL) - process.stdout.write(` + .opencode/instructions/kit-${name}.md` + EOL) + process.stdout.write(` + .opencode/instructions/pack-${name}.md` + EOL) process.stdout.write(EOL) } if (skillCount === 0 && mcpCount === 0 && pluginCount === 0 && !hasInstructions) { - // Still activate (add to active-kits) even if empty — user explicitly asked - await Kit.activate(name) - Kit.invalidate() - process.stdout.write(`Kit "${name}" activated (no changes to apply — kit is empty).` + EOL) + // Still activate (add to active-packs) even if empty — user explicitly asked + await Pack.activate(name) + Pack.invalidate() + process.stdout.write(`Pack "${name}" activated (no changes to apply — pack is empty).` + EOL) return } // --- Confirmation --- if (!args.yes) { - process.stdout.write(`Activate this kit? [y/N] `) + process.stdout.write(`Activate this pack? 
[y/N] `) const response = await new Promise((resolve) => { let data = "" const onData = (chunk: Buffer) => { @@ -826,7 +826,7 @@ const KitActivateCommand = cmd({ process.stdout.write(EOL) - // altimate_change start — kit: track skill install failures for accurate status message + // altimate_change start — pack: track skill install failures for accurate status message let skillFailures = 0 // altimate_change end @@ -902,12 +902,12 @@ const KitActivateCommand = cmd({ const missingEnvKeys: string[] = [] if (mcpCount > 0) { - for (const [serverName, serverDef] of Object.entries(kit.mcp!)) { + for (const [serverName, serverDef] of Object.entries(pack.mcp!)) { const def = serverDef as Record - const kitType = (def.type as string) || "stdio" + const packType = (def.type as string) || "stdio" let configEntry: Record - if (kitType === "sse" || kitType === "streamable-http" || kitType === "remote") { + if (packType === "sse" || packType === "streamable-http" || packType === "remote") { configEntry = { type: "remote", url: def.url as string, ...(def.headers ? { headers: def.headers } : {}) } } else { const command = [...((def.command as string[]) || []), ...((def.args as string[]) || [])] @@ -932,7 +932,7 @@ const KitActivateCommand = cmd({ const { config } = await findConfigFile(rootDir) const plugins = (config.plugin ?? []) as string[] let changed = false - for (const plugin of kit.plugins!) { + for (const plugin of pack.plugins!) { if (!plugins.includes(plugin)) { plugins.push(plugin) changed = true @@ -959,32 +959,32 @@ const KitActivateCommand = cmd({ // --- 3. 
Add instructions --- if (hasInstructions) { const instructionsDir = path.join(rootDir, ".opencode", "instructions") - const instructionsFile = path.join(instructionsDir, `kit-${name}.md`) + const instructionsFile = path.join(instructionsDir, `pack-${name}.md`) await fs.mkdir(instructionsDir, { recursive: true }) - await fs.writeFile(instructionsFile, kit.instructions!, "utf-8") + await fs.writeFile(instructionsFile, pack.instructions!, "utf-8") process.stdout.write(` ✓ Created instructions: ${path.relative(rootDir, instructionsFile)}` + EOL) } - // --- 4. Activate (add to active-kits) --- - await Kit.activate(name) - Kit.invalidate() + // --- 4. Activate (add to active-packs) --- + await Pack.activate(name) + Pack.invalidate() process.stdout.write(EOL) - // altimate_change start — kit: report partial failures in activation message + // altimate_change start — pack: report partial failures in activation message if (skillFailures > 0) { - process.stdout.write(`Kit "${name}" activated with ${skillFailures} skill source(s) unavailable.` + EOL) - process.stdout.write(`Run 'altimate-code kit show ${name}' to see expected skills.` + EOL) + process.stdout.write(`Pack "${name}" activated with ${skillFailures} skill source(s) unavailable.` + EOL) + process.stdout.write(`Run 'altimate-code pack show ${name}' to see expected skills.` + EOL) } else { - process.stdout.write(`Kit "${name}" activated successfully.` + EOL) + process.stdout.write(`Pack "${name}" activated successfully.` + EOL) } // altimate_change end try { Telemetry.track({ - type: "kit_applied", + type: "pack_applied", timestamp: Date.now(), session_id: Telemetry.getContext().sessionId || "", - kit_name: name, + pack_name: name, skill_count: skillCount, mcp_count: mcpCount, plugin_count: pluginCount, @@ -997,29 +997,29 @@ const KitActivateCommand = cmd({ }) // altimate_change end -// altimate_change start — kit: deactivate subcommand -const KitDeactivateCommand = cmd({ +// altimate_change start — pack: deactivate 
subcommand +const PackDeactivateCommand = cmd({ command: "deactivate ", - describe: "deactivate a kit for the current project", + describe: "deactivate a pack for the current project", builder: (yargs) => yargs.positional("name", { type: "string", - describe: "name of the kit to deactivate", + describe: "name of the pack to deactivate", demandOption: true, }), async handler(args) { const name = args.name as string await bootstrap(process.cwd(), async () => { - // Read kit BEFORE deactivating so we know what MCP servers to clean - const kit = await Kit.get(name) + // Read pack BEFORE deactivating so we know what MCP servers to clean + const pack = await Pack.get(name) - await Kit.deactivate(name) - process.stdout.write(`✓ Deactivated kit: ${name}` + EOL) + await Pack.deactivate(name) + process.stdout.write(`✓ Deactivated pack: ${name}` + EOL) const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory - // altimate_change start — kit: clean up instruction file on deactivate - const instructionsFile = path.join(rootDir, ".opencode", "instructions", `kit-${name}.md`) + // altimate_change start — pack: clean up instruction file on deactivate + const instructionsFile = path.join(rootDir, ".opencode", "instructions", `pack-${name}.md`) try { await fs.access(instructionsFile) await fs.rm(instructionsFile, { force: true }) @@ -1027,12 +1027,12 @@ const KitDeactivateCommand = cmd({ } catch {} // altimate_change end - // altimate_change start — kit: clean up MCP config entries added by this kit (JSONC-preserving) - if (kit?.mcp && Object.keys(kit.mcp).length > 0) { + // altimate_change start — pack: clean up MCP config entries added by this pack (JSONC-preserving) + if (pack?.mcp && Object.keys(pack.mcp).length > 0) { try { const { filePath } = await findConfigFile(rootDir) let removed = 0 - for (const serverName of Object.keys(kit.mcp)) { + for (const serverName of Object.keys(pack.mcp)) { if (await removeConfigField(filePath, ["mcp", serverName])) { 
removed++ } @@ -1048,12 +1048,12 @@ const KitDeactivateCommand = cmd({ }) // altimate_change end -// altimate_change start — kit: search subcommand +// altimate_change start — pack: search subcommand const REGISTRY_URL = "https://raw.githubusercontent.com/AltimateAI/data-engineering-skills/main/registry.json" -const KitSearchCommand = cmd({ +const PackSearchCommand = cmd({ command: "search [query]", - describe: "search the kit registry", + describe: "search the pack registry", builder: (yargs) => yargs .positional("query", { @@ -1069,9 +1069,9 @@ const KitSearchCommand = cmd({ const query = ((args.query as string) || "").toLowerCase().trim() await bootstrap(process.cwd(), async () => { - process.stdout.write(`Searching kit registry...` + EOL) + process.stdout.write(`Searching pack registry...` + EOL) - // altimate_change start — kit: graceful 404 + timeout for registry fetch + // altimate_change start — pack: graceful 404 + timeout for registry fetch let registry: any const controller = new AbortController() const timeout = setTimeout(() => controller.abort(), 5000) @@ -1080,9 +1080,9 @@ const KitSearchCommand = cmd({ clearTimeout(timeout) if (!response.ok) { if (response.status === 404) { - process.stdout.write(`Kit registry not available yet.` + EOL) - process.stdout.write(EOL + `Browse local kits: altimate-code kit list` + EOL) - process.stdout.write(`Create your own: altimate-code kit create ` + EOL) + process.stdout.write(`Pack registry not available yet.` + EOL) + process.stdout.write(EOL + `Browse local packs: altimate-code pack list` + EOL) + process.stdout.write(`Create your own: altimate-code pack create ` + EOL) return } process.stderr.write(`Error: Failed to fetch registry (${response.status})` + EOL) @@ -1092,16 +1092,16 @@ const KitSearchCommand = cmd({ } catch (err) { clearTimeout(timeout) if ((err as Error).name === "AbortError") { - process.stdout.write(`Kit registry unavailable (timeout).` + EOL) + process.stdout.write(`Pack registry unavailable 
(timeout).` + EOL) } else { process.stderr.write(`Error: Failed to fetch registry: ${(err as Error).message}` + EOL) } - process.stdout.write(EOL + `Browse local kits: altimate-code kit list` + EOL) + process.stdout.write(EOL + `Browse local packs: altimate-code pack list` + EOL) process.exit(1) } // altimate_change end - const kits = (registry.kits || []) as Array<{ + const packs = (registry.packs || []) as Array<{ name: string description: string version: string @@ -1116,11 +1116,11 @@ const KitSearchCommand = cmd({ // Filter by query const results = query - ? kits.filter((r) => { + ? packs.filter((r) => { const searchable = [r.name, r.description, ...(r.tags || []), r.author || ""].join(" ").toLowerCase() return searchable.includes(query) }) - : kits + : packs if (args.json) { process.stdout.write(JSON.stringify(results, null, 2) + EOL) @@ -1128,7 +1128,7 @@ const KitSearchCommand = cmd({ } if (results.length === 0) { - process.stdout.write(`No kits found${query ? ` matching "${query}"` : ""}.` + EOL) + process.stdout.write(`No packs found${query ? ` matching "${query}"` : ""}.` + EOL) return } @@ -1136,15 +1136,15 @@ const KitSearchCommand = cmd({ const nameWidth = Math.max(6, ...results.map((r) => r.name.length)) const tierWidth = 10 - const header = `${"KIT".padEnd(nameWidth)} ${"TIER".padEnd(tierWidth)} DESCRIPTION` + const header = `${"PACK".padEnd(nameWidth)} ${"TIER".padEnd(tierWidth)} DESCRIPTION` const separator = "─".repeat(header.length) process.stdout.write(EOL) process.stdout.write(header + EOL) process.stdout.write(separator + EOL) - for (const kit of results) { - let desc = kit.description || "" + for (const pack of results) { + let desc = pack.description || "" if (desc.length > 50) { desc = desc.slice(0, 50) const lastSpace = desc.lastIndexOf(" ") @@ -1152,50 +1152,50 @@ const KitSearchCommand = cmd({ desc += "..." 
} - const tier = kit.tier || "community" - process.stdout.write(`${kit.name.padEnd(nameWidth)} ${tier.padEnd(tierWidth)} ${desc}` + EOL) + const tier = pack.tier || "community" + process.stdout.write(`${pack.name.padEnd(nameWidth)} ${tier.padEnd(tierWidth)} ${desc}` + EOL) } process.stdout.write(EOL) - process.stdout.write(`${results.length} kit(s) found in registry.` + EOL) - process.stdout.write(`Install with: altimate-code kit install ` + EOL) + process.stdout.write(`${results.length} pack(s) found in registry.` + EOL) + process.stdout.write(`Install with: altimate-code pack install ` + EOL) }) }, }) // altimate_change end -// altimate_change start — kit: status subcommand -const KitStatusCommand = cmd({ +// altimate_change start — pack: status subcommand +const PackStatusCommand = cmd({ command: "status", - describe: "show active kits for the current project", + describe: "show active packs for the current project", builder: (yargs) => yargs, async handler() { await bootstrap(process.cwd(), async () => { - const activeKits = await Kit.active() + const activePacks = await Pack.active() - if (activeKits.length === 0) { - process.stdout.write("No active kits for this project." + EOL) - process.stdout.write(EOL + `Activate one: altimate-code kit activate ` + EOL) - process.stdout.write(`Auto-detect: altimate-code kit detect` + EOL) + if (activePacks.length === 0) { + process.stdout.write("No active packs for this project." + EOL) + process.stdout.write(EOL + `Activate one: altimate-code pack activate ` + EOL) + process.stdout.write(`Auto-detect: altimate-code pack detect` + EOL) return } process.stdout.write(EOL) - process.stdout.write(`Active kits (${activeKits.length}):` + EOL + EOL) + process.stdout.write(`Active packs (${activePacks.length}):` + EOL + EOL) - for (const kit of activeKits) { - const tier = kit.tier || "community" + for (const pack of activePacks) { + const tier = pack.tier || "community" const tierBadge = tier !== "community" ? 
` [${tier}]` : "" - process.stdout.write(` ${kit.name}${tierBadge}` + EOL) - if (kit.description) { - process.stdout.write(` ${kit.description}` + EOL) + process.stdout.write(` ${pack.name}${tierBadge}` + EOL) + if (pack.description) { + process.stdout.write(` ${pack.description}` + EOL) } - // Show skill packs if any - if (kit.skill_packs && Object.keys(kit.skill_packs).length > 0) { - for (const [packName, pack] of Object.entries(kit.skill_packs)) { - const badge = pack.activation === "always" ? "●" : pack.activation === "detect" ? "◐" : "○" - process.stdout.write(` ${badge} ${packName} (${pack.activation}, ${pack.skills.length} skills)` + EOL) + // Show skill groups if any + if (pack.skill_groups && Object.keys(pack.skill_groups).length > 0) { + for (const [groupName, group] of Object.entries(pack.skill_groups)) { + const badge = group.activation === "always" ? "●" : group.activation === "detect" ? "◐" : "○" + process.stdout.write(` ${badge} ${groupName} (${group.activation}, ${group.skills.length} skills)` + EOL) } } @@ -1206,26 +1206,26 @@ const KitStatusCommand = cmd({ }) // altimate_change end -// altimate_change start — kit: validate subcommand -const KitValidateCommand = cmd({ +// altimate_change start — pack: validate subcommand +const PackValidateCommand = cmd({ command: "validate [name]", - describe: "validate a kit's YAML format and references", + describe: "validate a pack's YAML format and references", builder: (yargs) => yargs.positional("name", { type: "string", - describe: "name of the kit to validate (defaults to all)", + describe: "name of the pack to validate (defaults to all)", }), async handler(args) { const targetName = args.name as string | undefined await bootstrap(process.cwd(), async () => { - const kits = targetName ? [await Kit.get(targetName)].filter(Boolean) : await Kit.all() + const packs = targetName ? 
[await Pack.get(targetName)].filter(Boolean) : await Pack.all() - if (kits.length === 0) { + if (packs.length === 0) { if (targetName) { - process.stderr.write(`Error: Kit "${targetName}" not found.` + EOL) + process.stderr.write(`Error: Pack "${targetName}" not found.` + EOL) process.exit(1) } - process.stdout.write("No kits to validate." + EOL) + process.stdout.write("No packs to validate." + EOL) return } @@ -1234,34 +1234,34 @@ const KitValidateCommand = cmd({ const fail = (msg: string) => { process.stdout.write(` ✗ ${msg}` + EOL); hasErrors = true } const warn = (msg: string) => process.stdout.write(` ⚠ ${msg}` + EOL) - for (const kit of kits as Kit.Info[]) { - process.stdout.write(EOL + `Validating: ${kit.name}` + EOL + EOL) + for (const pack of packs as Pack.Info[]) { + process.stdout.write(EOL + `Validating: ${pack.name}` + EOL + EOL) // 1. Name format - if (/^[a-z][a-z0-9]*(-[a-z0-9]+)*$/.test(kit.name)) { - pass(`Name "${kit.name}" is valid`) + if (/^[a-z][a-z0-9]*(-[a-z0-9]+)*$/.test(pack.name)) { + pass(`Name "${pack.name}" is valid`) } else { - fail(`Name "${kit.name}" has invalid format (must be lowercase, hyphens, 2+ chars)`) + fail(`Name "${pack.name}" has invalid format (must be lowercase, hyphens, 2+ chars)`) } // 2. Description - if (kit.description && !kit.description.startsWith("TODO")) { + if (pack.description && !pack.description.startsWith("TODO")) { pass(`Description present`) } else { warn(`Description is missing or starts with TODO`) } // 3. Version - if (kit.version && /^\d+\.\d+\.\d+/.test(kit.version)) { - pass(`Version "${kit.version}" is valid semver`) + if (pack.version && /^\d+\.\d+\.\d+/.test(pack.version)) { + pass(`Version "${pack.version}" is valid semver`) } else { - warn(`Version "${kit.version || "(none)"}" may not be valid semver`) + warn(`Version "${pack.version || "(none)"}" may not be valid semver`) } // 4. Skills references - const allSkills = (kit.skill_packs && Object.keys(kit.skill_packs).length > 0) - ? 
Kit.allSkillsFromPacks(kit) - : (kit.skills || []) + const allSkills = (pack.skill_groups && Object.keys(pack.skill_groups).length > 0) + ? Pack.allSkillsFromGroups(pack) + : (pack.skills || []) if (allSkills.length > 0) { pass(`${allSkills.length} skill source(s) defined`) for (const skill of allSkills) { @@ -1280,8 +1280,8 @@ const KitValidateCommand = cmd({ } // 5. MCP servers - if (kit.mcp && Object.keys(kit.mcp).length > 0) { - for (const [name, config] of Object.entries(kit.mcp)) { + if (pack.mcp && Object.keys(pack.mcp).length > 0) { + for (const [name, config] of Object.entries(pack.mcp)) { const cfg = config as Record const type = (cfg.type as string) || "stdio" if (type === "stdio" || type === "local") { @@ -1312,15 +1312,15 @@ const KitValidateCommand = cmd({ } // 6. Detection rules - if (kit.detect && kit.detect.length > 0) { - pass(`${kit.detect.length} detection rule(s) defined`) + if (pack.detect && pack.detect.length > 0) { + pass(`${pack.detect.length} detection rule(s) defined`) } else { - warn(`No detection rules — kit won't appear in 'kit detect'`) + warn(`No detection rules — pack won't appear in 'pack detect'`) } // 7. 
Instructions - if (kit.instructions && !kit.instructions.startsWith("TODO")) { - pass(`Instructions present (${kit.instructions.split("\n").length} lines)`) + if (pack.instructions && !pack.instructions.startsWith("TODO")) { + pass(`Instructions present (${pack.instructions.split("\n").length} lines)`) } else { warn(`Instructions missing or placeholder`) } @@ -1339,26 +1339,26 @@ const KitValidateCommand = cmd({ // altimate_change end // --------------------------------------------------------------------------- -// Top-level kit command +// Top-level pack command // --------------------------------------------------------------------------- -export const KitCommand = cmd({ - command: "kit", - describe: "manage kits — bundles of skills, MCP servers, and plugins", +export const PackCommand = cmd({ + command: "pack", + describe: "manage packs — bundles of skills, MCP servers, and plugins", builder: (yargs) => yargs - .command(KitListCommand) - .command(KitCreateCommand) - .command(KitShowCommand) - .command(KitInstallCommand) - .command(KitRemoveCommand) - .command(KitDetectCommand) - // altimate_change start — kit: register new subcommands - .command(KitActivateCommand) - .command(KitDeactivateCommand) - .command(KitSearchCommand) - .command(KitStatusCommand) - .command(KitValidateCommand) + .command(PackListCommand) + .command(PackCreateCommand) + .command(PackShowCommand) + .command(PackInstallCommand) + .command(PackRemoveCommand) + .command(PackDetectCommand) + // altimate_change start — pack: register new subcommands + .command(PackActivateCommand) + .command(PackDeactivateCommand) + .command(PackSearchCommand) + .command(PackStatusCommand) + .command(PackValidateCommand) // altimate_change end .demandCommand(), async handler() {}, diff --git a/packages/opencode/src/cli/cmd/tui/thread.ts b/packages/opencode/src/cli/cmd/tui/thread.ts index c61f767fae..f6451f2c93 100644 --- a/packages/opencode/src/cli/cmd/tui/thread.ts +++ 
b/packages/opencode/src/cli/cmd/tui/thread.ts @@ -14,7 +14,7 @@ import type { EventSource } from "./context/sdk" import { win32DisableProcessedInput, win32InstallCtrlCGuard } from "./win32" import { TuiConfig } from "@/config/tui" import { Instance } from "@/project/instance" -// altimate_change start — kit: Kit imported dynamically in setTimeout below to avoid test mock issues +// altimate_change start — pack: Pack imported dynamically in setTimeout below to avoid test mock issues import { EOL } from "os" // altimate_change end @@ -176,23 +176,23 @@ export const TuiThreadCommand = cmd({ fn: () => TuiConfig.get(), }) - // altimate_change start — kit: non-blocking kit detection nudge on TUI startup + // altimate_change start — pack: non-blocking pack detection nudge on TUI startup // Deferred to avoid interfering with TUI initialization and test mocks. - // Uses setTimeout + dynamic import so Kit module is not required at parse time. + // Uses setTimeout + dynamic import so Pack module is not required at parse time. 
setTimeout(async () => { try { - const { Kit } = await import("../../../kit") - const activeKits = await Kit.active() - if (activeKits.length > 0) return - const detected = await Kit.detect() + const { Pack } = await import("../../../pack") + const activePacks = await Pack.active() + if (activePacks.length > 0) return + const detected = await Pack.detect() if (detected.length > 0) { const first = detected[0] process.stderr.write( - `\x1b[2m\u{1F4A1} Kit available: ${first.kit.name} \u2014 run /kit activate ${first.kit.name}\x1b[0m` + EOL, + `\x1b[2m\u{1F4A1} Pack available: ${first.pack.name} \u2014 run /pack activate ${first.pack.name}\x1b[0m` + EOL, ) } } catch { - // Kit detection is best-effort; never block startup + // Pack detection is best-effort; never block startup } }, 100) // altimate_change end diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index 504e997ef4..e4d750fed5 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -1076,8 +1076,8 @@ export namespace Config { .optional() .describe("Command configuration, see https://altimate.ai/docs/commands"), skills: Skills.optional().describe("Additional skill folder paths"), - // altimate_change start — kit: config schema for kit paths and URLs - kits: Skills.optional().describe("Additional kit folder paths and URLs (same shape as skills config)"), + // altimate_change start — pack: config schema for pack paths and URLs + packs: Skills.optional().describe("Additional pack folder paths and URLs (same shape as skills config)"), // altimate_change end watcher: z .object({ diff --git a/packages/opencode/src/index.ts b/packages/opencode/src/index.ts index 54e562ac27..9fe46b72f0 100644 --- a/packages/opencode/src/index.ts +++ b/packages/opencode/src/index.ts @@ -39,8 +39,8 @@ import { TraceCommand } from "./cli/cmd/trace" // altimate_change start — top-level skill command import { SkillCommand } from "./cli/cmd/skill" // 
altimate_change end -// altimate_change start — kit: top-level kit command -import { KitCommand } from "./cli/cmd/kit" +// altimate_change start — pack: top-level pack command +import { PackCommand } from "./cli/cmd/pack" // altimate_change end // altimate_change start — check: deterministic SQL check command import { CheckCommand } from "./cli/cmd/check" @@ -217,8 +217,8 @@ let cli = yargs(hideBin(process.argv)) // altimate_change start — top-level skill command .command(SkillCommand) // altimate_change end - // altimate_change start — kit: register kit command - .command(KitCommand) + // altimate_change start — pack: register pack command + .command(PackCommand) // altimate_change end // altimate_change start — check: register deterministic SQL check command .command(CheckCommand) diff --git a/packages/opencode/src/kit/index.ts b/packages/opencode/src/kit/index.ts deleted file mode 100644 index 010ec61362..0000000000 --- a/packages/opencode/src/kit/index.ts +++ /dev/null @@ -1,3 +0,0 @@ -// altimate_change start — kit: re-export kit module -export { Kit } from "./kit" -// altimate_change end diff --git a/packages/opencode/src/pack/index.ts b/packages/opencode/src/pack/index.ts new file mode 100644 index 0000000000..37b6adc5b1 --- /dev/null +++ b/packages/opencode/src/pack/index.ts @@ -0,0 +1,3 @@ +// altimate_change start — pack: re-export pack module +export { Pack } from "./pack" +// altimate_change end diff --git a/packages/opencode/src/kit/kit.ts b/packages/opencode/src/pack/pack.ts similarity index 69% rename from packages/opencode/src/kit/kit.ts rename to packages/opencode/src/pack/pack.ts index efd7cd9243..be1162c54b 100644 --- a/packages/opencode/src/kit/kit.ts +++ b/packages/opencode/src/pack/pack.ts @@ -1,4 +1,4 @@ -// altimate_change start — kit: core kit module for bundling skills + MCP + plugins + instructions +// altimate_change start — pack: core pack module for bundling skills + MCP + plugins + instructions import z from "zod" import path from 
"path" import { mkdir, writeFile, unlink } from "fs/promises" @@ -11,12 +11,12 @@ import { Global } from "@/global" import { Filesystem } from "@/util/filesystem" import { Glob } from "../util/glob" -export namespace Kit { - const log = Log.create({ service: "kit" }) +export namespace Pack { + const log = Log.create({ service: "pack" }) - // Kit YAML schema - this is what goes in KIT.yaml frontmatter or body + // Pack YAML schema - this is what goes in PACK.yaml frontmatter or body export const McpConfig = z.object({ - // Kit uses user-friendly names: "stdio" → mapped to "local", "sse"/"streamable-http" → mapped to "remote" + // Pack uses user-friendly names: "stdio" → mapped to "local", "sse"/"streamable-http" → mapped to "remote" type: z.enum(["stdio", "sse", "streamable-http", "local", "remote"]).default("stdio"), command: z.array(z.string()).optional(), args: z.array(z.string()).optional(), @@ -29,7 +29,7 @@ export namespace Kit { description: z.string().optional(), }) - // altimate_change start — kit: trust tier enum for kit provenance + // altimate_change start — pack: trust tier enum for pack provenance export const Tier = z .string() .transform((v) => v?.toLowerCase()) @@ -38,8 +38,8 @@ export namespace Kit { export type Tier = z.infer // altimate_change end - // altimate_change start — kit: skill pack schema for grouped skill activation - export const SkillPack = z.object({ + // altimate_change start — pack: skill group schema for grouped skill activation + export const SkillGroup = z.object({ description: z.string().optional(), skills: z .array( @@ -64,7 +64,7 @@ export namespace Kit { .transform((v) => v ?? 
[]) .default([]), }) - export type SkillPack = z.infer + export type SkillGroup = z.infer // altimate_change end export const Info = z.object({ @@ -72,25 +72,25 @@ export namespace Kit { description: z.string(), version: z.string().optional().default("1.0.0"), author: z.string().optional(), - location: z.string(), // filesystem path where the kit was loaded from + location: z.string(), // filesystem path where the pack was loaded from - // altimate_change start — kit: trust tier field + // altimate_change start — pack: trust tier field // Trust tier tier: Tier.nullable().optional().transform((v) => v ?? "community").default("community"), // altimate_change end - // altimate_change start — kit: skill packs with activation modes - // Skill packs — organized groups of skills with activation modes + // altimate_change start — pack: skill groups with activation modes + // Skill groups — organized groups of skills with activation modes // When present, takes precedence over flat `skills` array - skill_packs: z - .record(z.string(), SkillPack) + skill_groups: z + .record(z.string(), SkillGroup) .nullable() .optional() .transform((v) => v ?? {}) .default({}), // altimate_change end - // What the kit bundles + // What the pack bundles // Note: YAML parses `key: []` with trailing comments as null, so we accept nullable skills: z .array( @@ -134,13 +134,13 @@ export namespace Kit { .transform((v) => v ?? 
undefined) .describe("Additional system instructions added to every conversation"), - // Auto-detection: when to suggest this kit + // Auto-detection: when to suggest this pack detect: z .array( z.object({ files: z .array(z.string()) - .describe("Glob patterns that indicate this kit is relevant"), + .describe("Glob patterns that indicate this pack is relevant"), message: z .string() .optional() @@ -159,36 +159,36 @@ export namespace Kit { // --- State management (mirrors Skill.state pattern) --- - const KIT_FILE_PATTERN = "KIT.{yaml,yml,md}" + const PACK_FILE_PATTERN = "PACK.{yaml,yml,md}" const stateInit: () => Promise<{ - kits: Record + packs: Record dirs: string[] }> = async () => { - const kits: Record = {} + const packs: Record = {} const dirs = new Set() const config = await Config.get() - // 1. Scan .opencode/kits/ and .altimate-code/kits/ directories + // 1. Scan .opencode/packs/ and .altimate-code/packs/ directories for (const dir of await Config.directories()) { - const matches = await Glob.scan(`{kit,kits}/**/${KIT_FILE_PATTERN}`, { + const matches = await Glob.scan(`{pack,packs}/**/${PACK_FILE_PATTERN}`, { cwd: dir, absolute: true, dot: true, symlink: true, }) for (const item of matches) { - const kit = await loadKit(item) - if (kit) { - kits[kit.name] = kit + const pack = await loadPack(item) + if (pack) { + packs[pack.name] = pack dirs.add(path.dirname(item)) } } } // 2. 
Load from config paths - if (config.kits?.paths) { - for (let p of config.kits.paths) { + if (config.packs?.paths) { + for (let p of config.packs.paths) { if (p.startsWith("~/")) p = path.join(Global.Path.home, p.slice(2)) if (!path.isAbsolute(p)) p = path.resolve(Instance.directory, p) @@ -196,48 +196,48 @@ export namespace Kit { if (!stat) continue if (stat.isDirectory()) { - const matches = await Glob.scan(KIT_FILE_PATTERN, { + const matches = await Glob.scan(PACK_FILE_PATTERN, { cwd: p, absolute: true, dot: true, symlink: true, }) for (const item of matches) { - const kit = await loadKit(item) - if (kit) { - kits[kit.name] = kit + const pack = await loadPack(item) + if (pack) { + packs[pack.name] = pack dirs.add(p) } } } else { - const kit = await loadKit(p) - if (kit) { - kits[kit.name] = kit + const pack = await loadPack(p) + if (pack) { + packs[pack.name] = pack dirs.add(path.dirname(p)) } } } } - // 3. Load from installed kits directory - const installedDir = path.join(Global.Path.data, "kits") + // 3. 
Load from installed packs directory + const installedDir = path.join(Global.Path.data, "packs") if (await Filesystem.exists(installedDir)) { - const matches = await Glob.scan(KIT_FILE_PATTERN, { + const matches = await Glob.scan(PACK_FILE_PATTERN, { cwd: installedDir, absolute: true, dot: true, symlink: true, }) for (const item of matches) { - const kit = await loadKit(item) - if (kit) { - kits[kit.name] = kit + const pack = await loadPack(item) + if (pack) { + packs[pack.name] = pack dirs.add(installedDir) } } } - return { kits, dirs: Array.from(dirs) } + return { packs, dirs: Array.from(dirs) } } export const state = Instance.state(stateInit) @@ -248,7 +248,7 @@ export namespace Kit { // --- Loading --- - async function loadKit(filePath: string): Promise { + async function loadPack(filePath: string): Promise { try { const raw = await Filesystem.readText(filePath) if (!raw) return undefined @@ -277,7 +277,7 @@ export namespace Kit { }) if (!result.success) { - log.warn("invalid kit", { + log.warn("invalid pack", { path: filePath, issues: result.error.issues, }) @@ -286,13 +286,13 @@ export namespace Kit { // Validate name to prevent path traversal if (result.data.name && !/^[a-z][a-z0-9]*(-[a-z0-9]+)*$/.test(result.data.name)) { - log.warn("invalid kit name", { path: filePath, name: result.data.name }) + log.warn("invalid pack name", { path: filePath, name: result.data.name }) return undefined } return result.data } catch (err) { - log.error("failed to load kit", { path: filePath, err }) + log.error("failed to load pack", { path: filePath, err }) return undefined } } @@ -300,11 +300,11 @@ export namespace Kit { // --- Public API --- export async function get(name: string): Promise { - return state().then((s) => s.kits[name]) + return state().then((s) => s.packs[name]) } export async function all(): Promise { - return state().then((s) => Object.values(s.kits)) + return state().then((s) => Object.values(s.packs)) } export async function dirs(): Promise { @@ -313,18 
+313,18 @@ export namespace Kit { // --- Detection --- - /** Check which installed kits match the current project */ + /** Check which installed packs match the current project */ export async function detect(): Promise< - Array<{ kit: Info; matched: string[] }> + Array<{ pack: Info; matched: string[] }> > { - const kits = await all() - const results: Array<{ kit: Info; matched: string[] }> = [] + const packs = await all() + const results: Array<{ pack: Info; matched: string[] }> = [] - for (const kit of kits) { - if (!kit.detect || kit.detect.length === 0) continue + for (const pack of packs) { + if (!pack.detect || pack.detect.length === 0) continue const matchedFiles: string[] = [] - for (const rule of kit.detect) { + for (const rule of pack.detect) { for (const pattern of rule.files) { const matches = await Glob.scan(pattern, { cwd: Instance.directory, @@ -339,34 +339,34 @@ export namespace Kit { } if (matchedFiles.length > 0) { - results.push({ kit, matched: [...new Set(matchedFiles)] }) + results.push({ pack, matched: [...new Set(matchedFiles)] }) } } return results } - // altimate_change start — kit: active kit management and context scoping - /** Get active kits for the current project (reads .opencode/active-kits) */ + // altimate_change start — pack: active pack management and context scoping + /** Get active packs for the current project (reads .opencode/active-packs) */ export async function active(): Promise { - const activeFile = await findActiveKitsFile() + const activeFile = await findActivePacksFile() if (!activeFile) return [] try { const raw = await Filesystem.readText(activeFile) if (!raw) return [] const names = raw.split("\n").map((l) => l.trim()).filter(Boolean) - const all = await state().then((s) => s.kits) + const all = await state().then((s) => s.packs) return names.map((n) => all[n]).filter((r): r is Info => !!r) } catch { return [] } } - /** Activate a kit for the current project */ + /** Activate a pack for the current project */ 
export async function activate(name: string): Promise { const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory - const activeFile = path.join(rootDir, ".opencode", "active-kits") + const activeFile = path.join(rootDir, ".opencode", "active-packs") let names: string[] = [] try { @@ -382,10 +382,10 @@ export namespace Kit { await writeFile(activeFile, names.join("\n") + "\n", "utf-8") } - /** Deactivate a kit for the current project */ + /** Deactivate a pack for the current project */ export async function deactivate(name: string): Promise { const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory - const activeFile = path.join(rootDir, ".opencode", "active-kits") + const activeFile = path.join(rootDir, ".opencode", "active-packs") let names: string[] = [] try { @@ -402,11 +402,11 @@ export namespace Kit { } } - async function findActiveKitsFile(): Promise { + async function findActivePacksFile(): Promise { const rootDir = Instance.worktree !== "/" ? 
Instance.worktree : Instance.directory const candidates = [ - path.join(rootDir, ".opencode", "active-kits"), - path.join(rootDir, ".altimate-code", "active-kits"), + path.join(rootDir, ".opencode", "active-packs"), + path.join(rootDir, ".altimate-code", "active-packs"), ] for (const f of candidates) { if (await Filesystem.exists(f)) return f @@ -414,14 +414,14 @@ export namespace Kit { return undefined } - /** Get all skills referenced by a kit's skill_packs */ - export function allSkillsFromPacks(kit: Info): Array { - if (!kit.skill_packs || Object.keys(kit.skill_packs).length === 0) { - return kit.skills + /** Get all skills referenced by a pack's skill_groups */ + export function allSkillsFromGroups(pack: Info): Array { + if (!pack.skill_groups || Object.keys(pack.skill_groups).length === 0) { + return pack.skills } const result: Array = [] - for (const [, pack] of Object.entries(kit.skill_packs)) { - result.push(...pack.skills) + for (const [, group] of Object.entries(pack.skill_groups)) { + result.push(...group.skills) } return result } From 028c1d52cef75450f6145b4c361045dc79e57243 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Fri, 17 Apr 2026 12:55:14 +0530 Subject: [PATCH 4/7] =?UTF-8?q?fix:=20restore=20`Drizzle=20Kit`=20tool=20n?= =?UTF-8?q?ame=20in=20AGENTS.md=20(Kit=E2=86=92Pack=20rename=20overshoot)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The bulk Kit→Pack rename in the previous commit incorrectly renamed the Drizzle Kit tool reference in `packages/opencode/AGENTS.md` because the regex did not exclude it. "Drizzle Kit" is the product name of Drizzle ORM's migration CLI (drizzle-kit on npm) and must not be renamed. Marker Guard surfaced this as an unmarked change to an upstream-shared file. 
Co-Authored-By: Claude Opus 4.7 (1M context) --- packages/opencode/AGENTS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/opencode/AGENTS.md b/packages/opencode/AGENTS.md index 21a8951acc..930297baa9 100644 --- a/packages/opencode/AGENTS.md +++ b/packages/opencode/AGENTS.md @@ -4,7 +4,7 @@ - **Schema**: Drizzle schema lives in `src/**/*.sql.ts`. - **Naming**: tables and columns use snake*case; join columns are `_id`; indexes are `
*\_idx`. -- **Migrations**: generated by Drizzle Pack using `drizzle.config.ts` (schema: `./src/**/*.sql.ts`, output: `./migration`). +- **Migrations**: generated by Drizzle Kit using `drizzle.config.ts` (schema: `./src/**/*.sql.ts`, output: `./migration`). - **Command**: `bun run db generate --name `. - **Output**: creates `migration/_/migration.sql` and `snapshot.json`. - **Tests**: migration tests should read the per-folder layout (no `_journal.json`). From 5b347b7dbd819548e4b76605c974e2809f2a0905 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Fri, 17 Apr 2026 12:55:55 +0530 Subject: [PATCH 5/7] feat: pack plugins, manifest integrity, tier allowlist, registry cache MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes the remaining release gaps for the Pack extension system so it can ship to vendors (dlt-hub, Airbyte, DuckDB first-wave partners) with a credible trust story, offline-safe discovery, and a path for plugin-based extensibility — hooks, custom auth, and custom tools — through one bundle. ## Plugin bundling `PACK.yaml` already declared a `plugins: [npm-spec, ...]` field but only half the lifecycle was wired. This commit completes it: - `pack activate` preview now lists plugins alongside skills + MCP and warns that plugins run with full Node privileges. - `pack deactivate` removes plugin entries from `config.plugin[]` with **reference counting** — a plugin stays if another active pack still lists it, so co-activated packs can share dependencies safely. ## Manifest + integrity - New `Pack.Manifest` schema written to `manifest.json` alongside `PACK.yaml` at install time (`pack install` writes it; local `pack create` packs have no manifest and load as user-authored). - `Pack.computeContentHash(raw)` — SHA256 of content with CRLF→LF normalization for cross-platform stability. 
- `loadPack()` re-hashes on every load; mismatch surfaces as `pack.trust.tamper_detected = true` and produces an `INTEGRITY WARNING` banner during `pack activate`. - Not cryptographic signing — catches accidental corruption and naive tampering. Real PKI is explicitly out of scope (documented). ## Trust tier enforcement - Hardcoded `BUILTIN_ALLOWLIST` + `VERIFIED_ALLOWLIST` sets in `Pack`. - Packs claiming `built-in` / `verified` that are not on the allowlist are downgraded to `community` at load time and flagged via `pack.trust.tier_downgraded` + `trust.original_tier`. - Env-var overrides for local dev / internal distribution: `ALTIMATE_CODE_VERIFIED_PACKS` and `ALTIMATE_CODE_BUILTIN_PACKS`. - Activation preview shows `TIER DOWNGRADE` notice when it happens. ## Remote registry backend - `packs.registry` added to Config schema (new `Packs` zod type). Overridable via `ALTIMATE_CODE_PACK_REGISTRY` env var. - `pack search` now caches results at `~/.cache/altimate-code/pack-registry-cache.json` with 24h TTL. - `--refresh` flag bypasses cache. - Offline fallback: on fetch failure, falls back to stale cache if present and clearly labels results `(stale N hr ago — offline)`. ## Telemetry additions Three new event types for operators monitoring the installed base: - `pack_deactivated` — symmetric lifecycle event with clean-up counts (MCP entries, plugins, instructions) for measuring partial cleanups. - `pack_integrity_warning` — fired when `tamper_detected` or `tier_downgraded` triggers during activation. Includes `claimed_tier`. - `pack_applied` extended with `tier`, `tamper_detected`, `tier_downgraded` so we can chart trust-signal distribution. 
## Tests 20 new bun tests in `test/pack/pack.test.ts` covering: schema discovery, slug-validator rejection, detect rules, activate/deactivate lifecycle, idempotency, multi-pack coexistence, hash determinism + CRLF/LF normalization, manifest roundtrip, tamper detection, tier downgrade, env-var allowlist override, and `allSkillsFromGroups` flattening. 224/224 pass across pack + config + plugin suites. Co-Authored-By: Claude Opus 4.7 (1M context) --- docs/docs/configure/packs.md | 35 +- docs/docs/develop/packs.md | 60 +++ .../opencode/src/altimate/telemetry/index.ts | 27 + packages/opencode/src/cli/cmd/pack.ts | 279 ++++++++-- packages/opencode/src/config/config.ts | 18 +- packages/opencode/src/pack/pack.ts | 135 +++++ packages/opencode/test/pack/pack.test.ts | 478 ++++++++++++++++++ 7 files changed, 998 insertions(+), 34 deletions(-) create mode 100644 packages/opencode/test/pack/pack.test.ts diff --git a/docs/docs/configure/packs.md b/docs/docs/configure/packs.md index b47682ff56..b6ea8b243a 100644 --- a/docs/docs/configure/packs.md +++ b/docs/docs/configure/packs.md @@ -126,6 +126,38 @@ Packs are discovered from: 3. **Config paths**: `packs.paths` in your config file 4. **Installed**: `~/.local/share/altimate-code/packs/` +## Remote Registry + +`pack search` hits a remote registry index (JSON) so you can browse packs beyond the ones installed locally. The default registry is [AltimateAI/data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) but you can point at your own — useful for internal pack catalogs. + +```jsonc +// opencode.json +{ + "packs": { + "registry": "https://packs.internal.acme.com/registry.json" + } +} +``` + +Or via env var for ad-hoc override: `ALTIMATE_CODE_PACK_REGISTRY=https://...`. + +**Caching:** results are cached for 24 hours at `~/.cache/altimate-code/pack-registry-cache.json`. Pass `--refresh` to bypass the cache. 
If the registry is unreachable, `pack search` falls back to the cache (if any) and flags results as stale. + +## Trust & Integrity + +When you `pack install`, altimate-code writes a `manifest.json` with a SHA256 hash of the pack content. On every load, the hash is re-verified — mismatches surface as an `INTEGRITY WARNING` at activate time. + +Tier claims (`built-in`, `verified`) in `PACK.yaml` are cross-checked against an allowlist. Claims that aren't approved are automatically **downgraded to `community`** at load time, and the CLI shows a `TIER DOWNGRADE` notice. This means: **installing a pack that claims "verified" does not actually grant it verified status** — only the maintained allowlist does. + +For local development or internal distribution, set allowlist overrides: + +```bash +export ALTIMATE_CODE_VERIFIED_PACKS=my-pack,other-pack +export ALTIMATE_CODE_BUILTIN_PACKS=internal-pack +``` + +Full plumbing (signing, PKI) is out of scope for this release — the current mechanism catches accidental corruption and naive tampering, not a determined attacker with write access. + ## CLI Reference | Command | Description | @@ -141,7 +173,8 @@ Packs are discovered from: | `pack deactivate ` | Remove from active packs, clean up | | `pack remove ` | Delete an installed pack | | `pack detect` | Find packs matching current project | -| `pack search [query]` | Search the pack registry | +| `pack search [query]` | Search the pack registry (24h cached) | +| `pack search --refresh` | Bypass the cache and re-fetch the registry | | `pack status` | Show active packs | | `pack validate [name]` | Validate pack format and references | diff --git a/docs/docs/develop/packs.md b/docs/docs/develop/packs.md index e0dc317882..993be1c040 100644 --- a/docs/docs/develop/packs.md +++ b/docs/docs/develop/packs.md @@ -233,6 +233,34 @@ mcp: - `env`: Default values passed to the MCP server process - `env_keys`: Names of variables the user must set. Pack activation warns if these are missing.
Use this for API keys and secrets that shouldn't have defaults. +### Plugins + +Packs can list npm plugin packages to extend altimate-code with custom hooks, auth flows, tools, and providers. On `pack activate`, the plugin specs are appended to your project's `plugin[]` config; on `pack deactivate` they're removed unless another active pack still lists them (reference-counted). + +```yaml +plugins: + - "@dagster/altimate-plugin@^1.0" + - "@atlan/governance-plugin@latest" + - "file:///Users/me/local-plugin" +``` + +**Format:** Each entry is an npm package spec (`name`, `name@version`, or `file://path`). At load time, altimate-code installs the package via Bun and loads its exported `Plugin` function, which can register hooks for: + +| Hook | Purpose | +|------|---------| +| `auth` | Custom OAuth / API-key flows for vendor providers | +| `tool` | Ship custom tools usable by the AI | +| `tool.execute.before` / `.after` | Cost guards, audit logging, write gating | +| `permission.ask` | Custom permission prompts | +| `chat.params` / `chat.headers` | Modify outgoing model requests | +| `command.execute.before` | Intercept shell commands | +| `shell.env` | Inject env vars for tools | + +See the [`@opencode-ai/plugin`](https://www.npmjs.com/package/@opencode-ai/plugin) package for the full hook surface. + +!!! warning + Plugins run with full Node.js privileges. Only activate packs from trusted sources — `pack activate` always warns when it is about to install plugin packages. + ### Detection Rules Auto-suggest the pack when certain files exist in the project: @@ -273,6 +301,38 @@ instructions: | - Include "DO NOT" rules for common mistakes - Avoid duplicating what skills already teach +## Trust & Integrity + +Every installed pack gets a `manifest.json` written next to `PACK.yaml` containing a SHA256 hash of the pack's content. On every load, altimate-code re-hashes the pack and compares against the manifest: + +- **Hash match:** pack is trusted as-installed. 
+- **Hash mismatch:** the runtime logs a warning and `pack activate` prints an **INTEGRITY WARNING** prompting the user. This protects against accidental corruption and naive tampering — it is **not** a substitute for code signing. + +### Tier enforcement + +Trust tiers (`built-in`, `verified`, `community`, `archived`) are enforced at load time against hardcoded allowlists: + +- Packs claiming `built-in` or `verified` that are **not** in the allowlist are automatically **downgraded to `community`** and logged. +- The CLI prints a **TIER DOWNGRADE** notice during `pack activate`. +- For local development, you can inject entries via env vars: `ALTIMATE_CODE_VERIFIED_PACKS=my-pack,other-pack` and `ALTIMATE_CODE_BUILTIN_PACKS=...`. + +This means: **claiming a tier in `PACK.yaml` does not grant that tier.** The allowlist is the root of trust. To become verified, partners submit PRs to the registry review process. + +### Telemetry events + +The pack system emits these events for operators monitoring the installed base: + +| Event | Fields | Emitted when | +|-------|--------|-------------| +| `pack_created` | `pack_name` | `pack create` scaffolds a new pack | +| `pack_installed` | `install_source`, `pack_count`, `pack_names` | `pack install` completes | +| `pack_applied` | `pack_name`, `skill_count`, `mcp_count`, `plugin_count`, `has_instructions`, `tier`, `tamper_detected`, `tier_downgraded` | `pack activate` completes | +| `pack_deactivated` | `pack_name`, `mcp_cleaned`, `plugins_cleaned`, `instructions_cleaned` | `pack deactivate` completes | +| `pack_removed` | `pack_name` | `pack remove` completes | +| `pack_integrity_warning` | `pack_name`, `warning` (`tamper_detected`\|`tier_downgraded`), `claimed_tier` | Loaded pack fails integrity or tier check | + +Vendors authoring plugins can emit their own telemetry by calling `Telemetry.track(...)` from plugin hooks — attach `pack_name` in a custom event type to correlate with the lifecycle events above. 
+ ## Publishing to the Registry The pack registry is hosted at [AltimateAI/data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills). diff --git a/packages/opencode/src/altimate/telemetry/index.ts b/packages/opencode/src/altimate/telemetry/index.ts index 59aca3f87c..54b0b81fc7 100644 --- a/packages/opencode/src/altimate/telemetry/index.ts +++ b/packages/opencode/src/altimate/telemetry/index.ts @@ -467,6 +467,13 @@ export namespace Telemetry { plugin_count: number has_instructions: boolean source: "cli" | "tui" + // altimate_change start — pack: tier + trust signals so we can monitor + // adoption distribution (built-in vs verified vs community) and catch + // spikes in integrity/tier-downgrade warnings from the installed base. + tier?: "built-in" | "verified" | "community" | "archived" + tamper_detected?: boolean + tier_downgraded?: boolean + // altimate_change end } | { type: "pack_removed" @@ -475,6 +482,26 @@ export namespace Telemetry { pack_name: string source: "cli" | "tui" } + // altimate_change start — pack: deactivation + integrity lifecycle events + | { + type: "pack_deactivated" + timestamp: number + session_id: string + pack_name: string + mcp_cleaned: number + plugins_cleaned: number + instructions_cleaned: boolean + source: "cli" | "tui" + } + | { + type: "pack_integrity_warning" + timestamp: number + session_id: string + pack_name: string + warning: "tamper_detected" | "tier_downgraded" + claimed_tier?: string + } + // altimate_change end // altimate_change end // altimate_change start — plan refinement telemetry event | { diff --git a/packages/opencode/src/cli/cmd/pack.ts b/packages/opencode/src/cli/cmd/pack.ts index 3f2e686226..9d963d8a55 100644 --- a/packages/opencode/src/cli/cmd/pack.ts +++ b/packages/opencode/src/cli/cmd/pack.ts @@ -8,6 +8,7 @@ import { bootstrap } from "../bootstrap" import { cmd } from "./cmd" import { Instance } from "../../project/instance" import { Global } from "@/global" +import { Config } from 
"@/config/config" import { Telemetry } from "@/altimate/telemetry" // altimate_change start — pack: jsonc-parser for comment-preserving config writes import { modify, applyEdits } from "jsonc-parser" @@ -568,6 +569,30 @@ const PackInstallCommand = cmd({ await fs.cp(src, dst, { recursive: true, dereference: false }) } } + // altimate_change start — pack: write manifest.json at install time for later integrity checks + try { + const destPackFile = path.join(dest, path.basename(packFile)) + const matterMod = (await import("gray-matter")).default + const destRaw = await fs.readFile(destPackFile, "utf-8") + const destExt = path.extname(destPackFile).toLowerCase() + const destParsed = + destExt === ".md" ? matterMod(destRaw) : matterMod("---\n" + destRaw + "\n---") + await Pack.writeManifest( + dest, + destPackFile, + { + name: (destParsed.data.name as string) || packName, + version: (destParsed.data.version as string) || "1.0.0", + tier: (destParsed.data.tier as string) || "community", + }, + source, + ) + } catch (err) { + // Manifest is best-effort; pack still works without one (treated as user-authored). + process.stdout.write(` ⚠ Could not write manifest for "${packName}" — ${(err as Error).message}` + EOL) + } + // altimate_change end + process.stdout.write(` ✓ Installed "${packName}" → ${path.relative(rootDir, dest)}` + EOL) installedNames.push(packName) installed++ @@ -758,6 +783,22 @@ const PackActivateCommand = cmd({ process.stdout.write(EOL) process.stdout.write(`Pack: ${pack.name}${tierBadge} (v${pack.version || "0.0.0"})` + EOL) process.stdout.write(`${pack.description || ""}` + EOL) + + // altimate_change start — pack: surface trust/integrity issues up-front so users can abort + const trust = pack.trust + if (trust?.tamper_detected) { + process.stdout.write(EOL) + process.stdout.write(` ⚠ INTEGRITY WARNING: PACK.yaml hash does not match install manifest.` + EOL) + process.stdout.write(` This pack was modified after install. 
Only proceed if you edited it yourself.` + EOL) + } + if (trust?.tier_downgraded && trust.original_tier) { + process.stdout.write(EOL) + process.stdout.write( + ` ⚠ TIER DOWNGRADE: pack claims "${trust.original_tier}" but is not in the allowlist — treating as "community".` + EOL, + ) + } + // altimate_change end + process.stdout.write(EOL + "The following changes will be applied:" + EOL + EOL) if (skillCount > 0) { @@ -782,6 +823,17 @@ const PackActivateCommand = cmd({ process.stdout.write(EOL) } + // altimate_change start — pack: plugin preview so users see what npm packages will be installed + if (pluginCount > 0) { + process.stdout.write(` Plugins (${pluginCount}):` + EOL) + for (const plugin of pack.plugins!) { + process.stdout.write(` + ${plugin}` + EOL) + } + process.stdout.write(` ⚠ Plugins are npm packages loaded with full Node.js privileges — only activate packs from trusted sources.` + EOL) + process.stdout.write(EOL) + } + // altimate_change end + if (hasInstructions) { process.stdout.write(` Instructions:` + EOL) process.stdout.write(` + .opencode/instructions/pack-${name}.md` + EOL) @@ -990,7 +1042,31 @@ const PackActivateCommand = cmd({ plugin_count: pluginCount, has_instructions: hasInstructions, source: "cli", + tier: pack.tier || "community", + tamper_detected: pack.trust?.tamper_detected || false, + tier_downgraded: pack.trust?.tier_downgraded || false, }) + // altimate_change start — pack: emit integrity warning events so we can monitor the installed base + if (pack.trust?.tamper_detected) { + Telemetry.track({ + type: "pack_integrity_warning", + timestamp: Date.now(), + session_id: Telemetry.getContext().sessionId || "", + pack_name: name, + warning: "tamper_detected", + }) + } + if (pack.trust?.tier_downgraded) { + Telemetry.track({ + type: "pack_integrity_warning", + timestamp: Date.now(), + session_id: Telemetry.getContext().sessionId || "", + pack_name: name, + warning: "tier_downgraded", + claimed_tier: pack.trust.original_tier, + }) + } + 
// altimate_change end } catch {} }) }, @@ -1020,36 +1096,148 @@ const PackDeactivateCommand = cmd({ // altimate_change start — pack: clean up instruction file on deactivate const instructionsFile = path.join(rootDir, ".opencode", "instructions", `pack-${name}.md`) + let instructionsCleaned = false try { await fs.access(instructionsFile) await fs.rm(instructionsFile, { force: true }) + instructionsCleaned = true process.stdout.write(` ✓ Removed instructions: ${path.relative(rootDir, instructionsFile)}` + EOL) } catch {} // altimate_change end // altimate_change start — pack: clean up MCP config entries added by this pack (JSONC-preserving) + let mcpCleaned = 0 if (pack?.mcp && Object.keys(pack.mcp).length > 0) { try { const { filePath } = await findConfigFile(rootDir) - let removed = 0 for (const serverName of Object.keys(pack.mcp)) { if (await removeConfigField(filePath, ["mcp", serverName])) { - removed++ + mcpCleaned++ } } - if (removed > 0) { - process.stdout.write(` ✓ Removed ${removed} MCP server(s) from config` + EOL) + if (mcpCleaned > 0) { + process.stdout.write(` ✓ Removed ${mcpCleaned} MCP server(s) from config` + EOL) + } + } catch {} + } + // altimate_change end + + // altimate_change start — pack: clean up plugin entries that no other active pack still needs + let pluginsCleaned = 0 + if (pack?.plugins && pack.plugins.length > 0) { + try { + Pack.invalidate() + const remaining = await Pack.active() + const stillNeeded = new Set() + for (const other of remaining) { + if (other.name === name) continue + for (const p of other.plugins || []) stillNeeded.add(p) + } + const toRemove = pack.plugins.filter((p) => !stillNeeded.has(p)) + if (toRemove.length > 0) { + const { filePath, config } = await findConfigFile(rootDir) + const current = (config.plugin ?? 
[]) as string[] + const next = current.filter((p) => !toRemove.includes(p)) + if (next.length !== current.length) { + await writeConfigField(filePath, ["plugin"], next) + pluginsCleaned = toRemove.length + process.stdout.write(` ✓ Removed ${pluginsCleaned} plugin(s) from config` + EOL) + } } } catch {} } // altimate_change end + + // altimate_change start — pack: telemetry for deactivation lifecycle + try { + Telemetry.track({ + type: "pack_deactivated", + timestamp: Date.now(), + session_id: Telemetry.getContext().sessionId || "", + pack_name: name, + mcp_cleaned: mcpCleaned, + plugins_cleaned: pluginsCleaned, + instructions_cleaned: instructionsCleaned, + source: "cli", + }) + } catch {} + // altimate_change end }) }, }) // altimate_change end -// altimate_change start — pack: search subcommand -const REGISTRY_URL = "https://raw.githubusercontent.com/AltimateAI/data-engineering-skills/main/registry.json" +// altimate_change start — pack: search subcommand with config-driven URL, 24h cache, and offline fallback +const DEFAULT_REGISTRY_URL = "https://raw.githubusercontent.com/AltimateAI/data-engineering-skills/main/registry.json" +const REGISTRY_CACHE_TTL_MS = 24 * 60 * 60 * 1000 +const REGISTRY_FETCH_TIMEOUT_MS = 5000 + +async function resolveRegistryUrl(): Promise { + const envUrl = process.env.ALTIMATE_CODE_PACK_REGISTRY + if (envUrl && envUrl.trim()) return envUrl.trim() + try { + const config = await Config.get() + const fromConfig = (config.packs as { registry?: string } | undefined)?.registry + if (fromConfig && fromConfig.trim()) return fromConfig.trim() + } catch { + // fall through to default — config read failures should not block search + } + return DEFAULT_REGISTRY_URL +} + +function registryCachePath(): string { + return path.join(Global.Path.cache, "pack-registry-cache.json") +} + +async function readRegistryCache( + url: string, +): Promise<{ data: unknown; fetchedAtMs: number; stale: boolean } | undefined> { + try { + const raw = await 
fs.readFile(registryCachePath(), "utf-8") + const parsed = JSON.parse(raw) as { url?: string; fetched_at?: string; data?: unknown } + if (parsed.url !== url || !parsed.fetched_at) return undefined + const fetchedAtMs = new Date(parsed.fetched_at).getTime() + if (!Number.isFinite(fetchedAtMs)) return undefined + return { data: parsed.data, fetchedAtMs, stale: Date.now() - fetchedAtMs > REGISTRY_CACHE_TTL_MS } + } catch { + return undefined + } +} + +async function writeRegistryCache(url: string, data: unknown): Promise { + try { + const cachePath = registryCachePath() + await fs.mkdir(path.dirname(cachePath), { recursive: true }) + const payload = { url, fetched_at: new Date().toISOString(), data } + await fs.writeFile(cachePath, JSON.stringify(payload, null, 2) + EOL, "utf-8") + } catch { + // Cache write is best-effort — don't surface errors to the user. + } +} + +function formatAge(ms: number): string { + if (ms < 60_000) return "<1 min ago" + const minutes = Math.floor(ms / 60_000) + if (minutes < 60) return `${minutes} min ago` + const hours = Math.floor(minutes / 60) + if (hours < 24) return `${hours} hr ago` + const days = Math.floor(hours / 24) + return `${days} day${days === 1 ? 
"" : "s"} ago` +} + +async function fetchRegistry( + url: string, + signal: AbortSignal, +): Promise<{ ok: true; data: unknown } | { ok: false; status?: number; error?: string }> { + try { + const response = await fetch(url, { signal }) + if (!response.ok) return { ok: false, status: response.status } + const data = await response.json() + return { ok: true, data } + } catch (err) { + return { ok: false, error: (err as Error).message } + } +} const PackSearchCommand = cmd({ command: "search [query]", @@ -1064,40 +1252,67 @@ const PackSearchCommand = cmd({ type: "boolean", describe: "output as JSON", default: false, + }) + .option("refresh", { + type: "boolean", + describe: "bypass the 24h cache and re-fetch the registry", + default: false, }), async handler(args) { const query = ((args.query as string) || "").toLowerCase().trim() await bootstrap(process.cwd(), async () => { - process.stdout.write(`Searching pack registry...` + EOL) + const registryUrl = await resolveRegistryUrl() + const useCache = !(args.refresh as boolean) - // altimate_change start — pack: graceful 404 + timeout for registry fetch let registry: any - const controller = new AbortController() - const timeout = setTimeout(() => controller.abort(), 5000) - try { - const response = await fetch(REGISTRY_URL, { signal: controller.signal }) - clearTimeout(timeout) - if (!response.ok) { - if (response.status === 404) { - process.stdout.write(`Pack registry not available yet.` + EOL) - process.stdout.write(EOL + `Browse local packs: altimate-code pack list` + EOL) - process.stdout.write(`Create your own: altimate-code pack create ` + EOL) - return - } - process.stderr.write(`Error: Failed to fetch registry (${response.status})` + EOL) - process.exit(1) + let sourceLabel = "" + + // 1. 
Try fresh cache first (skipped if --refresh) + if (useCache) { + const cached = await readRegistryCache(registryUrl) + if (cached && !cached.stale) { + registry = cached.data + sourceLabel = `(cached ${formatAge(Date.now() - cached.fetchedAtMs)})` } - registry = await response.json() - } catch (err) { - clearTimeout(timeout) - if ((err as Error).name === "AbortError") { - process.stdout.write(`Pack registry unavailable (timeout).` + EOL) + } + + // 2. Fetch if no fresh cache + if (!registry) { + process.stdout.write(`Searching pack registry...` + EOL) + const controller = new AbortController() + const timer = setTimeout(() => controller.abort(), REGISTRY_FETCH_TIMEOUT_MS) + const result = await fetchRegistry(registryUrl, controller.signal) + clearTimeout(timer) + + if (result.ok) { + registry = result.data + sourceLabel = "" + await writeRegistryCache(registryUrl, registry) } else { - process.stderr.write(`Error: Failed to fetch registry: ${(err as Error).message}` + EOL) + // 3. Offline fallback: stale cache + const stale = await readRegistryCache(registryUrl) + if (stale) { + registry = stale.data + sourceLabel = `(stale ${formatAge(Date.now() - stale.fetchedAtMs)} — offline)` + process.stdout.write(` ⚠ Registry unreachable; showing cached results.` + EOL) + } else { + if (result.status === 404) { + process.stdout.write(`Pack registry not available yet.` + EOL) + process.stdout.write(EOL + `Browse local packs: altimate-code pack list` + EOL) + process.stdout.write(`Create your own: altimate-code pack create ` + EOL) + return + } + const reason = result.status + ? `HTTP ${result.status}` + : result.error === "The operation was aborted." || result.error?.includes("abort") + ? 
"timeout" + : result.error || "unknown error" + process.stderr.write(`Error: Failed to fetch registry (${reason})` + EOL) + process.stdout.write(EOL + `Browse local packs: altimate-code pack list` + EOL) + process.exit(1) + } } - process.stdout.write(EOL + `Browse local packs: altimate-code pack list` + EOL) - process.exit(1) } // altimate_change end @@ -1157,7 +1372,9 @@ const PackSearchCommand = cmd({ } process.stdout.write(EOL) - process.stdout.write(`${results.length} pack(s) found in registry.` + EOL) + process.stdout.write( + `${results.length} pack(s) found in registry${sourceLabel ? ` ${sourceLabel}` : ""}.` + EOL, + ) process.stdout.write(`Install with: altimate-code pack install ` + EOL) }) }, diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index e4d750fed5..a2e28827c7 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -733,6 +733,20 @@ export namespace Config { }) export type Skills = z.infer + // altimate_change start — pack: config shape for pack paths + optional remote registry + export const Packs = z.object({ + paths: z.array(z.string()).optional().describe("Additional paths to pack folders"), + urls: z.array(z.string()).optional().describe("URLs to fetch packs from"), + registry: z + .string() + .optional() + .describe( + "Remote pack registry URL (JSON index of available packs). 
Overridable via ALTIMATE_CODE_PACK_REGISTRY env var.", + ), + }) + export type Packs = z.infer<typeof Packs> + // altimate_change end + export const Agent = z .object({ model: ModelId.optional(), @@ -1076,8 +1090,8 @@ export namespace Config { .optional() .describe("Command configuration, see https://altimate.ai/docs/commands"), skills: Skills.optional().describe("Additional skill folder paths"), - // altimate_change start — pack: config schema for pack paths and URLs - packs: Skills.optional().describe("Additional pack folder paths and URLs (same shape as skills config)"), + // altimate_change start — pack: config schema for pack paths, URLs, and registry + packs: Packs.optional().describe("Pack folder paths, remote URLs, and registry URL"), // altimate_change end watcher: z .object({ diff --git a/packages/opencode/src/pack/pack.ts b/packages/opencode/src/pack/pack.ts index be1162c54b..5e2c34661f 100644 --- a/packages/opencode/src/pack/pack.ts +++ b/packages/opencode/src/pack/pack.ts @@ -2,6 +2,7 @@ import z from "zod" import path from "path" import { mkdir, writeFile, unlink } from "fs/promises" +import { createHash } from "crypto" import matter from "gray-matter" import { Config } from "../config/config" import { Instance } from "../project/instance" @@ -154,9 +155,91 @@ export namespace Pack { // The full markdown content (instructions, docs, etc.) content: z.string().nullable().optional().transform((v) => v ?? "").default(""), + + // altimate_change start — pack: runtime-computed trust metadata (set by loadPack) + // Present when this pack was verified against its manifest; absent for + // ad-hoc/scaffolded packs without an install manifest.
+ trust: z + .object({ + tamper_detected: z.boolean().default(false), + tier_downgraded: z.boolean().default(false), + original_tier: Tier.optional(), + manifest_present: z.boolean().default(false), + }) + .optional(), + // altimate_change end }) export type Info = z.infer<typeof Info> + // altimate_change start — pack: manifest schema for install-time integrity verification + // Written alongside PACK.yaml when a pack is installed via `pack install`. + // Not cryptographically signed — defeats accidental corruption + naive + // tampering but is NOT a substitute for real code signing (see: trust tiers). + export const Manifest = z.object({ + name: z.string(), + version: z.string(), + tier: Tier, + content_hash: z.string(), + source: z.string().optional(), + installed_at: z.string(), + }) + export type Manifest = z.infer<typeof Manifest> + + // Hardcoded allowlists — the root of trust for tier claims. + // Populated as verified partner packs are reviewed; empty for now. + const BUILTIN_ALLOWLIST: ReadonlySet<string> = new Set([]) + const VERIFIED_ALLOWLIST: ReadonlySet<string> = new Set([]) + + function envAllowlist(envVar: string): Set<string> { + const raw = process.env[envVar] || "" + return new Set( + raw + .split(",") + .map((s) => s.trim()) + .filter(Boolean), + ) + } + + /** Canonical SHA256 of a pack file's raw content (line endings normalized). */ + export function computeContentHash(raw: string): string { + const normalized = raw.replace(/\r\n/g, "\n").replace(/\r/g, "\n") + return createHash("sha256").update(normalized).digest("hex") + } + + /** Write manifest.json next to a PACK.yaml so later loads can verify integrity. */ + export async function writeManifest( + packDir: string, + packFilePath: string, + info: { name: string; version?: string; tier?: Tier | string | null }, + source?: string, + ): Promise<void> { + const raw = await Filesystem.readText(packFilePath) + if (!raw) return + const tierResult = Tier.safeParse(info.tier ??
"community") + const manifest: Manifest = { + name: info.name, + version: info.version || "1.0.0", + tier: tierResult.success ? tierResult.data : "community", + content_hash: computeContentHash(raw), + source, + installed_at: new Date().toISOString(), + } + await writeFile(path.join(packDir, "manifest.json"), JSON.stringify(manifest, null, 2) + "\n", "utf-8") + } + + async function readManifest(packDir: string): Promise { + try { + const manifestPath = path.join(packDir, "manifest.json") + const raw = await Filesystem.readText(manifestPath) + if (!raw) return undefined + const parsed = Manifest.safeParse(JSON.parse(raw)) + return parsed.success ? parsed.data : undefined + } catch { + return undefined + } + } + // altimate_change end + // --- State management (mirrors Skill.state pattern) --- const PACK_FILE_PATTERN = "PACK.{yaml,yml,md}" @@ -290,6 +373,58 @@ export namespace Pack { return undefined } + // altimate_change start — pack: manifest integrity check + tier allowlist enforcement + const packDir = path.dirname(filePath) + const manifest = await readManifest(packDir) + let tamperDetected = false + if (manifest) { + const actualHash = computeContentHash(raw) + if (actualHash !== manifest.content_hash) { + tamperDetected = true + log.warn("pack manifest hash mismatch — content modified after install", { + pack: result.data.name, + expected: manifest.content_hash, + actual: actualHash, + }) + } + } + + const claimedTier = result.data.tier || "community" + const envVerified = envAllowlist("ALTIMATE_CODE_VERIFIED_PACKS") + const envBuiltin = envAllowlist("ALTIMATE_CODE_BUILTIN_PACKS") + let downgraded = false + + if ( + claimedTier === "verified" && + !VERIFIED_ALLOWLIST.has(result.data.name) && + !envVerified.has(result.data.name) + ) { + log.warn("pack claims verified tier but is not in the allowlist — downgrading to community", { + pack: result.data.name, + }) + result.data.tier = "community" + downgraded = true + } + if ( + claimedTier === "built-in" && + 
!BUILTIN_ALLOWLIST.has(result.data.name) && + !envBuiltin.has(result.data.name) + ) { + log.warn("pack claims built-in tier but is not in the allowlist — downgrading to community", { + pack: result.data.name, + }) + result.data.tier = "community" + downgraded = true + } + + result.data.trust = { + tamper_detected: tamperDetected, + tier_downgraded: downgraded, + original_tier: downgraded ? claimedTier : undefined, + manifest_present: !!manifest, + } + // altimate_change end + return result.data } catch (err) { log.error("failed to load pack", { path: filePath, err }) diff --git a/packages/opencode/test/pack/pack.test.ts b/packages/opencode/test/pack/pack.test.ts new file mode 100644 index 0000000000..2909fbf7df --- /dev/null +++ b/packages/opencode/test/pack/pack.test.ts @@ -0,0 +1,478 @@ +import { afterEach, describe, expect, test } from "bun:test" +import fs from "fs/promises" +import path from "path" +import { Pack } from "../../src/pack" +import { Instance } from "../../src/project/instance" +import { tmpdir } from "../fixture/fixture" + +type PackFixture = { + name: string + description?: string + version?: string + tier?: string + skills?: unknown[] + mcp?: Record<string, unknown> + plugins?: string[] + instructions?: string + detect?: Array<{ files: string[]; message?: string }> + skill_groups?: Record<string, unknown> +} + +function yamlify(pack: PackFixture): string { + const lines: string[] = [ + `name: ${pack.name}`, + `description: ${pack.description ??
"Test pack"}`, + ] + if (pack.version) lines.push(`version: ${pack.version}`) + if (pack.tier) lines.push(`tier: ${pack.tier}`) + if (pack.plugins && pack.plugins.length > 0) { + lines.push("plugins:") + for (const p of pack.plugins) lines.push(` - ${JSON.stringify(p)}`) + } + if (pack.skills && pack.skills.length > 0) { + lines.push("skills:") + for (const s of pack.skills) { + if (typeof s === "string") lines.push(` - ${s}`) + else lines.push(` - ${JSON.stringify(s)}`) + } + } + if (pack.instructions) { + lines.push("instructions: |") + for (const l of pack.instructions.split("\n")) lines.push(` ${l}`) + } + if (pack.detect && pack.detect.length > 0) { + lines.push("detect:") + for (const d of pack.detect) { + lines.push(` - files: ${JSON.stringify(d.files)}`) + if (d.message) lines.push(` message: ${JSON.stringify(d.message)}`) + } + } + return lines.join("\n") + "\n" +} + +async function writePackFile(dir: string, pack: PackFixture): Promise { + const packDir = path.join(dir, ".opencode", "packs", pack.name) + await fs.mkdir(packDir, { recursive: true }) + const packFile = path.join(packDir, "PACK.yaml") + await fs.writeFile(packFile, yamlify(pack), "utf-8") + return packFile +} + +describe("Pack schema + discovery", () => { + test("discovers PACK.yaml from .opencode/packs/ and parses core fields", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + await writePackFile(dir, { + name: "hello-pack", + description: "A test pack", + version: "1.2.3", + }) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const packs = await Pack.all() + expect(packs.length).toBe(1) + const hello = await Pack.get("hello-pack") + expect(hello).toBeDefined() + expect(hello!.name).toBe("hello-pack") + expect(hello!.description).toBe("A test pack") + expect(hello!.version).toBe("1.2.3") + }, + }) + }) + + test("rejects packs whose name fails the slug validator", async () => { + await using tmp = await tmpdir({ + git: 
true, + init: async (dir) => { + // Name with uppercase letters + underscores is invalid. + // gray-matter/yaml would happily parse it — Pack's validator must reject it. + await writePackFile(dir, { name: "Bad_Name" }) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const packs = await Pack.all() + expect(packs.length).toBe(0) + }, + }) + }) + + test("Pack.dirs includes the scanned directory", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + await writePackFile(dir, { name: "dir-pack" }) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const dirs = await Pack.dirs() + expect(dirs.some((d) => d.includes(path.join(".opencode", "packs")))).toBe(true) + }, + }) + }) +}) + +describe("Pack.detect", () => { + test("surfaces packs whose detect.files match project contents", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + await writePackFile(dir, { + name: "detect-pack", + detect: [{ files: ["dbt_project.yml"], message: "Detected dbt" }], + }) + await fs.writeFile(path.join(dir, "dbt_project.yml"), "name: test\n", "utf-8") + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const detected = await Pack.detect() + expect(detected.length).toBe(1) + expect(detected[0].pack.name).toBe("detect-pack") + expect(detected[0].matched).toContain("dbt_project.yml") + }, + }) + }) + + test("returns no matches when project has none of the detect files", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + await writePackFile(dir, { + name: "no-match-pack", + detect: [{ files: ["nonexistent.yml"] }], + }) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const detected = await Pack.detect() + expect(detected.length).toBe(0) + }, + }) + }) +}) + +describe("Pack.activate / deactivate lifecycle", () => { + test("activate adds pack name to 
.opencode/active-packs; deactivate removes it", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + await writePackFile(dir, { name: "lifecycle-pack" }) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + await Pack.activate("lifecycle-pack") + const activeFile = path.join(tmp.path, ".opencode", "active-packs") + const content = await fs.readFile(activeFile, "utf-8") + expect(content.trim()).toBe("lifecycle-pack") + + const activeAfter = await Pack.active() + expect(activeAfter.map((p) => p.name)).toContain("lifecycle-pack") + + await Pack.deactivate("lifecycle-pack") + // When empty, deactivate unlinks the file entirely + await expect(fs.access(activeFile)).rejects.toThrow() + + Pack.invalidate() + const activeEmpty = await Pack.active() + expect(activeEmpty.length).toBe(0) + }, + }) + }) + + test("multiple active packs coexist in .opencode/active-packs in insertion order", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + await writePackFile(dir, { name: "alpha-pack" }) + await writePackFile(dir, { name: "beta-pack" }) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + await Pack.activate("alpha-pack") + await Pack.activate("beta-pack") + const activeFile = path.join(tmp.path, ".opencode", "active-packs") + const lines = (await fs.readFile(activeFile, "utf-8")).split("\n").filter(Boolean) + expect(lines).toEqual(["alpha-pack", "beta-pack"]) + + // Deactivating only the first should leave the second intact. 
+ await Pack.deactivate("alpha-pack") + const remaining = (await fs.readFile(activeFile, "utf-8")).split("\n").filter(Boolean) + expect(remaining).toEqual(["beta-pack"]) + }, + }) + }) + + test("activate is idempotent — running twice does not duplicate the entry", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + await writePackFile(dir, { name: "idem-pack" }) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + await Pack.activate("idem-pack") + await Pack.activate("idem-pack") + const activeFile = path.join(tmp.path, ".opencode", "active-packs") + const lines = (await fs.readFile(activeFile, "utf-8")).split("\n").filter(Boolean) + expect(lines.length).toBe(1) + }, + }) + }) +}) + +describe("Pack.computeContentHash", () => { + test("produces the same hash for identical content", () => { + const a = "name: foo\ndescription: bar\n" + const b = "name: foo\ndescription: bar\n" + expect(Pack.computeContentHash(a)).toBe(Pack.computeContentHash(b)) + }) + + test("produces different hashes when content differs", () => { + const a = "name: foo\n" + const b = "name: bar\n" + expect(Pack.computeContentHash(a)).not.toBe(Pack.computeContentHash(b)) + }) + + test("normalizes CRLF vs LF line endings — hash is stable across platforms", () => { + const unix = "name: foo\ndescription: bar\n" + const windows = "name: foo\r\ndescription: bar\r\n" + expect(Pack.computeContentHash(unix)).toBe(Pack.computeContentHash(windows)) + }) +}) + +describe("Pack manifest + integrity", () => { + test("writeManifest roundtrips: loadPack detects no tamper when hash matches", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + const packFile = await writePackFile(dir, { name: "manifest-pack", version: "1.0.0" }) + await Pack.writeManifest(path.dirname(packFile), packFile, { + name: "manifest-pack", + version: "1.0.0", + tier: "community", + }) + }, + }) + + await Instance.provide({ + directory: 
tmp.path, + fn: async () => { + const pack = await Pack.get("manifest-pack") + expect(pack).toBeDefined() + expect(pack!.trust?.manifest_present).toBe(true) + expect(pack!.trust?.tamper_detected).toBe(false) + }, + }) + }) + + test("tamper_detected flips to true when PACK.yaml is edited after manifest is written", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + const packFile = await writePackFile(dir, { name: "tamper-pack" }) + await Pack.writeManifest(path.dirname(packFile), packFile, { + name: "tamper-pack", + tier: "community", + }) + // Modify content AFTER writing manifest — this is the tamper scenario. + await fs.appendFile(packFile, "# injected\n", "utf-8") + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const pack = await Pack.get("tamper-pack") + expect(pack).toBeDefined() + expect(pack!.trust?.tamper_detected).toBe(true) + }, + }) + }) + + test("packs without a manifest load without tamper detection (user-authored local packs)", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + // No manifest written — simulates `pack create` scaffold. + await writePackFile(dir, { name: "local-pack" }) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const pack = await Pack.get("local-pack") + expect(pack).toBeDefined() + expect(pack!.trust?.manifest_present).toBe(false) + expect(pack!.trust?.tamper_detected).toBe(false) + }, + }) + }) +}) + +describe("Pack tier allowlist enforcement", () => { + // Each test mutates env vars; clean up after. 
+ const prevVerified = process.env.ALTIMATE_CODE_VERIFIED_PACKS + const prevBuiltin = process.env.ALTIMATE_CODE_BUILTIN_PACKS + afterEach(() => { + if (prevVerified === undefined) delete process.env.ALTIMATE_CODE_VERIFIED_PACKS + else process.env.ALTIMATE_CODE_VERIFIED_PACKS = prevVerified + if (prevBuiltin === undefined) delete process.env.ALTIMATE_CODE_BUILTIN_PACKS + else process.env.ALTIMATE_CODE_BUILTIN_PACKS = prevBuiltin + }) + + test("pack claiming verified tier but not in allowlist is downgraded to community", async () => { + delete process.env.ALTIMATE_CODE_VERIFIED_PACKS + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + await writePackFile(dir, { name: "faux-verified", tier: "verified" }) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const pack = await Pack.get("faux-verified") + expect(pack).toBeDefined() + expect(pack!.tier).toBe("community") + expect(pack!.trust?.tier_downgraded).toBe(true) + expect(pack!.trust?.original_tier).toBe("verified") + }, + }) + }) + + test("env-var allowlist (ALTIMATE_CODE_VERIFIED_PACKS) honors the verified claim", async () => { + process.env.ALTIMATE_CODE_VERIFIED_PACKS = "real-verified,other-pack" + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + await writePackFile(dir, { name: "real-verified", tier: "verified" }) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const pack = await Pack.get("real-verified") + expect(pack).toBeDefined() + expect(pack!.tier).toBe("verified") + expect(pack!.trust?.tier_downgraded).toBe(false) + }, + }) + }) + + test("built-in tier claim without allowlist entry is also downgraded", async () => { + delete process.env.ALTIMATE_CODE_BUILTIN_PACKS + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + await writePackFile(dir, { name: "faux-builtin", tier: "built-in" }) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + 
const pack = await Pack.get("faux-builtin") + expect(pack).toBeDefined() + expect(pack!.tier).toBe("community") + expect(pack!.trust?.tier_downgraded).toBe(true) + }, + }) + }) +}) + +describe("Pack.allSkillsFromGroups", () => { + test("returns the flat skills array when skill_groups is empty", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + const packDir = path.join(dir, ".opencode", "packs", "flat-pack") + await fs.mkdir(packDir, { recursive: true }) + await fs.writeFile( + path.join(packDir, "PACK.yaml"), + `name: flat-pack +description: flat skills +skills: + - foo + - bar +`, + "utf-8", + ) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const pack = await Pack.get("flat-pack") + expect(pack).toBeDefined() + const flat = Pack.allSkillsFromGroups(pack!) + expect(flat).toEqual(["foo", "bar"]) + }, + }) + }) + + test("flattens skill_groups into a single list when present", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + const packDir = path.join(dir, ".opencode", "packs", "grouped-pack") + await fs.mkdir(packDir, { recursive: true }) + await fs.writeFile( + path.join(packDir, "PACK.yaml"), + `name: grouped-pack +description: grouped skills +skill_groups: + core: + activation: always + skills: + - a + - b + advanced: + activation: detect + skills: + - c +`, + "utf-8", + ) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const pack = await Pack.get("grouped-pack") + expect(pack).toBeDefined() + const all = Pack.allSkillsFromGroups(pack!) 
+ expect(all.sort()).toEqual(["a", "b", "c"]) + }, + }) + }) +}) From bea67e89c96ede9fd210938efe346920377e59ae Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Fri, 17 Apr 2026 13:22:27 +0530 Subject: [PATCH 6/7] =?UTF-8?q?fix:=20pack=20review=20=E2=80=94=20ownershi?= =?UTF-8?q?p-aware=20cleanup,=20canonical-name=20plugin=20refcount,=20atom?= =?UTF-8?q?ic=20install?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Addresses CRITICAL + MAJOR findings from the 3-model consensus code review (GPT 5.4, Gemini 3.1 Pro, Kimi K2.5) before the Pack v1 release to partners. ## CRITICAL fixes - **MCP cleanup is ownership-aware (GPT, Kimi)** — `pack deactivate` no longer deletes MCP entries by name alone. An activation sidecar `.opencode/pack-state/.json` records the exact MCP value each pack wrote; deactivate compares the current config value to the recorded one and only removes it on match. If the user edited the entry after activation, it's preserved and surfaced via `skippedMcpKeys`. - **Plugin refcount is canonical-name-based (Kimi)** — `@scope/plugin@^1.0` and `@scope/plugin@1.2.3` now correctly refcount as the same plugin via `Config.getPluginName()`. Deactivating pack A no longer removes a plugin that pack B still lists under a different version spec. - **`PackRemoveCommand` now runs the cleanup helper (Gemini)** — previously `pack remove` only deleted files + thinly deactivated, leaving orphaned MCP and plugin entries in config forever. Remove now runs the same ownership-aware cleanup as deactivate (before deleting the pack dir so the sidecar is still readable). ## MAJOR fixes - **`Pack.deactivate` uses `findActivePacksFile()` (Gemini)** — hardcoded `.opencode/active-packs` was silently no-op for users on the legacy `.altimate-code/` layout while still printing "✓ Deactivated pack". 
- **TOCTOU-hardened plugin writes in activate (GPT, Kimi)** — activate now reads the live plugin array via `readConfigField` directly from disk immediately before the JSONC `modify` edit, instead of relying on the parsed snapshot from `findConfigFile`. - **Manifest metadata verified against `PACK.yaml` (GPT, Claude)** — `loadPack` now compares `manifest.name`, `manifest.version`, and `manifest.tier` against the parsed YAML. Any mismatch sets `trust.tamper_detected = true`. Closes the hole where someone could edit YAML tier without re-computing the hash (allowlist still blocks elevation, but this makes the drift auditable). - **Registry cache Zod-validates before trust (Kimi)** — `readRegistryCache` now uses `CachedRegistry.safeParse()`; corrupted cache files degrade to "no cache" instead of crashing search. Also clamps negative age (clock skew) to 0 so a regressed clock can't mask stale cache as fresh. - **Install rolls back on manifest-write failure for remote sources (GPT, Kimi, Claude)** — remote installs now remove the partially-copied pack directory if `Pack.writeManifest` throws, instead of silently continuing and weakening integrity posture. Local-path installs still warn-and-continue (dev loop ergonomics). - **`PackActivateCommand` prints restart notice when plugins are added (Kimi)** — npm plugins in `config.plugin[]` only install on the plugin loader's lazy init. The activate command now tells the user that newly added plugins will finish installing on next start. ## Tests 12 new integration tests in `test/pack/pack-cli.test.ts` covering the cleanup helper directly: - MCP ownership: removes only entries matching the sidecar, preserves user-modified entries and reports them as skipped. - Plugin refcount: two packs sharing a canonical name → shared plugin survives; solo ownership → plugin removed. - Legacy fallback: sidecar missing → name-only cleanup with warning. - Empty config: no config file means no cleanup AND no scaffolded file. 
- Instructions: cleaned up when sidecar records one. - Sidecar roundtrip: write → read → delete (idempotent); malformed JSON safely returns undefined. - `Pack.deactivate` with legacy `.altimate-code/active-packs`. 1 new unit test in `test/pack/pack.test.ts`: - Manifest metadata mismatch (tier drift) triggers tamper detection even when content hash matches. 31/31 pack tests pass. Typecheck + Marker Guard clean. Intentionally deferred to follow-up issues (low ship risk): - Minor TOCTOU hardening in `findConfigFile` regex JSONC stripping → use `jsonc-parser.parse()` consistently. - Empty-pack activation skipping telemetry. - Env-var allowlist: quoted-value handling, BOM normalization in `computeContentHash`, registry cache stale-file GC on URL change, envAllowlist memoization for hot loop. Co-Authored-By: Claude Opus 4.7 (1M context) --- packages/opencode/src/cli/cmd/pack.ts | 371 +++++++++++++++---- packages/opencode/src/pack/pack.ts | 92 ++++- packages/opencode/test/pack/pack-cli.test.ts | 324 ++++++++++++++++ packages/opencode/test/pack/pack.test.ts | 38 ++ 4 files changed, 740 insertions(+), 85 deletions(-) create mode 100644 packages/opencode/test/pack/pack-cli.test.ts diff --git a/packages/opencode/src/cli/cmd/pack.ts b/packages/opencode/src/cli/cmd/pack.ts index 9d963d8a55..5c9e87ce6c 100644 --- a/packages/opencode/src/cli/cmd/pack.ts +++ b/packages/opencode/src/cli/cmd/pack.ts @@ -2,6 +2,7 @@ import { EOL } from "os" import path from "path" import fs from "fs/promises" +import z from "zod" import { Pack } from "../../pack" import { Skill } from "../../skill" import { bootstrap } from "../bootstrap" @@ -104,6 +105,165 @@ async function removeConfigField(filePath: string, fieldPath: string[]): Promise await fs.writeFile(filePath, result, "utf-8") return true } + +/** + * Read a field value directly from the file on disk (bypass any parsed cache). + * Returns undefined if the file doesn't exist, can't be parsed, or field is absent. 
+ */ +async function readConfigField(filePath: string, fieldPath: string[]): Promise<unknown> { + let text: string + try { text = await fs.readFile(filePath, "utf-8") } catch { return undefined } + try { + const parsed = JSON.parse( + filePath.endsWith(".jsonc") + ? text.replace(/^\s*\/\/.*$/gm, "").replace(/,(\s*[}\]])/g, "$1") + : text, + ) as Record<string, unknown> + let cursor: unknown = parsed + for (const key of fieldPath) { + if (cursor == null || typeof cursor !== "object") return undefined + cursor = (cursor as Record<string, unknown>)[key] + } + return cursor + } catch { + return undefined + } +} +// altimate_change end + +// altimate_change start — pack: shared cleanup helper used by both deactivate and remove. +// Ownership-aware: reads the activation sidecar written on activate and only +// removes config entries the pack actually wrote — user edits and entries owned +// by other active packs are preserved. +// +// Returns cleanup counts for telemetry + UI, plus a `sidecarMissing` flag so +// callers can warn when they're operating on a pack that was activated before +// the sidecar mechanism landed. +// +// Exported for testing — not a public API. Tests import via the CLI module. +export async function cleanupPackActivation( + rootDir: string, + pack: Pack.Info | undefined, + packName: string, + remainingActive: Pack.Info[], +): Promise<{ + mcpCleaned: number + pluginsCleaned: number + instructionsCleaned: boolean + skippedMcpKeys: string[] + sidecarMissing: boolean +}> { + const sidecar = await Pack.readActivationSidecar(rootDir, packName) + const skippedMcpKeys: string[] = [] + let mcpCleaned = 0 + let pluginsCleaned = 0 + let instructionsCleaned = false + + // Resolve config file path once. If config doesn't exist at all, there's + // nothing to clean; skip without creating a file.
+ let configFilePath: string | undefined + try { + const candidates = [ + path.join(rootDir, ".opencode", "opencode.json"), + path.join(rootDir, ".opencode", "opencode.jsonc"), + path.join(rootDir, ".altimate-code", "altimate-code.json"), + path.join(rootDir, ".altimate-code", "altimate-code.jsonc"), + path.join(rootDir, "opencode.json"), + path.join(rootDir, "opencode.jsonc"), + path.join(rootDir, "altimate-code.json"), + path.join(rootDir, "altimate-code.jsonc"), + ] + for (const candidate of candidates) { + try { + await fs.access(candidate) + configFilePath = candidate + break + } catch { + // try next + } + } + } catch { + // no config at all — nothing to clean + } + + // --- Instructions cleanup (always by convention — pack-.md) --- + const instructionsFile = path.join(rootDir, ".opencode", "instructions", `pack-${packName}.md`) + try { + await fs.access(instructionsFile) + await fs.rm(instructionsFile, { force: true }) + instructionsCleaned = true + } catch { + // missing instructions file is fine + } + + if (!configFilePath) { + return { + mcpCleaned, + pluginsCleaned, + instructionsCleaned, + skippedMcpKeys, + sidecarMissing: !sidecar, + } + } + + // --- MCP cleanup (ownership-aware) --- + if (sidecar && sidecar.mcp.length > 0) { + for (const [serverName, recordedSerialized] of sidecar.mcp) { + const currentValue = await readConfigField(configFilePath, ["mcp", serverName]) + if (currentValue === undefined) continue + const currentSerialized = JSON.stringify(currentValue) + if (currentSerialized === recordedSerialized) { + if (await removeConfigField(configFilePath, ["mcp", serverName])) { + mcpCleaned++ + } + } else { + skippedMcpKeys.push(serverName) + } + } + } else if (pack?.mcp && Object.keys(pack.mcp).length > 0) { + // Legacy path: pack was activated before the sidecar mechanism. Fall back + // to the old name-only removal but announce the limitation to the caller. 
+ for (const serverName of Object.keys(pack.mcp)) { + if (await removeConfigField(configFilePath, ["mcp", serverName])) { + mcpCleaned++ + } + } + } + + // --- Plugin cleanup (refcount by canonical name) --- + const ownedSpecs = sidecar ? sidecar.plugins : pack?.plugins ?? [] + if (ownedSpecs.length > 0) { + const stillNeededCanonicals = new Set() + for (const other of remainingActive) { + if (other.name === packName) continue + for (const spec of other.plugins || []) { + stillNeededCanonicals.add(Config.getPluginName(spec)) + } + } + const specsToRemove = ownedSpecs.filter( + (spec) => !stillNeededCanonicals.has(Config.getPluginName(spec)), + ) + if (specsToRemove.length > 0) { + const currentPlugins = ((await readConfigField(configFilePath, ["plugin"])) ?? []) as string[] + const next = currentPlugins.filter((spec) => !specsToRemove.includes(spec)) + if (next.length !== currentPlugins.length) { + await writeConfigField(configFilePath, ["plugin"], next) + pluginsCleaned = currentPlugins.length - next.length + } + } + } + + // --- Delete the sidecar last (keeps cleanup idempotent on retry) --- + await Pack.deleteActivationSidecar(rootDir, packName) + + return { + mcpCleaned, + pluginsCleaned, + instructionsCleaned, + skippedMcpKeys, + sidecarMissing: !sidecar, + } +} // altimate_change end async function cloneSource(source: string): Promise<{ dir: string; cloned: boolean }> { @@ -569,7 +729,11 @@ const PackInstallCommand = cmd({ await fs.cp(src, dst, { recursive: true, dereference: false }) } } - // altimate_change start — pack: write manifest.json at install time for later integrity checks + // altimate_change start — pack: atomic install — write manifest.json at + // install time, and roll back the copied files on failure so a partial + // install can't silently weaken integrity posture. Local-path installs + // (cloned === false) still log-and-continue because the "install" there + // is a symbolic reference — manifest loss is expected for dev loops. 
try { const destPackFile = path.join(dest, path.basename(packFile)) const matterMod = (await import("gray-matter")).default @@ -588,8 +752,19 @@ const PackInstallCommand = cmd({ source, ) } catch (err) { - // Manifest is best-effort; pack still works without one (treated as user-authored). - process.stdout.write(` ⚠ Could not write manifest for "${packName}" — ${(err as Error).message}` + EOL) + if (cloned) { + // Remote install: roll back to avoid a pack sitting on disk without + // an integrity manifest. Partners would see this as a silent + // downgrade; fail loud instead. + await fs.rm(dest, { recursive: true, force: true }).catch(() => {}) + process.stderr.write( + ` ✗ Failed to write manifest for "${packName}" — rolled back install: ${(err as Error).message}` + EOL, + ) + continue + } + process.stdout.write( + ` ⚠ Could not write manifest for local "${packName}" — ${(err as Error).message}` + EOL, + ) } // altimate_change end @@ -657,6 +832,31 @@ const PackRemoveCommand = cmd({ process.exit(1) } + // altimate_change start — pack: ownership-aware cleanup BEFORE deleting + // the pack files so we still have the sidecar available to drive cleanup. + // Without this, `pack remove` of a still-active pack would leak MCP + plugin + // config entries that `pack deactivate` would have cleaned up. + const rootDir = Instance.worktree !== "/" ? 
Instance.worktree : Instance.directory + await Pack.deactivate(name) + Pack.invalidate() + const remaining = await Pack.active() + const cleanup = await cleanupPackActivation(rootDir, pack, name, remaining) + if (cleanup.mcpCleaned > 0) { + process.stdout.write(` ✓ Removed ${cleanup.mcpCleaned} MCP server(s) from config` + EOL) + } + if (cleanup.skippedMcpKeys.length > 0) { + process.stdout.write( + ` ⚠ Left ${cleanup.skippedMcpKeys.length} MCP entry(ies) in place — modified after activation: ${cleanup.skippedMcpKeys.join(", ")}` + EOL, + ) + } + if (cleanup.pluginsCleaned > 0) { + process.stdout.write(` ✓ Removed ${cleanup.pluginsCleaned} plugin(s) from config` + EOL) + } + if (cleanup.instructionsCleaned) { + process.stdout.write(` ✓ Removed instructions file` + EOL) + } + // altimate_change end + // Safety: only remove if the directory looks like a pack directory // (contains the PACK file and is not a top-level scan directory) const packBasename = path.basename(packDir) @@ -669,22 +869,6 @@ const PackRemoveCommand = cmd({ process.stdout.write(` ✓ Removed pack: ${packDir}` + EOL) } - // Deactivate if active, then invalidate cache - await Pack.deactivate(name) - Pack.invalidate() - - // altimate_change start — pack: clean up instruction file on remove - const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory - const instructionsFile = path.join(rootDir, ".opencode", "instructions", `pack-${name}.md`) - try { - await fs.access(instructionsFile) - await fs.rm(instructionsFile, { force: true }) - process.stdout.write(` ✓ Removed instructions: ${path.relative(rootDir, instructionsFile)}` + EOL) - } catch { - // No instructions file, that's fine - } - // altimate_change end - // altimate_change start — telemetry try { Telemetry.track({ @@ -949,6 +1133,11 @@ const PackActivateCommand = cmd({ } // --- 2. 
Configure MCP servers and plugins (JSONC-aware, preserves comments) --- + // altimate_change start — pack: track exactly what we wrote so the sidecar + // can drive ownership-aware cleanup on deactivate/remove. + const writtenMcp: Array<[string, string]> = [] + const writtenPlugins: string[] = [] + // altimate_change end if (mcpCount > 0 || pluginCount > 0) { const { filePath } = await findConfigFile(rootDir) const missingEnvKeys: string[] = [] @@ -968,6 +1157,9 @@ const PackActivateCommand = cmd({ // Write each MCP server using JSONC-preserving modify await writeConfigField(filePath, ["mcp", serverName], configEntry) + // altimate_change start — pack: record for ownership-aware deactivate + writtenMcp.push([serverName, JSON.stringify(configEntry)]) + // altimate_change end process.stdout.write(` ✓ Configured MCP server "${serverName}"` + EOL) const envKeys = def.env_keys @@ -980,13 +1172,16 @@ const PackActivateCommand = cmd({ } if (pluginCount > 0) { - // Read current plugins, add new ones, write back - const { config } = await findConfigFile(rootDir) - const plugins = (config.plugin ?? []) as string[] + // altimate_change start — pack: read plugins directly from the file text + // we're about to edit (avoids TOCTOU between findConfigFile's cached + // parse and the JSONC modify we'll apply). + const currentPlugins = ((await readConfigField(filePath, ["plugin"])) ?? []) as string[] + const plugins = [...currentPlugins] let changed = false for (const plugin of pack.plugins!) { if (!plugins.includes(plugin)) { plugins.push(plugin) + writtenPlugins.push(plugin) changed = true process.stdout.write(` ✓ Added plugin "${plugin}"` + EOL) } @@ -994,6 +1189,7 @@ const PackActivateCommand = cmd({ if (changed) { await writeConfigField(filePath, ["plugin"], plugins) } + // altimate_change end } process.stdout.write(` ✓ Updated config: ${path.relative(rootDir, filePath)}` + EOL) @@ -1009,18 +1205,45 @@ const PackActivateCommand = cmd({ } // --- 3. 
Add instructions --- + let writtenInstructionsRel: string | null = null if (hasInstructions) { const instructionsDir = path.join(rootDir, ".opencode", "instructions") const instructionsFile = path.join(instructionsDir, `pack-${name}.md`) await fs.mkdir(instructionsDir, { recursive: true }) await fs.writeFile(instructionsFile, pack.instructions!, "utf-8") - process.stdout.write(` ✓ Created instructions: ${path.relative(rootDir, instructionsFile)}` + EOL) + writtenInstructionsRel = path.relative(rootDir, instructionsFile) + process.stdout.write(` ✓ Created instructions: ${writtenInstructionsRel}` + EOL) } // --- 4. Activate (add to active-packs) --- await Pack.activate(name) Pack.invalidate() + // altimate_change start — pack: write activation sidecar so deactivate/remove + // can clean up ownership-aware (see cleanupPackActivation). + try { + await Pack.writeActivationSidecar(rootDir, { + pack_name: name, + activated_at: new Date().toISOString(), + mcp: writtenMcp, + plugins: writtenPlugins, + instructions_file: writtenInstructionsRel, + }) + } catch (err) { + process.stdout.write( + ` ⚠ Could not write activation sidecar — deactivate will fall back to name-only cleanup: ${(err as Error).message}` + EOL, + ) + } + + // Nudge user to restart so the plugin loader picks up new specs. + if (writtenPlugins.length > 0) { + process.stdout.write(EOL) + process.stdout.write( + ` ℹ Plugins will finish installing on the next altimate-code start. 
Restart to use them.` + EOL, + ) + } + // altimate_change end + process.stdout.write(EOL) // altimate_change start — pack: report partial failures in activation message if (skillFailures > 0) { @@ -1086,7 +1309,8 @@ const PackDeactivateCommand = cmd({ async handler(args) { const name = args.name as string await bootstrap(process.cwd(), async () => { - // Read pack BEFORE deactivating so we know what MCP servers to clean + // Read pack BEFORE deactivating so we have fallback metadata if the + // sidecar is missing (legacy packs activated under an older CLI). const pack = await Pack.get(name) await Pack.deactivate(name) @@ -1094,70 +1318,41 @@ const PackDeactivateCommand = cmd({ const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory - // altimate_change start — pack: clean up instruction file on deactivate - const instructionsFile = path.join(rootDir, ".opencode", "instructions", `pack-${name}.md`) - let instructionsCleaned = false - try { - await fs.access(instructionsFile) - await fs.rm(instructionsFile, { force: true }) - instructionsCleaned = true - process.stdout.write(` ✓ Removed instructions: ${path.relative(rootDir, instructionsFile)}` + EOL) - } catch {} - // altimate_change end + // altimate_change start — pack: ownership-aware cleanup via sidecar, with + // canonical-name refcounting so shared plugins between active packs survive. 
+ Pack.invalidate() + const remaining = await Pack.active() + const result = await cleanupPackActivation(rootDir, pack, name, remaining) - // altimate_change start — pack: clean up MCP config entries added by this pack (JSONC-preserving) - let mcpCleaned = 0 - if (pack?.mcp && Object.keys(pack.mcp).length > 0) { - try { - const { filePath } = await findConfigFile(rootDir) - for (const serverName of Object.keys(pack.mcp)) { - if (await removeConfigField(filePath, ["mcp", serverName])) { - mcpCleaned++ - } - } - if (mcpCleaned > 0) { - process.stdout.write(` ✓ Removed ${mcpCleaned} MCP server(s) from config` + EOL) - } - } catch {} + if (result.instructionsCleaned) { + process.stdout.write(` ✓ Removed instructions file` + EOL) } - // altimate_change end - - // altimate_change start — pack: clean up plugin entries that no other active pack still needs - let pluginsCleaned = 0 - if (pack?.plugins && pack.plugins.length > 0) { - try { - Pack.invalidate() - const remaining = await Pack.active() - const stillNeeded = new Set() - for (const other of remaining) { - if (other.name === name) continue - for (const p of other.plugins || []) stillNeeded.add(p) - } - const toRemove = pack.plugins.filter((p) => !stillNeeded.has(p)) - if (toRemove.length > 0) { - const { filePath, config } = await findConfigFile(rootDir) - const current = (config.plugin ?? 
[]) as string[] - const next = current.filter((p) => !toRemove.includes(p)) - if (next.length !== current.length) { - await writeConfigField(filePath, ["plugin"], next) - pluginsCleaned = toRemove.length - process.stdout.write(` ✓ Removed ${pluginsCleaned} plugin(s) from config` + EOL) - } - } - } catch {} + if (result.mcpCleaned > 0) { + process.stdout.write(` ✓ Removed ${result.mcpCleaned} MCP server(s) from config` + EOL) + } + if (result.skippedMcpKeys.length > 0) { + process.stdout.write( + ` ⚠ Left ${result.skippedMcpKeys.length} MCP entry(ies) in place — they were modified after activation: ${result.skippedMcpKeys.join(", ")}` + EOL, + ) + } + if (result.pluginsCleaned > 0) { + process.stdout.write(` ✓ Removed ${result.pluginsCleaned} plugin(s) from config` + EOL) + } + if (result.sidecarMissing) { + process.stdout.write( + ` ⚠ No activation sidecar found — cleaned up by name only (may leave user-modified entries).` + EOL, + ) } - // altimate_change end - // altimate_change start — pack: telemetry for deactivation lifecycle try { Telemetry.track({ type: "pack_deactivated", timestamp: Date.now(), session_id: Telemetry.getContext().sessionId || "", pack_name: name, - mcp_cleaned: mcpCleaned, - plugins_cleaned: pluginsCleaned, - instructions_cleaned: instructionsCleaned, + mcp_cleaned: result.mcpCleaned, + plugins_cleaned: result.pluginsCleaned, + instructions_cleaned: result.instructionsCleaned, source: "cli", }) } catch {} @@ -1172,6 +1367,14 @@ const DEFAULT_REGISTRY_URL = "https://raw.githubusercontent.com/AltimateAI/data- const REGISTRY_CACHE_TTL_MS = 24 * 60 * 60 * 1000 const REGISTRY_FETCH_TIMEOUT_MS = 5000 +// Schema for the cache wrapper on disk. We validate shape explicitly so a +// corrupted cache file degrades to "no cache" instead of crashing the search. 
+const CachedRegistry = z.object({ + url: z.string(), + fetched_at: z.string(), + data: z.unknown(), +}) + async function resolveRegistryUrl(): Promise { const envUrl = process.env.ALTIMATE_CODE_PACK_REGISTRY if (envUrl && envUrl.trim()) return envUrl.trim() @@ -1194,11 +1397,15 @@ async function readRegistryCache( ): Promise<{ data: unknown; fetchedAtMs: number; stale: boolean } | undefined> { try { const raw = await fs.readFile(registryCachePath(), "utf-8") - const parsed = JSON.parse(raw) as { url?: string; fetched_at?: string; data?: unknown } - if (parsed.url !== url || !parsed.fetched_at) return undefined - const fetchedAtMs = new Date(parsed.fetched_at).getTime() + const parsed = CachedRegistry.safeParse(JSON.parse(raw)) + if (!parsed.success) return undefined + if (parsed.data.url !== url) return undefined + const fetchedAtMs = new Date(parsed.data.fetched_at).getTime() if (!Number.isFinite(fetchedAtMs)) return undefined - return { data: parsed.data, fetchedAtMs, stale: Date.now() - fetchedAtMs > REGISTRY_CACHE_TTL_MS } + // Clamp negative age (clock skew) to 0 so a regressed system clock doesn't + // mask stale cache as fresh. + const ageMs = Math.max(0, Date.now() - fetchedAtMs) + return { data: parsed.data.data, fetchedAtMs, stale: ageMs > REGISTRY_CACHE_TTL_MS } } catch { return undefined } diff --git a/packages/opencode/src/pack/pack.ts b/packages/opencode/src/pack/pack.ts index 5e2c34661f..91aa1c4d9a 100644 --- a/packages/opencode/src/pack/pack.ts +++ b/packages/opencode/src/pack/pack.ts @@ -240,6 +240,61 @@ export namespace Pack { } // altimate_change end + // altimate_change start — pack: activation sidecar records what a pack wrote, + // so deactivate can clean up ownership-aware (don't delete the user's unrelated + // MCP server that happens to share a name, don't drop a plugin another pack + // still needs). Written on successful activate; read-then-deleted on deactivate. 
+ export const ActivationSidecar = z.object({ + pack_name: z.string(), + activated_at: z.string(), + // MCP entries this pack wrote. Stored as [server_name, serialized_value] + // — the serialized value is what we wrote, so we only remove if the current + // config still matches. Prevents stomping on user edits. + mcp: z.array(z.tuple([z.string(), z.string()])).default([]), + // npm plugin specs this pack appended to `config.plugin[]`. + plugins: z.array(z.string()).default([]), + // Relative path under root dir, or null if no instructions were written. + instructions_file: z.string().nullable().default(null), + }) + export type ActivationSidecar = z.infer + + function sidecarPath(rootDir: string, packName: string): string { + return path.join(rootDir, ".opencode", "pack-state", `${packName}.json`) + } + + export async function writeActivationSidecar( + rootDir: string, + sidecar: ActivationSidecar, + ): Promise { + const file = sidecarPath(rootDir, sidecar.pack_name) + await mkdir(path.dirname(file), { recursive: true }) + await writeFile(file, JSON.stringify(sidecar, null, 2) + "\n", "utf-8") + } + + export async function readActivationSidecar( + rootDir: string, + packName: string, + ): Promise { + try { + const file = sidecarPath(rootDir, packName) + const raw = await Filesystem.readText(file) + if (!raw) return undefined + const parsed = ActivationSidecar.safeParse(JSON.parse(raw)) + return parsed.success ? 
parsed.data : undefined + } catch { + return undefined + } + } + + export async function deleteActivationSidecar(rootDir: string, packName: string): Promise { + try { + await unlink(sidecarPath(rootDir, packName)) + } catch { + // best-effort — missing file is fine + } + } + // altimate_change end + // --- State management (mirrors Skill.state pattern) --- const PACK_FILE_PATTERN = "PACK.{yaml,yml,md}" @@ -373,7 +428,10 @@ export namespace Pack { return undefined } - // altimate_change start — pack: manifest integrity check + tier allowlist enforcement + // altimate_change start — pack: manifest integrity check + tier allowlist enforcement. + // We check BOTH the content hash and the metadata fields (name/version/tier) + // against the manifest so a vendor can't edit only PACK.yaml metadata while + // leaving the manifest stale — either would be caught as tampering. const packDir = path.dirname(filePath) const manifest = await readManifest(packDir) let tamperDetected = false @@ -387,6 +445,31 @@ export namespace Pack { actual: actualHash, }) } + if (manifest.name !== result.data.name) { + tamperDetected = true + log.warn("pack manifest name mismatch", { + expected: manifest.name, + actual: result.data.name, + }) + } + const yamlVersion = result.data.version || "1.0.0" + if (manifest.version !== yamlVersion) { + tamperDetected = true + log.warn("pack manifest version mismatch", { + pack: result.data.name, + expected: manifest.version, + actual: yamlVersion, + }) + } + const yamlTier = result.data.tier || "community" + if (manifest.tier !== yamlTier) { + tamperDetected = true + log.warn("pack manifest tier mismatch — possible tier-elevation attempt", { + pack: result.data.name, + expected: manifest.tier, + actual: yamlTier, + }) + } } const claimedTier = result.data.tier || "community" @@ -519,8 +602,11 @@ export namespace Pack { /** Deactivate a pack for the current project */ export async function deactivate(name: string): Promise { - const rootDir = 
Instance.worktree !== "/" ? Instance.worktree : Instance.directory - const activeFile = path.join(rootDir, ".opencode", "active-packs") + // altimate_change start — pack: use findActivePacksFile so we correctly + // handle legacy `.altimate-code/active-packs` instead of silently no-op + const activeFile = await findActivePacksFile() + if (!activeFile) return + // altimate_change end let names: string[] = [] try { diff --git a/packages/opencode/test/pack/pack-cli.test.ts b/packages/opencode/test/pack/pack-cli.test.ts new file mode 100644 index 0000000000..da249b397f --- /dev/null +++ b/packages/opencode/test/pack/pack-cli.test.ts @@ -0,0 +1,324 @@ +/** + * CLI-level integration tests for pack activation/deactivation/removal lifecycle. + * Exercises the cleanupPackActivation helper directly — ownership-aware MCP + * cleanup, canonical-name plugin refcounting, sidecar roundtrip, and the + * legacy-fallback path when the sidecar is missing. + */ +import { describe, expect, test } from "bun:test" +import fs from "fs/promises" +import path from "path" +import { Pack } from "../../src/pack" +import { Instance } from "../../src/project/instance" +import { cleanupPackActivation } from "../../src/cli/cmd/pack" +import { tmpdir } from "../fixture/fixture" + +async function writeOpenCodeConfig(rootDir: string, config: Record): Promise { + const dir = path.join(rootDir, ".opencode") + await fs.mkdir(dir, { recursive: true }) + const filePath = path.join(dir, "opencode.json") + await fs.writeFile(filePath, JSON.stringify(config, null, 2) + "\n", "utf-8") + return filePath +} + +async function readConfig(filePath: string): Promise> { + const raw = await fs.readFile(filePath, "utf-8") + return JSON.parse(raw) +} + +async function writePack( + rootDir: string, + name: string, + body: string, +): Promise { + const packDir = path.join(rootDir, ".opencode", "packs", name) + await fs.mkdir(packDir, { recursive: true }) + const packFile = path.join(packDir, "PACK.yaml") + await 
fs.writeFile(packFile, body, "utf-8") + return packFile +} + +describe("cleanupPackActivation — MCP ownership (sidecar present)", () => { + test("removes only MCP entries the pack wrote when config matches sidecar record", async () => { + await using tmp = await tmpdir({ git: true }) + const configPath = await writeOpenCodeConfig(tmp.path, { + mcp: { + "pack-mcp": { type: "local", command: ["uvx", "pack-server"] }, + "user-mcp": { type: "local", command: ["my-personal-server"] }, + }, + }) + await Pack.writeActivationSidecar(tmp.path, { + pack_name: "test-pack", + activated_at: new Date().toISOString(), + mcp: [ + ["pack-mcp", JSON.stringify({ type: "local", command: ["uvx", "pack-server"] })], + ], + plugins: [], + instructions_file: null, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const result = await cleanupPackActivation(tmp.path, undefined, "test-pack", []) + expect(result.mcpCleaned).toBe(1) + expect(result.skippedMcpKeys).toEqual([]) + + const config = await readConfig(configPath) + const mcp = config.mcp as Record + expect(mcp["pack-mcp"]).toBeUndefined() + expect(mcp["user-mcp"]).toBeDefined() + }, + }) + }) + + test("preserves MCP entries that the user modified after activation (skippedMcpKeys reports them)", async () => { + await using tmp = await tmpdir({ git: true }) + const configPath = await writeOpenCodeConfig(tmp.path, { + mcp: { + "pack-mcp": { + // User edited the command after pack activated. 
+ type: "local", + command: ["uvx", "pack-server", "--custom-flag"], + }, + }, + }) + await Pack.writeActivationSidecar(tmp.path, { + pack_name: "test-pack", + activated_at: new Date().toISOString(), + mcp: [ + ["pack-mcp", JSON.stringify({ type: "local", command: ["uvx", "pack-server"] })], + ], + plugins: [], + instructions_file: null, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const result = await cleanupPackActivation(tmp.path, undefined, "test-pack", []) + expect(result.mcpCleaned).toBe(0) + expect(result.skippedMcpKeys).toEqual(["pack-mcp"]) + + const config = await readConfig(configPath) + const mcp = config.mcp as Record + expect(mcp["pack-mcp"]).toBeDefined() + }, + }) + }) +}) + +describe("cleanupPackActivation — plugin refcount by canonical name", () => { + test("keeps plugins that another active pack still lists (same canonical name, different version specs)", async () => { + await using tmp = await tmpdir({ git: true }) + const configPath = await writeOpenCodeConfig(tmp.path, { + plugin: ["@scope/plugin@^1.0", "@scope/plugin@1.2.3", "@other/plugin@^2.0"], + }) + // Pack A owns @scope/plugin@^1.0; Pack B (still active) also uses it via a + // different version spec. Deactivating A should leave the plugin in place + // because canonical @scope/plugin is still needed. 
+ await Pack.writeActivationSidecar(tmp.path, { + pack_name: "pack-a", + activated_at: new Date().toISOString(), + mcp: [], + plugins: ["@scope/plugin@^1.0"], + instructions_file: null, + }) + + const packB: Pack.Info = { + name: "pack-b", + description: "shares the plugin", + version: "1.0.0", + location: "/nonexistent/pack-b/PACK.yaml", + tier: "community", + skills: [], + skill_groups: {}, + mcp: {}, + plugins: ["@scope/plugin@1.2.3"], + detect: [], + content: "", + } + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const result = await cleanupPackActivation(tmp.path, undefined, "pack-a", [packB]) + // Canonical @scope/plugin still needed by pack-b → keep it. + expect(result.pluginsCleaned).toBe(0) + + const config = await readConfig(configPath) + expect(config.plugin).toEqual(["@scope/plugin@^1.0", "@scope/plugin@1.2.3", "@other/plugin@^2.0"]) + }, + }) + }) + + test("removes plugins when no other active pack needs the canonical name", async () => { + await using tmp = await tmpdir({ git: true }) + const configPath = await writeOpenCodeConfig(tmp.path, { + plugin: ["@scope/plugin@^1.0", "@other/plugin@^2.0"], + }) + await Pack.writeActivationSidecar(tmp.path, { + pack_name: "pack-a", + activated_at: new Date().toISOString(), + mcp: [], + plugins: ["@scope/plugin@^1.0"], + instructions_file: null, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const result = await cleanupPackActivation(tmp.path, undefined, "pack-a", []) + expect(result.pluginsCleaned).toBe(1) + + const config = await readConfig(configPath) + expect(config.plugin).toEqual(["@other/plugin@^2.0"]) + }, + }) + }) +}) + +describe("cleanupPackActivation — sidecar missing (legacy fallback)", () => { + test("falls back to name-only MCP removal from the pack definition and flags sidecarMissing", async () => { + await using tmp = await tmpdir({ git: true }) + const configPath = await writeOpenCodeConfig(tmp.path, { + mcp: { "legacy-mcp": { type: 
"local", command: ["foo"] } }, + }) + const pack: Pack.Info = { + name: "legacy-pack", + description: "no sidecar", + version: "1.0.0", + location: "/nonexistent/legacy/PACK.yaml", + tier: "community", + skills: [], + skill_groups: {}, + mcp: { + "legacy-mcp": { type: "stdio", command: ["foo"] }, + }, + plugins: [], + detect: [], + content: "", + } + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const result = await cleanupPackActivation(tmp.path, pack, "legacy-pack", []) + expect(result.sidecarMissing).toBe(true) + expect(result.mcpCleaned).toBe(1) + + const config = await readConfig(configPath) + const mcp = config.mcp as Record + expect(mcp["legacy-mcp"]).toBeUndefined() + }, + }) + }) + + test("no-ops gracefully when there is no config file at all", async () => { + await using tmp = await tmpdir({ git: true }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const result = await cleanupPackActivation(tmp.path, undefined, "nonexistent-pack", []) + expect(result.mcpCleaned).toBe(0) + expect(result.pluginsCleaned).toBe(0) + expect(result.sidecarMissing).toBe(true) + // Importantly: we don't scaffold a fresh config file just to clean up + // nothing. Assert no .opencode/opencode.json was created. 
+ const configPath = path.join(tmp.path, ".opencode", "opencode.json") + await expect(fs.access(configPath)).rejects.toThrow() + }, + }) + }) +}) + +describe("cleanupPackActivation — instructions file", () => { + test("removes pack-scoped instructions file and reports it", async () => { + await using tmp = await tmpdir({ git: true }) + const instructionsDir = path.join(tmp.path, ".opencode", "instructions") + await fs.mkdir(instructionsDir, { recursive: true }) + const instructionsFile = path.join(instructionsDir, "pack-docs-pack.md") + await fs.writeFile(instructionsFile, "Test instructions", "utf-8") + + await Pack.writeActivationSidecar(tmp.path, { + pack_name: "docs-pack", + activated_at: new Date().toISOString(), + mcp: [], + plugins: [], + instructions_file: path.relative(tmp.path, instructionsFile), + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const result = await cleanupPackActivation(tmp.path, undefined, "docs-pack", []) + expect(result.instructionsCleaned).toBe(true) + await expect(fs.access(instructionsFile)).rejects.toThrow() + }, + }) + }) +}) + +describe("Pack activation sidecar roundtrip", () => { + test("writeActivationSidecar → readActivationSidecar preserves all fields", async () => { + await using tmp = await tmpdir({ git: true }) + const sidecar: Pack.ActivationSidecar = { + pack_name: "rt-pack", + activated_at: "2026-04-17T12:00:00.000Z", + mcp: [ + ["server-a", JSON.stringify({ type: "local", command: ["a"] })], + ["server-b", JSON.stringify({ type: "remote", url: "https://b.example" })], + ], + plugins: ["@x/plugin@^1.0", "file:///local/plugin"], + instructions_file: ".opencode/instructions/pack-rt-pack.md", + } + await Pack.writeActivationSidecar(tmp.path, sidecar) + const round = await Pack.readActivationSidecar(tmp.path, "rt-pack") + expect(round).toEqual(sidecar) + }) + + test("deleteActivationSidecar removes the file and is idempotent", async () => { + await using tmp = await tmpdir({ git: true }) + 
await Pack.writeActivationSidecar(tmp.path, { + pack_name: "gone-pack", + activated_at: new Date().toISOString(), + mcp: [], + plugins: [], + instructions_file: null, + }) + await Pack.deleteActivationSidecar(tmp.path, "gone-pack") + const after = await Pack.readActivationSidecar(tmp.path, "gone-pack") + expect(after).toBeUndefined() + + // Second delete should not throw. + await Pack.deleteActivationSidecar(tmp.path, "gone-pack") + }) + + test("readActivationSidecar returns undefined for malformed JSON", async () => { + await using tmp = await tmpdir({ git: true }) + const sidecarDir = path.join(tmp.path, ".opencode", "pack-state") + await fs.mkdir(sidecarDir, { recursive: true }) + await fs.writeFile(path.join(sidecarDir, "bad.json"), "{ not: valid json }", "utf-8") + const result = await Pack.readActivationSidecar(tmp.path, "bad") + expect(result).toBeUndefined() + }) +}) + +describe("Pack.deactivate — legacy .altimate-code fallback", () => { + test("finds and updates active-packs in .altimate-code/ when .opencode/ has none", async () => { + await using tmp = await tmpdir({ git: true }) + // Only populate the legacy location. 
+ const legacyDir = path.join(tmp.path, ".altimate-code") + await fs.mkdir(legacyDir, { recursive: true }) + const legacyFile = path.join(legacyDir, "active-packs") + await fs.writeFile(legacyFile, "legacy-pack\nother-pack\n", "utf-8") + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + await Pack.deactivate("legacy-pack") + const content = await fs.readFile(legacyFile, "utf-8") + expect(content.trim()).toBe("other-pack") + }, + }) + }) +}) diff --git a/packages/opencode/test/pack/pack.test.ts b/packages/opencode/test/pack/pack.test.ts index 2909fbf7df..3df6234882 100644 --- a/packages/opencode/test/pack/pack.test.ts +++ b/packages/opencode/test/pack/pack.test.ts @@ -314,6 +314,44 @@ describe("Pack manifest + integrity", () => { }) }) + test("manifest-vs-yaml metadata mismatch is flagged as tamper (name, version, tier)", async () => { + await using tmp = await tmpdir({ + git: true, + init: async (dir) => { + const packFile = await writePackFile(dir, { + name: "meta-pack", + version: "1.0.0", + tier: "community", + }) + // Write a manifest whose `tier` disagrees with PACK.yaml. This simulates + // an attempt to elevate trust by editing only the manifest, or to + // downgrade the yaml after install. + const matterMod = (await import("gray-matter")).default + const raw = await fs.readFile(packFile, "utf-8") + const parsed = matterMod("---\n" + raw + "\n---") + await Pack.writeManifest( + path.dirname(packFile), + packFile, + { + name: (parsed.data.name as string) || "meta-pack", + version: (parsed.data.version as string) || "1.0.0", + tier: "verified", // ← manifest claims verified, yaml says community + }, + ) + }, + }) + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const pack = await Pack.get("meta-pack") + expect(pack).toBeDefined() + // Metadata mismatch must trigger tamper detection even if content_hash matches. 
+ expect(pack!.trust?.tamper_detected).toBe(true) + }, + }) + }) + test("packs without a manifest load without tamper detection (user-authored local packs)", async () => { await using tmp = await tmpdir({ git: true, From 09fe43156f80fba0d720c47afc8f62dd437c560d Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Fri, 17 Apr 2026 13:24:18 +0530 Subject: [PATCH 7/7] chore: add missing `instructions` field to Pack.Info test fixtures MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Root typecheck (`tsgo`) was stricter than bun test's runtime check and flagged the two fixture objects in `pack-cli.test.ts` for missing `instructions` — added as `undefined` so the fixtures match the Pack.Info schema shape exactly. No runtime behavior change. Co-Authored-By: Claude Opus 4.7 (1M context) --- packages/opencode/test/pack/pack-cli.test.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/opencode/test/pack/pack-cli.test.ts b/packages/opencode/test/pack/pack-cli.test.ts index da249b397f..71a27054fb 100644 --- a/packages/opencode/test/pack/pack-cli.test.ts +++ b/packages/opencode/test/pack/pack-cli.test.ts @@ -134,6 +134,7 @@ describe("cleanupPackActivation — plugin refcount by canonical name", () => { skill_groups: {}, mcp: {}, plugins: ["@scope/plugin@1.2.3"], + instructions: undefined, detect: [], content: "", } @@ -195,6 +196,7 @@ describe("cleanupPackActivation — sidecar missing (legacy fallback)", () => { "legacy-mcp": { type: "stdio", command: ["foo"] }, }, plugins: [], + instructions: undefined, detect: [], content: "", }