diff --git a/.claude/launch.json b/.claude/launch.json new file mode 100644 index 0000000..2ce26b0 --- /dev/null +++ b/.claude/launch.json @@ -0,0 +1,17 @@ +{ + "version": "0.0.1", + "configurations": [ + { + "name": "plex-api", + "runtimeExecutable": "py", + "runtimeArgs": ["run_dev.py"], + "port": 5000 + }, + { + "name": "web", + "runtimeExecutable": "npx", + "runtimeArgs": ["--prefix", "web", "vite", "web", "--port", "5174"], + "port": 5174 + } + ] +} diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..ea6a80a --- /dev/null +++ b/.env.example @@ -0,0 +1,45 @@ +# .env.example +# +# Copy this file to .env.local (which is gitignored) and fill in real values. +# bootstrap.py loads .env.local at startup so you don't have to set these +# variables in every shell. Real shell environment variables always win +# over .env.local via setdefault semantics. +# +# Get your Consumer Key and Consumer Secret from: +# https://developers.plex.com/ → My Apps → Datum → Key + +# ── REQUIRED ──────────────────────────────────────────────────────── +PLEX_API_KEY=your-consumer-key-here +PLEX_API_SECRET=your-consumer-secret-here + +# ── OPTIONAL ──────────────────────────────────────────────────────── +# Override the target tenant. Defaults to the verified Grace Engineering +# production tenant. Set this only if you need to point at a different +# tenant for testing. +# PLEX_TENANT_ID=58f781ba-1691-4f32-b1db-381cdb21300c + +# Hit the test environment (test.connect.plex.com) instead of production +# (connect.plex.com). The Datum app currently only exists in +# production, so leaving this unset is correct for normal use. +# PLEX_USE_TEST=1 + +# Allow mutating HTTP methods (POST/PUT/PATCH/DELETE) against production. +# OFF by default — every write to connect.plex.com affects real Grace +# manufacturing data. Set to 1 only when you intentionally want to write, +# and unset it as soon as you're done. +# PLEX_ALLOW_WRITES=1 + +# ── SUPABASE — Datum ingest layer (issue #31) ────────────────────── +# Dedicated `datum` project. Service role key bypasses RLS and is +# used by sync_supabase.py to populate the libraries, tools, and +# cutting_presets tables. NEVER ship the service role key to a browser. +SUPABASE_URL=https://.supabase.co +SUPABASE_SERVICE_ROLE_KEY=your-service-role-jwt-here + +# ── AUTODESK PLATFORM SERVICES (APS) — cloud tool libraries ────── +# Register an app at https://aps.autodesk.com → Applications → Create. +# App type: "Traditional Web App". Enable API: Data Management.
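+# NOTE: the redirect URI registered on the APS app must match +# APS_CALLBACK_URL (default http://localhost:5000/api/aps/callback, +# see aps_client.py), or the OAuth consent round-trip will fail.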
+# Required scopes: data:read +APS_CLIENT_ID=your-aps-client-id-here +APS_CLIENT_SECRET=your-aps-client-secret-here +# APS_CALLBACK_URL=http://localhost:5000/api/aps/callback diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..d52594e --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,33 @@ +name: tests + +on: + pull_request: + branches: [master] + push: + branches: [master] + +jobs: + test: + name: pytest + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: pip + cache-dependency-path: | + requirements.txt + requirements-dev.txt + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements-dev.txt + + - name: Run pytest + run: pytest diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ccba9e4 --- /dev/null +++ b/.gitignore @@ -0,0 +1,47 @@ +__pycache__/ +*.pyc + +# dotenv — local secrets, NEVER commit +.env +.env.local +.env.*.local + +# APS OAuth tokens — local-only, contains access/refresh tokens +.aps_tokens.json + +# Node / frontend +node_modules/ + +# editor / IDE +.vscode/ +.idea/ +*.swp + +# Python tooling +.pytest_cache/ +.coverage +htmlcov/ +.tox/ +*.egg-info/ +build/ +dist/ + +# Large reference material (kept locally, not in git) +data/ + +# Runtime outputs (regenerated by extractors, not source of truth) +outputs/ + +# Claude Code worktree scratch space +.claude/worktrees/ + +# Ad-hoc probe scripts, one-off dumps, and similar throwaway work +scratch/ + +# Claude Code per-machine permissions cache (regenerated on demand) +.claude/settings.local.json + +# Plex mock — ephemeral capture data (POSTs the sync sent against the mock) +tools/plex_mock/captures/ +tools/plex_mock/*.db +tools/plex_mock/*.db-journal diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..c035b38 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,94 @@ +# Claude memory file + +This is the entry point for Claude Code (or any AI agent) working on this +repository. **Read these files in this order before doing anything**: + +1. **[`docs/BRIEFING.md`](./docs/BRIEFING.md)** — primary context document. Project + purpose, current credentials, current Plex environment, verified + endpoint matrix, gotchas, immediate TODO, "History of incorrect + hypotheses" postmortem, and a session log of what's been done. **This + is the most important file in the repo for AI context.** + +2. **[`docs/Plex_API_Reference.md`](./docs/Plex_API_Reference.md)** — verified URL + patterns, the 401-vs-404 reading guide, and the no-pagination gotcha. + Read this before writing any new Plex API call. + +3. **[`docs/Fusion360_Tool_Library_Reference.md`](./docs/Fusion360_Tool_Library_Reference.md)** + — Fusion JSON schema and field-to-Plex mapping. Read this before + writing anything that consumes the local Fusion library files. + +4. **[`docs/Postman_Collections.md`](./docs/Postman_Collections.md)** — + the two Postman collections (Plex API — Datum and Fusion 360 Tool + Libraries — Datum), including the full endpoint catalog with + verified-vs-probe status, the `[NS]` naming convention, the safe + write workflow, and how to add new requests via the Postman MCP. + Read this before touching the collections or before exploring a new + Plex namespace from the Postman UI. + +5. **[`TODO.md`](./TODO.md)** — project roadmap, links to GitHub Issues + for live status. + +6. 
**Datum Notion page** — https://www.notion.so/Grace-Engineering-Fusion2Plex-33c3160a3abf81f1aac0e58101952be5 + — **read at the start of every session.** The Current State block tells + you phase, next action, and test count. End each session by updating + that block + appending one line to the Decision Log. See the + "Notion pages" section in `docs/BRIEFING.md` for details. + +7. **Supabase staging layer** — Fusion JSON is ingested into the + `libraries`, `tools`, and `cutting_presets` tables in the dedicated + `datum` Supabase project (us-east-2) before anything pushes to Plex. + Schema spec: [Notion · Supabase Schema Design](https://www.notion.so/33c3160a3abf814c885cc174cda76d17). + Code: `supabase_client.py`, `sync_supabase.py`, `scripts/load_sample.py`. + Credentials: `SUPABASE_URL` + `SUPABASE_SERVICE_ROLE_KEY` in `.env.local`. + Issue #31. Downstream (`build_supply_item_payload`, #3) reads from + the `tools` table, not raw JSON. + +## Hard rules + +- **Never read credentials from images.** Always have the user paste them + as text or via Insomnia "Generate Code" output. We learned this the + hard way (see BRIEFING.md "History of incorrect hypotheses §1"). +- **Never hardcode credentials.** They live in `.env.local` (gitignored), + loaded by `bootstrap.py`. Production deploy uses real shell env vars. +- **Never bypass the production write guard.** Mutating HTTP methods on + `connect.plex.com` are refused at `/api/plex/raw` unless + `PLEX_ALLOW_WRITES=1` is explicitly set in the environment. +- **Always run `pytest` before committing.** Branch protection on master + requires the `pytest` GitHub Actions check to pass before any merge. +- **Use the `claude/` branch naming convention** for new + branches off master, then auto-merge with `gh pr merge --auto --squash`. +- **Never ship the Supabase service role key to a browser.** It bypasses + RLS. Server-side ingest scripts only. + +## Quick commands + +```powershell +# Run the local endpoint tester (overrides shell env from .env.local) +py run_dev.py + +# Run tests +py -m pytest + +# Open a PR with auto-merge +gh pr create --base master --head claude/my-branch --title "..." --body "..." +gh pr merge --auto --squash +``` + +## Things this repo does NOT have + +- A test environment for the Datum Plex app — production is the + only environment we have credentials for. Be cautious. +- A CI badge or release versioning yet +- Any tooling-API endpoints — Plex's tool data lives under + `inventory/v1/inventory-definitions/supply-items`, NOT + `tooling/v1/*` or `mdm/v1/parts`. See BRIEFING.md. + +## When in doubt + +- The repo is small and the context fits in one read of BRIEFING.md + + Plex_API_Reference.md. Read them; don't guess. +- The dev server has a built-in `tenant_whoami` diagnostic at + `/api/diagnostics/tenant` — run that first whenever the connection + state is unclear. +- Open a PR. CI is fast (~10s) and branch protection guarantees you + can't break master. diff --git a/Plex_API_Reference.md b/Plex_API_Reference.md deleted file mode 100644 index 8b27cad..0000000 --- a/Plex_API_Reference.md +++ /dev/null @@ -1,87 +0,0 @@ -# Grace Engineering: Plex Connect REST API Reference - -## 1. Overview - -This reference document synthesizes the discoveries from preliminary API testing and aligns them with the **Fusion 360 Tool Library Synchronization** architectural goals. It serves as the master guide for developers interacting with the Grace Engineering Plex instance (`plexonline.com`).
- -*Note: Grace Engineering runs Plex Classic, MES+ enabled, supporting Prime Archery and Montana Rifle Company.* - ---- - -## 2. Authentication & Headers - -All Plex APIs are routed through the developer portal. There is no session token or OAuth flow; a static subscription key is passed via request headers. - -- **Developer Portal**: `https://developers.plex.com/` -- **Rate Limit**: 200 API calls per minute across all endpoints. -- **Base URL**: `https://connect.plex.com` (Production) / `https://test.connect.plex.com` (Test) - -**Required Header:** - -```http -X-Plex-Connect-Api-Key: -``` - -> [!WARNING] -> The API key **must** be in the Request Headers. Placing it as a URL parameter will result in a 401 Unauthorized error. - ---- - -## 3. Discovered Endpoints & Subscription Status - -The target architecture requires pushing Fusion 360 data to the Tooling/Workcenter endpoints. Initial discovery revealed that certain API collections require activation by IT. - -### ✅ Working Endpoints - -| Collection | Endpoint | Purpose | -|---|---|---| -| Master Data | `mdm/v1/parts` | Returns master part records. Confirmed working. | -| Master Data | `mdm/v1/suppliers` | Returns supplier UUIDs (e.g., MSC Industrial). | -| Purchasing | `purchasing/v1/purchase-orders` | Returns full PO headers (e.g., tooling orders from MSC). | -| Production | `production/v1/control/workcenters` | Discovered on Dev Portal. Replaces old 404 manufacturing endpoint. | - -### ⚠️ Blocked Endpoints (Action Required) -> -> [!IMPORTANT] -> **ACTION REQUIRED**: IT (Courtney) must enable the **Tooling** and **Manufacturing** API collections for the currently active App in the Plex Developer Portal. Initial testing returned 403 authorization failures. The Tooling endpoint documentation remains completely hidden from the public developer portal until you authenticate with a subscribed developer account. - -- `tooling/v1/tools` -- `tooling/v1/tool-assemblies` -- `tooling/v1/tool-inventory` - ---- - -## 4. Current Tooling Data Flow (Fusion 360 to Plex) - -While waiting for the Tooling APIs to be activated, data can be managed in two ways: - -1. **REST API Automation (Target State)** - - A scheduled script parses the network share `BROTHER SPEEDIO ALUMINUM.json` library. - - Extracts `product-id`, `vendor`, and geometry. - - Pushes payloads to `tooling/v1/tool-assemblies` to update the master inventory list. - - Pushes payload to `production/v1/control/workcenters` utilizing the `post-process.number` to ensure correct turret/pocket placement. - -2. **CSV Upload System (Interim State)** - - Without API access, engineering relies on bulk CSV uploads. - - Sequence: **Tool Assembly Upload** ➔ **Tool Inventory Upload** ➔ **Tool BOM Upload** ➔ **Routing Upload**. - - Ensure the *Tool Assembly Type* picklist exists in Plex before attempting uploads. - ---- - -## 5. Machine Integration (DNC Overview) - -Outside of the Plex database, NC programs and tool alignments require pushing to physical machines on the floor: - -- **Brother Speedio (879/880)**: Native FTP integration (`192.168.25.79`, `192.168.25.80`). Scripts can push programs directly via standard FTP. -- **Citizen / Tsugami**: Connected via Moxa NPort 5150/5250 converting RS-232 to TCP/IP. -- **Haas VMCs**: Native Ethernet on Sigma 5 boards. - -*Plex DCS acts as the source-of-truth for NC programs natively; DNC protocols transfer them to machines just-in-time.* - ---- - -## 6. 
Known Issues & Development Gotchas - -- **Supplier UUIDs**: The `supplierId` in API responses is a UUID, NOT the supplier code (i.e. MSC is not `MSC001`). You must query the MDM endpoint to resolve vendor names to their internal UUIDs. -- **PO Filters**: Filtering by `type` strings containing spaces (`MRO SUPPLIES`) requires proper URL encoding (`%20`). Undetected encoding issues will result in zero-record responses rather than explicit HTTP errors. -- **PowerShell Curl**: Do not use the alias `curl` in PowerShell scripts. Use `Invoke-RestMethod` to guarantee proper header passage and JSON native ingestion. diff --git a/README.md b/README.md index 109c8b9..770abf4 100644 --- a/README.md +++ b/README.md @@ -1,33 +1,154 @@ -# Plex API Integration: Fusion 360 Tool Sync +# Datum — Fusion 360 → Plex tooling sync for Grace Engineering -`plex-api` is a project designed to automate the synchronization of tooling data between Autodesk Fusion 360 and the Plex Manufacturing Cloud (Rockwell Automation) for **Grace Engineering**. +Nightly automation that syncs Autodesk Fusion 360 tool library data into Rockwell Automation Plex +Manufacturing Cloud (ERP). Fusion 360 JSON files on a local network share are the absolute source +of truth. As of the April 2026 architecture pivot, Fusion data lands first in a Supabase +database (enriched source of truth — geometry, holder pairings, pocket assignments) and then an +identity slice (vendor part number + description) is pushed on to Plex's `supply-items` endpoint. +The React UI reads from Supabase; Plex gets only what its schema can accept. -## 🎯 Architecture & Primary Goal +## Status -The overarching goal of this project is to maintain an up-to-date tooling inventory without manual data entry. **Crucially, the Autodesk Fusion 360 tool library files act as the single source of truth for all tooling data entering Plex.** +| | | +|---|---| +| **Plex environment** | `connect.plex.com` (production) — there is no test environment for this app | +| **Plex app** | `Datum` Consumer Key, expires every 31 days (next rotation: 2026-05-08, issue #12) | +| **Plex tenant** | `58f781ba-1691-4f32-b1db-381cdb21300c` (Grace Engineering) | +| **Tooling endpoint** | `inventory/v1/inventory-definitions/supply-items` filtered to `category="Tools & Inserts"` (1,109 records) | +| **Workcenters** | `production/v1/production-definitions/workcenters` (143 records, including 21 mills mapping directly to Brother Speedio FTP IPs) | +| **Supabase** | dedicated `datum` project (us-east-2), 3 tables — `libraries`, `tools`, `cutting_presets` | +| **Phase** | **Phase B complete** — `validate_library.py` pre-sync gate landed (#25). Phase A-Python (Supabase upsert layer) is next. | +| **Tests** | 215 pytest tests, all green. CI on PRs to master via GitHub Actions. Branch protection requires the check to pass. | -**The 30,000-Foot View & Industry Standard Data Flow:** +## Architecture -1. **Source Data (Source of Truth)**: Autodesk Fusion 360 maintains a tool library stored as `.json` files on a local network share. -2. **Component Hierarchy (Consumables First)**: In standard tooling management, Tool Assemblies are made up of purchased components. The script's initial focus is on the **consumable cutting tools** (e.g., end mills, drills) purchased from suppliers (tracked as purchased parts via POs). -3. **Plex Linkage & Traceability**: These consumable purchased parts are linked to Tool Assemblies. Tool Assemblies are then linked to specific Routings/Operations. 
When an operation runs on the shop floor, it generates a Job, which ultimately produces the manufactured Part. -4. **Scheduled Sync**: A script runs automatically every day at midnight. -5. **Plex Updates**: The script reads the Fusion 360 JSON files and pushes the data to Plex via its REST API, performing key actions: - - Updates the tooling inventory in the master list, focusing on connecting purchased consumables to assemblies. - - Updates the tooling in the respective workcenter document (`production/v1/control/workcenters`). -6. **Data Management**: For simplicity, state management and data files are maintained on the network shares using file overwriting. +``` +Fusion 360 JSON (network share, via Autodesk Desktop Connector) + │ + ▼ + validate_library.py ← pre-sync gate: abort if library is invalid (#25) + │ + ▼ + sync_supabase.py ← upsert full tool records into Supabase [Phase A-Python] + │ + ├──▶ Supabase (datum) ← enriched source of truth + │ │ + │ └──▶ React UI ← tool library browser [Phase D+] + │ + └──▶ sync_plex.py ← identity slice only → supply-items [Phase C] +``` -## 📚 Resources +Why the pivot: Plex's `supply-items` schema is identity-only — vendor part number and description, +nothing else. Geometry, holder pairings, and pocket assignments have no home in Plex. Supabase +holds the full record; Plex gets the slice it can accept. -The Plex API is a modern, RESTful service utilizing JSON for data exchange. This integration will map local JSON structures to the cloud API. Note: Due to explicit IAM role permissions, certain Tooling endpoints are hidden from the developer portal until subscribed. +The original plan to write to `mdm/v1/parts` and `tooling/v1/tool-assemblies` was incorrect — see +[BRIEFING.md "History of incorrect hypotheses"](./docs/BRIEFING.md) for the postmortem. -- **Official Documentation**: [Plex Manufacturing Cloud API](https://www.rockwellautomation.com/en-us/support/plex-manufacturing-cloud/api.html) -- **Project Roadmap**: See [TODO.md](./TODO.md) for step-by-step implementation tasks. -- **Reference**: See [Plex API Reference](./Plex_API_Reference.md) for endpoints, auth routing, and DNC details. -- **Data Mapping**: See [Fusion360 Reference](./Fusion360_Tool_Library_Reference.md) for data extraction rules. +## Quick start (local development) -## 🚀 Postman Testing +1. **Clone and create your `.env.local`** -We use **[Postman](https://www.postman.com/)** for upfront API discovery and management demonstrations. + ```powershell + git clone https://github.com/grace-shane/datum.git + cd datum + copy .env.example .env.local + # Edit .env.local with your Datum Consumer Key + Secret + ``` -By saving queries to a Postman Collection, we can manually verify the exact structure needed to push inventory updates and workcenter document updates to Plex before writing the final automation script. + `.env.local` is gitignored. Get the Consumer Key from + [developers.plex.com](https://developers.plex.com/) → My Apps → Datum. + +2. **Install dependencies** + + ```powershell + py -m pip install -r requirements-dev.txt + ``` + +3. **Run the local endpoint tester** + + ```powershell + py run_dev.py + ``` + + Opens on http://localhost:5000. The left rail has buttons for: + - **Diagnostics** — `tenant_whoami` (run this first to verify connection) + - **Plex presets** — verified Plex API URLs as one-click hits + - **Extractors** — `extract_supply_items` (1,109 cutting tools), `extract_parts`, `extract_purchase_orders`, etc.
+ - **Fusion 360 local** — `tools_stats` and `consumables_only` for verifying the local Fusion library load + + `run_dev.py` overrides shell environment variables with `.env.local` (the opposite of + `bootstrap.py`'s production-safe `setdefault` semantics), so a stale system env var won't + silently shadow your real key. + +4. **Run tests** + + ```powershell + py -m pytest + ``` + +5. **Validate a Fusion library before syncing** + + ```powershell + # Production mode — PASS/FAIL only, exit code 0 or 1 + py validate_library.py --file "BROTHER SPEEDIO ALUMINUM.json" --no-api + + # Verbose — shows WARN issues too + py validate_library.py --file "BROTHER SPEEDIO ALUMINUM.json" --no-api --verbose + + # With live Plex supplier lookup for VENDOR_NOT_IN_PLEX checks + py validate_library.py --file "BROTHER SPEEDIO ALUMINUM.json" --verbose + ``` + + The validator catches duplicate product-ids, missing required fields, non-positive geometry, + unknown tool types, and vendors that won't resolve to a Plex supplier. The sync layer gates on + a PASS; FAILs abort the sync before anything touches Supabase or Plex. Full rule table in + [`docs/validate_library_spec.md`](./docs/validate_library_spec.md). + +## Production safety + +This codebase reads from real Grace Engineering production data on every API call. Two guard rails +protect against accidental writes: + +- **`PlexClient.get_envelope()`** returns structured success/error envelopes so HTTP failures + are visible (PR #15 fixed an earlier "swallow on error" bug). +- **`/api/plex/raw` proxy refuses POST/PUT/PATCH/DELETE** when running against + `connect.plex.com` unless `PLEX_ALLOW_WRITES=1` is set in the environment (PR #17). Read-only + is always allowed. To enable writes: + + ```powershell + $env:PLEX_ALLOW_WRITES = "1" + py run_dev.py + ``` + + The UI shows a red `WRITES ON` chip when the guard is disabled. Rotate the env var off as soon + as you're done. + +## Key references + +- [`docs/BRIEFING.md`](./docs/BRIEFING.md) — primary context document for AI-assisted dev sessions and the + source of truth for current status, current credentials, gotchas, and project history +- [`docs/Plex_API_Reference.md`](./docs/Plex_API_Reference.md) — verified endpoint access matrix and URL + pattern conventions +- [`docs/Fusion360_Tool_Library_Reference.md`](./docs/Fusion360_Tool_Library_Reference.md) — Fusion JSON + schema and field-to-Plex mapping +- [`docs/validate_library_spec.md`](./docs/validate_library_spec.md) — design spec for the pre-sync + validation gate; implemented as `validate_library.py` (#25) +- [`TODO.md`](./TODO.md) — project roadmap mirrored to GitHub Issues +- [GitHub Issues](https://github.com/grace-shane/datum/issues) — live status of every phase work + item with dependencies and blockers +- [Plex Manufacturing Cloud API docs](https://www.rockwellautomation.com/en-us/support/plex-manufacturing-cloud/api.html) + +## Contributing workflow + +1. Branch from `master` +2. Push to a `claude/` branch (or any branch — naming is convention, not enforced) +3. Open a PR to `master` +4. CI runs `pytest` automatically +5. Branch protection blocks merge until the check is green +6. Use `gh pr merge --auto --squash` to enable auto-merge — it lands the PR the moment CI passes + +## License + +Internal Grace Engineering project. Forked from +[`just-shane/plex-api`](https://github.com/just-shane/plex-api). 
diff --git a/TODO.md b/TODO.md index a3ca46c..a63c887 100644 --- a/TODO.md +++ b/TODO.md @@ -1,7 +1,10 @@ -# Project Roadmap: Fusion 360 to Plex Sync +# Project Roadmap: Datum — Fusion 360 → Plex Tooling Sync This document outlines the step-by-step implementation plan for the Autodesk Fusion 360 tool library to Plex Manufacturing Cloud synchronization project. +> **Live tracking:** All unchecked items below are mirrored as GitHub Issues. +> See [GitHub Issues](https://github.com/grace-shane/datum/issues) for current status, comments, and blockers. + ## Phase 1: API Discovery & Authentication - [x] Set up Postman and discover relevant Plex API endpoints. @@ -17,26 +20,63 @@ This document outlines the step-by-step implementation plan for the Autodesk Fus ## Phase 3: Plex API Source-of-Truth Implementation -- [ ] Implement API call to retrieve current tooling inventory from Plex (master list) to prep for overwrite. -- [ ] Implement API call to update/create purchased parts (focused first on **consumables** like cutting tools) in Plex. -- [ ] Implement API call to create/update Tool Assemblies, assigning the purchased consumable parts to them. -- [ ] Implement API call to link Tool Assemblies to Routings/Operations. -- [ ] Implement API call to update tooling within the specific Workcenter Document (`production/v1/control/workcenters`). -- [ ] **BLOCKED**: Waiting on IT (Courtney) to enable Tooling & Manufacturing APIs in the Developer Portal. +> **Ordering rule (2026-04-17):** every real write to `connect.plex.com` in +> this phase ships **last** and is blocked on +> [#92](https://github.com/grace-shane/Datum/issues/92) — the Plex-mimic mock +> HTTP server. Nothing POSTs/PUTs/PATCHes against the live tenant until the +> mimic has run clean for a documented validation window. See `MEMORY.md` in +> the Claude memory folder for the full rationale. + +- [x] **DONE (PR #21).** Implement API call to retrieve current tooling inventory — `extract_supply_items(client)` in `plex_api.py` hits `inventory/v1/inventory-definitions/supply-items` (2,516 records), filters to `category="Tools & Inserts"` (1,109 records), and writes a CSV snapshot to `outputs/`. Verified live: 30 KB response, 1.4s round trip. → [#2](https://github.com/grace-shane/datum/issues/2) *(closed)* +- [ ] Implement API call to upsert supply-items — payload compute, staging, post-sync hook, and UI have all landed (PRs #82 / #84 / #90; issues #79 / #80 / #81 closed). Remaining work is the HTTP POST itself, which ships **last** behind the Plex-mimic mock. → [#3](https://github.com/grace-shane/Datum/issues/3) **— blocked on [#92](https://github.com/grace-shane/Datum/issues/92)** +- [ ] Implement Tool Assembly handling — **blocked on Classic Web Services access.** Plex REST supply-items are identity-only; Classic `Part_Operation` Data Sources are the likely path. See BRIEFING §"Classic Web Services" and `docs/Plex_Classic_API_Request.md`. → [#4](https://github.com/grace-shane/datum/issues/4) +- [ ] Implement API call to link tools to Routings/Operations — **blocked on Classic Web Services access.** REST `mdm/v1/operations` has no FK to tools; `scheduling/v1/jobs` deep-dive (114,684 records) confirmed zero tool/operation FKs. → [#5](https://github.com/grace-shane/datum/issues/5) +- [ ] Implement API call to update tooling within the specific Workcenter Document — GET workcenter now verified (PR #20) with a Brother Speedio `workcenterCode` → `workcenterId` map.
Writes (PUT/PATCH support) are the unknown — investigation happens **against the Plex-mimic mock (#92) first, not the live tenant**, then ships last. Classic DCS_v2 remains a separate fallback path. → [#6](https://github.com/grace-shane/Datum/issues/6) **— blocked on [#92](https://github.com/grace-shane/Datum/issues/92)** +- [x] **IT blocker resolved.** The Datum app on production with the Grace tenant authenticates correctly. The earlier "tenant routing" / "subscription approvals" investigation was a red herring caused by a credential typo. See BRIEFING.md "History of incorrect hypotheses" for the postmortem. → [#1](https://github.com/grace-shane/datum/issues/1) ## Phase 4: Data Mapping & Sync Logic - [x] Create a mapping definition between Fusion 360 data structures and Plex API payload requirements (Completed in `Fusion360_Tool_Library_Reference.md`). -- [ ] Implement the core synchronization logic: +- [ ] Implement the core synchronization logic: → [#7](https://github.com/grace-shane/datum/issues/7) - Utilize the Fusion JSON file output as the explicit Source of Truth relative to Plex. - Push updates for purchased consumables to the master inventory list. - Link those consumables into Tool Assemblies. - Ensure those assemblies dynamically flow down to the Routing and then the Job when run in the shop, linking tools directly to manufactured parts. - Push final setups to the workcenter documents. -- [ ] Add basic error handling and logging (e.g., logging successful syncs or failed API calls to a text file on the network share). +- [ ] Add basic error handling and logging (e.g., logging successful syncs or failed API calls to a text file on the network share). → [#8](https://github.com/grace-shane/datum/issues/8) ## Phase 5: Automation & Deployment -- [ ] Finalize the synchronization script. -- [ ] Deploy the script to a server or always-on PC with access to the network share. -- [ ] Schedule the script to run daily at midnight (e.g., using Windows Task Scheduler). +- [x] **DONE (PR #44).** Finalize the synchronization script — `sync.py` nightly CLI entrypoint + `pyproject.toml` packaging. → [#9](https://github.com/grace-shane/datum/issues/9) *(closed)* +- [x] **DONE (PR #47).** Deploy the script — nightly sync runs on an always-on host. → [#10](https://github.com/grace-shane/datum/issues/10) *(closed)* +- [x] **DONE (PR #47).** Schedule the script to run nightly at midnight. → [#11](https://github.com/grace-shane/datum/issues/11) *(closed)* +- [x] **DONE (PR #33).** Rotate the Plex API key — old key from git history no longer authenticates; the `Datum` Consumer Key is current. Next rotation deadline 2026-05-08 tracked separately. → [#12](https://github.com/grace-shane/datum/issues/12) *(closed)* + +--- + +## Built beyond the original roadmap + +Work that has landed since the Phase 1–5 roadmap was written, tracked via GitHub Issues and not part of the original plan: + +- **Supabase staging layer (#31, PR #32 + #34)** — `libraries` / `tools` / `cutting_presets` tables on a dedicated Supabase DB. Fusion JSON ingests here first; Plex gets only the identity slice. Table prefix `fusion2plex_` was removed in PR #34 once the DB isolation made it redundant. +- **APS cloud integration (PR #43)** — `aps_client.py` pulls tool libraries from Autodesk Platform Services. `sync.py` is now APS-first with local ADC as fallback; ADC removal is tracked under the GCP migration epic ([#85](https://github.com/grace-shane/Datum/issues/85)). 
+- **Pre-sync validation gate (#25, PR #28)** — `validate_library.py` with CLI / programmatic / Flask entry points per `docs/validate_library_spec.md`. Gates every sync run. +- **React UI (PR #41 + subsequent)** — tool browser, library browser, Scripts page, last-sync indicator. Deployed to Cloudflare Workers (PR #70). +- **Vendor reference catalog + geometry-based enrichment (PR #48)** — `enrich.py`, wired upstream in the sync pipeline (PR #54). +- **Plex `plex_supply_items` staging pipeline (sprint: #79/#80/#81/#67/#76, PRs #82 + #84)** — prerequisite for #3 upsert work. +- **Tool inventory qty sync (#75, PRs #77 + #78)** — Plex → Supabase qty cache. +- **Classic Web Services discovery (PR #42)** — documented the SOAP path at `plexonline.com/Modules/Xmla/XmlDataSource.asmx` that can unblock #4 / #5 / #6. Access request pending; see `docs/Plex_Classic_API_Request.md`. + +## Phase 6: GCP migration (umbrella [#85](https://github.com/grace-shane/Datum/issues/85)) + +Move Datum off Supabase + Autodesk Desktop Connector and onto GCP + the Autodesk Platform Services HTTP API. Architecture and affected-code map live in [`docs/GCP_MIGRATION.md`](./docs/GCP_MIGRATION.md). + +- [ ] Provision GCP (`datum-dev` e2-standard-2, `datum-runtime` e2-micro, Cloud SQL `db-f1-micro`, Secret Manager) +- [ ] Apply schema to Cloud SQL (bare table names, matches current Supabase) +- [ ] `bootstrap.py` Secret Manager loader path (additive) +- [ ] `db_client.py` — drop-in replacement for `supabase_client.py` +- [ ] Replace/refactor `tool_library_loader.py` to APS-backed; remove local-ADC fallback branch in `sync.py` +- [ ] Update Flask `/api/fusion/validate` GET + `/api/fusion/libraries` to pull from APS +- [ ] Cloud Scheduler — nightly sync + dev VM start/stop +- [ ] Cloudflare DNS — `datum.graceops.dev` +- [ ] Decom Supabase + strip ADC references from docs diff --git a/app.py b/app.py new file mode 100644 index 0000000..44e6986 --- /dev/null +++ b/app.py @@ -0,0 +1,947 @@ +from flask import Flask, render_template, jsonify, request +import os +import sys +import json +import time +import traceback +import requests + +# Force stdout to UTF-8 so prints with non-ASCII characters (em dashes, +# arrows, summary glyphs) don't blow up Flask request handlers on a +# Windows cp1252 console. Without this, any print() containing → or — +# raises UnicodeEncodeError mid-request and turns into a 500. +try: + sys.stdout.reconfigure(encoding="utf-8") +except Exception: + pass + +# Import our existing scripts +from plex_api import ( + PlexClient, + API_KEY, + API_SECRET, + TENANT_ID, + USE_TEST, + BASE_URL as PLEX_PROD_URL, + discover_all, + extract_parts, + extract_purchase_orders, + extract_workcenters, + extract_operations, + extract_supply_items, + TOOLING_CATEGORY, +) +from tool_library_loader import load_all_libraries, CAM_TOOLS_DIR +from plex_diagnostics import tenant_whoami, list_tenants, get_tenant +from validate_library import validate_library, ValidationMode +from aps_client import ( + APSClient, + APSConfigError, + APSAuthError, + APSHTTPError, + APS_CLIENT_ID, +) +from sync_supabase import sync_library +from supabase_client import SupabaseClient + +app = Flask(__name__) + +# Initialize Plex Client +client = PlexClient( + api_key=API_KEY, + api_secret=API_SECRET, + tenant_id=TENANT_ID, + use_test=USE_TEST, +) + +# ───────────────────────────────────────────── +# Production write guard +# ───────────────────────────────────────────── +# Read-only methods are always allowed. 
Mutating methods (POST/PUT/PATCH/ +# DELETE) are blocked when running against a non-test Plex environment +# (connect.plex.com), unless the operator explicitly opts in by setting +# PLEX_ALLOW_WRITES=1 in the environment. +# +# This guard exists because the Datum app currently has read access +# to real Grace Engineering production data. A casual write — even one +# triggered by a stray click in the UI — could affect actual manufacturing +# operations. +# +# To enable writes: +# $env:PLEX_ALLOW_WRITES = "1" # PowerShell +# export PLEX_ALLOW_WRITES=1 # bash +# Then restart the server. The /api/config endpoint will reflect the change. +WRITES_ALLOWED = os.environ.get("PLEX_ALLOW_WRITES", "").strip().lower() in ( + "1", "true", "yes", "on", "enabled", +) + + +def _is_production_base(base: str) -> bool: + """True iff ``base`` is the real Plex production endpoint. + + Exact match (case-insensitive, trailing slash insensitive) against + PLEX_PROD_URL. Anything else — test.connect.plex.com, the local + Plex-mimic mock at PLEX_BASE_URL, an unrecognised proxy — is treated + as non-production so `_is_write_blocked` returns False. + + The earlier ``"test." not in client.base`` heuristic was operator- + controllable: a PLEX_BASE_URL containing "test." silently disarmed + the guard (#96 review follow-up). This strict match fails closed. + """ + return base.rstrip("/").lower() == PLEX_PROD_URL.rstrip("/").lower() + + +IS_PRODUCTION = _is_production_base(client.base) +WRITE_METHODS = {"POST", "PUT", "PATCH", "DELETE"} + + +def _is_write_blocked(method: str) -> tuple[bool, str]: + """ + Returns (blocked, reason). True if a write request should be refused. + """ + if method.upper() not in WRITE_METHODS: + return False, "" + if not IS_PRODUCTION: + return False, "" + if WRITES_ALLOWED: + return False, "" + return True, ( + f"Write blocked: {method} requests to {client.base} are refused " + f"because the server is running against a production Plex environment " + f"and PLEX_ALLOW_WRITES is not set. To enable writes, set " + f"PLEX_ALLOW_WRITES=1 in the environment and restart the server." + ) + + +@app.route('/') +def index(): + """Serve the main dashboard HTML.""" + return render_template('index.html') + + +# ───────────────────────────────────────────── +# Raw proxy — lets the UI hit ANY Plex endpoint +# through the authenticated PlexClient without +# ever exposing credentials to the browser. +# ───────────────────────────────────────────── +@app.route('/api/plex/raw', methods=['GET', 'POST', 'PUT', 'DELETE', 'PATCH']) +def api_plex_raw(): + """ + Proxy an arbitrary Plex REST call. + + Query params (for the tester): + path — full path after the base URL, e.g. "mdm/v1/parts" + ... — all other query params are forwarded as-is to Plex + + For non-GET, JSON body from the client is forwarded as-is. + Always returns {status, http_status, elapsed_ms, size_bytes, headers, body}. + """ + path = (request.args.get('path') or '').strip().lstrip('/') + if not path: + return jsonify({ + "status": "error", + "message": "Missing required 'path' query param (e.g. 
mdm/v1/parts)", + }), 400 + + method = request.method.upper() + + # Production write guard — refuse mutating methods unless explicitly enabled + blocked, reason = _is_write_blocked(method) + if blocked: + return jsonify({ + "status": "error", + "http_status": 0, + "method": method, + "url": f"{client.base}/{path}", + "message": reason, + "guard": "PLEX_ALLOW_WRITES", + "is_production": IS_PRODUCTION, + "writes_allowed": WRITES_ALLOWED, + }), 403 + + # Forward all query params EXCEPT our own 'path' marker. + forwarded_params = {k: v for k, v in request.args.items() if k != 'path'} + + url = f"{client.base}/{path}" + + body = None + if method in ('POST', 'PUT', 'PATCH'): + body = request.get_json(silent=True) + + started = time.perf_counter() + try: + r = requests.request( + method=method, + url=url, + headers=client.headers, + params=forwarded_params, + json=body, + timeout=30, + ) + elapsed_ms = int((time.perf_counter() - started) * 1000) + + # Try to parse JSON, fall back to text + try: + parsed = r.json() + except ValueError: + parsed = r.text + + return jsonify({ + "status": "success" if r.ok else "error", + "http_status": r.status_code, + "http_reason": r.reason, + "elapsed_ms": elapsed_ms, + "size_bytes": len(r.content), + "url": r.url, + "method": method, + "headers": dict(r.headers), + "body": parsed, + }) + except requests.exceptions.RequestException as e: + elapsed_ms = int((time.perf_counter() - started) * 1000) + return jsonify({ + "status": "error", + "http_status": 0, + "elapsed_ms": elapsed_ms, + "url": url, + "method": method, + "message": str(e), + }), 502 + + +@app.route('/api/plex/discover') +def api_discover(): + """Run discover_all on Plex.""" + try: + report = discover_all(client) + return jsonify({"status": "success", "data": report}) + except Exception as e: + return jsonify({"status": "error", "message": str(e), "trace": traceback.format_exc()}), 500 + + +# ───────────────────────────────────────────── +# Diagnostics — read-only sanity checks +# ───────────────────────────────────────────── +@app.route('/api/diagnostics/tenant') +def api_diagnostics_tenant(): + """ + Composite tenant diagnostic. + + Calls /mdm/v1/tenants and (if a TENANT_ID is configured) /mdm/v1/tenants/{id}, + then compares the result against the known Grace and G5 UUIDs so the UI can + show a clear "is this the right tenant?" status. Read-only and safe. 
+ """ + try: + report = tenant_whoami(client, TENANT_ID) + return jsonify({"status": "success", "data": report}) + except Exception as e: + return jsonify({"status": "error", "message": str(e), "trace": traceback.format_exc()}), 500 + + +@app.route('/api/diagnostics/tenants/list') +def api_diagnostics_tenants_list(): + """Raw GET /mdm/v1/tenants — list all tenants visible to the credential.""" + try: + data = list_tenants(client) + return jsonify({"status": "success", "data": data}) + except Exception as e: + return jsonify({"status": "error", "message": str(e), "trace": traceback.format_exc()}), 500 + + +@app.route('/api/diagnostics/tenants/') +def api_diagnostics_tenant_get(tenant_id): + """Raw GET /mdm/v1/tenants/{id} — fetch a single tenant by UUID.""" + try: + data = get_tenant(client, tenant_id) + return jsonify({"status": "success", "data": data}) + except Exception as e: + return jsonify({"status": "error", "message": str(e), "trace": traceback.format_exc()}), 500 + + +@app.route('/api/plex/') +def api_extract(endpoint_type): + """Run one of the extraction tools.""" + try: + if endpoint_type == 'parts': + data = extract_parts(client) + elif endpoint_type == 'purchase_orders': + data = extract_purchase_orders(client, date_from="2025-01-01") + elif endpoint_type == 'workcenters': + data = extract_workcenters(client) + elif endpoint_type == 'operations': + data = extract_operations(client) + elif endpoint_type == 'supply_items': + data = extract_supply_items(client) + else: + return jsonify({"status": "error", "message": "Unknown endpoint"}), 400 + + return jsonify({ + "status": "success", + "count": len(data) if data else 0, + "data": data[:100] if data else [] # Return first 100 for UI performance + }) + except Exception as e: + return jsonify({"status": "error", "message": str(e), "trace": traceback.format_exc()}), 500 + + +@app.route('/api/fusion/tools', methods=['GET', 'POST']) +def api_fusion_tools(): + """Load Fusion 360 libraries.""" + try: + libs = {} + if request.method == 'POST': + for key, uploaded_file in request.files.items(): + if uploaded_file.filename.endswith('.json'): + content = uploaded_file.read().decode('utf-8') + try: + raw = json.loads(content) + if 'data' in raw and isinstance(raw['data'], list): + libs[uploaded_file.filename.replace('.json', '')] = raw['data'] + except Exception as e: + print(f"Error parsing {uploaded_file.filename}: {e}") + else: + abort_on_stale = request.args.get('abort_on_stale', 'true').lower() == 'true' + libs = load_all_libraries(abort_on_stale=abort_on_stale) + + # Transform the dict of libraries into a UI-friendly list + summary = [] + for name, tools in libs.items(): + summary.append({ + "library_name": name, + "tool_count": len(tools), + "tools_sample": tools[:5] # Send a sample for the UI + }) + + return jsonify({ + "status": "success", + "library_count": len(libs), + "data": summary + }) + except Exception as e: + return jsonify({"status": "error", "message": str(e), "trace": traceback.format_exc()}), 500 + + +# ───────────────────────────────────────────── +# Fusion 360 testing-harness endpoints +# ───────────────────────────────────────────── +# These expose Fusion JSON data via Flask routes so the UI rail can poke +# at the local tool libraries without re-uploading. Read-only on the +# network share via tool_library_loader. + +# Tool types we exclude from the sync per BRIEFING spec — holders are +# the geometric collision shapes, probes are measurement devices, neither +# represent purchasable cutting tools. 
+NON_CONSUMABLE_TYPES = {"holder", "probe"} + + +@app.route('/api/fusion/tools/stats') +def api_fusion_tools_stats(): + """ + Type and vendor distribution across all loaded Fusion libraries. + + Useful for verifying load before any sync work — confirms how many + tools/holders/probes the loader saw and which vendors are represented. + """ + try: + libs = load_all_libraries(abort_on_stale=True) + + per_library = [] + global_types = {} + global_vendors = {} + total_records = 0 + consumable_count = 0 + + for name, tools in libs.items(): + type_counts = {} + for t in tools: + tool_type = (t.get("type") or "unknown").strip().lower() + type_counts[tool_type] = type_counts.get(tool_type, 0) + 1 + global_types[tool_type] = global_types.get(tool_type, 0) + 1 + vendor = (t.get("vendor") or "unknown").strip() + global_vendors[vendor] = global_vendors.get(vendor, 0) + 1 + total_records += 1 + if tool_type not in NON_CONSUMABLE_TYPES: + consumable_count += 1 + per_library.append({ + "library_name": name, + "tool_count": len(tools), + "type_counts": type_counts, + }) + + return jsonify({ + "status": "success", + "library_count": len(libs), + "total_records": total_records, + "consumable_count": consumable_count, + "non_consumable_count": total_records - consumable_count, + "global_type_counts": global_types, + "global_vendor_counts": global_vendors, + "per_library": per_library, + }) + except Exception as e: + return jsonify({"status": "error", "message": str(e), "trace": traceback.format_exc()}), 500 + + +@app.route('/api/fusion/tools/consumables') +def api_fusion_tools_consumables(): + """ + Return the list of Fusion tools to actually push to Plex + (excluding holders and probes). + + This is the input to ``build_supply_item_payload(fusion_tool)`` in + issue #3. The returned list contains only the fields the Plex sync + will care about: vendor, product-id, description, type, guid. + """ + try: + libs = load_all_libraries(abort_on_stale=True) + + consumables = [] + for library_name, tools in libs.items(): + for t in tools: + tool_type = (t.get("type") or "").strip().lower() + if tool_type in NON_CONSUMABLE_TYPES: + continue + consumables.append({ + "library_name": library_name, + "guid": t.get("guid"), + "type": t.get("type"), + "vendor": t.get("vendor"), + "product_id": t.get("product-id"), + "description": t.get("description"), + }) + + return jsonify({ + "status": "success", + "count": len(consumables), + "data": consumables, + }) + except Exception as e: + return jsonify({"status": "error", "message": str(e), "trace": traceback.format_exc()}), 500 + + +@app.route('/api/fusion/validate', methods=['GET', 'POST']) +def api_fusion_validate(): + """ + Pre-sync validation for Fusion 360 tool library JSON. + + GET — validates live files from the ADC network share + POST — validates uploaded JSON file(s) without touching the share + + Query params (GET only): + use_api=1 Enable the live Plex supplier lookup for + VENDOR_NOT_IN_PLEX checks. Default off. + file= Validate a single library by stem. Default: all files. + + POST shape is the same multipart upload as /api/fusion/tools — + each uploaded .json file becomes its own ValidationResult. + + Always runs in VERBOSE mode (human is reading the response). + Returns {status, library_count, results: [ValidationResult.to_dict(), ...]}. 
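+ + Illustrative calls (filename borrowed from the README examples): + + GET /api/fusion/validate?file=BROTHER%20SPEEDIO%20ALUMINUM.json + GET /api/fusion/validate?use_api=1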
+ """ + try: + use_api = request.args.get('use_api', '').strip().lower() in ( + "1", "true", "yes", "on", + ) + + results: list[dict] = [] + cross_library: dict[str, str] = {} + + # Multi-library runs need the cross-library dedupe dict to flow + # between calls so CROSS_LIBRARY_DUPLICATE can fire on the second + # and later libraries. Build it as we go. + def _update_cross(name: str, tools): + if not isinstance(tools, list): + return + from validate_library import _is_sync_candidate + for tool in tools: + if not isinstance(tool, dict) or not _is_sync_candidate(tool): + continue + pid = tool.get("product-id") + if isinstance(pid, str) and pid and pid not in cross_library: + cross_library[pid] = name + + if request.method == 'POST': + for _key, uploaded_file in request.files.items(): + if not uploaded_file.filename.endswith('.json'): + continue + try: + raw = json.loads(uploaded_file.read().decode('utf-8')) + except Exception as e: + results.append({ + "library_name": uploaded_file.filename, + "passed": False, + "tool_count": 0, + "sync_candidate_count": 0, + "issues": [{ + "severity": "FAIL", + "rule": "STRUCT_ROOT_KEY", + "tool_index": None, + "tool_description": None, + "field": None, + "value": None, + "message": f"Failed to parse uploaded JSON: {e}", + }], + "debug_trace": None, + }) + continue + + name = uploaded_file.filename.replace('.json', '') + tools = raw.get("data") if isinstance(raw, dict) else raw + result = validate_library( + tools=tools, + library_name=name, + mode=ValidationMode.VERBOSE, + use_api=use_api, + client=client if use_api else None, + cross_library_product_ids=dict(cross_library) if cross_library else None, + ) + results.append(result.to_dict()) + _update_cross(name, tools) + + else: + # GET — walk the ADC CAMTools directory + single_file = request.args.get('file') + if not CAM_TOOLS_DIR.exists(): + return jsonify({ + "status": "error", + "message": f"CAMTools directory not found: {CAM_TOOLS_DIR}", + }), 500 + + if single_file: + files = [CAM_TOOLS_DIR / single_file] + if not files[0].exists(): + return jsonify({ + "status": "error", + "message": f"File not found: {files[0]}", + }), 404 + else: + files = sorted(CAM_TOOLS_DIR.glob("*.json")) + + for path in files: + try: + with open(path, "r", encoding="utf-8") as f: + raw = json.load(f) + except Exception as e: + results.append({ + "library_name": path.stem, + "passed": False, + "tool_count": 0, + "sync_candidate_count": 0, + "issues": [{ + "severity": "FAIL", + "rule": "STRUCT_ROOT_KEY", + "tool_index": None, + "tool_description": None, + "field": None, + "value": None, + "message": f"Failed to load file: {e}", + }], + "debug_trace": None, + }) + continue + + tools = raw.get("data") if isinstance(raw, dict) else raw + result = validate_library( + tools=tools, + library_name=path.stem, + mode=ValidationMode.VERBOSE, + use_api=use_api, + client=client if use_api else None, + cross_library_product_ids=dict(cross_library) if cross_library else None, + ) + results.append(result.to_dict()) + _update_cross(path.stem, tools) + + all_passed = all(r["passed"] for r in results) if results else True + return jsonify({ + "status": "success", + "library_count": len(results), + "all_passed": all_passed, + "results": results, + }) + except Exception as e: + return jsonify({"status": "error", "message": str(e), "trace": traceback.format_exc()}), 500 + + +# ───────────────────────────────────────────── +# APS (Autodesk Platform Services) — cloud tool libraries +# ───────────────────────────────────────────── +# The APS client is 
initialized lazily (credentials are optional). +# OAuth flow: browser hits /api/aps/login → Autodesk consent → +# callback at /api/aps/callback → tokens stored in memory. +_aps_client: APSClient | None = None + + +def _get_aps_client() -> APSClient: + """Lazy-init the APS client. Raises APSConfigError if creds missing.""" + global _aps_client + if _aps_client is None: + _aps_client = APSClient() + _aps_client._require_config() + return _aps_client + + +@app.route('/api/aps/status') +def api_aps_status(): + """Check whether APS is configured and authenticated.""" + has_config = bool(APS_CLIENT_ID) + has_token = False + if has_config: + try: + c = _get_aps_client() + has_token = c.tokens.is_valid + except APSConfigError: + has_config = False + return jsonify({ + "status": "success", + "configured": has_config, + "authenticated": has_token, + }) + + +@app.route('/api/aps/login') +def api_aps_login(): + """Redirect the browser to Autodesk's OAuth consent page.""" + try: + c = _get_aps_client() + url = c.get_authorize_url() + return jsonify({"status": "success", "authorize_url": url}) + except APSConfigError as e: + return jsonify({"status": "error", "message": str(e)}), 500 + + +@app.route('/api/aps/callback') +def api_aps_callback(): + """ + OAuth callback — Autodesk redirects here with ?code=... + Exchanges the code for tokens and confirms success. + """ + code = request.args.get("code") + if not code: + return jsonify({ + "status": "error", + "message": "Missing 'code' parameter from Autodesk redirect.", + }), 400 + try: + c = _get_aps_client() + c.exchange_code(code) + return jsonify({ + "status": "success", + "message": "APS authentication successful. You can close this tab.", + "authenticated": True, + }) + except (APSConfigError, APSAuthError) as e: + return jsonify({"status": "error", "message": str(e)}), 500 + + +@app.route('/api/aps/hubs') +def api_aps_hubs(): + """List Fusion Team hubs accessible to the authenticated user.""" + try: + c = _get_aps_client() + hubs = c.get_hubs() + return jsonify({"status": "success", "count": len(hubs), "data": hubs}) + except (APSConfigError, APSAuthError) as e: + return jsonify({"status": "error", "message": str(e)}), 401 + except APSHTTPError as e: + return jsonify({"status": "error", "message": str(e)}), e.status + + +@app.route('/api/aps/hubs//projects') +def api_aps_projects(hub_id): + """List projects in a hub.""" + try: + c = _get_aps_client() + projects = c.get_projects(hub_id) + return jsonify({"status": "success", "count": len(projects), "data": projects}) + except (APSConfigError, APSAuthError) as e: + return jsonify({"status": "error", "message": str(e)}), 401 + except APSHTTPError as e: + return jsonify({"status": "error", "message": str(e)}), e.status + + +@app.route('/api/aps/hubs//projects//folders') +def api_aps_top_folders(hub_id, project_id): + """List top-level folders in a project.""" + try: + c = _get_aps_client() + folders = c.get_top_folders(hub_id, project_id) + return jsonify({"status": "success", "count": len(folders), "data": folders}) + except (APSConfigError, APSAuthError) as e: + return jsonify({"status": "error", "message": str(e)}), 401 + except APSHTTPError as e: + return jsonify({"status": "error", "message": str(e)}), e.status + + +@app.route('/api/aps/projects//folders//contents') +def api_aps_folder_contents(project_id, folder_id): + """List items in a folder.""" + try: + c = _get_aps_client() + contents = c.get_folder_contents(project_id, folder_id) + return jsonify({"status": "success", "count": len(contents), 
"data": contents}) + except (APSConfigError, APSAuthError) as e: + return jsonify({"status": "error", "message": str(e)}), 401 + except APSHTTPError as e: + return jsonify({"status": "error", "message": str(e)}), e.status + + +@app.route('/api/aps/libraries') +def api_aps_libraries(): + """ + Find all .tools files across all hubs (or a specific hub). + Query param: hub_id (optional) — restrict search to one hub. + """ + hub_id = request.args.get("hub_id") + try: + c = _get_aps_client() + libs = c.find_tool_libraries(hub_id=hub_id) + return jsonify({"status": "success", "count": len(libs), "data": libs}) + except (APSConfigError, APSAuthError) as e: + return jsonify({"status": "error", "message": str(e)}), 401 + except APSHTTPError as e: + return jsonify({"status": "error", "message": str(e)}), e.status + + +@app.route('/api/aps/items//tip') +def api_aps_item_tip(item_id): + """Get the latest version (tip) of an item, including its storage URL.""" + project_id = request.args.get("project_id") + if not project_id: + return jsonify({"status": "error", "message": "Missing 'project_id' query param."}), 400 + try: + c = _get_aps_client() + tip = c.get_item_tip(project_id, item_id) + # Extract storage URL from the tip + storage_url = ( + tip.get("relationships", {}) + .get("storage", {}) + .get("meta", {}) + .get("link", {}) + .get("href", "") + ) + return jsonify({ + "status": "success", + "data": tip, + "storage_url": storage_url, + }) + except (APSConfigError, APSAuthError) as e: + return jsonify({"status": "error", "message": str(e)}), 401 + except APSHTTPError as e: + return jsonify({"status": "error", "message": str(e)}), e.status + + +@app.route('/api/aps/cam-tools') +def api_aps_cam_tools(): + """ + List tool libraries in the known XWERKS > Assets > CAMTools folder. + Resolves storage URLs for each file so they're ready to download. + Much faster than the full hub scan. + """ + # Known IDs from the XWERKS hub discovery + project_id = "a.YnVzaW5lc3M6Z3JhY2Vlbmc0I0QyMDI0MTIyMDg0OTIxNzc3Ng" + cam_tools_folder = "urn:adsk.wipprod:fs.folder:co.C0zYkNP4TOexre_-hWRhRA" + + try: + c = _get_aps_client() + contents = c.get_folder_contents(project_id, cam_tools_folder) + + libraries = [] + for item in contents: + if item.get("type") != "items": + continue + name = item.get("attributes", {}).get("displayName", "") + item_id = item["id"] + + # Get the tip version to find the storage URN (for signed download) + try: + tip = c.get_item_tip(project_id, item_id) + storage_url = ( + tip.get("relationships", {}) + .get("storage", {}) + .get("data", {}) + .get("id", "") + ) + last_modified = tip.get("attributes", {}).get("lastModifiedTime", "") + except APSHTTPError: + storage_url = "" + last_modified = "" + + libraries.append({ + "name": name, + "item_id": item_id, + "storage_url": storage_url, + "last_modified": last_modified, + }) + + return jsonify({ + "status": "success", + "count": len(libraries), + "data": libraries, + }) + except (APSConfigError, APSAuthError) as e: + return jsonify({"status": "error", "message": str(e)}), 401 + except APSHTTPError as e: + return jsonify({"status": "error", "message": str(e)}), e.status + except Exception as e: + return jsonify({"status": "error", "message": str(e), "trace": traceback.format_exc()}), 500 + + +@app.route('/api/aps/libraries/download') +def api_aps_library_download(): + """ + Download and parse a single tool library from APS. + Query param: storage_url (required) — the OSS storage URL from find_tool_libraries. 
+ Returns the same shape as /api/fusion/tools (library_name, tool_count, tools_sample). + """ + storage_url = request.args.get("storage_url") + if not storage_url: + return jsonify({ + "status": "error", + "message": "Missing required 'storage_url' query param.", + }), 400 + name = request.args.get("name", "cloud-library") + try: + c = _get_aps_client() + tools = c.download_tool_library(storage_url) + return jsonify({ + "status": "success", + "library_name": name, + "tool_count": len(tools), + "tools_sample": tools[:5], + "data": tools, + }) + except (APSConfigError, APSAuthError) as e: + return jsonify({"status": "error", "message": str(e)}), 401 + except APSHTTPError as e: + return jsonify({"status": "error", "message": str(e)}), e.status + except Exception as e: + return jsonify({"status": "error", "message": str(e), "trace": traceback.format_exc()}), 500 + + +@app.route('/api/aps/sync', methods=['POST']) +def api_aps_sync(): + """ + Download all cloud tool libraries from APS and sync them into Supabase. + + Uses the known XWERKS > Assets > CAMTools folder path. For each + .json file found, downloads via signed S3, then calls sync_library() + to upsert into the libraries/tools/cutting_presets tables. + + Returns per-library results and totals. + """ + project_id = "a.YnVzaW5lc3M6Z3JhY2Vlbmc0I0QyMDI0MTIyMDg0OTIxNzc3Ng" + cam_tools_folder = "urn:adsk.wipprod:fs.folder:co.C0zYkNP4TOexre_-hWRhRA" + + try: + aps = _get_aps_client() + sb = SupabaseClient() + + contents = aps.get_folder_contents(project_id, cam_tools_folder) + + results = [] + total_tools = 0 + total_presets = 0 + + for item in contents: + if item.get("type") != "items": + continue + name = item.get("attributes", {}).get("displayName", "") + if not name.endswith(".json"): + continue + + item_id = item["id"] + library_name = name.replace(".json", "") + + # Get storage URN from the tip + tip = aps.get_item_tip(project_id, item_id) + storage_urn = ( + tip.get("relationships", {}) + .get("storage", {}) + .get("data", {}) + .get("id", "") + ) + if not storage_urn: + results.append({ + "library": library_name, + "status": "error", + "message": "No storage URN in tip", + }) + continue + + # Download and parse + tools = aps.download_tool_library(storage_urn) + if not tools: + results.append({ + "library": library_name, + "status": "skipped", + "message": "Empty or unparseable", + "tools": 0, + "presets": 0, + }) + continue + + # Sync to Supabase + counts = sync_library( + library_name, + tools, + client=sb, + file_path=f"aps://{item_id}", + ) + total_tools += counts["tools"] + total_presets += counts["presets"] + results.append({ + "library": library_name, + "status": "success", + "tools": counts["tools"], + "presets": counts["presets"], + }) + + return jsonify({ + "status": "success", + "libraries_synced": len([r for r in results if r.get("status") == "success"]), + "total_tools": total_tools, + "total_presets": total_presets, + "results": results, + }) + except (APSConfigError, APSAuthError) as e: + return jsonify({"status": "error", "message": str(e)}), 401 + except APSHTTPError as e: + return jsonify({"status": "error", "message": str(e)}), e.status + except Exception as e: + return jsonify({"status": "error", "message": str(e), "trace": traceback.format_exc()}), 500 + + +@app.route('/api/config') +def api_config(): + """Expose non-secret client config to the UI (base URL, tenant, env).""" + return jsonify({ + "base_url": client.base, + "environment": "test" if USE_TEST else "production", + "is_production": IS_PRODUCTION, + 
"writes_allowed": WRITES_ALLOWED, + "tenant_id": TENANT_ID, + "has_key": bool(API_KEY), + "has_secret": bool(API_SECRET), + "aps_configured": bool(APS_CLIENT_ID), + }) + + +if __name__ == '__main__': + # Loud startup banner if we're connected to a production environment + if IS_PRODUCTION: + print() + print("=" * 70) + print(f" WARNING: Connected to PRODUCTION Plex environment") + print(f" {client.base}") + if WRITES_ALLOWED: + print(f" WRITES ARE ENABLED via PLEX_ALLOW_WRITES") + print(f" Every POST/PUT/PATCH/DELETE will hit real production data.") + else: + print(f" Writes are BLOCKED at the proxy. To enable, set") + print(f" PLEX_ALLOW_WRITES=1 in the environment and restart.") + print("=" * 70) + print() + + print("Starting UX Test Server...") + app.run(debug=True, host='0.0.0.0', port=5000) diff --git a/aps_client.py b/aps_client.py new file mode 100644 index 0000000..c5000b4 --- /dev/null +++ b/aps_client.py @@ -0,0 +1,590 @@ +""" +aps_client.py +Autodesk Platform Services (APS) OAuth + Data Management client +Grace Engineering — Datum project +============================================================= +Handles 3-legged OAuth 2.0 with Autodesk and provides methods to +traverse Fusion Team hubs to locate and download cloud tool library +files (.tools / .json). + +This eliminates the need for Fusion 360 or Autodesk Desktop Connector +to be installed locally. The pipeline is: + + APS Hub -> download .tools file -> unzip -> parse JSON + -> same schema as local CAMTools files -> Supabase ingest + +Credentials come from environment variables loaded via ``bootstrap.py``: + + APS_CLIENT_ID App client ID from aps.autodesk.com + APS_CLIENT_SECRET App client secret + APS_CALLBACK_URL OAuth redirect URI (default: http://localhost:5000/api/aps/callback) + +Token state is persisted to a local file (``.aps_tokens.json``, gitignored) +so tokens survive Flask debug reloads and process restarts. A production +deploy would use an encrypted store or database instead. 
+""" +from __future__ import annotations + +import io +import json +import logging +import os +import time +import zipfile +from pathlib import Path +from typing import Any +from urllib.parse import urlencode + +import requests + +import bootstrap # noqa: F401 — loads .env.local into os.environ on import + +log = logging.getLogger(__name__) + +# ───────────────────────────────────────────── +# Configuration +# ───────────────────────────────────────────── +APS_CLIENT_ID = os.environ.get("APS_CLIENT_ID", "") +APS_CLIENT_SECRET = os.environ.get("APS_CLIENT_SECRET", "") +APS_CALLBACK_URL = os.environ.get( + "APS_CALLBACK_URL", "http://localhost:5000/api/aps/callback" +) + +APS_AUTH_BASE = "https://developer.api.autodesk.com/authentication/v2" +APS_DM_BASE = "https://developer.api.autodesk.com" + +# Scopes needed for reading hub data (tool libraries live in the hub) +DEFAULT_SCOPES = "data:read" + +DEFAULT_TIMEOUT = 30 # seconds + + +# ───────────────────────────────────────────── +# Exceptions +# ───────────────────────────────────────────── +class APSConfigError(RuntimeError): + """Raised when APS_CLIENT_ID or APS_CLIENT_SECRET is missing.""" + + +class APSAuthError(RuntimeError): + """Raised when OAuth flow fails (bad code, expired token, etc.).""" + + +class APSHTTPError(RuntimeError): + """Raised when an APS API call returns a non-2xx response.""" + + def __init__(self, status: int, body: Any, url: str): + self.status = status + self.body = body + self.url = url + super().__init__(f"APS {status} on {url}: {body}") + + +# ───────────────────────────────────────────── +# Token store (file-backed, single-user) +# ───────────────────────────────────────────── +# Default token file lives next to .env.local — both are gitignored. +_DEFAULT_TOKEN_PATH = Path(__file__).resolve().parent / ".aps_tokens.json" + + +class TokenStore: + """ + Persists OAuth tokens to a local JSON file so they survive Flask + debug reloads and process restarts. + + Parameters + ---------- + path : Path | None + File to persist tokens to. ``None`` disables persistence + (pure in-memory, useful for tests). 
+ """ + + def __init__(self, path: Path | None = _DEFAULT_TOKEN_PATH): + self._path = path + self.access_token: str | None = None + self.refresh_token: str | None = None + self.expires_at: float = 0.0 # epoch seconds + self._load() + + @property + def is_valid(self) -> bool: + return bool(self.access_token) and time.time() < self.expires_at + + def update(self, data: dict) -> None: + self.access_token = data["access_token"] + self.refresh_token = data.get("refresh_token") + self.expires_at = time.time() + data.get("expires_in", 3600) - 60 + self._save() + + def clear(self) -> None: + self.access_token = None + self.refresh_token = None + self.expires_at = 0.0 + if self._path and self._path.exists(): + self._path.unlink() + + def _save(self) -> None: + if not self._path: + return + try: + self._path.write_text(json.dumps({ + "access_token": self.access_token, + "refresh_token": self.refresh_token, + "expires_at": self.expires_at, + }), encoding="utf-8") + except OSError as e: + log.warning("Could not persist APS tokens: %s", e) + + def _load(self) -> None: + if not self._path or not self._path.exists(): + return + try: + data = json.loads(self._path.read_text(encoding="utf-8")) + self.access_token = data.get("access_token") + self.refresh_token = data.get("refresh_token") + self.expires_at = data.get("expires_at", 0.0) + except (OSError, json.JSONDecodeError, KeyError) as e: + log.warning("Could not load APS tokens from %s: %s", self._path, e) + + +# ───────────────────────────────────────────── +# Client +# ───────────────────────────────────────────── +class APSClient: + """ + Autodesk Platform Services client for OAuth and Data Management API. + + Parameters + ---------- + client_id : str | None + APS app client ID. Defaults to ``APS_CLIENT_ID`` env var. + client_secret : str | None + APS app client secret. Defaults to ``APS_CLIENT_SECRET`` env var. + callback_url : str | None + OAuth redirect URI. Defaults to ``APS_CALLBACK_URL`` env var. + timeout : int + Per-request timeout in seconds. + token_path : Path | None | str + File to persist tokens to. Pass ``None`` to disable persistence + (in-memory only, useful for tests). Defaults to ``.aps_tokens.json``. + """ + + def __init__( + self, + client_id: str | None = None, + client_secret: str | None = None, + callback_url: str | None = None, + timeout: int = DEFAULT_TIMEOUT, + token_path: Path | None | str = _DEFAULT_TOKEN_PATH, + ): + self.client_id = client_id or APS_CLIENT_ID + self.client_secret = client_secret or APS_CLIENT_SECRET + self.callback_url = callback_url or APS_CALLBACK_URL + self.timeout = timeout + self.tokens = TokenStore(path=Path(token_path) if token_path else None) + self._session = requests.Session() + + # ───────────────────────────────────────── + # Config validation + # ───────────────────────────────────────── + def _require_config(self) -> None: + if not self.client_id: + raise APSConfigError( + "APS_CLIENT_ID is not set. Register an app at " + "https://aps.autodesk.com and add the client ID to .env.local." + ) + if not self.client_secret: + raise APSConfigError( + "APS_CLIENT_SECRET is not set. Add it to .env.local." + ) + + def _require_token(self) -> None: + if not self.tokens.is_valid: + raise APSAuthError( + "No valid APS access token. Complete the OAuth flow first " + "by visiting /api/aps/login in your browser." 
+ ) + + # ───────────────────────────────────────── + # OAuth 2.0 — 3-legged flow + # ───────────────────────────────────────── + def get_authorize_url(self, scopes: str = DEFAULT_SCOPES) -> str: + """ + Build the Autodesk authorization URL. Redirect the user's browser here. + After consent, Autodesk redirects back to ``callback_url`` with a code. + """ + self._require_config() + params = { + "response_type": "code", + "client_id": self.client_id, + "redirect_uri": self.callback_url, + "scope": scopes, + } + return f"{APS_AUTH_BASE}/authorize?{urlencode(params)}" + + def exchange_code(self, code: str) -> dict: + """ + Exchange an authorization code for access + refresh tokens. + Called from the OAuth callback handler. + """ + self._require_config() + resp = self._session.post( + f"{APS_AUTH_BASE}/token", + data={ + "grant_type": "authorization_code", + "code": code, + "client_id": self.client_id, + "client_secret": self.client_secret, + "redirect_uri": self.callback_url, + }, + timeout=self.timeout, + ) + if not resp.ok: + raise APSAuthError( + f"Token exchange failed: {resp.status_code} {resp.text}" + ) + data = resp.json() + self.tokens.update(data) + log.info("APS OAuth tokens acquired (expires in %ss)", data.get("expires_in")) + return data + + def refresh_access_token(self) -> dict: + """Use the refresh token to get a new access token.""" + self._require_config() + if not self.tokens.refresh_token: + raise APSAuthError( + "No refresh token available. Re-authenticate via /api/aps/login." + ) + resp = self._session.post( + f"{APS_AUTH_BASE}/token", + data={ + "grant_type": "refresh_token", + "refresh_token": self.tokens.refresh_token, + "client_id": self.client_id, + "client_secret": self.client_secret, + }, + timeout=self.timeout, + ) + if not resp.ok: + raise APSAuthError( + f"Token refresh failed: {resp.status_code} {resp.text}" + ) + data = resp.json() + self.tokens.update(data) + log.info("APS tokens refreshed") + return data + + def _ensure_token(self) -> None: + """Auto-refresh if the current token is expired but we have a refresh token.""" + if self.tokens.is_valid: + return + if self.tokens.refresh_token: + self.refresh_access_token() + return + raise APSAuthError( + "APS token expired and no refresh token available. " + "Re-authenticate via /api/aps/login." + ) + + # ───────────────────────────────────────── + # Authenticated API calls + # ───────────────────────────────────────── + def _authed_headers(self) -> dict[str, str]: + return { + "Authorization": f"Bearer {self.tokens.access_token}", + "Content-Type": "application/json", + } + + def _get(self, url: str, params: dict | None = None) -> Any: + """Authenticated GET. 
Returns parsed JSON.""" + self._ensure_token() + resp = self._session.get( + url, + headers=self._authed_headers(), + params=params, + timeout=self.timeout, + ) + if not resp.ok: + try: + body = resp.json() + except ValueError: + body = resp.text + raise APSHTTPError(resp.status_code, body, resp.url) + return resp.json() + + def _get_binary(self, url: str) -> bytes: + """Authenticated GET returning raw bytes (for file downloads).""" + self._ensure_token() + resp = self._session.get( + url, + headers={"Authorization": f"Bearer {self.tokens.access_token}"}, + timeout=self.timeout, + ) + if not resp.ok: + raise APSHTTPError(resp.status_code, resp.text, resp.url) + return resp.content + + # ───────────────────────────────────────── + # Data Management API — hub traversal + # ───────────────────────────────────────── + def get_hubs(self) -> list[dict]: + """List all hubs the authenticated user can access.""" + data = self._get(f"{APS_DM_BASE}/project/v1/hubs") + return data.get("data", []) + + def get_projects(self, hub_id: str) -> list[dict]: + """List projects within a hub.""" + data = self._get(f"{APS_DM_BASE}/project/v1/hubs/{hub_id}/projects") + return data.get("data", []) + + def get_top_folders(self, hub_id: str, project_id: str) -> list[dict]: + """List top-level folders in a project.""" + data = self._get( + f"{APS_DM_BASE}/project/v1/hubs/{hub_id}/projects/{project_id}/topFolders" + ) + return data.get("data", []) + + def get_folder_contents( + self, project_id: str, folder_id: str + ) -> list[dict]: + """List items in a folder.""" + data = self._get( + f"{APS_DM_BASE}/data/v1/projects/{project_id}/folders/{folder_id}/contents" + ) + return data.get("data", []) + + def search_folder( + self, project_id: str, folder_id: str, filter_name: str = "" + ) -> list[dict]: + """ + Search within a folder. Useful for finding .tools files by name. + """ + params = {} + if filter_name: + params["filter[displayName]"] = filter_name + data = self._get( + f"{APS_DM_BASE}/data/v1/projects/{project_id}/folders/{folder_id}/search", + params=params, + ) + return data.get("data", []) + + def get_item_versions(self, project_id: str, item_id: str) -> list[dict]: + """List versions of an item (to get download links).""" + data = self._get( + f"{APS_DM_BASE}/data/v1/projects/{project_id}/items/{item_id}/versions" + ) + return data.get("data", []) + + def get_item_tip(self, project_id: str, item_id: str) -> dict: + """Get the latest version (tip) of an item.""" + data = self._get( + f"{APS_DM_BASE}/data/v1/projects/{project_id}/items/{item_id}/tip" + ) + return data.get("data", {}) + + # ───────────────────────────────────────── + # File download + parsing + # ───────────────────────────────────────── + def download_version(self, storage_url: str) -> bytes: + """ + Download a file by its storage info. + + Accepts either: + - An ``urn:adsk.objects:os.object:BUCKET/OBJECT`` URN + (from ``relationships.storage.data.id``) + - A legacy ``/oss/v2/buckets/...`` URL + (from ``relationships.storage.meta.link.href``) + + Uses the signed S3 download endpoint (the old direct OSS v2 + GET is deprecated and returns 403). 
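+
+        Example (illustrative URN; both accepted forms resolve to the
+        same signed S3 fetch)::
+
+            raw = client.download_version(
+                "urn:adsk.objects:os.object:wip.dm.prod/abc-123.json"
+            )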
+ """ + # Parse bucket and object key from URN or URL + bucket, obj_key = self._parse_storage_id(storage_url) + if not bucket or not obj_key: + raise APSHTTPError( + 400, + f"Cannot parse storage reference: {storage_url}", + storage_url, + ) + + # Get a signed S3 download URL + sign_resp = self._get( + f"{APS_DM_BASE}/oss/v2/buckets/{bucket}/objects/{obj_key}/signeds3download" + ) + signed_url = sign_resp.get("url") + if not signed_url: + raise APSHTTPError( + 500, + f"No signed URL returned for {bucket}/{obj_key}", + storage_url, + ) + + # Download from S3 (no auth header needed — the URL is pre-signed) + resp = self._session.get(signed_url, timeout=self.timeout) + if not resp.ok: + raise APSHTTPError(resp.status_code, resp.text, signed_url) + return resp.content + + @staticmethod + def _parse_storage_id(ref: str) -> tuple[str, str]: + """ + Extract (bucket, object_key) from a storage URN or URL. + + URN format: ``urn:adsk.objects:os.object:wip.dm.prod/abc-123.json`` + URL format: ``.../oss/v2/buckets/wip.dm.prod/objects/abc-123.json?...`` + """ + # URN form + if ref.startswith("urn:adsk.objects:os.object:"): + path = ref.split("urn:adsk.objects:os.object:")[-1] + parts = path.split("/", 1) + if len(parts) == 2: + return parts[0], parts[1] + + # URL form + if "/oss/v2/buckets/" in ref: + # .../oss/v2/buckets/{bucket}/objects/{object}?... + segment = ref.split("/oss/v2/buckets/")[-1] + segment = segment.split("?")[0] # strip query params + parts = segment.split("/objects/", 1) + if len(parts) == 2: + return parts[0], parts[1] + + return "", "" + + def download_tool_library(self, storage_url: str) -> list[dict]: + """ + Download a .tools file and extract the JSON tool data. + + .tools files are ZIP archives containing a single JSON file + with the same schema as local Fusion tool library exports: + ``{"data": [], ...}`` + + Returns the list of tool dicts (the "data" array), matching + the return type of ``tool_library_loader.load_library()``. + """ + raw_bytes = self.download_version(storage_url) + + # Try ZIP first (.tools files are typically zipped) + try: + with zipfile.ZipFile(io.BytesIO(raw_bytes)) as zf: + names = zf.namelist() + # Find the JSON file inside + json_name = next( + (n for n in names if n.endswith(".json")), names[0] + ) + with zf.open(json_name) as jf: + parsed = json.load(jf) + except zipfile.BadZipFile: + # Not a ZIP — might be raw JSON (some exports) + parsed = json.loads(raw_bytes) + + # Extract the "data" array + if isinstance(parsed, dict) and "data" in parsed: + tools = parsed["data"] + elif isinstance(parsed, list): + tools = parsed + else: + log.warning( + "Unexpected tool library structure from %s — no 'data' key", + storage_url, + ) + return [] + + if not isinstance(tools, list): + return [] + + log.info( + "Downloaded tool library from APS: %d entries", len(tools) + ) + return tools + + # ───────────────────────────────────────── + # High-level: find tool libraries in hub + # ───────────────────────────────────────── + def find_tool_libraries( + self, hub_id: str | None = None + ) -> list[dict]: + """ + Walk the hub to find .tools files. Returns a list of dicts: + + [{"name": "MyLibrary.tools", + "item_id": "urn:...", + "project_id": "...", + "hub_id": "...", + "storage_url": "https://..."}, ...] + + If ``hub_id`` is None, searches all accessible hubs. 
+ """ + results = [] + hubs = [{"id": hub_id}] if hub_id else self.get_hubs() + + for hub in hubs: + hid = hub["id"] + projects = self.get_projects(hid) + + for project in projects: + pid = project["id"] + try: + top_folders = self.get_top_folders(hid, pid) + except APSHTTPError: + log.debug("Skipping project %s — can't list folders", pid) + continue + + for folder in top_folders: + fid = folder["id"] + self._scan_folder_for_tools( + hid, pid, fid, results, depth=0 + ) + + return results + + def _scan_folder_for_tools( + self, + hub_id: str, + project_id: str, + folder_id: str, + results: list[dict], + depth: int = 0, + max_depth: int = 5, + ) -> None: + """Recursively scan folders for .tools files.""" + if depth > max_depth: + return + + try: + contents = self.get_folder_contents(project_id, folder_id) + except APSHTTPError: + return + + for item in contents: + item_type = item.get("type", "") + name = item.get("attributes", {}).get("displayName", "") + + if item_type == "folders": + # Recurse into subfolders + self._scan_folder_for_tools( + hub_id, project_id, item["id"], results, depth + 1 + ) + + elif item_type == "items" and ( + name.endswith(".tools") or name.endswith(".json") + ): + # Found a tool library file — get its download URL + try: + tip = self.get_item_tip(project_id, item["id"]) + storage = ( + tip.get("relationships", {}) + .get("storage", {}) + .get("meta", {}) + .get("link", {}) + .get("href", "") + ) + results.append({ + "name": name, + "item_id": item["id"], + "project_id": project_id, + "hub_id": hub_id, + "storage_url": storage, + }) + except APSHTTPError as e: + log.warning("Could not get tip for %s: %s", name, e) diff --git a/bootstrap.py b/bootstrap.py new file mode 100644 index 0000000..de8f32d --- /dev/null +++ b/bootstrap.py @@ -0,0 +1,103 @@ +""" +bootstrap.py +.env.local loader +================== +Optional dotenv-style loader for credentials and other environment +configuration. Imported at the very top of plex_api.py so that +PLEX_API_KEY / PLEX_API_SECRET can come from a gitignored .env.local +file in the project root, instead of requiring the user to set them +in every shell. + +Behavior +-------- +- If .env.local exists in the project root, parse KEY=VALUE pairs + and inject them into os.environ via setdefault — meaning any + variable already set in the real environment WINS, never overridden. +- Lines starting with # are comments. Blank lines are ignored. +- Surrounding single or double quotes on values are stripped. +- Missing file is a no-op (no error). + +Why setdefault, not direct assignment +------------------------------------- +A real shell environment variable should always override .env.local — +that lets CI, production deployments, and ad-hoc shell exports take +precedence over local dev defaults without anyone having to remember +to delete the file. +""" +import os +from pathlib import Path + +# Project root = directory containing this file (bootstrap.py lives at the root) +_PROJECT_ROOT = Path(__file__).resolve().parent + + +def _find_env_local() -> Path | None: + """ + Walk up from this file's directory until we find a ``.env.local``, + or give up at the filesystem root. + + This lets worktrees (e.g. ``.claude/worktrees/foo``) inherit the + ``.env.local`` from the main repo root without needing their own copy. 
+ """ + current = _PROJECT_ROOT + while True: + candidate = current / ".env.local" + if candidate.exists(): + return candidate + parent = current.parent + if parent == current: # reached filesystem root + return None + current = parent + + +def load_env_local(path: Path | str | None = None) -> int: + """ + Load KEY=VALUE pairs from a .env.local file into os.environ via setdefault. + + Parameters + ---------- + path : Path | str | None + Override the file path. When ``None``, walks up the directory tree + from the project root to find the nearest ``.env.local``. An + explicit path always wins over the walk-up search. + + Returns + ------- + int + Number of variables actually injected into os.environ + (i.e. that were not already present). + """ + if path is None: + found = _find_env_local() + if found is None: + return 0 + path = found + else: + path = Path(path) + + if not path.exists(): + return 0 + + injected = 0 + for line in path.read_text(encoding="utf-8").splitlines(): + line = line.strip() + if not line or line.startswith("#") or "=" not in line: + continue + + key, _, value = line.partition("=") + key = key.strip() + value = value.strip() + + # Strip matched surrounding quotes (' or ") + if len(value) >= 2 and value[0] == value[-1] and value[0] in ("'", '"'): + value = value[1:-1] + + if key and key not in os.environ: + os.environ[key] = value + injected += 1 + + return injected + + +# Auto-load on import — no-op if .env.local does not exist. +load_env_local() diff --git a/db/migrations/0001_initial_schema.sql b/db/migrations/0001_initial_schema.sql new file mode 100644 index 0000000..d724f79 --- /dev/null +++ b/db/migrations/0001_initial_schema.sql @@ -0,0 +1,190 @@ +-- ========================================================================= +-- Datum — initial schema (libraries / tools / cutting_presets) +-- ========================================================================= +-- Replays the bulletforge fusion2plex_* design from 2026-04-08 against the +-- dedicated `datum` Supabase project, with the prefix dropped (no collision +-- risk in a dedicated project). +-- +-- Source: bulletforge migrations 20260408171007 + 20260408171051, +-- bundled into one apply for the cutover on 2026-04-09. +-- +-- Apply via Supabase SQL Editor against the `datum` project. Idempotent on a +-- fresh project; safe to replay only if the prior tables/triggers/policies +-- are dropped first. +-- ========================================================================= + +-- Generic updated_at trigger function. search_path pinned per Supabase +-- linter rule 0011. +CREATE OR REPLACE FUNCTION public.set_updated_at() +RETURNS TRIGGER +LANGUAGE plpgsql +SET search_path = public, pg_temp +AS $fn$ +BEGIN + NEW.updated_at = NOW(); + RETURN NEW; +END; +$fn$; + +-- ========================================================================= +-- libraries — one row per ingested .json file +-- ========================================================================= +CREATE TABLE public.libraries ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + library_name TEXT NOT NULL, + vendor TEXT, + file_path TEXT, + file_hash TEXT, + tool_count INTEGER NOT NULL DEFAULT 0, + unit_original TEXT, + ingested_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE UNIQUE INDEX libraries_library_name_key + ON public.libraries (library_name); + +ALTER TABLE public.libraries ENABLE ROW LEVEL SECURITY; + +-- Explicit deny-all anon policy on libraries. 
+-- Service role bypasses RLS implicitly, so ingest still writes. Anon + +-- authenticated cannot read libraries — only tools + cutting_presets are +-- exposed to the future React UI per spec. +CREATE POLICY libraries_deny_anon + ON public.libraries + FOR SELECT + TO anon + USING (false); + +-- ========================================================================= +-- tools — one row per cutting tool, geometry normalized to mm +-- ========================================================================= +CREATE TABLE public.tools ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + fusion_guid TEXT NOT NULL UNIQUE, + library_id UUID REFERENCES public.libraries(id) ON DELETE SET NULL, + + -- Identity + vendor TEXT NOT NULL, + product_id TEXT NOT NULL, + description TEXT NOT NULL, + type TEXT NOT NULL, + bmc TEXT, + grade TEXT, + reference_guid TEXT, + unit_original TEXT, + product_link TEXT, + tapered_type TEXT, + + -- Geometry (all normalized to mm; nullable because vendor-specific) + geo_dc FLOAT8, + geo_nof FLOAT8, + geo_oal FLOAT8, + geo_lcf FLOAT8, + geo_lb FLOAT8, + geo_sfdm FLOAT8, + geo_sig FLOAT8, + geo_re FLOAT8, + geo_nt FLOAT8, + geo_ta FLOAT8, + geo_ta2 FLOAT8, + geo_tp FLOAT8, + geo_thread_profile_angle FLOAT8, + geo_tip_diameter FLOAT8, + geo_tip_length FLOAT8, + geo_tip_offset FLOAT8, + geo_assembly_gauge_length FLOAT8, + geo_shoulder_diameter FLOAT8, + geo_shoulder_length FLOAT8, + geo_hand BOOLEAN, + geo_csp BOOLEAN, + + -- Post-process (populated by CAM programmer, often zero in catalog libs) + pp_number INTEGER, + pp_turret INTEGER, + pp_diameter_offset INTEGER, + pp_length_offset INTEGER, + pp_live BOOLEAN, + pp_break_control BOOLEAN, + pp_manual_tool_change BOOLEAN, + pp_comment TEXT, + + -- Passthrough + shaft_segments JSONB, + + -- Plex sync + plex_supply_item_id UUID, + plex_synced_at TIMESTAMPTZ, + + -- Timestamps + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX tools_product_vendor_idx + ON public.tools (product_id, vendor); +CREATE INDEX tools_type_idx + ON public.tools (type); +CREATE INDEX tools_library_id_idx + ON public.tools (library_id); +CREATE INDEX tools_geo_dc_idx + ON public.tools (geo_dc); + +CREATE TRIGGER tools_updated_at + BEFORE UPDATE ON public.tools + FOR EACH ROW EXECUTE FUNCTION public.set_updated_at(); + +ALTER TABLE public.tools ENABLE ROW LEVEL SECURITY; + +-- Anon role: read-only for future React UI +CREATE POLICY tools_anon_select + ON public.tools + FOR SELECT + TO anon + USING (true); + +-- ========================================================================= +-- cutting_presets — feeds/speeds per material per tool +-- ========================================================================= +CREATE TABLE public.cutting_presets ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + tool_id UUID NOT NULL REFERENCES public.tools(id) ON DELETE CASCADE, + preset_guid TEXT, + name TEXT, + description TEXT, + material_category TEXT, + material_query TEXT, + material_use_hardness BOOLEAN, + v_c FLOAT8, + v_f FLOAT8, + f_z FLOAT8, + f_n FLOAT8, + n FLOAT8, + n_ramp FLOAT8, + ramp_angle FLOAT8, + tool_coolant TEXT, + v_f_plunge FLOAT8, + v_f_ramp FLOAT8, + v_f_lead_in FLOAT8, + v_f_lead_out FLOAT8, + v_f_retract FLOAT8, + v_f_transition FLOAT8, + use_feed_per_revolution BOOLEAN, + use_stepdown BOOLEAN, + use_stepover BOOLEAN, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX cutting_presets_tool_id_idx + ON public.cutting_presets (tool_id); +CREATE INDEX 
cutting_presets_name_idx + ON public.cutting_presets (name); + +ALTER TABLE public.cutting_presets ENABLE ROW LEVEL SECURITY; + +-- Anon role: read-only for future React UI +CREATE POLICY cutting_presets_anon_select + ON public.cutting_presets + FOR SELECT + TO anon + USING (true); diff --git a/db/migrations/0002_reference_catalog.sql b/db/migrations/0002_reference_catalog.sql new file mode 100644 index 0000000..72aadcf --- /dev/null +++ b/db/migrations/0002_reference_catalog.sql @@ -0,0 +1,50 @@ +-- ========================================================================= +-- Datum — reference_catalog table for vendor catalog cross-referencing +-- ========================================================================= +-- Large vendor catalogs (Harvey, Helical, Garr, Guhring, Sandvik, etc.) +-- ingested from hsmtools downloads. Used to enrich shop tools that are +-- missing product_id by matching on (type, geometry). +-- +-- Separate from the `tools` table so vendor catalog data doesn't mix +-- with Grace's actual shop tool inventory. +-- ========================================================================= + +CREATE TABLE public.reference_catalog ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + catalog_name TEXT NOT NULL, -- e.g. "Harvey Tool-End Mills" + vendor TEXT NOT NULL, + product_id TEXT NOT NULL, + description TEXT NOT NULL DEFAULT '', + type TEXT NOT NULL, -- "flat end mill", "drill", etc. + + -- Geometry fingerprint (normalized to mm for consistent matching) + geo_dc FLOAT8, -- cutting diameter + geo_nof FLOAT8, -- number of flutes + geo_oal FLOAT8, -- overall length + geo_lcf FLOAT8, -- length of cut / flute length + geo_sig FLOAT8, -- point angle (drills) + + unit_original TEXT, -- original unit before normalization + + ingested_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Primary lookup index: match shop tools by type + diameter + flute count +CREATE INDEX ref_catalog_match_idx + ON public.reference_catalog (type, geo_dc, geo_nof); + +-- Prevent duplicate entries from the same catalog +CREATE UNIQUE INDEX ref_catalog_dedup_idx + ON public.reference_catalog (catalog_name, product_id); + +CREATE INDEX ref_catalog_vendor_idx + ON public.reference_catalog (vendor); + +ALTER TABLE public.reference_catalog ENABLE ROW LEVEL SECURITY; + +-- Service role only — no browser access to reference data +CREATE POLICY ref_catalog_deny_anon + ON public.reference_catalog + FOR SELECT + TO anon + USING (false); diff --git a/db/migrations/0003_source_modified_at.sql b/db/migrations/0003_source_modified_at.sql new file mode 100644 index 0000000..2204e36 --- /dev/null +++ b/db/migrations/0003_source_modified_at.sql @@ -0,0 +1,7 @@ +-- Track when the library file was last modified in Fusion Hub (APS). +-- Distinct from ingested_at (when we last synced) and updated_at (Supabase trigger). +ALTER TABLE public.libraries + ADD COLUMN IF NOT EXISTS source_modified_at TIMESTAMPTZ; + +COMMENT ON COLUMN public.libraries.source_modified_at + IS 'lastModifiedTime from APS Data Management API — when the .tools file was last saved in Fusion Hub'; diff --git a/db/migrations/0004_anon_read_reference_catalog.sql b/db/migrations/0004_anon_read_reference_catalog.sql new file mode 100644 index 0000000..ab86bfe --- /dev/null +++ b/db/migrations/0004_anon_read_reference_catalog.sql @@ -0,0 +1,8 @@ +-- Allow anon read on reference_catalog (public vendor data). 
+DROP POLICY IF EXISTS "ref_catalog_deny_anon" ON reference_catalog; + +CREATE POLICY "ref_catalog_anon_read" + ON reference_catalog + FOR SELECT + TO anon + USING (true); diff --git a/db/migrations/0005_tools_plex_linkage.sql b/db/migrations/0005_tools_plex_linkage.sql new file mode 100644 index 0000000..def0425 --- /dev/null +++ b/db/migrations/0005_tools_plex_linkage.sql @@ -0,0 +1,46 @@ +-- ========================================================================= +-- Datum — tools.plex_* linkage + qty cache columns +-- ========================================================================= +-- Adds Plex linkage provenance and on-hand-qty cache columns to the tools +-- table, supporting the inventory display work on datum.graceops.dev. +-- +-- Supersedes the separate tool_plex_links table originally planned in #74: +-- the tools table already carries plex_supply_item_id UUID (from +-- 0001_initial_schema.sql line 116), so linkage is a property of the tool +-- rather than a separate entity. Flattening avoids a join on every +-- ToolsPage load. +-- +-- Issue: #75 (parent #49) +-- ========================================================================= + +ALTER TABLE public.tools + ADD COLUMN plex_linked_by TEXT + CHECK (plex_linked_by IS NULL OR plex_linked_by IN ('manual', 'writeback', 'sync')), + ADD COLUMN plex_linked_at TIMESTAMPTZ, + ADD COLUMN qty_on_hand NUMERIC, + ADD COLUMN qty_tracked BOOLEAN, + ADD COLUMN qty_synced_at TIMESTAMPTZ; + +-- Reverse lookup: "which tool is linked to this Plex supply-item?" +-- Partial because most rows will have NULL plex_supply_item_id until +-- #3 writeback catches up. +CREATE INDEX tools_plex_supply_item_id_idx + ON public.tools (plex_supply_item_id) + WHERE plex_supply_item_id IS NOT NULL; + +-- Column semantics documented in the DB so they're visible in the +-- Supabase Table Editor + `psql \d+ tools`. +COMMENT ON COLUMN public.tools.plex_linked_by IS + 'How plex_supply_item_id was populated. ''manual'' = hand-curated, ''writeback'' = captured from #3 Fusion->Plex write sync response, ''sync'' = automated description-match pass. NULL = not linked.'; + +COMMENT ON COLUMN public.tools.plex_linked_at IS + 'When plex_supply_item_id was set. NULL = not linked.'; + +COMMENT ON COLUMN public.tools.qty_on_hand IS + 'Running balance derived from summing Plex inventory-history/item-adjustments for plex_supply_item_id. NULL = unknown (tool not linked, or not yet synced, or no adjustment history).'; + +COMMENT ON COLUMN public.tools.qty_tracked IS + 'TRUE if the linked Plex supply-item has one or more adjustment records. FALSE = linked but Plex has no inventory history for it (the 97% case per the 2026-04-15 probe). NULL = not yet checked.'; + +COMMENT ON COLUMN public.tools.qty_synced_at IS + 'When qty_on_hand / qty_tracked were last refreshed from Plex. Distinct from plex_synced_at (the Fusion->Plex write-sync timestamp, populated by #3).'; diff --git a/db/migrations/0006_plex_supply_items.sql b/db/migrations/0006_plex_supply_items.sql new file mode 100644 index 0000000..8663be6 --- /dev/null +++ b/db/migrations/0006_plex_supply_items.sql @@ -0,0 +1,72 @@ +-- ========================================================================= +-- Datum — plex_supply_items staging table +-- ========================================================================= +-- Mirrors the 6-field Plex supply-item POST payload shape (see docs/ +-- Plex_API_Reference.md §3.5). 
One row per tools.fusion_guid, containing +-- exactly what would be POSTed to inventory/v1/inventory-definitions/ +-- supply-items when #3 writeback runs. Plex-assigned UUID lands in +-- plex_id after a successful POST; until then NULL. +-- +-- Design: +-- - fusion_guid PRIMARY KEY (1:1 with tools) — not a surrogate id +-- - Plex field names with reserved words (group, type) are renamed to +-- item_group / item_type here; the payload builder in #3 does the +-- one-line camelCase + rename translation at serialization time +-- - Defaults cover universally-true values (category, inventory_unit, +-- item_type) so inserts need only 3 derived columns +-- - Two partial indexes: reverse-lookup by plex_id, and "unposted" +-- queue for the writeback worker +-- - supply_item_number is NOT UNIQUE locally — let Plex 409 on collision +-- +-- Issue: #3 (writeback), staging precursor requested 2026-04-15 +-- ========================================================================= + +CREATE TABLE public.plex_supply_items ( + fusion_guid TEXT PRIMARY KEY + REFERENCES public.tools(fusion_guid) ON DELETE CASCADE, + + -- Plex payload fields (snake_case locally; payload builder converts + -- to camelCase on the wire and renames item_group -> "group", + -- item_type -> "type"). + category TEXT NOT NULL DEFAULT 'Tools & Inserts', + description TEXT, + item_group TEXT, + inventory_unit TEXT NOT NULL DEFAULT 'Ea', + supply_item_number TEXT, + item_type TEXT NOT NULL DEFAULT 'SUPPLY', + + -- Plex-assigned UUID, NULL until #3 writeback POST succeeds. + plex_id UUID, + + -- Audit + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now(), + posted_to_plex_at TIMESTAMPTZ +); + +CREATE INDEX plex_supply_items_plex_id_idx + ON public.plex_supply_items(plex_id) WHERE plex_id IS NOT NULL; + +CREATE INDEX plex_supply_items_unposted_idx + ON public.plex_supply_items(fusion_guid) WHERE plex_id IS NULL; + +CREATE TRIGGER plex_supply_items_set_updated_at + BEFORE UPDATE ON public.plex_supply_items + FOR EACH ROW EXECUTE FUNCTION public.set_updated_at(); + +ALTER TABLE public.plex_supply_items ENABLE ROW LEVEL SECURITY; + +COMMENT ON TABLE public.plex_supply_items IS + 'Staging layer mirroring the 6 Plex supply-item payload fields (#3 writeback target). One row per tools.fusion_guid. plex_id is NULL until Plex assigns one on POST.'; + +COMMENT ON COLUMN public.plex_supply_items.plex_id IS + 'UUID assigned by Plex on POST to /inventory/v1/inventory-definitions/supply-items. Also mirrored into tools.plex_supply_item_id with plex_linked_by=''writeback''.'; + +COMMENT ON COLUMN public.plex_supply_items.item_group IS + 'Plex "group" field (reserved word locally). Mapped from tools.type via the spec in Notion Supabase Schema Design.'; + +COMMENT ON COLUMN public.plex_supply_items.item_type IS + 'Plex "type" field (reserved word locally). Default "SUPPLY"; may become "SUPPLY-FUSION" if Plex accepts a custom type (per 2026-04-08 decision).'; + +COMMENT ON COLUMN public.plex_supply_items.posted_to_plex_at IS + 'Timestamp of the most recent successful POST to Plex for this row. 
NULL means never posted; non-NULL with NULL plex_id would indicate a data-integrity bug.'; diff --git a/db/migrations/0007_anon_read_plex_supply_items.sql b/db/migrations/0007_anon_read_plex_supply_items.sql new file mode 100644 index 0000000..b89a8ae --- /dev/null +++ b/db/migrations/0007_anon_read_plex_supply_items.sql @@ -0,0 +1,15 @@ +-- Allow anon read on plex_supply_items so the browser can render the +-- "Plex Staging Payload" card on ToolDetailPage (#81). +-- +-- Migration 0006 enabled RLS on plex_supply_items but didn't add a +-- policy, so the anon client at web/src/pages/ToolDetailPage.tsx +-- `.from("plex_supply_items").select("*")` silently returns nothing. +-- Staging-payload data is already derivable from the anon-readable +-- tools rows — exposing it here just saves the browser from +-- recomputing the 6 fields. + +CREATE POLICY "plex_supply_items_anon_read" + ON public.plex_supply_items + FOR SELECT + TO anon + USING (true); diff --git a/docs/BRIEFING.md b/docs/BRIEFING.md new file mode 100644 index 0000000..7e473be --- /dev/null +++ b/docs/BRIEFING.md @@ -0,0 +1,775 @@ +# Grace Engineering — Datum: Claude Code Briefing + +This is the primary context document for AI-assisted development sessions. +Read this first, then read `plex_api.py`, `tool_library_loader.py`, and +`docs/validate_library_spec.md` (the pre-sync validation gate design, #25). + +> **File layout note.** As of 2026-04-08, all long-form docs live under +> `docs/`. This file is `docs/BRIEFING.md`, not `./BRIEFING.md`. Siblings: +> `docs/Plex_API_Reference.md`, `docs/Fusion360_Tool_Library_Reference.md`, +> `docs/validate_library_spec.md`, `docs/Postman_Collections.md`. +> `README.md`, `CLAUDE.md`, and `TODO.md` are still at the repo root. See +> PR #24 for the move. + +> **Read the "History of incorrect hypotheses" section at the bottom of this +> file before changing anything credential- or tenant-related.** It documents +> four wrong turns this project took that all came down to one root cause +> (see History §1). Do not repeat them. + +--- + +## What this project is + +Nightly automation that syncs Autodesk Fusion 360 tool library data into +Rockwell Automation Plex Smart Manufacturing (ERP). Fusion 360 JSON files +on a local network share are the absolute source of truth. The script reads +them and pushes tooling data to Plex via REST API every night at midnight. + +**Project name: Datum.** Named for the machining reference point — the fixed datum everything is measured from. Fusion 360 is the datum; Plex and Supabase stay in sync with it. + +--- + +## Repo: https://github.com/grace-shane/datum + +Forked from just-shane/plex-api. Grace Engineering's working copy. + +Renamed from `plex-api` → `datum` on 2026-04-09. + +--- + +## Notion pages + +Live project state and decision log live in Notion, outside the repo. +The repo has the "what" (code, specs, CI); Notion has the "where are we +right now" and the running conversation about trade-offs. + +| Page | URL | Purpose | +|---|---|---| +| Grace Engineering | https://www.notion.so/33c3160a3abf813f9db6c5f68bef8bf2 | Parent — all Grace work lives under this page | +| Datum | https://www.notion.so/Grace-Engineering-Fusion2Plex-33c3160a3abf81f1aac0e58101952be5 | **Read this at the start of every session.** Current State block = exactly where to pick up. | + +### Session protocol + +- **Start of session:** read the Datum Notion page. The Current + State block at the top tells you phase, next action, and test count + without having to diff the repo. 
+- **End of session:** update the Current State block (phase, next
+  action, test count) and append one line to the Decision Log describing
+  what changed and why.
+
+---
+
+## Current situation (April 2026)
+
+- **App**: `Datum` in the Plex Developer Portal
+- **Environment**: `https://connect.plex.com` — **PRODUCTION**, real Grace data
+- **Tenant**: `58f781ba-1691-4f32-b1db-381cdb21300c` (`Grace`) — verified
+  empirically by `GET /mdm/v1/tenants`
+- **Credentials**: Consumer Key + (optional) Secret in `.env.local`,
+  loaded by `bootstrap.py` at startup. Gitignored.
+- **Key expires every 31 days** — see issue #12 for rotation cadence
+- **Reads work** — `mdm/v1/tenants`, `mdm/v1/parts`, `mdm/v1/suppliers`,
+  `purchasing/v1/purchase-orders` all return 200
+- **Writes are blocked** at the proxy by default (PR #17 production guard).
+  To enable: set `PLEX_ALLOW_WRITES=1` in the environment and restart
+- **There is NO test environment for this app.** The Datum Consumer
+  Key only authenticates against `connect.plex.com`, not `test.connect.plex.com`.
+  Every action you take is against real production data.
+
+---
+
+## Auth — header model
+
+```
+X-Plex-Connect-Api-Key: <consumer-key>         # required — identifies the app
+X-Plex-Connect-Tenant-Id: <tenant-id>          # required — selects the tenant
+X-Plex-Connect-Api-Secret: <consumer-secret>   # OPTIONAL — Plex authenticates on
+                                               # the key alone for this app
+```
+
+The Insomnia Generate Code output for a working request shows only the
+key + tenant headers. The secret may be needed in some configurations
+(future-proof, harmless to send), but is not currently required.
+
+Credentials are loaded from `.env.local` via `bootstrap.py`.
+**Never hardcode credentials. Never commit credentials.**
+
+### Tenants
+
+| Name | Tenant ID | Status |
+|-------------------|----------------------------------------|-------------------------------------|
+| **Grace Eng.** | `58f781ba-1691-4f32-b1db-381cdb21300c` | **CURRENT** — verified live, prod |
+| Grace (stale) | `a6af9c99-bce5-4938-a007-364dc5603d08` | Dead. Was in earlier docs. Wrong. |
+| G5 | `b406c8c4-cef0-4d62-862c-1758b702cd02` | Another company. Old test app only. |
+
+Tenant IDs are not secrets — they are committed as defaults in
+`plex_api.py` (`GRACE_TENANT_ID`) and `plex_diagnostics.py`
+(`KNOWN_TENANTS`).
+
+---
+
+## Architecture
+
+```
+Fusion 360 tool libraries
+ ├── APS cloud (primary, PR #43) ──── aps_client.py ─┐
+ └── ADC network share (fallback) ── tool_library_loader.py │
+                  ▼
+ validate_library.py      pre-sync validation gate (PR #28, spec: docs/validate_library_spec.md)
+                  │
+                  ▼
+ Supabase staging (PR #32, schema: libraries / tools / cutting_presets)
+ ├── enrich.py            vendor catalog + geometry-based enrichment (PR #48, wired PR #54)
+ ├── React UI (PR #41)    tool browser / library browser / scripts / qty indicators — deployed to Cloudflare Workers (PR #70)
+                  ▼
+ transform layer          build_supply_item_payload (in progress, #3 — sprint PRs #82 / #84)
+                  │
+                  ▼
+ plex_api.py / PlexClient                                 Plex REST API
+ ├── inventory/v1/inventory-definitions/supply-items      cutting tools (category="Tools & Inserts")
+ ├── mdm/v1/suppliers                                     resolve vendor UUIDs
+ └── production/v1/production-definitions/workcenters     machine setup docs (per-id write — blocked on Classic API, #6)
+```
+
+The nightly sync runs via the `sync.py` CLI (PR #44), scheduled on an always-on host (PR #47).
Going forward, the GCP migration ([#85](https://github.com/grace-shane/Datum/issues/85)) will drop the ADC fallback entirely, replace Supabase with Cloud SQL, and move scheduling to Cloud Scheduler — see `docs/GCP_MIGRATION.md`.
+
+### Industry hierarchy (Plex data model)
+
+1. Purchased consumables — cutting tools as bought parts (end mills, drills, etc.)
+2. Tool assemblies — consumable + holder paired together
+3. Routings / operations — assemblies mapped to machining ops
+4. Jobs — ops executed on the shop floor
+5. Manufactured parts — end product, with full tool traceability
+
+---
+
+## Plex API access matrix — Datum on production
+
+Verified empirically against `connect.plex.com` with the Grace tenant.
+
+### URL pattern convention
+
+Plex uses two URL shapes for read endpoints:
+
+- **Master data, flat**: `<namespace>/v1/<resource>`
+  → `mdm/v1/parts`, `mdm/v1/suppliers`, `mdm/v1/operations`
+- **Definitions, nested**: `<namespace>/v1/<namespace>-definitions/<resource>`
+  → `production/v1/production-definitions/workcenters`
+  → `inventory/v1/inventory-definitions/supply-items`
+
+### Verified working endpoints
+
+Record counts are as of **2026-04-09** unless noted. For the full schema
+of every resource + cross-reference discussion, see
+[`docs/Plex_API_Reference.md`](./Plex_API_Reference.md) §3.
+
+| Path | Records | What it is |
+|---|---:|---|
+| `mdm/v1/tenants` | 1 | Grace tenant only. Auth canary. |
+| `mdm/v1/parts` | 16,921 | +8 since 2026-04-07. **Finished products + raw materials only.** Tools are NOT here. |
+| `mdm/v1/parts/{id}` | — | Per-id verified 2026-04-09. Same fields as list. |
+| `mdm/v1/suppliers` | 1,575 | Supplier master. Has `parentSupplierId` self-FK. Mixed material + carrier types. |
+| `mdm/v1/suppliers/{id}` | — | Per-id verified 2026-04-09. |
+| `mdm/v1/customers` | 109 | 35-field schema. FKs to employees, contacts, suppliers. |
+| `mdm/v1/customers/{id}` | — | Per-id verified 2026-04-09. |
+| `mdm/v1/contacts` | 299 | |
+| `mdm/v1/buildings` | 4 | Provides `buildingCode`/`buildingId` referenced by workcenters. |
+| `mdm/v1/employees` | 641 | UUIDs here appear as `createdById`/`modifiedById` across every resource. |
+| `mdm/v1/operations` | 122 | Process steps. Minimal schema (4 fields). No FK to tools/parts/routings — see Gotchas. |
+| `mdm/v1/operations/{id}` | — | Per-id verified 2026-04-09. |
+| `purchasing/v1/purchase-orders` | — | 44.2 MB unfiltered. `?updatedAfter` filter confirmed silent no-op 2026-04-09. |
+| `production/v1/production-definitions/workcenters` | 143 | Includes 21 MILLs. **Codes 879/880 = Brother Speedio FTP IPs.** ⚠️ Primary key is `workcenterId`, not `id`. |
+| `production/v1/production-definitions/workcenters/{id}` | — | Per-id verified 2026-04-09. |
+| `inventory/v1/inventory-definitions/supply-items` | 2,516 | **WHERE TOOLS LIVE.** Filter `category="Tools & Inserts"` for the 1,109 cutting tools. **⚠️ No supplier FK, no cross-references of any kind — identity-only. See §3.5 of Plex_API_Reference.md.** |
+| `inventory/v1/inventory-definitions/supply-items/{id}` | — | Per-id verified 2026-04-09. Same 7 fields, no hidden detail. |
+| `inventory/v1/inventory-definitions/locations` | 1,270 | Inventory location master. Not referenced from supply-item. |
+| `scheduling/v1/jobs` | 114,684 | **Deep-dived 2026-04-10.** 18 fields: `jobNumber, partId, partNumber, buildingId, quantity, dates, status`. **No tool/operation/workcenter FKs.** Does NOT unblock #5.
| + +### Where tooling data actually lives + +Cutting tools and inserts are **`inventory/v1/inventory-definitions/supply-items`** +records with `category="Tools & Inserts"` and `group="Machining"`. There are +already 1,109 tools/inserts tracked in Plex Grace (verified 2026-04-09). +The schema is just 7 fields: + + - `category` (e.g. "Tools & Inserts") + - `description` (free-text, human-readable) + - `group` (e.g. "Machining", "Tool Room") + - `id` (UUID — Plex primary key) + - `inventoryUnit` (e.g. "Ea") + - `supplyItemNumber` (**legacy: free-text, not vendor part numbers**) + - `type` (e.g. "SUPPLY") + +**⚠️ CRITICAL: supply-items have NO cross-references to any other resource (verified 2026-04-09).** This is identity-only — Plex stores no link from a tool to: + +- its vendor (no `supplierId`) +- its physical location (no `locationId`) +- the part it helps produce (no `partId`) +- the machine it belongs to (no `workcenterId`) +- the operation it performs (no `operationId`) + +**Implication for the Datum architecture:** vendor/supplier data for tools MUST live in Supabase as the source of truth. The Fusion JSON carries `vendor` and `product-id` → those get written to the `tools` table in Supabase → and `build_supply_item_payload()` (issue #3) constructs the Plex POST body using only the 7 identity fields listed above. Plex never learns who the vendor is, because Plex doesn't model that relationship for tools. + +This kills the "use PO lines as a back-channel for the vendor link" hypothesis that was implicit earlier — `purchasing/v1/purchase-orders-lines` returned 404 on 2026-04-09. + +The Fusion sync will write to `inventory/v1/inventory-definitions/supply-items`, not to `mdm/v1/parts` (which is for finished products). + +**Sample existing `supplyItemNumber` values captured 2026-04-09** (confirming the legacy free-text nature of these records): + +- `"Insert HM90 AXCR 150508 IC28"` (description, not a part number) +- `"Screw Indexable Face Mill F75"` +- `"Tap #8-32 H3 Spiral Point"` + +Fusion writes will use clean vendor part numbers like `"990910"`, so expect ~100% INSERTs with zero collisions on first sync. + +### Workcenter ↔ machine mapping + +The 21 MILL workcenter records map directly to physical machines via +`workcenterCode` (which equals the machine number / DNC IP last octet): + +| Brother Speedio | FTP IP | Plex workcenterCode | Plex workcenterId | +|---|---|---|---| +| 879 | 192.168.25.79 | `879` | `0b6cf62b-2809-4d3d-ab24-369cd0171f62` | +| 880 | 192.168.25.80 | `880` | `8e262d5a-3ce8-4597-8726-d2b979b1b6b7` | + +Full mill list: 814, 825, 827, 830, 834-841, 845, 848, 851, 865, 873, +879, 880, DEFLECT. + +### How to read 401 vs 404 from Plex + +- **401 `REQUEST_NOT_AUTHENTICATED`** — bad credentials OR you're hitting + a recognized namespace your app isn't subscribed to. Same wire response. +- **404 `RESOURCE_NOT_FOUND`** — Plex's gateway has no route at that path. + Could mean unknown URL OR subscribed-but-no-resource. Same wire response. +- **The only way to tell apart cleanly** is to compare across many endpoints + with the same auth, AND ideally compare against a known-good client + (Insomnia → Generate Code) for ground truth. + +### Filter behavior — most query params are silently ignored + +Empirically verified: Plex's gateway accepts unknown query parameters +without complaint and just returns the unfiltered set. The only filter +we've seen actually work on `mdm/v1/parts` is `?status=Active` (reduces +19.6 MB → 7.8 MB). 
The `typeName`, `type`, `category`, `limit` parameters +all return the full unfiltered response. Always assume `limit` does +nothing and use real filters or accept the full DB pull. + +--- + +## Plex Classic Web Services (discovered 2026-04-10) + +The REST API at `connect.plex.com` exposes a curated subset of Plex +data. The older **Classic Web Services** at `plexonline.com` can access +virtually the entire Classic schema via Data Sources (parameterized +stored procedures). + +- **Endpoint:** `POST https://plexonline.com/Modules/Xmla/XmlDataSource.asmx` +- **WSDL:** append `?WSDL` to the endpoint URL +- **Auth:** Web Service User account (username/password + Company Code). + **NOT the Developer Portal Consumer Key** — completely separate credentials. +- **Format:** SOAP/XML (or JSON wrapper at `/api/datasources/{key}/execute`) +- **Licensing:** Included with base Plex subscription (no extra cost) +- **ID format:** Integer keys (not UUIDs — mapping needed if mixing with REST API) + +**What it can access that REST cannot:** + +| Capability | Classic Data Source | REST API status | +|---|---|---| +| Part Operations | `Part_Operation` tables | 4 fields, no FKs | +| Tool-to-operation assignments | Tool Assignment data sources | Does not exist | +| DCS / Attachments | `DCS_v2`, `Attachment_Group_Key` | 404 on all paths | +| Workcenter documents | Workcenter doc data sources | 11 identity fields only | +| Routing / op sequences | Routing data sources | 404 (`manufacturing/v1/routings`) | +| Supply item cross-refs | Full supply item schema | 7 identity fields only | + +**Status:** Access request pending. See +[`docs/Plex_Classic_API_Request.md`](./Plex_Classic_API_Request.md) +for the request document. + +--- + +## Fusion 360 JSON schema (key fields) + +Source file: BROTHER SPEEDIO ALUMINUM.json (28 entries, root "data" array) + +| Field | Maps to Plex | Notes | +|------------------------|-------------------------------------|------------------------------------| +| guid | External reference key | Use for dedup on re-sync | +| type | Item sub-category | Filter out "holder" and "probe" | +| description | Part description | | +| product-id | Part number | Vendor part number, key for PO link| +| vendor | Supplier (resolve to UUID first) | | +| post-process.number | Pocket / turret number | Critical for workcenter doc update | +| geometry.DC | Cutting diameter | | +| geometry.OAL | Overall length | | +| geometry.NOF | Number of flutes | | +| holder (object) | Assembly component / BOM link | | + +Tool type distribution in active library: +- flat end mill: 12 | holder: 6 | bull nose end mill: 4 | drill: 2 +- face mill: 1 | form mill: 1 | slot mill: 1 | probe: 1 + +Sync filter: include only `type != "holder" AND type != "probe"` + +--- + +## What's built + +### plex_api.py +- `PlexClient` base class with throttling (200 calls/min rate limit) +- Constructor takes `api_key`, `api_secret`, `tenant_id`, `use_test` +- All four config values read from environment variables via `bootstrap.py` + (`PLEX_API_KEY`, `PLEX_API_SECRET`, `PLEX_TENANT_ID`, `PLEX_USE_TEST`) +- `TENANT_ID` defaults to `GRACE_TENANT_ID` (production Grace) +- `USE_TEST` defaults to `False` (production is the only environment we have) +- `get()` returns parsed JSON or None (legacy) +- `get_envelope()` returns a structured envelope so callers can see HTTP errors +- Extraction helpers: `extract_purchase_orders`, `extract_parts`, `extract_workcenters` +- `discover_all()` endpoint probe utility + +### plex_diagnostics.py +- 
`list_tenants(client)` — GET /mdm/v1/tenants +- `get_tenant(client, id)` — GET /mdm/v1/tenants/{id} +- `tenant_whoami(client, configured_id)` — composite check that compares + visible tenants against `KNOWN_TENANTS` and returns a structured report + with `match` enum (`grace`, `g5`, `auth_failed`, `request_failed`, + `no_data`, `configured`, `other`). Run this first to verify tenant routing. + +### tool_library_loader.py +- `load_library(path)` — loads single .json, returns data array +- `load_all_libraries(directory)` — globs all .json files in CAMTools dir +- Stale file guard — aborts if files older than 25h (ADC sync stall detection) +- `PermissionError` and `JSONDecodeError` handling (ADC mid-sync file locks) +- `report_library_contents()` — diagnostic summary +- **Status:** local-ADC path only. `aps_client.py` is the primary source today; this loader is the fallback. Scheduled for removal under the GCP migration epic (#85). + +### aps_client.py (PR #43) +- OAuth client for Autodesk Platform Services (Fusion Hub online) +- Lists hub projects and downloads tool library JSON over HTTP +- Primary source for `sync.py`; removes the ADC install requirement on the runtime host + +### validate_library.py (PR #28, spec: `docs/validate_library_spec.md`) +- Pre-sync validation gate — FAIL aborts the sync; WARNs surface in verbose/debug +- Three entry points: CLI, programmatic (called from `tool_library_loader.load_library`), Flask `/api/fusion/validate` +- Library-level + per-tool rule tables, cached supplier lookup for vendor validation +- Source-agnostic engine — survives the APS migration with only a CLI default-path change + +### supabase_client.py + sync_supabase.py (PR #32) +- Dedicated Supabase project (`datum`, us-east-2): `libraries` / `tools` / `cutting_presets` +- `SUPABASE_URL` + `SUPABASE_SERVICE_ROLE_KEY` in `.env.local` — server-side only, never shipped to the browser +- Full tool record (geometry, holders, pockets) lives here; Plex gets the identity slice via `build_supply_item_payload` (#3) +- PR #34 dropped the `fusion2plex_` table prefix once DB isolation made it redundant +- Scheduled for replacement by Cloud SQL under the GCP migration epic (#85) + +### enrich.py (PR #48, wired in the sync pipeline via PR #54) +- Vendor reference catalog + geometry-based tool enrichment +- Runs before staging writes so enriched records land in Supabase directly + +### Plex staging pipeline (sprint PRs #82 / #84, issues #79 / #80 / #81) +- `plex_supply_items` staging table + payload computation +- Feeds the upsert path in #3 + +### sync.py + nightly deploy (PRs #44, #46, #47) +- `sync.py` CLI entrypoint, APS-first with local ADC fallback +- `--log-file` flag for persistent nightly logs (PR #46) +- Deployed to an always-on host; scheduled nightly at midnight (PR #47) + +### React UI (PR #41 + successive) +- Tool browser, library browser, Scripts page, last-sync indicator, qty columns +- Deployed to Cloudflare Workers Static Assets (PR #70) + +### bootstrap.py +- Loads `.env.local` (gitignored) into `os.environ` via `setdefault` + semantics — real shell env vars always win +- Imported at the very top of `plex_api.py` so credential reads happen + AFTER the file is loaded +- Tested in `tests/test_bootstrap.py` (16 tests) + +### app.py + templates/static +- Flask endpoint tester UI at http://localhost:5000 +- Left rail: Diagnostics (run first), Plex presets, Extractors, Fusion local +- Top: method selector + URL bar + query params + Send (Ctrl/Cmd+Enter) +- Tabbed response pane (Body / Headers / 
Raw), copy and clear, history
+- Env-chip in header shows TEST (amber) or **PROD (red)**, plus
+  **READ ONLY** / **WRITES ON** sub-pill
+- `/api/plex/raw` proxy lets the UI hit any Plex endpoint via PlexClient
+  without exposing credentials to the browser
+- **Production write guard** in proxy refuses POST/PUT/PATCH/DELETE
+  against `connect.plex.com` unless `PLEX_ALLOW_WRITES=1` is set
+- `/api/diagnostics/tenant` runs `tenant_whoami`
+- `/api/config` exposes non-secret config including `is_production` and
+  `writes_allowed`
+
+### Tests
+- `pytest` suite in `tests/`. CI on PRs to `master` via
+  `.github/workflows/test.yml`. Branch protection on master requires the
+  `pytest` check to pass before merge. Auto-merge enabled.
+- Currently 262 tests, all green (as of 2026-04-17).
+
+---
+
+## Immediate TODO (in priority order)
+
+All items below are mirrored as GitHub Issues — see
+https://github.com/grace-shane/Datum/issues for live status.
+
+### Done (historical record — kept for context)
+
+- ~~PlexClient constructor, api_secret header~~
+- ~~Find the real Plex tooling endpoint~~ — `inventory/v1/inventory-definitions/supply-items` with `category="Tools & Inserts"`; 1,109 records
+- ~~Read baseline tooling inventory from supply-items~~ — PR #21, issue #2
+- ~~`validate_library.py` pre-sync validation gate~~ — PR #28, issue #25
+- ~~Supabase staging layer (`libraries` / `tools` / `cutting_presets`)~~ — PR #32 + #34, issue #31
+- ~~APS cloud integration — no local Fusion install required~~ — PR #43
+- ~~Nightly sync CLI entrypoint + packaging~~ — PR #44, issue #9
+- ~~Deploy nightly sync to always-on host, scheduled at midnight~~ — PR #47, issues #10 / #11
+- ~~Plex API key rotation cadence established~~ — PR #33, issue #12 (next rotation 2026-05-08)
+- ~~Vendor reference catalog + geometry-based enrichment~~ — PR #48 / #54
+- ~~React UI scaffold + Cloudflare Workers deploy~~ — PRs #41 / #68 / #70
+- ~~Plex `plex_supply_items` staging table + qty sync~~ — PRs #77 / #78 / #82 / #84
+
+### Active / next
+
+1. `build_supply_item_payload(fusion_tool: dict) -> dict` — issue #3.
+   Reads from Supabase `tools`; maps to a supply-item POST body with
+   `category="Tools & Inserts"`, `group="Machining"`,
+   `supplyItemNumber=<vendor part number>`. Staging pipeline (PRs #82 / #84)
+   has landed the prerequisites.
+2. Match-and-upsert logic by `supplyItemNumber` — issue #3.
+   Decide POST (new) vs PUT (update existing) against the 1,109 current
+   supply-items. Writes require `PLEX_ALLOW_WRITES=1`.
+3. Core sync logic — upsert with `supplyItemNumber` dedup — issue #7.
+   Dry-run by default. Calls validate_library gate before every run.
+4. Error handling + logging on run failures — issue #8. `--log-file`
+   scaffold landed in PR #46; issue remains open for broader error paths.
+5. GCP migration (umbrella [#85](https://github.com/grace-shane/Datum/issues/85))
+   — see `docs/GCP_MIGRATION.md` for scope, affected code, and sequence.
+
+### Blocked on Plex Classic Web Services access
+
+6. Tool assemblies — issue #4. Classic `Part_Operation` Data Sources likely path.
+7. Routing / operation linkage — issue #5. Classic Part Operations +
+   tool assignments.
+8. Workcenter doc push — issue #6. Classic DCS_v2. REST workcenter endpoint
+   is 11 identity fields, no document/attachment sub-resources.
+
+Access request tracked in `docs/Plex_Classic_API_Request.md`. 
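+
+Once Classic access lands, step one is mechanical: pull the WSDL and read
+the Data Source operation and parameter names out of it before writing any
+SOAP. A minimal sketch (assumes nothing beyond the endpoint documented
+above; auth specifics come from the WSDL itself):
+
+```python
+import requests
+
+# Classic Web Services describe themselves: the WSDL lists every
+# operation and the shape of the auth and parameter elements.
+WSDL_URL = "https://plexonline.com/Modules/Xmla/XmlDataSource.asmx?WSDL"
+
+resp = requests.get(WSDL_URL, timeout=30)
+resp.raise_for_status()
+print(resp.text[:2000])  # read this before writing any SOAP envelope
+```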
+ +### Architectural decisions — #4 and #5 (updated 2026-04-10) + +**REST API verdict: blocked.** The `scheduling/v1/jobs` deep-dive +(114,684 records, 18 fields) confirmed zero tool/operation/workcenter +FKs. All 7 document/attachment probe paths returned 404. The Connect +REST API does not expose tool-to-operation relationships, DCS +attachments, or workcenter documents. + +**Classic Web Services: viable path.** The older SOAP API at +`plexonline.com/Modules/Xmla/XmlDataSource.asmx` wraps Data Sources +(parameterized stored procs) that CAN access Part Operations, tool +assignments, DCS attachments, and routing data. Requires separate +credentials (Web Service User account + Company Code, NOT the Developer +Portal Consumer Key). Access request doc: +[`docs/Plex_Classic_API_Request.md`](./Plex_Classic_API_Request.md). + +- **#4 — Tool Assemblies**: Blocked on REST API. Classic Web Services + may expose assembly relationships via Part Operation Data Sources. + Pending Classic API access. +- **#5 — Routing/Operation linkage**: Blocked on REST API. Classic Web + Services can access Part Operations and tool assignments. Pending + Classic API access. +- **#6 — Workcenter doc push**: REST workcenter has 11 identity fields + only, no document/attachment sub-resources. Classic DCS_v2 is the + path. Pending Classic API access. + +--- + +## Gotchas — read before touching anything + +- **EVERY READ HITS PRODUCTION DATA.** There is no test environment for the + Datum app. Be conscious of rate limits (200/min) and response sizes + (`mdm/v1/parts` is 19.6 MB unfiltered). +- **Writes are blocked at the proxy by default** (PR #17). To enable: + `PLEX_ALLOW_WRITES=1` env var. Unset it as soon as you're done. +- **`mdm/v1/parts` and `purchasing/v1/purchase-orders` IGNORE the `limit` + query param** — empirically verified. `?limit=1` returns the entire + database (19.6 MB and 44 MB respectively). Always include a real filter + like `status=Active` and a date range. +- **`PLEX_API_KEY` / `PLEX_API_SECRET` come from `.env.local`** via + `bootstrap.py`. A real shell env var with the same name will OVERRIDE + `.env.local` via `setdefault` semantics — clear stale shell vars if you + have them. (See History §1 for the painful version of this lesson.) +- **The previously hardcoded API key (`k3SmLW3y…`) is dead.** It's in git + history but no longer authenticates anywhere. +- **Plex returns 401 `REQUEST_NOT_AUTHENTICATED` for both bad credentials + AND endpoints under unsubscribed API products.** The only way to tell + them apart is to compare across multiple endpoints AND against a + known-good client like Insomnia. See History §2. +- **`l` (lowercase L) and `I` (uppercase i) are visually identical in many + fonts.** When reading credentials from images, treat them as ambiguous. + Always paste credentials as text, never read them from a screenshot. + See History §1. +- **Visible categories in the dev portal ≠ URL prefixes.** "Common APIs, + Platform APIs, Standalone MES, IIoT" don't 1:1 map to `mdm/`, `purchasing/`, + `tooling/` etc. The mapping is opaque. 
+- supplierId in responses is a UUID, not a supplier code (MSC != "MSC001") +- URL-encode spaces in filter strings (`MRO SUPPLIES` -> `MRO%20SUPPLIES`) +- API key must be in header — URL parameter returns 401 +- PowerShell: use `Invoke-RestMethod`, not `curl` (alias doesn't pass headers) +- Fusion Tool objects from CAM API are copies, not references +- ADC stale file guard will abort sync if network share files are > 25h old +- `BROTHER SPEEDIO ALUMINUM.json` is committed to repo for reference only — + sync script must always read from network share, not this file + +--- + +## DNC / machine connections (for future NC program push work) + +| Machine | Protocol | Address | +|----------------------|----------------|-----------------------------| +| Brother Speedio 879 | FTP | 192.168.25.79 | +| Brother Speedio 880 | FTP | 192.168.25.80 | +| Citizen / Tsugami | RS-232 → TCP | Moxa NPort 5150/5250 | +| Haas VMCs | Ethernet | Sigma 5 native | + +--- + +## History of incorrect hypotheses + +This is a postmortem of four wrong turns this project took, written here +so the next agent (or future-me) doesn't repeat them. All four trace back +to one root cause: I misread an API key from a screenshot. + +### §1 — The I-vs-l misread (root cause of everything below) + +When the user shared a screenshot of the Fusion2Plex Consumer Key from the +Plex Developer Portal, I read the 9th character as `I` (uppercase i) when +it was actually `l` (lowercase L). In most fonts these are visually +indistinguishable. I wrote `AEiK3tYoIfA15wt3x3t0qmILFGAG2NkK` into +`.env.local` instead of the correct `AEiK3tYolfA15wt3x3t0qmILFGAG2NkK`. + +Plex's gateway is case-sensitive on the key value, so it returned 401 +`REQUEST_NOT_AUTHENTICATED` for everything. That's an entirely generic +"bad credentials" response. From the outside, it looked exactly like a +subscription problem or a tenant scoping problem. + +**Lesson**: never read credentials from images. Always have the user paste +the value as text, or use Insomnia "Generate Code" output as ground truth. + +### §2 — The "tenant routing" / "subscription" / "more subscription" cycle + +Driven by the 401s from §1, I cycled through three wrong hypotheses about +why endpoints were failing: + +- **Hypothesis A** (initial): "Tooling endpoints return 403 because IT + hasn't enabled the Tooling API collection in the dev portal" — sourced + from the original `Plex_API_Reference.md` written by the previous + developer. **Plausible but unverified.** +- **Hypothesis B** (my correction in PR #16): "Actually it's tenant + scoping, not subscription. The 403s will resolve once Courtney completes + tenant routing." — based on a misread of BRIEFING. **Wrong.** +- **Hypothesis C** (my second correction): "Actually the Plex_API_Reference + was right, it IS per-product subscription. The Datum app needs more + product approvals." — based on testing with the wrong key. **Also wrong.** + +The actual answer was: **the key value was wrong.** Once the right key +was loaded, every endpoint that was supposedly "blocked" started returning +200. There was no subscription problem and no tenant routing problem. +The whole investigation was an artifact of one character. + +**Lesson**: when you have a confusing 401 that resists every hypothesis, +the most likely explanation is that the credential value is wrong, even +if you "verified" it. Verify against a known-good client first. 
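+
+A cheap guard against this whole class of failure: print a fingerprint of
+the key that is actually loaded before trusting any 401 diagnosis. A sketch
+of that check (hypothetical helper, not yet in `plex_diagnostics.py`):
+
+```python
+import os
+
+def key_fingerprint() -> str:
+    """First 8 chars plus length: enough to compare against the portal
+    value without ever printing the full secret."""
+    key = os.environ.get("PLEX_API_KEY", "")
+    return f"{key[:8]}... (len={len(key)})" if key else "<no key loaded>"
+
+# Compare against the Consumer Key in the Developer Portal (pasted as
+# text, never read from a screenshot) before chasing other hypotheses.
+print(key_fingerprint())
+```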
+ +### §3 — Tooling/manufacturing/production-control 404s + +After fixing the key, the working endpoints (`mdm/`, `purchasing/`) all +returned 200. But `tooling/v1/tools`, `manufacturing/v1/operations`, and +`production/v1/control/workcenters` returned 404 `RESOURCE_NOT_FOUND`. + +These exact paths were in the original `Plex_API_Reference.md` and worked +for the previous developer with their old credentials on the test +environment. They don't work for the Datum app on production. + +There are three possible explanations and we don't yet know which: +- The URL patterns are different in this product set +- Those endpoints aren't included in the Fusion2Plex app's product subscriptions +- The previous developer was on a fundamentally different Plex deployment + +**Status**: unresolved. The user will need to share a working Insomnia +URL for one of those endpoints to make progress. Issues #4, #5, #6 +remain blocked on this. + +### §4 — The stale shell env var + +While debugging §1, I wasted ~45 minutes because the user's shell had a +DIFFERENT, also-invalid `PLEX_API_KEY` set as a User-level Windows +environment variable in `HKCU\Environment`. Even when `.env.local` had the +correct value, `bootstrap.setdefault()` correctly refused to override the +shell value, and Flask kept using the wrong key. + +The user's stale value was `uP4G8xgHdkoCFcJ00LPgfB5KYILsfdt6` — origin +unknown. Probably set via `setx` or System Properties at some earlier +point in the project's life. + +**Lesson**: the very first thing `tenant_whoami` should do is print which +key value (first 8 chars + length + first-source — env var or .env.local) +is being used. We should also probably make `bootstrap.py` log when +`.env.local` is being shadowed by an existing env var. + +--- + +## Session log + +Reverse chronological. Each entry: what was the goal, what landed, what's left. + +### 2026-04-09 — Postman buildout + connectivity sweep + supply-item cross-ref finding + +**Goal:** Build out the Postman collections to full known scope, then actually run a connectivity sweep to stop hedging about "verified vs unverified" and get ground truth on every endpoint. + +**Done:** + +- **Postman collections expanded** (PR #38): Plex collection 12 → 33 requests + `[SCHED] List Jobs` + 2 new `[PROBE]` entries = 36 total. Fusion collection 10 → 14 requests. Organized via `[NS]` name prefixes (Postman MCP minimal tier has no folder creation). New `docs/Postman_Collections.md` as the day-to-day reference. +- **Connectivity sweep** (23 requests, 2026-04-09): 18/23 returned 200, 5/23 returned 404, **zero 401s**. Clean ground truth on the full subscription scope. +- **Get-by-ID chain test** (6 requests): all 6 per-id endpoints work. **Every per-id view returns exactly the same fields as the list view** — no hidden detail on any resource. +- **Fresh record counts captured** for every list endpoint (see §3 table above). +- **Full field schemas captured** for every resource (see Plex_API_Reference.md §3). +- **New endpoint discovered:** `scheduling/v1/jobs` returns 200 (15.8s response — large body, schema TBD). Potentially relevant to issue #5. +- **Critical architectural finding:** `inventory/v1/inventory-definitions/supply-items` has **NO cross-references to any other resource**. Supply-items are identity-only — Plex does not model tool→vendor, tool→location, tool→part, tool→workcenter, or tool→operation. 
This resolves the question the user asked about "how do we get the supplier from a supply-item": **you can't, not from Plex alone.** Vendor data has to live in Supabase as the source of truth. (Also killed the "use PO lines as a back-channel" hypothesis — `purchasing/v1/purchase-orders-lines` returns 404.) +- **Filter no-op confirmed on POs:** unfiltered and `?updatedAfter=2025-01-01` both returned byte-identical 44.2 MB responses. The filter is silently ignored (same behavior as `?limit=N`). +- **Postman descriptions updated** for all 23 sweep endpoints + the 6 per-id endpoints + the 5 `[PROBE]` entries. Historical dates preserved (2026-04-07 initial / 2026-04-09 re-verification). + +**Key facts for the next session:** + +- The legacy `PLEX_API_KEY=uP4G...` stale shell env var is **still set** in `HKCU\Environment`. It shadows `.env.local` due to `bootstrap.setdefault()` semantics. User cannot permanently unset it in this environment — every session must `unset PLEX_API_KEY && export PLEX_API_KEY=''` before running anything that hits Plex. Document this as a project foot-gun (it's already in History §4 but the env var never got cleaned up). +- This worktree (`charming-hamilton`) does **not** have a `.env.local`. Per the `Bootstrap.py worktree foot-gun` memory, every worktree needs its own until issue #36 lands. +- The `scheduling/v1/jobs` endpoint is the highest-value follow-up — if its records carry tool references, we get the operation→tool mapping that issue #5 is blocked on without needing the `manufacturing/v1/routings` endpoint to ever become available. + +**What's left:** + +1. Deep-dive `scheduling/v1/jobs` — pull once, sample shape, document fields. Look specifically for `toolId`, `supplyItemId`, `workcenterId`, `operationId` references. +2. Issue #3 — `build_supply_item_payload` reading from Supabase `tools` table, now with full confidence that Plex never needs to know about vendors. +3. Issue #5 / #4 — architectural decisions remain blocked on product questions (not code questions). +4. Clean up the stale shell `PLEX_API_KEY=uP4G...` when the user gets admin access. + +### 2026-04-09 — project rename + key rotation + +**Goal:** Give the project a real name before it grows further. + +**Done:** +- Repo renamed `grace-shane/plex-api` → `grace-shane/datum` (GitHub preserves old URL redirects) +- Plex Developer Portal app renamed `Fusion2Plex` → `Datum` +- New Consumer Key issued and loaded into `.env.local` +- All docs updated: README, CLAUDE.md, TODO.md, docs/BRIEFING.md +- Issue #12 (key rotation) closed + +**Next session** (unchanged priority order): +1. Issue #25 — implement `validate_library.py` per `docs/validate_library_spec.md` +2. Issue #3 — `build_supply_item_payload` + match-and-upsert +3. Architectural decisions on #4, #5 +4. 
Issue #6 — workcenter write support + +### 2026-04-08 — docs reorg + validate_library spec + drift cleanup + +**Started with:** +- All long-form docs (BRIEFING, Plex_API_Reference, Fusion360_Tool_Library_Reference) sitting in the repo root alongside source code +- No design spec for the pre-sync validation gate — the need for one was implicit in #3 and #7 but nothing was written down +- Content drift across docs: architecture diagram in BRIEFING still showed discredited endpoints (`mdm/v1/parts`, `tooling/v1/tool-assemblies`, `production/v1/control/workcenters`), test count frozen at "119+", `docs/Plex_API_Reference.md` Section 4 Target State still pointed at `tooling/v1/tool-assemblies`, line 5 referenced `plexonline.com` (classic UI, not the REST gateway), TODO.md Phase 3 item #1 still `[ ]` despite PR #21 having closed #2 +- User had untracked `data/` (Fusion API reference PDFs, ~10 MB) and `outputs/` (CSV extractor snapshot, 154 KB) in the main workspace +- A fresh `docs/validate_library_spec.md` (455 lines) written locally but not yet committed + +**Ended with:** +- `docs/` folder created. `BRIEFING.md`, `Plex_API_Reference.md`, `Fusion360_Tool_Library_Reference.md` all moved. Git detected them as 100% renames, so history is preserved — `git log --follow docs/BRIEFING.md` still works. +- `docs/validate_library_spec.md` committed — full design spec for the `validate_library.py` pre-sync validation gate. Three entry points (CLI, programmatic hook in `tool_library_loader`, Flask `/api/fusion/validate`), full library-level + per-tool rule tables, supplier lookup strategy with closest-3 edit-distance hint in debug mode, integration hooks. +- **GitHub issue #25 opened** — `feat: implement validate_library.py pre-sync validation gate`. Blocks #3 and #7. Spec backfilled with the real issue number (was `#XX`). +- `.gitignore` additions: `data/`, `outputs/`, `.claude/worktrees/` +- All 6 drift items fixed (test count, architecture diagram, plexonline, Target State rewrite, TODO checkbox, spec issue number) +- README.md + CLAUDE.md link paths updated to the new `./docs/` prefix +- 156 tests still green — no code changes this session, docs-only + +**Pull requests merged this session** (newest first): +- #26 docs: fix stale content drift in BRIEFING, Plex_API_Reference, TODO, spec +- #24 docs: move long-form docs into `docs/`, add validate_library spec, gitignore large dirs + +**GitHub issues opened:** +- **#25** feat: implement `validate_library.py` pre-sync validation gate — blocks #3 and #7 + +**What's left to do next session** (in order): +1. **Issue #25** — implement `validate_library.py` per `docs/validate_library_spec.md`. This is now the highest-priority item since it gates the upsert work. Expect: new module + CLI + Flask routes + loader hook + ~30 pytest cases covering every Rule ID. +2. **Issue #3** — `build_supply_item_payload(fusion_tool)` + match-and-upsert logic, with the validate_library gate called first. +3. **Architectural decisions on #4, #5** — still blocked on a product question, not a code question. +4. **Issue #6** — workcenter doc write support (carefully, with `PLEX_ALLOW_WRITES=1` set deliberately). +5. **Issue #12** — key rotation deadline 2026-05-08. + +**Lessons** (follow-ups to "History of incorrect hypotheses" if anything goes sideways the same way): + +6. **Worktree gotcha — the painful one this session.** I burned ~30 minutes and a lot of tokens looking for a `docs/` folder the user said they'd added. 
I kept running `ls` and `git status` from a worktree at `.claude/worktrees/naughty-khayyam/`, not the main workspace at `C:/projects/plex-api/`. Worktrees share the `.git` directory (via `.git` file pointer) but have independent working trees — any new files the user creates in the main workspace are invisible to worktree `ls`. **Rule: when the user says "I added X locally" or "I moved stuff around", the first command is `cd "C:/projects/plex-api" && git status` in the main workspace, not the worktree.** Don't trust the worktree's view of the filesystem for anything the user did in File Explorer. +7. **Git rename detection is automatic.** The user moved files with File Explorer before I got there. Git saw them as deletes + untracked adds. Running `git rm` on the old paths and `git add` on the new paths in the **same commit** lets git's diff-rename detection catch them as 100% renames, preserving history. No special `git mv` step is needed — git is smart about this at commit time, not at stage time. The PR showed them as `rename BRIEFING.md => docs/BRIEFING.md (100%)` without any extra ceremony. +8. **Open issues before writing specs that reference them.** The validate_library spec had `#XX` placeholders for the implementation issue. Cleaner workflow: open the issue first, get the real number, then write the spec with the real number baked in. Otherwise you end up with a two-step commit (add spec with `#XX`, then a follow-up PR to backfill `#25`). +9. **Always re-run `git status` from the correct cwd after a worktree operation.** The shell in the Claude harness runs each Bash command with the cwd reset to the worktree root — which means `cd` inside a Bash call is ephemeral. Chain commands with `&&` when the later ones need to see the earlier `cd` effect. Every `Bash(cd X && git foo)` reminds you of this. 
+
+---
+
+### 2026-04-07 — full project bootstrap + Phase 3 read side
+
+**Started with:**
+- Hardcoded API key in `plex_api.py` (still in git history)
+- Old "gradient/glass dashboard" UI
+- TODO.md as the only project tracker
+- No tests, no CI, no .env.local concept
+- BRIEFING claiming tenant routing was the IT blocker
+
+**Ended with:**
+- 11 PRs merged, all via auto-merge after CI passes
+- 156 pytest tests, all green, branch protection enforces them on master
+- `.env.local` loader (`bootstrap.py`) + dev override (`run_dev.py`)
+- Production write guard (`/api/plex/raw` refuses POST/PUT/PATCH/DELETE
+  unless `PLEX_ALLOW_WRITES=1`)
+- Verified working credentials (`Fusion2Plex` Consumer Key) on
+  production with the real Grace tenant `58f781ba-...`
+- **Issue #2 closed** — `extract_supply_items()` returns 1,109
+  cutting tools and inserts from
+  `inventory/v1/inventory-definitions/supply-items` in 1.4s
+- Brother Speedio mapping verified — workcenters 879/880 = FTP IPs
+  192.168.25.79/.80
+- BRIEFING + Plex_API_Reference + TODO all rewritten to match
+  empirical reality (with the "History of incorrect hypotheses"
+  postmortem above documenting four wrong turns)
+
+**Pull requests merged this session** (newest first):
+- #22 fix: stdout UTF-8 reconfigure + ASCII arrows
+- #21 feat: extract_supply_items + Fusion testing-harness endpoints (closes #2)
+- #20 docs: Plex tooling lives in inventory/v1/inventory-definitions/supply-items
+- #19 feat: run_dev.py local launcher
+- #18 feat: migrate to PROD Plex environment + verified Grace tenant
+- #17 feat: production write guard at the proxy
+- #16 docs: correct subscription-not-tenant hypothesis (later corrected by #20)
+- #15 fix: surface HTTP errors instead of swallowing them as None
+- #14 feat: .env.local loader + Claude Preview launch config
+- #13 Endpoint tester UI, tenant diagnostics, env-var credentials, GH issue tracking
+
+**What's left to do tomorrow** (in order):
+1. **Issue #3** — `build_supply_item_payload(fusion_tool)` writing to
+   `inventory/v1/inventory-definitions/supply-items`. We have the verified
+   read path and 1,109 records to learn the schema from.
+2. **Architectural decision on issues #4 and #5** — descope or pivot.
+   Both are blocked on a real product question, not a code question.
+3. **Issue #6** — probe write support on workcenters (carefully, with
+   `PLEX_ALLOW_WRITES=1` enabled deliberately).
+4. **Issue #12** — key rotation deadline 2026-05-08.
+
+**Lessons** (additions to "History of incorrect hypotheses" if any
+session goes sideways the same way again):
+1. Never read credentials from images. Always have the user paste
+   them as text or via Insomnia "Generate Code" output.
+2. Status codes from Plex are misleading on their own. 401 means
+   "bad creds OR unsubscribed product"; 404 means "wrong URL OR
+   unsubscribed namespace". Compare across endpoints to disambiguate.
+3. Plex's URL convention is `<namespace>/v1/<namespace>-definitions/<resource>`
+   for definition data — not the bare `<namespace>/v1/<resource>` we
+   kept guessing. Tools live at `inventory/v1/inventory-definitions/supply-items`,
+   not `tooling/v1/tools` or `mdm/v1/parts`.
+4. Server-side filters on Plex endpoints are mostly silently ignored.
+   `?limit=1` on `mdm/v1/parts` returns 19.6 MB. Filter client-side.
+5. pytest's `capsys` uses UTF-8, so stdout encoding bugs only show
+   up under live Flask. Add `sys.stdout.reconfigure(encoding="utf-8")`
+   at the top of any process whose stdout might end up captured by
+   Flask request handlers on Windows.
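+
+Lesson 5 as a copy-paste block (the `hasattr` check is a defensive
+assumption; `reconfigure()` exists on normal `TextIOWrapper` streams but
+not on every stdout replacement):
+
+```python
+import sys
+
+# Windows consoles and Flask-captured stdouts can default to cp1252;
+# force UTF-8 before anything prints non-ASCII.
+if hasattr(sys.stdout, "reconfigure"):
+    sys.stdout.reconfigure(encoding="utf-8")
+```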
diff --git a/Fusion360_Tool_Library_Reference.md b/docs/Fusion360_Tool_Library_Reference.md similarity index 100% rename from Fusion360_Tool_Library_Reference.md rename to docs/Fusion360_Tool_Library_Reference.md diff --git a/docs/GCP_MIGRATION.md b/docs/GCP_MIGRATION.md new file mode 100644 index 0000000..3e70947 --- /dev/null +++ b/docs/GCP_MIGRATION.md @@ -0,0 +1,272 @@ +# GCP Migration — Datum + +**Status:** Planning (2026-04-17). No code changes in the session that wrote this doc. +**Umbrella issue:** [#85](https://github.com/grace-shane/Datum/issues/85) +**Next-session prompts:** see [`NEXT_SESSION.md`](./NEXT_SESSION.md) for canned +prompts covering Cloud Scheduler start/stop and the Supabase → Cloud SQL DB +migration. + +This document captures the agreed architecture for moving Datum off Supabase + +Autodesk Desktop Connector (ADC) + the locked-down work machine, and onto GCP + +the Autodesk Platform Services (APS) HTTP API. Read this together with +[`BRIEFING.md`](./BRIEFING.md) for project context and +[`validate_library_spec.md`](./validate_library_spec.md) for the pre-sync gate +(the validation engine itself is source-agnostic and survives the migration +unchanged — only the CLI entry point that walks `CAMTools` needs to change). + +--- + +## Why migrate + +Three forcing functions collided in April 2026: + +1. **Dev machine at Grace is locked down.** New tooling can't be installed, + long-lived dev servers are awkward, `.env.local` churn is painful. A + persistent cloud dev environment (`datum-dev`) solves this. +2. **Supabase is a stopgap.** It earned its keep during Phase A, but it adds a + vendor we don't need once GCP is the deploy surface. Cloud SQL gives us one + infra control plane. +3. **APS removes the ADC dependency.** Autodesk Platform Services exposes Fusion + Hub tool libraries over HTTP. That kills the "ADC stall for >25h" failure + mode in `tool_library_loader.py:34`, the "file locked mid-sync" error path + in `tool_library_loader.py:104`, and the whole CAMTools network share as a + moving part. `aps_client.py` already exists in the tree — partial + implementation from earlier Fusion-cloud work. + +--- + +## Target architecture + +``` + Autodesk Hub (APS) + │ HTTP (OAuth, refresh via Secret Manager) + ▼ + ┌─────────────────┐ ┌─────────────────────┐ + │ datum-runtime │ ──────▶ │ Cloud SQL │ + │ e2-micro │ │ Postgres │ + │ always-on │ │ db-f1-micro │ + │ us-central1 │ │ libraries / tools │ + │ (sync + API) │ │ cutting_presets │ + └─────────────────┘ └─────────────────────┘ + │ Secret Manager: PLEX_API_KEY, PLEX_API_SECRET, + │ DB URL, APS client creds + ▼ + Plex connect.plex.com — identity slice → supply-items + + Cloudflare (datum.graceops.dev) ──▶ Flask on runtime VM ──▶ React UI + + ┌─────────────────┐ + │ datum-dev │ Cloud Scheduler: start 07:00 CT / stop 19:00 CT (Mon–Fri) + │ e2-standard-2 │ + │ Ubuntu 22.04 │ SSH target for VS Code Remote / Claude Code + └─────────────────┘ +``` + +--- + +## VM topology + +| VM | Purpose | Machine type | OS | Runtime model | +|---|---|---|---|---| +| `datum-dev` | Cloud dev environment — replaces the locked-down work machine. SSH target for VS Code Remote / Claude Code. | `e2-standard-2` | Ubuntu 22.04 | Business hours only. Cloud Scheduler start 07:00 CT / stop 19:00 CT, Mon–Fri. Off weekends and evenings. See [Cost — `datum-dev` weekday-only schedule](#cost--datum-dev-weekday-only-schedule) for the math. 
| +| `datum-runtime` | Nightly sync cron + Flask API surface for the React UI | `e2-micro` | Ubuntu 22.04 | Always-on in `us-central1` (free tier) | + +### Why split them + +`datum-dev` needs enough RAM/CPU to run VS Code Remote, pytest, and Claude Code +comfortably — but only during work hours. Keeping it on 24/7 wastes money and +lets state rot (nightly shutdowns force us to keep env setup scripted). +`datum-runtime` only needs to call APS once a night and serve a light Flask API, +so the free `e2-micro` fits. Keeping them separate means a dev-side crash, +reboot, or upgrade can't take the nightly sync down. + +--- + +## Service mapping + +| Today | After migration | Notes | +|---|---|---| +| Supabase (`datum` project, us-east-2) | Cloud SQL `db-f1-micro`, us-central1 | Same Postgres schema and bare table names (`libraries` / `tools` / `cutting_presets`) — Supabase is on its own DB, so no prefix is needed for isolation | +| Autodesk Desktop Connector + CAMTools network share | Autodesk Platform Services (APS) HTTP API | Removes 25h stale-file guard, mid-sync file-lock handling, and per-machine ADC install requirement. `aps_client.py` is already partially wired. | +| Windows Task Scheduler (planned, never built) | Cloud Scheduler → systemd timer on `datum-runtime` (or Cloud Run Job) | Cloud Scheduler also drives the dev VM start/stop | +| `.env.local` loaded by `bootstrap.py` | Secret Manager on both VMs, fallback `.env.local` for local dev | Never commit Secrets; `bootstrap.py` gains a Secret Manager loader path | +| Work machine (locked down) | `datum-dev` VM + Cloudflare DNS | `datum.graceops.dev` serves the React UI over TLS | +| Supabase service-role key (server-side only) | Cloud SQL connection via Cloud SQL Auth Proxy / IAM | Same "never ship to browser" model; Flask holds the connection server-side | +| (none — Phase 5 was never deployed) | Cloudflare in front of Cloud Run or runtime VM | `datum.graceops.dev` | + +**Not in scope:** Firebase, Cloud Data Connect, Firestore. Cloud SQL is the only +database. The React UI reads through Flask, not directly. + +--- + +## Data flow (ADC-free) + +1. Cloud Scheduler fires nightly at midnight CT → triggers the sync unit on + `datum-runtime`. +2. `sync.py` authenticates to APS using the OAuth client credentials stored in + Secret Manager, lists Fusion Hub projects, and downloads tool library JSON + over HTTP. +3. `validate_library.py` gates each library per + [`validate_library_spec.md`](./validate_library_spec.md). FAIL aborts that + library; PASS continues. +4. Upsert into Cloud SQL `libraries` / `tools` / `cutting_presets` via the new + DB client. +5. `build_supply_item_payload` (issue #3) reads the `tools` table and pushes + the identity slice (vendor part #, description) to Plex + `inventory/v1/inventory-definitions/supply-items`. +6. Flask (`app.py`) serves `/api/*` and the React UI from `datum-runtime`. + Cloudflare terminates TLS at `datum.graceops.dev` and proxies to the VM. + +The production write guard (`PLEX_ALLOW_WRITES=1`, PR #17) still applies. +Default OFF in the VM boot env; the systemd sync unit sets it just for the +invocation window. 
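+
+For step 5, the identity slice is small enough to sketch. This is the
+shape `build_supply_item_payload` is expected to produce (issue #3 is
+still open, so the `tools` column names and the two constants marked
+below are assumptions, not the landed implementation):
+
+```python
+def build_supply_item_payload(tool: dict) -> dict:
+    """Map one `tools` row to a Plex supply-item POST body (issue #3).
+
+    Only the identity slice goes to Plex; geometry, holders, and
+    presets stay in the tools table.
+    """
+    return {
+        "supplyItemNumber": tool["product_id"],  # vendor part number = dedup key
+        "description": tool["description"],
+        "category": "Tools & Inserts",
+        "group": "Machining",
+        "inventoryUnit": "Ea",   # assumption: matches existing Plex records
+        "type": "SUPPLY",        # assumption: matches existing Plex records
+    }
+```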
+ +--- + +## Credentials — Secret Manager layout + +| Secret name | Contents | Consumers | +|---|---|---| +| `plex-api-key` | Datum Consumer Key (rotates every 31 days, next 2026-05-08) | `datum-runtime` (sync), `datum-dev` (optional) | +| `plex-api-secret` | Datum Consumer Secret (currently optional — reserved for future) | same | +| `plex-tenant-id` | Grace tenant UUID (`58f781ba-…`) — not actually secret, but convenient | same | +| `db-url` | Cloud SQL Postgres connection string | `datum-runtime`, `datum-dev` | +| `aps-client-id`, `aps-client-secret` | Autodesk Platform Services app credentials | `datum-runtime` | +| `aps-refresh-token` | APS OAuth refresh token (rotated by the sync runner) | `datum-runtime` | + +### IAM split + +- `datum-runtime` service account — `Secret Accessor` on all of the above, + `Cloud SQL Client` on the runtime DB. +- `datum-dev` service account — `Secret Accessor` on everything except + `aps-refresh-token` (the runner owns token rotation; dev shouldn't contend). + `Cloud SQL Client` on the runtime DB for read/write access during dev. + +### `bootstrap.py` behavior + +Add a `USE_SECRET_MANAGER=1` path that pulls secrets at process start via +`google-cloud-secret-manager`. `setdefault` semantics are preserved — a real +shell env var still wins (lesson from BRIEFING.md History §4, the stale +`PLEX_API_KEY` Windows env var that shadowed `.env.local`). Local dev falls +back to `.env.local` exactly as today. + +--- + +## Affected code (change-surface map) + +**No edits in the planning session. This is the enumeration that future +implementation PRs will work through.** + +### ADC / local-filesystem removal + +| File | Change | Reason | +|---|---|---| +| [`tool_library_loader.py`](../tool_library_loader.py) | Replace with an APS-backed loader or refactor to a source-agnostic interface with an APS adapter. The 25h stale-file guard becomes an "APS response freshness" check or is retired entirely. | Core ADC reader: `_DC_REL_PATH = DC\Fusion\XWERKS\Assets\CAMTools`, `load_library`, `load_all_libraries`, `_check_file_age`, `report_library_contents`. Every consumer path flows through here. | +| [`sync.py`](../sync.py) | Delete the "local ADC fallback" branch (~lines 265–420). APS becomes the only source. `--local-adc` flag becomes dead code. | File header currently reads "APS cloud-first, local ADC fallback" — the fallback is the thing we're removing. | +| [`app.py`](../app.py) | `/api/fusion/validate` GET currently walks `CAM_TOOLS_DIR`; switch to APS. `/api/aps/*` OAuth routes stay (they were built for this). `/api/fusion/libraries` GET likewise. | Flask endpoints that back the React library browser. | +| [`validate_library.py`](../validate_library.py) | CLI default resolves a CAMTools dir; switch to APS listing, or require an explicit `--file` / `--hub-project`. Engine itself is unchanged — the spec stays valid. | `CAM_TOOLS_DIR` references around line 996. | +| [`aps_client.py`](../aps_client.py) | Audit for completeness against the new required scope. Add refresh-token rotation writing back to Secret Manager. | Already partially implemented — reused, not rewritten. | +| [`tests/test_tool_library_loader.py`](../tests/test_tool_library_loader.py), [`tests/test_sync.py`](../tests/test_sync.py), [`tests/test_validate_library.py`](../tests/test_validate_library.py), [`tests/test_app_routes.py`](../tests/test_app_routes.py) | Replace filesystem fixtures with APS-response fixtures (mocked HTTP). | Follow the production code. 
| + +### Supabase → Cloud SQL + +| File | Change | Reason | +|---|---|---| +| [`supabase_client.py`](../supabase_client.py) | Replace with `db_client.py` (psycopg / SQLAlchemy against Cloud SQL). Preserve the public surface so call sites change by import only. | Single point of change if the adapter is clean. | +| [`sync_supabase.py`](../sync_supabase.py) | Rename → `sync_db.py` (or keep name, just swap client). Switch to new client. | | +| [`sync_tool_inventory.py`](../sync_tool_inventory.py), [`populate_supply_items.py`](../populate_supply_items.py), [`ingest_reference.py`](../ingest_reference.py), [`enrich.py`](../enrich.py), [`scripts/load_sample.py`](../scripts/load_sample.py) | Swap `from supabase_client import …` for new DB client import. | All currently depend on `supabase_client`. | +| [`app.py`](../app.py) | Swap Supabase reads for DB client reads on every Flask route that hits the DB. | React UI back-end. | +| [`tests/test_supabase_client.py`](../tests/test_supabase_client.py), [`tests/conftest.py`](../tests/conftest.py), [`tests/test_sync_supabase.py`](../tests/test_sync_supabase.py), [`tests/test_sync_tool_inventory.py`](../tests/test_sync_tool_inventory.py), [`tests/test_populate_supply_items.py`](../tests/test_populate_supply_items.py), [`tests/test_ingest_reference.py`](../tests/test_ingest_reference.py), [`tests/test_enrich.py`](../tests/test_enrich.py), [`tests/test_sync.py`](../tests/test_sync.py) | Point fixtures at a local Postgres (docker) or SQLAlchemy fake; drop the Supabase REST mocks. | Follow the production code. | + +### Credentials / secrets + +| File | Change | Reason | +|---|---|---| +| [`bootstrap.py`](../bootstrap.py) | Add Secret Manager loader path behind `USE_SECRET_MANAGER=1`. Keep `.env.local` fallback. Preserve `setdefault` semantics (shell env wins — see BRIEFING History §4). | Entry point for every credential read. | +| [`plex_api.py`](../plex_api.py) | No change — reads env vars which `bootstrap.py` populates. | Transparent to the API layer. | + +### Docs needing follow-up edits (not in this doc) + +- `CLAUDE.md` entry #7 (Supabase staging layer) — repoint at Cloud SQL +- `README.md` — status table, architecture diagram, "Why the pivot" paragraph +- `docs/BRIEFING.md` — "Current situation" block, architecture diagram, Notion link to schema page +- `docs/validate_library_spec.md` — any language mentioning "ADC share" or the network-share GET path + +--- + +## Migration sequence (suggested) + +1. **Provision GCP** — project, `datum-dev`, `datum-runtime`, Cloud SQL, + Secret Manager entries. No application code yet. +2. **Apply schema to Cloud SQL** with bare table names (`libraries` / `tools` + / `cutting_presets`) — matches the current Supabase schema post-PR #34. +3. **`bootstrap.py` Secret Manager path** — additive change, can land before + anything else is wired up; it's a no-op until `USE_SECRET_MANAGER=1` is set. +4. **`db_client.py`** — new module, drop-in for `supabase_client`. Land behind + a feature flag (`USE_CLOUD_SQL=1`), dual-read/dual-write if useful, then flip. +5. **APS-only loader** — replace/refactor `tool_library_loader.py`, gut the + local-ADC branch in `sync.py`, update Flask routes. Tests follow. +6. **Cloud Scheduler wiring** — nightly sync cron + dev VM start/stop schedules. +7. **Cloudflare DNS** — `datum.graceops.dev` → runtime VM (or Cloud Run if we + promote the Flask app off the VM; defer that decision). +8. 
**Decom** — remove Supabase project, strip ADC references from CLAUDE.md, + BRIEFING, README, validate_library_spec. + +--- + +## Open questions / risks + +- **Cold Cloud SQL on the nightly cron.** `db-f1-micro` + one-shot nightly + writes may hit cold-start latency. Acceptable for a midnight job; revisit if + it ever matters for interactive UI reads. +- **APS rate limits + OAuth refresh.** Need to confirm refresh-token lifetime + and build rotation into `aps_client.py`. Token write-back to Secret Manager + needs its own IAM grant. +- **Cloud Run vs runtime VM for Flask.** Either works. The VM is simpler given + we already have one; Cloud Run is cheaper at idle and scales to zero. Defer + the decision until the migration is otherwise done. +- **`datum-dev` state management.** Business-hours-only means no long-running + background processes on it. Fine for editor + pytest + Claude Code; document + so we don't get surprised. +- **Secret Manager IAM per service account.** `datum-dev` should not have + write access to production credentials; scope narrowly. +- **Production write guard on the runtime VM.** `PLEX_ALLOW_WRITES` should + default OFF at boot and be set only by the systemd unit that invokes the + nightly sync. Never in the VM's shell profile. +- **APS token vs Consumer Key lifetime.** Plex Consumer Key rotates every 31 + days; APS tokens rotate on their own cycle. Two independent rotation alarms; + document both in the runbook. + +--- + +## Cost — `datum-dev` weekday-only schedule + +`scripts/gcp/08-scheduler.sh` creates two Cloud Scheduler jobs that +start `datum-dev` weekdays at 07:00 America/Chicago and stop it at +19:00 America/Chicago. That's 12h × 5 days × ~4.33 weeks ≈ 260h/mo +versus 730h/mo for always-on — a ~64% compute reduction. + +| Mode | Hours/mo | `e2-standard-2` cost (us-central1, list) | +|---|---:|---:| +| Always-on (24/7) | 730 | ~$50/mo | +| Weekday 07:00–19:00 CT | ~260 | ~$15/mo | + +Persistent disk bills separately and isn't affected by stop/start — +the $35/mo delta is compute-only. On stop, the boot disk and hostname +are preserved; on start, the same VM comes back up. + +**IAM scope.** The script grants the runtime service account +`roles/compute.instanceAdmin.v1` on the `datum-dev` instance only +(not project-level), so the Scheduler auth identity can still only +touch that one VM. + +**Gotchas.** + +- An SSH session into `datum-dev` at 19:00 CT gets killed when the + stop job fires. For late work, trigger the start job manually: + `gcloud scheduler jobs run datum-dev-start --location=$REGION --project=$PROJECT_ID`. +- The schedule is weekday-only (`0 7 * * 1-5` / `0 19 * * 1-5`). Weekend + work needs a manual `gcloud compute instances start datum-dev ...`. +- Cloud Scheduler jobs live in `$REGION` (currently `us-central1`). + If you ever change `REGION` in `env.sh` the existing jobs won't follow — + delete them from the old region and re-run `08-scheduler.sh`. diff --git a/docs/NEXT_SESSION.md b/docs/NEXT_SESSION.md new file mode 100644 index 0000000..8c84a81 --- /dev/null +++ b/docs/NEXT_SESSION.md @@ -0,0 +1,70 @@ +# NEXT_SESSION.md — prompts for the next Datum Claude session on `datum-dev` + +This file holds canned prompts to paste into a fresh Claude Code session +running on the `datum-dev` GCP VM. The VM already has all repo plugins +installed; it just needs a `git pull` on `master` before the session +starts. 
+ +Read these in order: + +- [`BRIEFING.md`](./BRIEFING.md) — overall project context +- [`GCP_MIGRATION.md`](./GCP_MIGRATION.md) — what's already provisioned +- [`REORG_AND_STACK.md`](./REORG_AND_STACK.md) — the pending stack swap + +## Prompt 1 — Cloud Scheduler start/stop for `datum-dev` + +``` +Read docs/GCP_MIGRATION.md, then set up Cloud Scheduler to start +datum-dev each weekday morning (07:00 America/Chicago) and stop it each +evening (19:00 America/Chicago). Use the HTTP target against +compute.googleapis.com with OAuth and the runtime service account. + +Add a new idempotent script scripts/gcp/08-scheduler.sh that creates +both jobs and the minimal IAM bindings needed for the SA to +start/stop the instance. Document the expected monthly-cost delta +(baseline ~$50/mo 24/7 → ~$15/mo weekday-only) in a short section at +the bottom of docs/GCP_MIGRATION.md. + +Open a PR on grace-shane/Datum with --repo grace-shane/Datum (do not +rely on gh's default remote — it picks upstream, which is wrong). +``` + +## Prompt 2 — Populate Supabase slots + migrate DB to Cloud SQL + +``` +Phase 2 of docs/REORG_AND_STACK.md: move the staging DB from Supabase +to the Cloud SQL instance datum-db. + +Before any migration work, add two Secret Manager slots for the +existing Supabase project (we still need to read from it to dump): +supabase-url and supabase-service-role-key. Update +scripts/gcp/env.sh SECRETS array and re-run scripts/gcp/05-secrets.sh +plus scripts/gcp/10-populate-secrets.sh. + +Then: pg_dump from Supabase (Shane has full admin there), restore +into datum-db, swap the app's DB layer to SQLAlchemy + psycopg3 as +specified in REORG_AND_STACK.md Phase 2. Keep Supabase reads working +behind a feature flag for one deploy cycle so we can roll back. + +Write a plan before touching code. Confirm with Shane before running +pg_dump — it's a one-way step in the sense that it locks in a +cutover moment. +``` + +## Prompt 3 — General session kickoff (if doing neither of the above) + +``` +Read CLAUDE.md and follow its reading order. Then check Notion's +Current State block to see what's actually next. If nothing's +pressing, look at TODO.md for the next unblocked GitHub issue. +``` + +## Gotchas this VM will hit + +- `gh` auto-picks the `upstream` remote (old `just-shane/plex-api` fork). + Always pass `--repo grace-shane/Datum`. +- The `datum-runtime` VM holds `aps-refresh-token`; `datum-dev` does not + (by IAM policy in `05-secrets.sh`). Don't try to read that secret from + this machine. +- Plex writes stay gated behind `PLEX_ALLOW_WRITES=1`. The GCP move did + not change that contract. diff --git a/docs/Plex_API_Reference.md b/docs/Plex_API_Reference.md new file mode 100644 index 0000000..dbcc322 --- /dev/null +++ b/docs/Plex_API_Reference.md @@ -0,0 +1,293 @@ +# Grace Engineering: Plex Connect REST API Reference + +## 1. Overview + +This reference document synthesizes the discoveries from preliminary API testing and aligns them with the **Fusion 360 Tool Library Synchronization** architectural goals. It serves as the master guide for developers interacting with the Grace Engineering Plex instance via the `connect.plex.com` REST API gateway. 
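+
+The shortest useful smoke test against the gateway (the header and the
+canary endpoint are documented in §2 and §3 below; production code should
+go through `PlexClient`, this is just the wire shape):
+
+```python
+import os
+
+import requests
+
+# Auth canary: a healthy key sees exactly one tenant (Grace).
+resp = requests.get(
+    "https://connect.plex.com/mdm/v1/tenants",
+    headers={"X-Plex-Connect-Api-Key": os.environ["PLEX_API_KEY"]},
+    timeout=30,
+)
+resp.raise_for_status()
+print(resp.json())
+```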
+
+*Note: Grace Engineering runs Plex Classic, MES+ enabled, supporting Prime Archery and Montana Rifle Company.*
+
+> **Companion: Postman collections.** Every endpoint documented in
+> §3 below has a matching request in the **Plex API — Datum** Postman
+> collection (see [`Postman_Collections.md`](./Postman_Collections.md)
+> for the full catalog and naming convention). Postman is the
+> recommended way to explore endpoints by hand; this file is the
+> authoritative reference for what each endpoint actually returns.
+
+---
+
+## 2. Authentication & Headers
+
+All Plex APIs are routed through the developer portal. There is no session token or OAuth flow; a static subscription key is passed via request headers.
+
+- **Developer Portal**: `https://developers.plex.com/`
+- **Rate Limit**: 200 API calls per minute across all endpoints.
+- **Base URL**: `https://connect.plex.com` (Production) / `https://test.connect.plex.com` (Test)
+
+### `PLEX_BASE_URL` override
+
+`plex_api.py` honors a `PLEX_BASE_URL` environment variable that overrides
+both `BASE_URL` and `PLEX_USE_TEST`. Used by the write-validation
+workflow in [#92](https://github.com/grace-shane/Datum/issues/92) to
+point `datum-sync` at the local Plex-mimic mock
+(`tools/plex_mock/server.py`) instead of `connect.plex.com`. Unset in
+normal production operation.
+
+Resolution order (first match wins):
+
+1. Explicit `base_url=` kwarg to `PlexClient()` — tests and ad-hoc scripts
+2. `PLEX_BASE_URL` env var — deployment-time override (the mock)
+3. `PLEX_USE_TEST=1` → `test.connect.plex.com`
+4. Default → `connect.plex.com`
+
+**Required Header:**
+
+```http
+X-Plex-Connect-Api-Key: <your-consumer-key>
+```
+
+> [!WARNING]
+> The API key **must** be in the Request Headers. Placing it as a URL parameter will result in a 401 Unauthorized error.
+
+---
+
+## 3. Verified Endpoints & Access Matrix
+
+> [!IMPORTANT]
+> All values below were verified empirically against `connect.plex.com`
+> (production) on the Grace tenant
+> (`58f781ba-1691-4f32-b1db-381cdb21300c`). Reproduce by running the
+> diagnostic at `/api/diagnostics/tenant` from the local UI, or by
+> running the Postman `[AUTH] List All Tenants — Auth Canary` request.
+>
+> **Verification history:**
+>
+> - **2026-04-07** — first full sweep with the `Fusion2Plex` Consumer
+>   Key. Discovered that tools live at `inventory/v1/inventory-definitions/supply-items`
+>   (not `tooling/v1/tools` as earlier docs claimed).
+> - **2026-04-09** — re-verified with the rotated `Datum` Consumer Key
+>   after the project rename. 23-request connectivity sweep + 6-request
+>   Get-by-ID chain test. Captured full field schemas + discovered that
+>   **supply-items have no foreign key to suppliers, parts, locations,
+>   or any other resource** (see §3.5 below). Also discovered the
+>   `scheduling/v1/jobs` endpoint (new — not in any earlier doc).
+
+### URL pattern convention
+
+Plex uses three URL shapes for read endpoints:
+
+1. **Master data — flat**: `<namespace>/v1/<resource>`
+   Example: `mdm/v1/parts`, `mdm/v1/suppliers`, `mdm/v1/operations`
+2. **Definitions — nested**: `<namespace>/v1/<namespace>-definitions/<resource>`
+   Example: `production/v1/production-definitions/workcenters`,
+   `inventory/v1/inventory-definitions/supply-items`,
+   `inventory/v1/inventory-definitions/locations`
+3. **Flat with sub-namespace** (new 2026-04-09): `<namespace>/v1/<resource>`
+   — the `scheduling/v1/jobs` endpoint uses the first pattern but lives
+   under a namespace that wasn't previously catalogued.
+
+All three patterns are used in production. 
The bare `/v1` root +typically returns 404 (no resource at the root); the actual data lives +one level deeper. + +### Verified working endpoints + +Record counts are as of **2026-04-09** unless noted. Schemas captured +2026-04-09 by the Get-by-ID chain test. + +| Status | Path | Records | Schema / Notes | +|---|---|---:|---| +| **200** | `mdm/v1/tenants` | 1 | Single tenant: Grace. Auth canary — run first in any session. | +| **200** | `mdm/v1/parts` | **16,921** | +8 since 2026-04-07. 19.6 MB unfiltered. Tools are **NOT** here. Fields: `buildingCode, createdById, createdDate, description, group, id, leadTimeDays, modifiedById, modifiedDate, name, note, number, productType, revision, source, status, type`. `type` ∈ {`Finished Good`, `Raw Material`, `Sub Assembly`}. | +| **200** | `mdm/v1/parts?status=Active` | — | 7.8 MB — only verified working filter. All other query params silently ignored. | +| **200** | `mdm/v1/parts/{id}` | — | Same 17 fields as list view — no hidden detail. | +| **200** | `mdm/v1/suppliers` | **1,575** | 709 KB. Fields: `category, code, contactNote, createdById, createdDate, id, language, modifiedById, modifiedDate, name, note, oldCode, parentSupplierId, status, type, webAddress`. `parentSupplierId` is a self-referential FK. First record is a `Carrier` — list mixes material suppliers, carriers, etc. | +| **200** | `mdm/v1/suppliers/{id}` | — | Same 16 fields as list view. | +| **200** | `mdm/v1/customers` | **109** | 96 KB. 35 fields. FKs to employees (`assignedToId`, `assignedTo2Id`, `assignedTo3Id`), contacts (`contactResourceId`), suppliers (`defaultCarrierIds` array, `supplierCode`). | +| **200** | `mdm/v1/customers/{id}` | — | Same 35 fields as list view. | +| **200** | `mdm/v1/contacts` | **299** | 202 KB. | +| **200** | `mdm/v1/buildings` | **4** | 1.2 KB. Referenced from workcenters via `buildingCode`/`buildingId`. | +| **200** | `mdm/v1/employees` | **641** | 272 KB. UUIDs appear as `createdById`/`modifiedById` across essentially every other resource. | +| **200** | `mdm/v1/operations` | **122** | Minimal 4-field schema: `code, id, inventoryType, type`. **No FK to tools, parts, or routings** — the reason issue #5 is blocked. | +| **200** | `mdm/v1/operations/{id}` | — | Same 4 fields as list view. | +| **200** | `inventory/v1/inventory-definitions/supply-items` | **2,516** | 614 KB. Full unfiltered. | +| **200** | `inventory/v1/inventory-definitions/supply-items?category=Tools%20%26%20Inserts` | **1,109** | **TOOLS LIVE HERE.** Fields: `category, description, group, id, inventoryUnit, supplyItemNumber, type`. **No supplier FK. No cross-references of any kind.** See §3.5. | +| **200** | `inventory/v1/inventory-definitions/supply-items/{id}` | — | Same 7 fields as list view. | +| **200** | `inventory/v1/inventory-definitions/locations` | **1,270** | 279 KB. Not cross-referenced from supply-item. | +| **200** | `production/v1/production-definitions/workcenters` | **143** | 21 MILLs. ⚠️ **Primary key is `workcenterId`, not `id`.** Fields: `buildingCode, buildingId, ipAddress, name, plcName, productionLineId, tankSilo, workcenterCode, workcenterGroup, workcenterId, workcenterType`. | +| **200** | `production/v1/production-definitions/workcenters/{id}` | — | Same 11 fields as list view. | +| **200** | `purchasing/v1/purchase-orders` | — | **44.2 MB** unfiltered. Full PO history. `?updatedAfter=` filter confirmed as a silent no-op on 2026-04-09 (byte-identical response). 
| +| **200** | `inventory/v1-beta1/inventory-history/item-adjustments?ItemId=&StartDate=&EndDate=` | varies | **Supply-item adjustment log.** 31/1,109 tools have non-empty history (2026-04-15). Fields: `adjustmentDate, itemId, itemNo, location, locationId, quantity, transactionType`. Summing `quantity` (already signed) gives running balance. **Dates MUST be full ISO with `Z`** (plain YYYY-MM-DD → 400 ARGUMENT_INVALID). See §3.6. | +| **200** | `inventory/v1/inventory-tracking/containers` | **10,676** | On-hand for parts (RAW/WIP/FG). Fields include `quantity, partId, partNo, location, locationId, serialNo, lotId, inventoryType`. Disjoint from supply-items (tools). | +| **200** | `inventory/v1/inventory-history/container-adjustments?BeginDate=&EndDate=` | **6,298** | Per-container adjustment log. Fields: `adjustmentCode, adjustmentDate, location, partId, partNumber, quantity, serialNo, ...`. | +| **200** | `scheduling/v1/jobs` | TBD | **NEW — discovered 2026-04-09.** Returns 200 but **15.8s response time**, so the body is large. Schema, record count, and whether it carries tool/operation/workcenter FKs all TBD. Worth a deep-dive as follow-up to issue #5 (routing/operation linkage) — if jobs link to tools, we get the missing operation→tool mapping for free. | + +### Probed — returned 404 (not subscribed or doesn't exist) + +All of the following were probed on 2026-04-09 and returned `404 RESOURCE_NOT_FOUND`. Kept here so future sessions know they've been checked and don't waste a cycle re-testing them blindly. Re-probe periodically to detect subscription changes. + +| Path | First checked | Notes | +|---|---|---| +| `tooling/v1/tools` | 2026-04-07 (re-check 2026-04-09) | In original pre-Datum docs. Blocks #4. | +| `tooling/v1/tool-assemblies` | 2026-04-07 (re-check 2026-04-09) | Blocks #4. | +| `tooling/v1/assemblies` | 2026-04-09 | Alternate spelling, also 404. | +| `manufacturing/v1/routings` | 2026-04-07 (re-check 2026-04-09) | Blocks #5. | +| `quality/v1/inspections` | 2026-04-09 | Speculative probe. | +| `sales/v1/sales-orders` | 2026-04-09 | Speculative probe. | +| `inventory/v1/on-hand` | 2026-04-09 | Would have given tool stock levels. | +| `inventory/v1/containers` | 2026-04-09 | | +| `inventory/v1/inventory-definitions/container-types` | 2026-04-09 | | +| `mdm/v1/parts-buckets` | 2026-04-09 | | +| `production/v1/production-definitions/assets` | 2026-04-09 | | +| `production/v1/production-definitions/assemblies` | 2026-04-09 | | +| `purchasing/v1/purchase-orders-lines` | 2026-04-09 | Would have given supply-item → PO → supplier linkage. | + +### §3.5 — Supply-item cross-references: the critical finding + +**The `inventory/v1/inventory-definitions/supply-items` resource is identity-only.** Its 7 fields are: + +- `category` (string, e.g. `"Tools & Inserts"`) +- `description` (free-text, human-readable) +- `group` (string, e.g. `"Machining"`) +- `id` (UUID — Plex primary key) +- `inventoryUnit` (string, e.g. `"Ea"`) +- `supplyItemNumber` (string — see below) +- `type` (string, e.g. `"SUPPLY"`) + +**There is no field on this resource that references another resource.** Specifically: + +- No `supplierId` or `preferredSupplierId` — **you cannot derive the vendor for a tool from Plex alone.** +- No `locationId` or `warehouseId` — you cannot ask "where is this tool right now?" via this endpoint. +- No `partId` — supply-items are not linked to `mdm/v1/parts`. +- No `workcenterId` — supply-items are not assigned to machines. +- No `operationId` — supply-items are not linked to operations. 
+

**Implication for Datum sync architecture:** vendor/supplier data for tools MUST live in Supabase as the source of truth. The Fusion JSON carries `vendor` and `product-id`; those get written to the `tools` table in Supabase, and when `build_supply_item_payload()` (issue #3) constructs the Plex POST body, it uses only the 7 identity fields. Plex never learns who the vendor is, because Plex doesn't model that relationship for tools.

This finding also kills the hypothesis that PO lines could be used as a back-channel for the vendor link — `purchasing/v1/purchase-orders-lines` returned 404 on 2026-04-09.

**Sample `supplyItemNumber` values captured 2026-04-09** (confirming the
"legacy free-text descriptions, not vendor part numbers" observation
from the 2026-04-08 Decision Log):

- `"Insert HM90 AXCR 150508 IC28"`
- `"Screw Indexable Face Mill F75"`
- `"Tap #8-32 H3 Spiral Point"`

Fusion will insert clean vendor part numbers like `"990910"`, so expect essentially zero collision with existing Plex records on first sync.

### §3.6 — Supply-item `item-adjustments` and the `transactionType` sign table

Endpoint: `GET inventory/v1-beta1/inventory-history/item-adjustments`
Required params: `ItemId` (supply-item UUID), `StartDate`, `EndDate` (full ISO with `Z`).

**Key finding (probed 2026-04-15 across all 1,109 `category="Tools & Inserts"` supply-items):** the `quantity` field is delivered **pre-signed** — positive for additions, negative for removals. You do NOT need to apply a sign based on `transactionType`. Sum `quantity` directly to get the running balance.

The enumerated `transactionType` values across 2,005 real records:

| transactionType | records | qty_min | qty_max | quantity sign | interpretation |
|---|---:|---:|---:|---|---|
| `PO Receipt` | 1,479 | 1.0 | 100.0 | always `+` | vendor received into stock |
| `Checkout` | 326 | -75.0 | -1.0 | always `-` | pulled from crib to production |
| `Correction` | 125 | -6433.0 | 78.0 | either | manual count adjustment, signed |
| `Check In` | 74 | 1.0 | 103.0 | always `+` | returned to crib / physical recount up |
| `null` | 1 | 19.0 | 19.0 | — | one record with a missing `transactionType`; treat as a data-quality issue, still sum the qty |

**Implementation rule:** `running_balance = sum(r.quantity for r in records)`. No sign flip, no lookup table. If future records introduce a new `transactionType`, the pre-signed `quantity` contract should still hold — but the sync script should log any unknown `transactionType` values it encounters as a warning for review.

Of Grace's 1,109 tools, **31 (2.8%) have non-empty adjustment history.** The remaining 1,078 have never been tracked in Plex inventory at all — a data-quality finding, not an API limitation. Datum distinguishes this in `tools.qty_tracked`: TRUE = ≥1 record, FALSE = linked but Plex has no history (display as "not tracked"), NULL = not yet checked by sync.

### Where tooling data actually lives

**Cutting tools and inserts are `inventory/v1/inventory-definitions/supply-items`
records** with `category="Tools & Inserts"`. This is NOT what the original
`Plex_API_Reference.md` claimed — that file referenced `tooling/v1/tools` and
`mdm/v1/parts`, neither of which works for tooling on this app.

Verified empirically: 1,109 tools/inserts already exist in Plex Grace, mostly
in `group="Machining"` (1,039) and `group="Tool Room"` (104). The supply-item
schema is minimal — it tracks vendor part number identity, not geometry, so
the Fusion 360 sync will:

1. Read existing tools via `GET inventory/v1/inventory-definitions/supply-items`
2. Filter client-side or via query string to `category=Tools & Inserts`
3. Match by `supplyItemNumber` (vendor part number, e.g. Harvey Tool's `990910`)
4. Create new supply-items for Fusion tools that don't exist
5. Update existing ones

Geometry (DC, OAL, NOF, holder details) stays in Fusion as the source of
truth — Plex stores only the identity, description, and group/category.

### Workcenter ↔ machine mapping (verified)

The 21 MILL workcenter records map directly to physical Brother Speedio
machines via the `workcenterCode` field (which equals the machine number /
DNC IP last octet):

- Workcenter `879` → Brother Speedio 879 → FTP `192.168.25.79`
- Workcenter `880` → Brother Speedio 880 → FTP `192.168.25.80`

The full mill list: 814, 825, 827, 830, 834, 835, 836, 837, 839, 840, 841,
845, 848, 851, 865, 873, 879, 880, DEFLECT.

### Reading Plex's status codes

- **200** — success.
- **401 `REQUEST_NOT_AUTHENTICATED`** — bad credentials OR a recognized
  namespace your app isn't subscribed to. Same wire response, indistinguishable
  from outside.
- **404 `RESOURCE_NOT_FOUND`** — Plex's gateway has no route at that path.
  Could mean an unknown URL OR subscribed-but-no-resource. Same wire response.
- **400** — Plex recognizes the path but the request is malformed (often it
  treats a string as a UUID parameter and fails to parse).
- **403** — **never observed in practice on this app**.

The 401-vs-404 distinction is **not** a clean signal on its own. The only
reliable way to disambiguate is to compare against a known-good client
(Insomnia "Generate Code" output is the gold standard).

### No server-side pagination

`mdm/v1/parts` and `purchasing/v1/purchase-orders` **silently ignore** the
`limit` query parameter. We learned this empirically — `?limit=1` returned
19.6 MB and 44 MB respectively. The only filter we've verified actually
works is `?status=Active` on `mdm/v1/parts` (reduces 19.6 MB → 7.8 MB).
The `typeName` filter is also silently ignored. **Always assume `limit`
does nothing and use real filters or accept the full DB pull.**

---

## 4. Current Tooling Data Flow (Fusion 360 to Plex)

Data flows from Fusion 360 to Plex via the REST API. The `tooling/v1/*` path namespace referenced in earlier drafts of this document does NOT exist on the Fusion2Plex app — see Section 3 and [`BRIEFING.md` History §3](./BRIEFING.md) for the postmortem.

1. **REST API Automation (Target State)**
   - A scheduled script parses the Fusion 360 tool library JSON files on the network share.
   - Extracts `product-id`, `vendor`, `description`, and `geometry`.
   - Pre-sync validation gate runs via `validate_library.py` (spec only, see [`validate_library_spec.md`](./validate_library_spec.md), implementation issue #25).
   - Pushes payloads to `inventory/v1/inventory-definitions/supply-items` with `category="Tools & Inserts"`, `group="Machining"`, and `supplyItemNumber=` as the dedup key. Read path verified (1,109 records); write logic in progress (issue #3).
   - Pushes payloads to `production/v1/production-definitions/workcenters/{id}` using `post-process.number` for turret/pocket placement. Read path verified; write shape TBD (issue #6).

2. **CSV Upload System (Historical Fallback)**
   - Before API access was verified, engineering used bulk CSV uploads.
   - Sequence: **Tool Assembly Upload** ➔ **Tool Inventory Upload** ➔ **Tool BOM Upload** ➔ **Routing Upload**.
+
   - The supply-items REST path above is the target state and supersedes this workflow once issues #3, #6, and #7 land.

---

## 5. Machine Integration (DNC Overview)

Outside the Plex database, NC programs and tool alignments must be pushed to the physical machines on the floor:

- **Brother Speedio (879/880)**: Native FTP integration (`192.168.25.79`, `192.168.25.80`). Scripts can push programs directly via standard FTP.
- **Citizen / Tsugami**: Connected via Moxa NPort 5150/5250 converters bridging RS-232 to TCP/IP.
- **Haas VMCs**: Native Ethernet on Sigma 5 boards.

*Plex DCS is the native source of truth for NC programs; DNC protocols transfer them to the machines just-in-time.*

---

## 6. Known Issues & Development Gotchas

- **Supplier UUIDs**: The `supplierId` in API responses is a UUID, NOT the supplier code (MSC appears as a UUID, not the code `MSC001`). You must query the MDM endpoint to resolve vendor names to their internal UUIDs.
- **PO Filters**: Filtering by `type` strings containing spaces (`MRO SUPPLIES`) requires proper URL encoding (`%20`). Undetected encoding issues result in zero-record responses rather than explicit HTTP errors.
- **PowerShell Curl**: Do not use the alias `curl` in PowerShell scripts. Use `Invoke-RestMethod`, which passes headers correctly and parses JSON natively.

diff --git a/docs/Plex_Classic_API_Request.md b/docs/Plex_Classic_API_Request.md
new file mode 100644
index 0000000..493cc93
--- /dev/null
+++ b/docs/Plex_Classic_API_Request.md
@@ -0,0 +1,146 @@
# Plex Classic Web Services — Access Request

**From:** Shane Waid, Grace Engineering
**Date:** April 10, 2026
**Project:** Datum — Fusion 360 Tool Library Sync
**Repo:** https://github.com/grace-shane/datum

---

## What we're building

Datum is an internal automation that syncs our Autodesk Fusion 360 CAM
tool library data into Plex. Fusion 360 JSON files on our network share
are the source of truth for cutting tools — the script reads them and
pushes tooling data to Plex nightly so tool information stays current
across programming, purchasing, and the shop floor.

## What works today

We have a working integration with the **Plex Connect REST API**
(`connect.plex.com`) using a Consumer Key from the Developer Portal.
The Datum app can:

- Authenticate against Grace Engineering's production tenant
- Read supply items, parts, suppliers, workcenters, operations, jobs,
  and 10+ other endpoints
- Write to `inventory/v1/inventory-definitions/supply-items` (our tool
  master list — 1,109 existing records)

The tool identity sync (name, vendor part number, description, category)
is ready to go live via the REST API.
+ +## What we can't do with the REST API + +After a thorough investigation of every available REST endpoint (36 +requests across 8 namespace groups, verified April 9-10, 2026), we've +confirmed that the Connect REST API **does not expose**: + +| Capability | REST API status | Why we need it | +|---|---|---| +| **Part Operations** | `mdm/v1/operations` has 4 fields, no FK to parts or tools | Link tools to the operations they perform | +| **Tool-to-Operation assignments** | No endpoint exists | Operators need to see which tools are required for each op | +| **Routing / operation sequences** | `manufacturing/v1/routings` returns 404 | Define the order of operations on a part | +| **DCS / Document attachments** | `documents/v1/*` and `dcs/v1/*` return 404 | Attach tool setup sheets to Part Operations | +| **Workcenter documents** | `workcenters/{id}/documents` returns 404 | Push tool lists to machine setup docs | +| **Supply item cross-references** | `supply-items` has 7 identity fields only — no supplier FK, no location, no operation link | Connect tools to vendors, locations, machines | + +These relationships **do exist in Plex** — we can see them in the +Classic UI at `plexonline.com` (Control Panel, Part Operation +Attachments, Workcenter views). They're just not available through the +Connect REST API. + +## What we're requesting: Classic Web Services access + +The **Plex Web Services** endpoint at +`plexonline.com/Modules/Xmla/XmlDataSource.asmx` can access the full +Classic schema via Data Sources. This would let Datum: + +1. **Read Part Operations** — which operations run on which parts, at + which workcenters +2. **Assign tools to operations** — so the shop floor sees the right + tools for each job +3. **Upload setup sheet attachments** via the DCS (Document Control + System) — the "Part Operation Attachments" screen that's currently + empty for our milling operations +4. **Push tool lists to workcenter documents** — so machine operators + on the Brother Speedios (879, 880) have current tool data + +### What we found so far (April 10, 2026) + +We tested the Classic Web Services endpoint from multiple angles: + +1. **Unauthenticated GET** to + `https://www.plexonline.com/Modules/Xmla/XmlDataSource.asmx?WSDL` + — returned the Plex login page (IAM Login button only, no + username/password form). Confirms the endpoint path exists and + Plex Classic now authenticates through Rockwell IAM. + +2. **Authenticated GET** (logged into Plex via IAM in the same browser + session, then navigated to the WSDL URL in the same tab) — returned + a **system error page**: *"A system error has occurred on this page. + Plex personnel have been automatically notified and are working on + the problem."* The Plex header bar rendered (session is valid), but + the ASMX endpoint itself threw a server-side exception. + +3. **Session-prefixed URL** — Classic Plex URLs include a per-login + session GUID (e.g. `plexonline.com/0daa8ab4-4c2e-.../Modules/...`). + We tried `plexonline.com/{session-guid}/Modules/Xmla/XmlDataSource.asmx?WSDL` + but navigating outside the Classic UI window forces a re-login, which + generates a new session GUID. The ASMX URL was stripped during the + redirect and never reached. + +4. **Classic UI navigation** — The Plex Classic window locks out the + address bar (kiosk-style). There is no way to navigate to the ASMX + endpoint from within the Classic session. + +**Conclusion:** The ASMX endpoint exists but is non-functional for +Grace Engineering's authenticated user. 
This is either a subscription +issue, a deprecation, or a configuration that needs Plex support to +enable. + +### What we need + +| Item | Details | +|---|---| +| **Is Classic Web Services available to us?** | The ASMX endpoint at `plexonline.com/Modules/Xmla/XmlDataSource.asmx` returns a system error when accessed by an authenticated Grace Engineering user. Is this feature enabled for our subscription? If not, what does it take to enable it? | +| **Correct endpoint URL** | If the ASMX path has moved during the IAM migration, what is the current URL for programmatic Data Source access? | +| **Programmatic auth method** | How does a script (not a browser) authenticate to Classic Web Services now that IAM SSO is required? Is there an OAuth2 client credentials flow, an API key, or a service account token? This is separate from the Developer Portal Consumer Key we already use for the REST API at `connect.plex.com`. | +| **Company Code** | Grace Engineering's numeric Company Code in Classic Plex (not the tenant UUID `58f781ba-...` used by the REST API). This may be required as a parameter in SOAP calls. | +| **Data Source inventory** | If Web Services is available (or can be enabled), we need a list of Data Sources related to: Part Operations, Tool Assignments, Workcenter Assignments, and DCS/Attachments. If custom Data Sources need to be created, we can specify the exact fields we need. | + +### What we will NOT do + +- We will not modify any existing Part, Operation, or Workcenter records + without explicit approval +- All writes will go through a dry-run validation step first +- The integration already has a production write guard that blocks + mutations by default (`PLEX_ALLOW_WRITES` must be explicitly enabled) +- Credentials will be stored in environment variables, never committed + to source control + +## Architecture overview + +``` +Fusion 360 JSON (network share, nightly) + | + v + validate_library.py <-- pre-sync validation gate + | + v + sync_supabase.py <-- full record upsert (geometry, vendor, presets) + | + |---> Supabase (datum) <-- enriched tool database + React UI + | + |---> Plex REST API <-- tool identity (supply-items) + | connect.plex.com WORKS TODAY + | + |---> Plex Classic Web Services <-- tool assignments, attachments, routing + plexonline.com NEED ACCESS +``` + +## Contact + +Shane Waid +shanewaid@graceeng.com +Grace Engineering — CNC Programming diff --git a/docs/Postman_Collections.md b/docs/Postman_Collections.md new file mode 100644 index 0000000..c2eced6 --- /dev/null +++ b/docs/Postman_Collections.md @@ -0,0 +1,448 @@ +# Postman Collections — Datum + +This document is the authoritative reference for the two Postman collections +that back the Datum project. It lives next to `BRIEFING.md` and +`Plex_API_Reference.md` because the collections are the day-to-day +exploration tool — when you need to poke at a Plex endpoint or the local +Flask harness, the collections are where you start. + +> **Read order.** If you have not yet read `docs/BRIEFING.md` and +> `docs/Plex_API_Reference.md`, read those first. This document assumes +> you understand the Datum project, the Plex auth model, and the +> "verified vs probe" distinction. The collections inherit those rules. + +--- + +## 1. Where the collections live + +Both collections live in Shane's Grace Engineering Postman workspace. 
+ +| Field | Value | +|---|---| +| Workspace name | (Grace Engineering — Datum workspace) | +| Workspace ID | `154e8d9a-cde9-4036-8e07-6913e468ab05` | +| Owner | `shanewaid@graceeng.com` (Shane's Grace Postman account, owner ID `53648712`) | + +### Collections + +| Name | Collection ID | UID (MCP form) | +|---|---|---| +| **Plex API — Datum** | `75b28dc4-9c73-4e27-90d0-1539777f52ea` | `53648712-75b28dc4-9c73-4e27-90d0-1539777f52ea` | +| **Fusion 360 Tool Libraries — Datum** | `8a9b5ce6-f541-4301-b15d-fd95970df0e8` | `53648712-8a9b5ce6-f541-4301-b15d-fd95970df0e8` | + +**Request counts as of 2026-04-09:** Plex collection has **36 requests** (34 reads + new `[SCHED] List Jobs` + expanded `[PROBE]` group with 2 new entries) across 8 `[NS]` prefix groups. Fusion collection has 14 requests across 4 groups. + +The bare collection ID (no owner prefix) is the form most Postman MCP +endpoints want. The UID form (with the `53648712-` prefix) is what +`getCollection`, `getCollections`, etc. return as a stable handle. + +### Environment + +| Field | Value | +|---|---| +| Name | `Plex — Grace Engineering (Production)` | +| Variables | `api_key`, `api_secret` (secret type), `tenant_id`, `base_url` | + +The Fusion collection doesn't strictly need this environment because it +runs against the local Flask app, but having it active is harmless. + +--- + +## 2. Auth model + +Plex auth lives entirely at the collection level. Every request in the +**Plex API — Datum** collection inherits a pre-request script that injects +three headers from the active environment: + +```javascript +pm.request.headers.add({ key: 'X-Plex-Connect-Api-Key', value: pm.environment.get('api_key') }); +pm.request.headers.add({ key: 'X-Plex-Connect-Api-Secret', value: pm.environment.get('api_secret') }); +pm.request.headers.add({ key: 'X-Plex-Connect-Tenant-Id', value: pm.environment.get('tenant_id') }); +``` + +You should never have to set these headers manually. If a Plex request +returns 401 `REQUEST_NOT_AUTHENTICATED`, check that: + +1. The `Plex — Grace Engineering (Production)` environment is **active** + (top-right environment selector in Postman). +2. `api_key` is the current Datum Consumer Key from the Plex Developer + Portal — keys rotate every 31 days (see issue #12). +3. You haven't recently regenerated the key in the portal. + +The Fusion collection has no auth — it's all local Flask, no credentials. + +--- + +## 3. Naming convention + +Both collections use a `[NS]` prefix on every request name to group them +visually in the Postman sidebar. This is a workaround for the fact that +the Postman MCP minimal tool surface doesn't expose folder creation, so +we can't use real folders. Sort the request list alphabetically in +Postman to see the groups together. 
+ +### Plex — Datum + +| Prefix | Meaning | Folder analogue | +|---|---|---| +| `[AUTH]` | Auth canary + tenant lookups | Run **first** in any session | +| `[MDM]` | Master Data Management — parts, suppliers, customers, contacts, buildings, employees, operations | Verified namespace | +| `[INV]` | Inventory — supply-items (where TOOLS live) and locations | Verified namespace | +| `[PROD]` | Production — workcenters, including the Brother Speedio per-id reads | Verified namespace | +| `[PURCH]` | Purchasing — purchase orders | Verified namespace | +| `[WRITE]` | Mutating requests — POST/PUT/DELETE templates | **Production data — see §6** | +| `[PROBE]` | Unverified namespaces (`tooling/`, `manufacturing/`, `quality/`, `sales/`) | Run to detect subscription gaps | + +### Fusion — Datum + +| Prefix | Meaning | +|---|---| +| `[SRV]` | Server-level Flask endpoints — config, tenant diagnostic | +| `[LIB]` | Fusion library reads — list, upload, stats, consumables | +| `[VAL]` | Pre-sync validation gate variants | +| `[PROXY]` | Raw Plex proxy through the local Flask app | + +--- + +## 4. Plex API — Datum — full endpoint catalog + +All paths are relative to `{{base_url}}` which resolves to +`https://connect.plex.com` from the environment. + +> **Verification state (as of 2026-04-09).** 23 GET requests were run in +> the 2026-04-09 connectivity sweep + 6 Get-by-ID requests in the chain +> test. **18/23 returned 200, 5/23 returned 404, all 6 per-id endpoints +> returned 200.** Zero 401s. Record counts and schemas in the tables +> below come from that sweep. `[WRITE]` requests are NOT tested (per +> user instruction, writes only run on explicit approval). + +### `[AUTH]` — Auth & Diagnostics + +| Request | Method | Path | Status | +|---|---|---|---| +| List All Tenants — Auth Canary | GET | `/mdm/v1/tenants` | **Verified** — must return 1 record (Grace) | +| Get Tenant by ID | GET | `/mdm/v1/tenants/{tenant_id_grace}` | **Verified** | + +The auth canary has a Postman test script that asserts: + +- Status 200 +- Response is an array of length 1 +- The Grace tenant UUID `58f781ba-1691-4f32-b1db-381cdb21300c` matches + +If those tests fail, your auth is broken — **stop and fix the credential +before running anything else**. See `docs/BRIEFING.md` History §1 for why +this matters. + +### `[MDM]` — Master Data + +All records verified 2026-04-09. Per-id endpoints all return the exact same fields as the list view — no hidden detail. + +| Request | Method | Path | Verified | Notes | +|---|---|---|---|---| +| List Parts — Unfiltered | GET | `/mdm/v1/parts` | ✅ **16,921** records | **19.6 MB.** +8 since 2026-04-07. Tools are NOT here. 17 fields. | +| List Parts — Active only | GET | `/mdm/v1/parts?status=Active` | ✅ 7.8 MB | Only filter that actually works on this endpoint. | +| Get Part by ID | GET | `/mdm/v1/parts/:partId` | ✅ verified 2026-04-09 | Same 17 fields as list view. | +| List Suppliers | GET | `/mdm/v1/suppliers` | ✅ **1,575** records / 709 KB | 16 fields. `parentSupplierId` self-FK. Mixes material suppliers + carriers. Cached by `validate_library --use-api`. | +| Get Supplier by ID | GET | `/mdm/v1/suppliers/:supplierId` | ✅ verified 2026-04-09 | Same 16 fields. | +| List Customers | GET | `/mdm/v1/customers` | ✅ **109** records / 96 KB | 35 fields. FKs to employees, contacts, suppliers. | +| Get Customer by ID | GET | `/mdm/v1/customers/:customerId` | ✅ verified 2026-04-09 | Same 35 fields. 
| +| List Contacts | GET | `/mdm/v1/contacts` | ✅ **299** records / 202 KB | | +| List Buildings | GET | `/mdm/v1/buildings` | ✅ **4** records / 1.2 KB | Provides `buildingCode`/`buildingId` referenced by workcenters. | +| List Employees | GET | `/mdm/v1/employees` | ✅ **641** records / 272 KB | UUIDs appear as `createdById`/`modifiedById` across every resource. | +| List Operations | GET | `/mdm/v1/operations` | ✅ **122** records | Minimal 4-field schema — no FK to tools/parts/routings. Issue #5 blocker. | +| Get Operation by ID | GET | `/mdm/v1/operations/:operationId` | ✅ verified 2026-04-09 | Same 4 fields. | + +### `[INV]` — Inventory + +| Request | Method | Path | Verified | Notes | +|---|---|---|---|---| +| List Supply Items — All | GET | `/inventory/v1/inventory-definitions/supply-items` | ✅ **2,516** records | Full unfiltered, ~614 KB. | +| List Supply Items — Tools & Inserts | GET | `/inventory/v1/inventory-definitions/supply-items?category=Tools%20%26%20Inserts` | ✅ **1,109** after client filter | **Target endpoint for the Fusion sync.** ⚠️ **No supplier FK, no cross-refs of any kind — see §4.5.** Has a test script asserting schema. | +| Get Supply Item by ID | GET | `/inventory/v1/inventory-definitions/supply-items/:supplyItemId` | ✅ verified 2026-04-09 | Same 7 fields as list view. No hidden detail. | +| List Inventory Locations | GET | `/inventory/v1/inventory-definitions/locations` | ✅ **1,270** records / 279 KB | Not cross-referenced from supply-item. | + +### `[PROD]` — Production + +| Request | Method | Path | Verified | Notes | +|---|---|---|---|---| +| List Workcenters | GET | `/production/v1/production-definitions/workcenters` | ✅ **143** records | Includes 21 MILLs. ⚠️ Primary key is `workcenterId`, not `id`. Test script logs the count. | +| Get Workcenter by ID — generic | GET | `/production/v1/production-definitions/workcenters/:workcenterId` | ✅ verified 2026-04-09 | Same 11 fields as list view. | +| Get Workcenter — Brother Speedio 879 | GET | `/production/v1/production-definitions/workcenters/0b6cf62b-2809-4d3d-ab24-369cd0171f62` | ✅ verified 2026-04-09 | workcenterCode `879`, FTP `192.168.25.79`. | +| Get Workcenter — Brother Speedio 880 | GET | `/production/v1/production-definitions/workcenters/8e262d5a-3ce8-4597-8726-d2b979b1b6b7` | ✅ verified 2026-04-09 | workcenterCode `880`, FTP `192.168.25.80`. | + +### `[PURCH]` — Purchasing + +| Request | Method | Path | Verified | Notes | +|---|---|---|---|---| +| List Purchase Orders — Unfiltered | GET | `/purchasing/v1/purchase-orders` | ✅ 44.2 MB | Full PO history. **Be careful — 44 MB every call.** | +| List Purchase Orders — Filtered (template) | GET | `/purchasing/v1/purchase-orders?updatedAfter=...` | 🚫 **Filter is a silent no-op** (verified 2026-04-09) | Both filtered and unfiltered returned byte-identical 44.2 MB. Kept as a probe template for future Plex behavior changes. | + +### `[SCHED]` — Scheduling (new 2026-04-09) + +| Request | Method | Path | Verified | Notes | +|---|---|---|---|---| +| List Jobs | GET | `/scheduling/v1/jobs` | ✅ 200 (2026-04-09) | **New endpoint discovered during the 2026-04-09 sweep.** ~15.8s response (large body). Schema, record count, and FK structure all TBD. Potentially relevant to issue #5 if jobs carry tool references — would give the operation→tool mapping for free. 
| + +### `[WRITE]` — Mutating requests + +| Request | Method | Path | Status | Notes | +|---|---|---|---|---| +| POST Supply Item — Create New | POST | `/inventory/v1/inventory-definitions/supply-items` | **Live writes blocked at proxy** | Body uses `inventoryUnit: "Ea"` and `type: "SUPPLY"` (both confirmed from live production data). Issue #3. | +| PUT Supply Item — Update Existing | PUT | `/inventory/v1/inventory-definitions/supply-items/{supply_item_id}` | **Live writes blocked at proxy** | Same body shape as POST. Issue #3. | +| DELETE Supply Item by ID | DELETE | `/inventory/v1/inventory-definitions/supply-items/:supplyItemId` | **Destructive** | For test cleanup only — Fusion sync should never call this. | +| PUT Workcenter Doc — issue #6 | PUT | `/production/v1/production-definitions/workcenters/:workcenterId` | **Write shape UNVERIFIED** | Placeholder body — do not run against production until issue #6 is closed. | + +> ⚠️ **Postman bypasses the local production write guard.** The Flask +> proxy at `/api/plex/raw` refuses POST/PUT/PATCH/DELETE against +> `connect.plex.com` unless `PLEX_ALLOW_WRITES=1` is set in the shell. +> **Postman talks directly to Plex** and has no such guard. Sending a +> request from the `[WRITE]` group **will hit production**. See §6 for the +> safe write workflow, and use the `[PROXY]` requests in the Fusion +> collection if you want the guard to apply. + +> ℹ️ **About the `inventoryUnit` and `type` field values.** When the +> Plex POST/PUT supply-item write shape was first added to the +> collection (PR pending in issue #3), `inventoryUnit` was set to `"Ea"` +> and `type` was set to `"SUPPLY"`. Both values were confirmed by +> reading existing rows from `[INV] List Supply Items — Tools & Inserts`. +> A separate observation: existing Plex `supplyItemNumber` values are +> mostly free-text descriptions, **not** vendor part numbers. Expect the +> first sync run to be mostly INSERTs, not UPDATEs. + +### `[PROBE]` — Unverified namespaces + +**All probes returned 404 on 2026-04-09.** Kept so future sessions know they've been checked. + +| Request | Method | Path | First checked | Last check | +|---|---|---|---|---| +| tooling/v1/tools | GET | `/tooling/v1/tools` | 2026-04-07 | **2026-04-09 — still 404** | +| tooling/v1/tool-assemblies | GET | `/tooling/v1/tool-assemblies` | 2026-04-07 | **2026-04-09 — still 404** | +| manufacturing/v1/routings | GET | `/manufacturing/v1/routings` | 2026-04-07 | **2026-04-09 — still 404** | +| quality/v1/inspections | GET | `/quality/v1/inspections` | 2026-04-09 | **2026-04-09 — 404 (first check)** | +| sales/v1/sales-orders | GET | `/sales/v1/sales-orders` | 2026-04-09 | **2026-04-09 — 404 (first check)** | +| inventory/v1/on-hand | GET | `/inventory/v1/on-hand` | 2026-04-09 | **2026-04-09 — 404 (first check)** — would have given stock levels | +| purchasing/v1/purchase-orders-lines | GET | `/purchasing/v1/purchase-orders-lines` | 2026-04-09 | **2026-04-09 — 404 (first check)** — would have enabled supply-item → supplier back-channel | + +The `tooling/v1/*` and `manufacturing/v1/*` paths were in the original +pre-Datum API reference and worked for the previous developer on a +different Plex deployment, but the Datum app subscription returns 404 for +all of them. `inventory/v1/on-hand` and `purchasing/v1/purchase-orders-lines` +were added 2026-04-09 after being probed while investigating how to +derive vendor/stock data for supply-items (neither exists). 
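
Since every probe is a plain GET, re-checking the whole set from a scratch script takes a few lines. A minimal sketch (not a committed tool): the three header names match the collection-level pre-request script in §2, while the env var names are assumptions that mirror the repo's dotenv conventions.

```python
"""Re-run the [PROBE] paths and flag any that stop returning 404."""
import os

import requests

PROBES = [
    "tooling/v1/tools",
    "tooling/v1/tool-assemblies",
    "manufacturing/v1/routings",
    "quality/v1/inspections",
    "sales/v1/sales-orders",
    "inventory/v1/on-hand",
    "purchasing/v1/purchase-orders-lines",
]

# Env var names are assumptions mirroring the repo's dotenv conventions.
HEADERS = {
    "X-Plex-Connect-Api-Key": os.environ["PLEX_API_KEY"],
    "X-Plex-Connect-Api-Secret": os.environ["PLEX_API_SECRET"],
    "X-Plex-Connect-Tenant-Id": os.environ.get("PLEX_TENANT_ID", ""),
}

for path in PROBES:
    r = requests.get(f"https://connect.plex.com/{path}", headers=HEADERS, timeout=60)
    # 404 = still unrouted; anything else means the subscription set changed.
    flag = "still 404" if r.status_code == 404 else f"CHANGED -> {r.status_code}"
    print(f"{path:45s} {flag}")
```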
+ +All are kept here so that if the subscription set ever changes, we can +rerun the probes and detect it. See `docs/BRIEFING.md` History §3 and +`docs/Plex_API_Reference.md` §3 "Probed — returned 404". + +### §4.5 — Supply-item cross-reference map (the critical finding) + +Re-verified 2026-04-09 via the Get-by-ID chain test: + +**`inventory/v1/inventory-definitions/supply-items` has NO cross-references to any other resource.** The record is identity-only, 7 fields: + +``` +category, description, group, id, inventoryUnit, supplyItemNumber, type +``` + +Specifically — this resource does NOT have: + +| Missing FK | Would have given us | +|---|---| +| `supplierId` / `preferredSupplierId` | Who to buy the tool from. **You cannot get a tool's vendor from Plex alone.** | +| `locationId` / `warehouseId` | Where the tool physically is. | +| `partId` | Which finished product this tool helps produce. | +| `workcenterId` | Which machine the tool is assigned to. | +| `operationId` | Which operation/process step the tool is used for. | + +**Consequence for Datum:** vendor data for tools MUST live in Supabase as the source of truth. The Fusion sync writes vendor + product-id + description + geometry to `libraries`/`tools`/`cutting_presets` in Supabase; `build_supply_item_payload()` (issue #3) then constructs the Plex POST body from only the 7 identity fields. Plex never learns who the vendor is — and that's fine, because nothing in Plex depends on that information. + +Also killed the "use PO lines as a back-channel for the vendor link" hypothesis — `purchasing/v1/purchase-orders-lines` returned 404 on 2026-04-09. There is no sub-resource for PO line items; they must be embedded in the parent PO records (verification pending). + +For the full cross-reference map of *which* relationships DO exist in Plex (e.g. customer→employee, workcenter→building, supplier→parentSupplier), see the Notion page **"Plex Data Model — Cross-References"** under the Datum project page. + +--- + +## 5. Fusion 360 Tool Libraries — Datum — full endpoint catalog + +All paths are relative to `{{base_url}}` which is set as a **collection +variable** to `http://localhost:5000`. The Flask app must be running: + +```powershell +py run_dev.py +``` + +ADC (Autodesk Desktop Connector) must also be running and synced for any +endpoint that touches the network share. + +### `[SRV]` — Server-level + +| Request | Method | Path | What it does | +|---|---|---|---| +| Get Server Config | GET | `/api/config` | Returns base URL, environment, `is_production`, `writes_allowed`, tenant ID, key/secret presence. | +| Tenant Diagnostic (via Flask) | GET | `/api/diagnostics/tenant` | Runs `tenant_whoami()` end-to-end through the Flask app. The most thorough auth canary. | + +### `[LIB]` — Fusion library reads + +| Request | Method | Path | What it does | +|---|---|---|---| +| Get All Libraries | GET | `/api/fusion/libraries` | List all libraries from the ADC share. | +| Upload Library (File) | POST | `/api/fusion/libraries` | Upload a `.json` file (no ADC required). | +| Get Tool Library Stats | GET | `/api/fusion/tools/stats` | Type and vendor distribution across all loaded libraries. | +| Get Consumable Tools (sync candidates) | GET | `/api/fusion/tools/consumable` | Filtered list — excludes holders and probes. **This is the input to `build_supply_item_payload()` (issue #3).** Test asserts no holders or probes in the result. | + +> ⚠️ **Known route divergence.** The Postman URLs above were captured +> from an earlier app version. 
The current Flask routes in `app.py` are +> `/api/fusion/tools` (GET/POST), `/api/fusion/tools/stats`, and +> `/api/fusion/tools/consumables` (plural). If a `[LIB]` request returns +> 404, the route was probably renamed and the collection is stale. +> **Verify against `app.py` before assuming a backend bug.** + +### `[VAL]` — Pre-sync validation gate + +These hit `/api/fusion/validate` (per `app.py:395`) — the entry point +described in `docs/validate_library_spec.md` (issue #25). + +| Request | Method | Query / Body | When to use | +|---|---|---|---| +| Validate Library — Live ADC (default) | GET | `?abort_on_stale=true` | The default sweep — validates every `*.json` in the ADC `CAMTools` directory. | +| Validate Library — Single File (live ADC) | GET | `?file=BROTHER%20SPEEDIO%20ALUMINUM.json` | Iterating on one library without running the full sweep. | +| Validate Library — With Live Plex Supplier Lookup | GET | `?use_api=1` | Most thorough — also resolves vendors against `mdm/v1/suppliers`. Catches typos like `"HARVEY TOOL"` vs `"HARVEY TOOLS"`. **Use this before any actual sync push.** | +| Validate Library — Upload (POST, no ADC) | POST | multipart/form-data | Validate uploaded files without touching the share. Useful when ADC is down or you want to inspect a candidate library before saving it. | + +All four return the same `ValidationResult` shape. See +`docs/validate_library_spec.md` for the full rule table and the +`debug_trace` field semantics. + +### `[PROXY]` — Raw Plex Proxy + +| Request | Method | Path | Notes | +|---|---|---|---| +| Raw Plex Proxy — GET | GET | `/api/plex/raw?path={{plex_path}}` | Always allowed regardless of `PLEX_ALLOW_WRITES`. | +| Raw Plex Proxy — POST | POST | `/api/plex/raw?path={{plex_path}}` | **Blocked by write guard unless `PLEX_ALLOW_WRITES=1`.** | +| Raw Plex Proxy — PUT | PUT | `/api/plex/raw?path={{plex_path}}` | **Blocked by write guard unless `PLEX_ALLOW_WRITES=1`.** | +| Raw Plex Proxy — DELETE | DELETE | `/api/plex/raw?path={{plex_path}}` | **Blocked by write guard unless `PLEX_ALLOW_WRITES=1`.** | + +The `plex_path` collection variable defaults to `mdm/v1/tenants` so a +proxy GET out of the box returns the same payload as the Plex collection's +auth canary. Override it per call. + +**Use the proxy variants in preference to the direct Plex collection's +`[WRITE]` requests when the production write guard matters.** + +--- + +## 6. Safe write workflow + +If you need to actually run a write against Plex: + +1. **Run the matching read first.** Use `[INV] Get Supply Item by ID` (or + the equivalent for whatever you're modifying) to confirm current state + and capture the UUID. +2. **Set the write guard env var:** + ```powershell + $env:PLEX_ALLOW_WRITES = "1" + py run_dev.py + ``` + This enables the proxy to forward mutating methods. The Flask UI + header chip will switch from **READ ONLY** to **WRITES ON** so you + can see at a glance what mode the server is in. +3. **Decide which path you're sending through:** + - **Through the proxy (`[PROXY]` requests in Fusion collection):** the + guard is enforced, so this is the safest option. Use this for all + normal write testing. + - **Direct to Plex (`[WRITE]` requests in Plex collection):** bypasses + the guard. Only use when you specifically need to test the wire + payload without Flask in the middle. +4. **Send the request.** Watch the response carefully. +5. **Re-run the matching read** to confirm the change took effect. +6. 
**Unset the guard immediately when you're done:** + ```powershell + Remove-Item Env:PLEX_ALLOW_WRITES + ``` + Or just close the shell. **Do not leave `PLEX_ALLOW_WRITES=1` + sticking around.** + +--- + +## 7. Adding new requests + +When the Datum project discovers a new endpoint or needs a new template, +add it to the appropriate collection following these conventions: + +1. **Pick the right collection.** Plex namespaces go in `Plex API — Datum`. + Anything that hits the local Flask app goes in `Fusion 360 Tool + Libraries — Datum`. +2. **Use the `[NS]` naming prefix.** Pick from §3 above. If a new + namespace appears (e.g. `quality/v1/*` starts working), add a new + prefix and document it in this file. +3. **Use `{{base_url}}` in the URL.** Don't hardcode hosts. +4. **Mark verification status in the description.** Use `**Verified.**`, + `**UNVERIFIED.**`, `**Returns 404 as of YYYY-MM-DD.**` etc. so the + next reader knows what to trust. +5. **For writes, default to a placeholder body** with `REPLACE-ME-*` + sentinel values. Never commit a body containing real product data. +6. **For reads with known response sizes, mention the byte / record + count** in the description. The `[MDM] List Parts — Unfiltered` row + above is a good template — knowing it's 19.6 MB up front saves + someone an unintended download. +7. **Add a Postman test script** if there's a useful invariant to + assert (e.g. "supplyItemNumber field present", "category equals + 'Tools & Inserts'"). Several existing requests already have these — + look at `[INV] List Supply Items — Tools & Inserts` and `[AUTH] List + All Tenants — Auth Canary` for the pattern. + +### Tooling — how to add via the Postman MCP + +Both collections are managed via the Postman MCP server (`createCollectionRequest`, +`updateCollectionRequest`). Folder creation is **not available on the +minimal MCP tier** — that's why we use the `[NS]` prefix convention +instead of real folders. If the MCP tier is upgraded, the prefix +convention can be replaced with real folders via `createCollectionFolder`. + +To add a new request from a Claude Code session: + +``` +mcp____createCollectionRequest( + collectionId="75b28dc4-9c73-4e27-90d0-1539777f52ea", + name="[INV] My New Endpoint", + method="GET", + url="{{base_url}}/inventory/v1/inventory-definitions/foo", + description="..." +) +``` + +Use the bare collection ID (no `53648712-` prefix) for `createCollectionRequest`, +but the prefixed UID for `getCollection`. (Yes, this is inconsistent — +it's a Postman API quirk, not ours.) + +--- + +## 8. Update protocol + +Some changes invalidate parts of these collections. When any of the +following happens, update the collection AND this document in the same +PR: + +| Trigger | What to update | +|---|---| +| Plex Datum Consumer Key rotates (~31 days) | Environment `api_key` value (Postman UI). No code change. | +| New Plex namespace verified working | Move from `[PROBE]` to its real namespace prefix; update `docs/Plex_API_Reference.md` access matrix. | +| New Plex namespace probed and confirmed not subscribed | Add to `[PROBE]` group with the date in the description. | +| Brother Speedio FTP IP changes | Update collection variables `workcenter_id_speedio_879` / `workcenter_id_speedio_880` AND `docs/BRIEFING.md` machine table. | +| `app.py` route renamed | Update the matching `[LIB]` / `[VAL]` / `[PROXY]` request URL. The current Postman URLs may already be stale — see the warning under §5 `[LIB]`. 
|
| New `[WRITE]` shape verified | Replace the `_TBD` / `REPLACE-ME` sentinel values with the verified body and mark `**Verified.**` in the description. |

---

## 9. References

- `docs/BRIEFING.md` — primary project context
- `docs/Plex_API_Reference.md` — verified endpoint matrix and 401-vs-404 reading guide
- `docs/Fusion360_Tool_Library_Reference.md` — Fusion JSON schema
- `docs/validate_library_spec.md` — pre-sync validation gate spec
- `app.py` — local Flask routes that the Fusion collection hits
- `plex_api.py` — `PlexClient` and the extraction helpers
- GitHub issue #3 — supply-item upsert (drives the `[WRITE]` requests)
- GitHub issue #6 — workcenter doc push (drives the `[WRITE] PUT Workcenter Doc` placeholder)
- GitHub issue #12 — key rotation cadence
- GitHub issue #25 — `validate_library.py` implementation (drives the `[VAL]` requests)

diff --git a/docs/REORG_AND_STACK.md b/docs/REORG_AND_STACK.md
new file mode 100644
index 0000000..932827f
--- /dev/null
+++ b/docs/REORG_AND_STACK.md
@@ -0,0 +1,317 @@
# Reorg + Stack Update — Datum

**Status:** Planning (2026-04-17). No code changes in the session that wrote this doc.
**Trigger:** Grace Engineering has given Shane company-supplied GCP access and a
budget for Datum. That funds the [GCP migration epic #85](https://github.com/grace-shane/Datum/issues/85)
and opens room for stack changes that wouldn't have been justifiable on a
zero-budget project.
**Relationship to #85:** This plan executes *before* the GCP migration. The
repo ships to Cloud SQL + Cloud Run / VMs more easily if it's organized and the
DB layer is already vendor-neutral. See the sequencing block at the bottom.

---

## Scope boundaries (read first)

These are explicit limits, not stretch goals. Written up front because
"everyone asks when you have a UI" and scope creep is the fastest way to
wreck a cleanup PR series.

| Boundary | Meaning |
|---|---|
| **React UI is debug + show-and-tell only** | `datum.graceops.dev` is not a product Shane actively develops. Other Grace engineers use it and that's valuable, but feature requests land in a user-goals conversation before any code. No speculative features. |
| **No mobile support** | Not now, not as part of this reorg, not as a stretch goal. No PWA, no mobile-first refactors, no React Native. If the UI renders acceptably on a tablet, that's a bonus, not a requirement. |
| **Plex writes stay last** | Nothing in this plan changes the "DB first, Plex last, can't dry-run prod" sequencing. The reorg and stack update are strictly below the Plex write layer. |
| **No new features masquerading as cleanup** | Organize moves files. Stack updates change imports. Neither adds behavior. New features get their own issues and PRs. |
| **Don't touch the Flask endpoint-tester scope** | `app.py` + `templates/` + `static/` is Shane's personal Plex-API poking tool. It's not user-facing. Retain it through the reorg — Shane uses it to sanity-check Plex responses. The React UI is the user-facing surface. |

If any PR in this series starts drifting past these boundaries, pause and
split the work instead of letting it grow.

---

## Why now, why this order

Three reasons to organize before migrating, not after:

1. **Vendor-neutral DB layer is the single highest-leverage stack change** —
   and it's also the natural prerequisite for swapping Supabase → Cloud SQL.
   Doing it after migration means a second round of rewrites. Doing it before
   means Cloud SQL is a connection-string flip.
2. **15 flat `.py` files at the repo root are the visible symptom of "we didn't
   know what we were building yet."** That excuse is gone. We know what we're
   building. The layout should reflect it.
3. **Mixing a reorg PR with a migration PR makes every diff ambiguous** — is
   this a rename, a rewrite, a behavior change? Separating them keeps review
   tractable.

Stack updates that *don't* unblock Cloud SQL (FastAPI, httpx, uv) are
secondary. They're covered under Phase 2 but flagged as "weigh against whether
it unblocks anything," not mandatory.

---

## Current state

```
datum/
├── 15 top-level *.py files                  ← the main symptom
├── tests/                                   flat, 17 test files
├── scripts/load_sample.py                   single script, no organization needed
├── db/migrations/                           SQL migrations
├── web/                                     React UI (product surface)
├── templates/, static/                      legacy Flask endpoint-tester UI
├── docs/                                    documentation
├── .github/workflows/                       CI
├── pyproject.toml                           Python packaging
├── requirements.txt, requirements-dev.txt   pinned deps
└── README.md, CLAUDE.md, TODO.md
```

Pain points that motivate the reorg:

- Four sync-adjacent entrypoints (`sync.py`, `sync_supabase.py`,
  `sync_tool_inventory.py`, `populate_supply_items.py`) with overlapping
  concerns — hard to tell which one is the real nightly path without
  reading each
- `supabase_client.py` is the direct blocker for the Cloud SQL migration
- No `datum/` package — imports are all top-level, which means worktree
  discovery and cloud packaging both hit edge cases
- Tests mirror the flat layout, which means `test_sync_supabase.py`
  + `test_sync.py` + `test_sync_tool_inventory.py` live side by side with
  no obvious relationship
- The React UI sits in `web/` with no statement about whether it's part
  of the Python project (it isn't) or a peer (it is) — affects how
  Cloudflare, CI, and dependency management read the repo

---

## Phase 1: Organize

One PR series, three or four PRs, each one mechanical. Goal: every file
in `*.py` at the repo root gets a home.
+ +### Target Python package layout + +``` +datum/ # Python package (importable as `datum.*`) +├── __init__.py +├── bootstrap.py # credential loader (moved from root) +├── plex/ +│ ├── __init__.py +│ ├── client.py # from plex_api.py +│ ├── diagnostics.py # plex_diagnostics.py +│ └── extractors.py # extract_* helpers split from plex_api.py +├── fusion/ +│ ├── __init__.py +│ ├── aps.py # aps_client.py (primary source) +│ └── adc_loader.py # tool_library_loader.py (fallback, slated +│ # for deletion under #85) +├── db/ +│ ├── __init__.py +│ ├── client.py # supabase_client.py → becomes SQLAlchemy +│ │ # in Phase 2 +│ └── ingest.py # ingest_reference.py +├── sync/ +│ ├── __init__.py +│ ├── runner.py # sync.py — the nightly CLI entry point +│ ├── staging.py # sync_supabase.py (writes to DB staging) +│ ├── inventory.py # sync_tool_inventory.py (Plex qty pull) +│ ├── supply_items.py # populate_supply_items.py +│ ├── payload.py # build_supply_item_payload (new home +│ │ # for issue #3 work) +│ └── enrich.py # enrich.py +├── validate/ +│ ├── __init__.py +│ └── library.py # validate_library.py +└── web/ # Flask API only — React UI stays separate + ├── __init__.py + ├── app.py # from top-level app.py + ├── templates/ # Flask endpoint-tester (retained) + ├── static/ # Flask endpoint-tester assets (retained) + └── routes/ + ├── plex.py # /api/plex/* + ├── fusion.py # /api/fusion/*, /api/aps/* + └── diagnostics.py # /api/diagnostics/* +``` + +Top-level after reorg: + +``` +/ +├── datum/ # Python package (above) +├── web/ # React UI (unchanged — peer to datum/, +│ # NOT absorbed into the Python package) +├── tests/ # mirror datum/ structure +├── db/migrations/ # SQL migrations (unchanged location) +├── scripts/ # one-off CLI scripts +├── docs/ +├── .github/workflows/ +├── pyproject.toml +├── requirements.txt +├── requirements-dev.txt +└── README.md, CLAUDE.md, TODO.md +``` + +**Key decision:** `web/` (React) is a peer to `datum/` (Python), not a +subdirectory of it. Different language, different toolchain, different +deploy (Cloudflare Workers vs runtime VM). Monorepo layout, not nested. 
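
To make the mechanical nature of the moves concrete, here is a hypothetical before/after at a call site; `stage_tools` is an illustrative helper name, not an existing function, and only the import lines change:

```python
# Before the reorg (flat root modules):
from plex_api import PlexClient
from sync_supabase import stage_tools        # hypothetical helper name

# After Phase 1 (package layout above):
from datum.plex.client import PlexClient
from datum.sync.staging import stage_tools   # same code, new home
```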
+ +### File-move table + +| From | To | Notes | +|---|---|---| +| `plex_api.py` | `datum/plex/client.py` | Split `extract_*` helpers into `datum/plex/extractors.py` | +| `plex_diagnostics.py` | `datum/plex/diagnostics.py` | | +| `aps_client.py` | `datum/fusion/aps.py` | | +| `tool_library_loader.py` | `datum/fusion/adc_loader.py` | Renamed to make its "fallback path" status visible | +| `supabase_client.py` | `datum/db/client.py` | Becomes SQLAlchemy in Phase 2 | +| `ingest_reference.py` | `datum/db/ingest.py` | | +| `sync.py` | `datum/sync/runner.py` | The nightly CLI entry point | +| `sync_supabase.py` | `datum/sync/staging.py` | | +| `sync_tool_inventory.py` | `datum/sync/inventory.py` | | +| `populate_supply_items.py` | `datum/sync/supply_items.py` | | +| `enrich.py` | `datum/sync/enrich.py` | | +| `validate_library.py` | `datum/validate/library.py` | Spec doc stays in `docs/` | +| `bootstrap.py` | `datum/bootstrap.py` | | +| `app.py` | `datum/web/app.py` | Split route handlers into `datum/web/routes/*.py` | +| `templates/` | `datum/web/templates/` | Flask tester — retain | +| `static/` | `datum/web/static/` | Flask tester — retain | +| `run_dev.py` | stays at root | Dev launcher — convenient at the top level | +| `scripts/load_sample.py` | unchanged | | +| `web/` (React) | unchanged | Peer, not absorbed | + +`pyproject.toml` gets a `packages = ["datum"]` entry (or the src-layout +equivalent) so `pip install -e .` picks it up. + +### Suggested PR breakdown + +1. **PR A — add `datum/` package skeleton, move leaf modules** (`bootstrap`, + `plex`, `fusion`, `validate`). Update imports. Green CI. +2. **PR B — move sync layer** (`sync/*`). Update all imports. Green CI. +3. **PR C — move Flask app** (`datum/web/`). Split route handlers. React UI + untouched. Green CI. +4. **PR D — mirror-structure test reorg** — move `tests/test_*.py` into + `tests/plex/`, `tests/fusion/`, etc. Optional; low value if the flat + layout is readable. + +Each PR is ~15–30 file moves + import-path updates. Git catches renames +automatically (see BRIEFING session log 2026-04-08, lesson #7), so history +is preserved. + +--- + +## Phase 2: Stack updates + +### Primary: SQLAlchemy 2.0 + psycopg3 (unblocks Cloud SQL) + +Replace the Supabase Python client with SQLAlchemy 2.0 over psycopg3. + +**What changes:** + +- `datum/db/client.py` gains a SQLAlchemy `Engine` and session factory. +- `datum/db/models.py` (new) — SQLAlchemy declarative models for + `libraries`, `tools`, `cutting_presets`, `plex_supply_items`. +- Supabase-client calls (`client.table("tools").upsert(...)`) become + SQLAlchemy `session.merge(...)` or `insert(...).on_conflict_do_update(...)`. +- Connection string moves from `SUPABASE_URL` + `SUPABASE_SERVICE_ROLE_KEY` + to a standard `DATABASE_URL` env var (psycopg3 parses it directly). + `.env.local` gains `DATABASE_URL` — Supabase exposes a direct Postgres + connection that works with psycopg3. +- Tests swap Supabase-REST mocks for an in-process SQLite-or-Postgres + fixture. `pytest-postgresql` or `sqlalchemy-utils` create/drop. + +**Why this unblocks Cloud SQL:** after the swap, pointing `DATABASE_URL` +at Cloud SQL is a connection-string flip. No application code changes. +The migration PR becomes infra + secret rotation, not a rewrite. + +### Secondary (pick-and-choose, not mandatory) + +| Change | Verdict | Reason | +|---|---|---| +| `ruff` for lint + format | **Do it** | Single tool replaces black + flake8 + isort. Cheap to adopt, fast, widely used. 
|
| `uv` for dep resolution | **Probably** | The pinned `requirements.txt` stays, but `uv pip install` is dramatically faster. Low risk, no lock-in. |
| Type-check with `mypy` / `pyright` | **Skip for now** | Codebase is small and well-tested (262 tests). The ROI shows up at 50k+ LOC, not 5k. Revisit if the reorg surfaces unclear interfaces. |
| Flask → FastAPI | **Skip** | Flask is stable, the production write guard is non-trivial to port, and the React UI doesn't need async. Swap later if there's a real pain point. |
| `requests` → `httpx` | **Skip** | The Plex client is throttled at 200/min and synchronous is fine. `httpx` buys nothing concrete. |
| Structured logging (`structlog` or plain `logging` JSON) | **Do it with the GCP migration** | GCP Cloud Logging parses structured JSON natively. Align with the `datum-runtime` deploy, not this phase. |
| `pytest-postgresql` for DB tests | **Do it, with SQLAlchemy** | Pairs with the primary stack change. |

### Stack change file surface

| File | Change |
|---|---|
| `datum/db/client.py` | SQLAlchemy `Engine` + `sessionmaker` replaces the Supabase client |
| `datum/db/models.py` | New — declarative models for all 4 tables |
| `datum/sync/staging.py`, `datum/sync/inventory.py`, `datum/sync/supply_items.py`, `datum/sync/enrich.py`, `datum/db/ingest.py` | Swap Supabase API calls for SQLAlchemy session ops |
| `datum/web/routes/*.py` | Swap Supabase reads for SQLAlchemy query objects |
| `requirements.txt` | Drop `supabase`, add `sqlalchemy>=2`, `psycopg[binary]>=3`, `alembic` (optional — for migrations going forward) |
| `pyproject.toml` | Add `[tool.ruff]` config |
| `tests/conftest.py` | Swap Supabase REST mocks for a `pytest-postgresql` fixture or SQLAlchemy in-memory SQLite |
| All test files that mock Supabase | Update fixtures |

---

## Phase 3: React UI — lock the scope

No code changes in this phase — just a docs pass that makes the scope
boundaries visible on the UI itself and in the repo.

- Add a "scope" section to `README.md` under the UI section: "debug +
  show-and-tell; no mobile; feature requests need a documented use case."
- Add a small footer to the React UI (`web/src/components/Footer.tsx`
  or similar) that links to the scope statement. Makes the boundary
  visible where the users actually are.
- `web/README.md` (if it doesn't exist) — one-page statement of what
  the UI is for, who owns it (Shane), and what the contribution bar is.
- No mobile-related dependencies in `web/package.json`. No PWA manifest.
  No responsive-design retrofit.

Cost: one PR, maybe 20 LOC + two doc paragraphs. Pays for itself the
first time someone asks "why doesn't this work on my phone."

---

## Sequencing

Suggested order, with rough effort estimates:

1. **Reorg PRs A → D** (Phase 1) — ~4 PRs, ~1 day each if done in one
   pass. Tests should stay green throughout — these are pure moves.
2. **Stack update: SQLAlchemy + psycopg3** (Phase 2 primary) — ~1 week.
   Biggest risk is the test fixture rewrites. Land behind a feature flag
   (`DATUM_USE_SQLALCHEMY=1`) so the Supabase client stays in place as
   the fallback during bring-up; a sketch of the gate follows this list.
3. **Stack update: ruff + uv** (Phase 2 secondary) — ~half a day total.
   Low risk, immediate payoff.
4. **UI scope doc pass** (Phase 3) — ~1 PR, under an hour.
5. **Now ready for GCP migration (#85)** — Cloud SQL cutover is a
   connection-string flip, APS was already primary, Cloudflare DNS
   already points at Workers.

Everything below #5 (Phase 5 deploy items, etc.) is already done.
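
The gate mentioned in sequencing step 2 can be a few lines in `datum/db/client.py`. A minimal sketch, assuming the `DATABASE_URL` convention from Phase 2; the function names are placeholders, not landed code:

```python
import os

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker


def sqlalchemy_enabled() -> bool:
    """Bring-up gate: the Supabase client stays the default until this flips."""
    return os.environ.get("DATUM_USE_SQLALCHEMY", "").strip() == "1"


def make_session_factory():
    """SQLAlchemy path. DATABASE_URL is a psycopg3 URL, e.g. postgresql+psycopg://..."""
    engine = create_engine(os.environ["DATABASE_URL"])
    return sessionmaker(bind=engine)
```

Callers that check `sqlalchemy_enabled()` fall through to the existing Supabase client otherwise, which is the escape hatch the last open question below weighs against a single-commit cutover.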
+ +--- + +## Open questions + +- **Should the DB schema move from raw SQL in `db/migrations/` to + Alembic?** Alembic is nicer for collaborative schema changes but you're + the only dev touching it. Weigh against: raw SQL is simpler to read, + Cloud SQL accepts both, Alembic adds a dependency. Default: **stay + with raw SQL until there are 2+ devs.** +- **Should `run_dev.py` stay at the repo root or move under `datum/`?** + Root is friendlier for `py run_dev.py` muscle memory. `datum/` is more + correct. Default: **leave at root**, it's a dev-only file. +- **Do we write `web/README.md` now or when other engineers ask to + contribute?** Writing it now is cheap and prevents the "everyone asks" + drift. Default: **now, alongside Phase 3**. +- **Structured logging in Phase 2 or Phase 6 (#85)?** Doing it now makes + the GCP cutover cleaner but couples two unrelated changes. Default: + **defer to Phase 6**, keep this plan focused. +- **Do we keep `supabase_client.py` side-by-side during SQLAlchemy bring-up + for a dual-write period, or cut over in one commit?** Dual-write is + safer but adds temporary complexity. Given the Supabase DB is + single-tenant and low-volume, a single-commit cutover is probably fine, + but the feature-flag approach gives you an escape hatch. Default: + **feature-flag, flip when tests pass.** diff --git a/docs/superpowers/plans/2026-04-17-plex-mimic-mock.md b/docs/superpowers/plans/2026-04-17-plex-mimic-mock.md new file mode 100644 index 0000000..2c31d9d --- /dev/null +++ b/docs/superpowers/plans/2026-04-17-plex-mimic-mock.md @@ -0,0 +1,1494 @@ +# Plex-Mimic Mock HTTP Server Implementation Plan + +> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking. + +**Goal:** Stand up a local HTTP server that mimics the Plex REST surface (supply-items + workcenters) so `datum-sync` can dress-rehearse writes without touching `connect.plex.com`. Blocks issues [#3](https://github.com/grace-shane/Datum/issues/3) and [#6](https://github.com/grace-shane/Datum/issues/6); tracked in [#92](https://github.com/grace-shane/Datum/issues/92). + +**Architecture:** Flask app deployed as a systemd unit on `datum-runtime`, bound to `127.0.0.1:8080`. GET handlers serve canned snapshots captured once from real Plex; POST/PUT/PATCH handlers log the full request to SQLite and return Plex-shape responses with synthetic UUIDs. `plex_api.py` gets a `PLEX_BASE_URL` env override so the sync points at the mock with no code branches. A diff CLI reports payload drift between a mock run's captures and an expected-payload fixture. + +**Tech Stack:** Flask (already in `app.py`), SQLite (stdlib), pytest + monkeypatch (existing test conventions), systemd on Ubuntu 24.04 (`datum-runtime`). 
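
Before the task breakdown, a thumbnail of the handler shape that architecture implies: snapshot-backed GETs, capture-then-fake-response POSTs. This is a sketch of the intended shape only, not the `server.py` the tasks below produce; the route and snapshot names echo the file structure that follows.

```python
# Sketch only — illustrates the GET/POST split; not the server.py the tasks build.
import json
import uuid
from pathlib import Path

from flask import Flask, jsonify, request

app = Flask(__name__)
SNAPSHOTS = Path(__file__).parent / "snapshots"


@app.get("/inventory/v1/inventory-definitions/supply-items")
def list_supply_items():
    # GETs serve the canned snapshot captured once from real Plex.
    return jsonify(json.loads((SNAPSHOTS / "supply_items_list.json").read_text()))


@app.post("/inventory/v1/inventory-definitions/supply-items")
def create_supply_item():
    body = request.get_json(silent=True) or {}
    # The real handler logs the capture to SQLite here (Task 3's CaptureStore).
    return jsonify({**body, "id": str(uuid.uuid4())}), 201  # Plex-shape reply, synthetic UUID
```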
+ +--- + +## File Structure + +**Create:** +- `tools/__init__.py` — empty package marker +- `tools/plex_mock/__init__.py` — package marker + version constant +- `tools/plex_mock/server.py` — Flask app, route handlers +- `tools/plex_mock/store.py` — SQLite capture store (create/append/query) +- `tools/plex_mock/snapshots/README.md` — how to refresh +- `tools/plex_mock/snapshots/supply_items_list.json` — canned GET response (committed) +- `tools/plex_mock/snapshots/workcenters_list.json` — canned GET response (committed) +- `tools/plex_mock/capture_snapshots.py` — one-off CLI that hits real Plex and writes the snapshot files +- `tools/plex_mock/diff.py` — CLI that compares captures vs expected fixture +- `tools/plex_mock/systemd/datum-plex-mock.service` — systemd unit (deployed, not auto-installed) +- `tools/plex_mock/README.md` — how to run, how to deploy, validation-window protocol +- `tests/test_plex_mock_store.py` +- `tests/test_plex_mock_server.py` +- `tests/test_plex_mock_diff.py` +- `tests/fixtures/plex_mock/expected_supply_items.json` — reference payload shape for the diff CLI + +**Modify:** +- `plex_api.py:49-63` — add `PLEX_BASE_URL` override; extend `PlexClient.__init__` with an optional `base_url` arg +- `tests/test_plex_api.py:66-80` — new tests covering the override +- `pyproject.toml` — add three console scripts (`datum-plex-mock-serve`, `datum-plex-mock-snapshot`, `datum-plex-mock-diff`) +- `docs/Plex_API_Reference.md` — one-paragraph section on `PLEX_BASE_URL` + the mock +- `.gitignore` — ignore `tools/plex_mock/captures/` and `tools/plex_mock/*.db` + +**Don't touch:** `bootstrap.py` (already reads env via `setdefault`; no change needed for `PLEX_BASE_URL` to flow through). + +--- + +## Task 1: `PLEX_BASE_URL` override in `plex_api.py` (TDD) + +Smallest, safest chunk. Additive, no behavior change when unset. Can land immediately, independent of the rest. 
+ +**Files:** +- Modify: `plex_api.py:49-53` (module-level constants), `plex_api.py:61-63` (client constructor) +- Modify: `tests/test_plex_api.py` — add tests to `TestPlexClientEnvironment` and `TestModuleDefaults` + +- [ ] **Step 1.1: Write failing tests for the override** + +Append to `tests/test_plex_api.py`, inside class `TestPlexClientEnvironment`: + +```python + def test_explicit_base_url_arg_wins(self): + c = PlexClient(api_key="k", base_url="http://localhost:8080") + assert c.base == "http://localhost:8080" + + def test_explicit_base_url_arg_wins_even_over_use_test(self): + c = PlexClient(api_key="k", use_test=True, base_url="http://localhost:8080") + assert c.base == "http://localhost:8080" +``` + +Append to class `TestModuleDefaults`: + +```python + def test_override_url_empty_when_env_unset(self, monkeypatch): + monkeypatch.delenv("PLEX_BASE_URL", raising=False) + importlib.reload(plex_api) + assert plex_api.OVERRIDE_URL == "" + importlib.reload(plex_api) + + def test_override_url_set_from_env(self, monkeypatch): + monkeypatch.setenv("PLEX_BASE_URL", "http://localhost:8080") + importlib.reload(plex_api) + assert plex_api.OVERRIDE_URL == "http://localhost:8080" + importlib.reload(plex_api) + + def test_client_uses_override_url_when_env_set(self, monkeypatch): + monkeypatch.setenv("PLEX_BASE_URL", "http://localhost:8080") + importlib.reload(plex_api) + c = plex_api.PlexClient(api_key="k") + assert c.base == "http://localhost:8080" + importlib.reload(plex_api) + + def test_client_override_url_wins_over_use_test(self, monkeypatch): + monkeypatch.setenv("PLEX_BASE_URL", "http://localhost:8080") + importlib.reload(plex_api) + c = plex_api.PlexClient(api_key="k", use_test=True) + assert c.base == "http://localhost:8080" + importlib.reload(plex_api) + + def test_client_unchanged_when_override_unset(self, monkeypatch): + monkeypatch.delenv("PLEX_BASE_URL", raising=False) + importlib.reload(plex_api) + c = plex_api.PlexClient(api_key="k") + assert c.base == plex_api.BASE_URL + importlib.reload(plex_api) +``` + +- [ ] **Step 1.2: Run the new tests; expect failures** + +Run: `python -m pytest tests/test_plex_api.py::TestPlexClientEnvironment::test_explicit_base_url_arg_wins tests/test_plex_api.py::TestModuleDefaults::test_override_url_empty_when_env_unset -v` + +Expected: both fail with `AttributeError: module 'plex_api' has no attribute 'OVERRIDE_URL'` / `TypeError: __init__() got an unexpected keyword argument 'base_url'`. + +- [ ] **Step 1.3: Add the module-level constant** + +In `plex_api.py`, replace lines 49-53: + +```python +BASE_URL = "https://connect.plex.com" +TEST_URL = "https://test.connect.plex.com" +USE_TEST = os.environ.get("PLEX_USE_TEST", "").strip().lower() in ( + "1", "true", "yes", "on", "enabled", +) +``` + +with: + +```python +BASE_URL = "https://connect.plex.com" +TEST_URL = "https://test.connect.plex.com" +# PLEX_BASE_URL — explicit override for the Plex base URL (e.g. the local +# mock at tools/plex_mock/server.py running on localhost:8080). Empty +# string means "no override"; BASE_URL / TEST_URL selection applies. +# Used by the write-validation workflow in issue #92 so the sync can +# dress-rehearse against a fake-Plex without touching connect.plex.com. 
+OVERRIDE_URL = os.environ.get("PLEX_BASE_URL", "").strip() +USE_TEST = os.environ.get("PLEX_USE_TEST", "").strip().lower() in ( + "1", "true", "yes", "on", "enabled", +) +``` + +- [ ] **Step 1.4: Extend the client constructor** + +In `plex_api.py:61-63`, replace: + +```python +class PlexClient: + def __init__(self, api_key, api_secret="", tenant_id="", use_test=False): + self.base = TEST_URL if use_test else BASE_URL +``` + +with: + +```python +class PlexClient: + def __init__(self, api_key, api_secret="", tenant_id="", use_test=False, base_url=None): + # Resolution order: + # 1. explicit base_url kwarg (tests, ad-hoc scripts) + # 2. PLEX_BASE_URL env var (deployment-time override — the mock) + # 3. TEST_URL if use_test else BASE_URL (original behavior) + if base_url: + self.base = base_url + elif OVERRIDE_URL: + self.base = OVERRIDE_URL + else: + self.base = TEST_URL if use_test else BASE_URL +``` + +- [ ] **Step 1.5: Run the tests; expect all green** + +Run: `python -m pytest tests/test_plex_api.py -v` + +Expected: all tests pass, including the new 7. + +- [ ] **Step 1.6: Commit** + +```bash +git add plex_api.py tests/test_plex_api.py +git commit -m "feat(plex-api): PLEX_BASE_URL override + base_url client kwarg (#92)" +``` + +--- + +## Task 2: `tools/plex_mock/` package scaffold + +Lay down the directory skeleton and `.gitignore` rules so later tasks can drop files in without restructuring. + +**Files:** +- Create: `tools/__init__.py`, `tools/plex_mock/__init__.py`, `tools/plex_mock/README.md`, `tools/plex_mock/snapshots/README.md` +- Modify: `.gitignore` + +- [ ] **Step 2.1: Create the package markers** + +```bash +mkdir -p tools/plex_mock/snapshots tools/plex_mock/systemd +``` + +Create `tools/__init__.py` with: + +```python +"""Internal tooling for the Datum project (not packaged for distribution).""" +``` + +Create `tools/plex_mock/__init__.py` with: + +```python +""" +Local mock HTTP server mirroring the Plex REST surface for write-pipeline +validation. See tools/plex_mock/README.md and issue #92. +""" +__version__ = "0.1.0" +``` + +- [ ] **Step 2.2: Stub the READMEs** + +Create `tools/plex_mock/README.md` with (placeholder — filled out in Task 9): + +```markdown +# Plex-Mimic Mock + +Local HTTP server mimicking the Plex REST surface. See issue #92 and +`docs/superpowers/plans/2026-04-17-plex-mimic-mock.md` for the full plan. + +Full usage + validation-window protocol lands in Task 9 of the plan. +``` + +Create `tools/plex_mock/snapshots/README.md` with: + +```markdown +# Canned GET snapshots + +JSON responses captured from real `connect.plex.com` so the mock can +serve realistic GETs without a live-Plex dependency. Refresh via +`python -m tools.plex_mock.capture_snapshots` when Plex shapes change. + +Files here are committed. Ad-hoc mock captures (POSTs the sync sent) +live in `tools/plex_mock/captures/` which is gitignored. +``` + +- [ ] **Step 2.3: Update `.gitignore`** + +Append to `.gitignore`: + +``` +# Plex mock — ephemeral capture data (POSTs the sync sent against the mock) +tools/plex_mock/captures/ +tools/plex_mock/*.db +tools/plex_mock/*.db-journal +``` + +- [ ] **Step 2.4: Commit** + +```bash +git add tools/ .gitignore +git commit -m "feat(plex-mock): package scaffold for Plex-mimic mock (#92)" +``` + +--- + +## Task 3: SQLite capture store (TDD) + +Pure-Python module, no Flask dep. Append-only store of every POST/PUT/PATCH the mock sees, queryable by the diff CLI. 
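+
+For orientation before the TDD steps: the store's whole public surface is two methods. A sketch of the intended call pattern, matching the implementation that lands in Step 3.3:
+
+```python
+from tools.plex_mock.store import CaptureStore
+
+store = CaptureStore("/tmp/captures.db")  # creates the file + schema on open
+store.append(
+    method="POST",
+    path="/inventory/v1/inventory-definitions/supply-items",
+    body={"supplyItemNumber": "ABC-1"},
+    run_id="r1",
+)
+rows = store.query(run_id="r1", method="POST")  # oldest first
+assert rows[0]["body"]["supplyItemNumber"] == "ABC-1"
+```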
+ +**Files:** +- Create: `tools/plex_mock/store.py` +- Create: `tests/test_plex_mock_store.py` + +- [ ] **Step 3.1: Write the failing tests** + +Create `tests/test_plex_mock_store.py`: + +```python +"""Tests for the Plex-mock SQLite capture store.""" +import json +import sqlite3 +from pathlib import Path + +import pytest + +from tools.plex_mock.store import CaptureStore + + +@pytest.fixture +def store(tmp_path: Path) -> CaptureStore: + return CaptureStore(tmp_path / "captures.db") + + +class TestCaptureStoreInit: + def test_creates_db_file_on_open(self, tmp_path: Path): + db = tmp_path / "captures.db" + assert not db.exists() + CaptureStore(db) + assert db.exists() + + def test_creates_table_schema(self, store: CaptureStore): + with sqlite3.connect(store.path) as con: + cols = {row[1] for row in con.execute("PRAGMA table_info(captures)")} + assert {"id", "ts", "method", "path", "body_json", "run_id"} <= cols + + +class TestCaptureStoreAppend: + def test_append_returns_integer_id(self, store: CaptureStore): + rid = store.append(method="POST", path="/foo", body={"a": 1}, run_id="r1") + assert isinstance(rid, int) + assert rid >= 1 + + def test_append_persists_row(self, store: CaptureStore): + store.append(method="POST", path="/foo", body={"a": 1}, run_id="r1") + rows = store.query(run_id="r1") + assert len(rows) == 1 + assert rows[0]["method"] == "POST" + assert rows[0]["path"] == "/foo" + assert rows[0]["body"] == {"a": 1} + assert rows[0]["run_id"] == "r1" + + def test_append_stores_body_as_json(self, store: CaptureStore): + payload = {"nested": {"k": [1, 2, 3]}} + store.append(method="PUT", path="/x", body=payload, run_id="r1") + with sqlite3.connect(store.path) as con: + raw = con.execute("SELECT body_json FROM captures").fetchone()[0] + assert json.loads(raw) == payload + + def test_append_handles_null_body(self, store: CaptureStore): + store.append(method="PATCH", path="/x", body=None, run_id="r1") + rows = store.query(run_id="r1") + assert rows[0]["body"] is None + + +class TestCaptureStoreQuery: + def test_query_filters_by_run_id(self, store: CaptureStore): + store.append(method="POST", path="/a", body={}, run_id="r1") + store.append(method="POST", path="/b", body={}, run_id="r2") + assert len(store.query(run_id="r1")) == 1 + assert len(store.query(run_id="r2")) == 1 + + def test_query_filters_by_method(self, store: CaptureStore): + store.append(method="POST", path="/a", body={}, run_id="r1") + store.append(method="PUT", path="/a", body={}, run_id="r1") + assert len(store.query(run_id="r1", method="POST")) == 1 + assert len(store.query(run_id="r1", method="PUT")) == 1 + + def test_query_orders_by_id_ascending(self, store: CaptureStore): + store.append(method="POST", path="/a", body={"n": 1}, run_id="r1") + store.append(method="POST", path="/b", body={"n": 2}, run_id="r1") + rows = store.query(run_id="r1") + assert [r["body"]["n"] for r in rows] == [1, 2] + + def test_query_empty_when_no_match(self, store: CaptureStore): + assert store.query(run_id="nope") == [] +``` + +- [ ] **Step 3.2: Run tests; expect import failure** + +Run: `python -m pytest tests/test_plex_mock_store.py -v` + +Expected: collection error — `ModuleNotFoundError: No module named 'tools.plex_mock.store'`. + +- [ ] **Step 3.3: Implement the store** + +Create `tools/plex_mock/store.py`: + +```python +""" +SQLite-backed capture store for the Plex-mimic mock. 
+ +Every POST/PUT/PATCH the mock server sees is appended here so the +diff CLI (#92) can report what the sync *would have* sent to real +Plex, and three-runs-in-a-row idempotency checks can compare run sets. + +Append-only by design — no update/delete path. Gitignored; survives +mock restarts. +""" +from __future__ import annotations + +import json +import sqlite3 +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + + +SCHEMA = """ +CREATE TABLE IF NOT EXISTS captures ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + ts TEXT NOT NULL, + method TEXT NOT NULL, + path TEXT NOT NULL, + body_json TEXT, + run_id TEXT NOT NULL +); +CREATE INDEX IF NOT EXISTS captures_run_id_idx ON captures(run_id); +CREATE INDEX IF NOT EXISTS captures_run_method_idx ON captures(run_id, method); +""" + + +class CaptureStore: + """Thin wrapper around a SQLite file used as an append-only capture log.""" + + def __init__(self, path: Path | str): + self.path = Path(path) + self.path.parent.mkdir(parents=True, exist_ok=True) + with sqlite3.connect(self.path) as con: + con.executescript(SCHEMA) + + def append( + self, + *, + method: str, + path: str, + body: Any, + run_id: str, + ) -> int: + """Record one captured request. Returns the rowid.""" + ts = datetime.now(timezone.utc).isoformat() + body_json = json.dumps(body) if body is not None else None + with sqlite3.connect(self.path) as con: + cur = con.execute( + "INSERT INTO captures (ts, method, path, body_json, run_id) " + "VALUES (?, ?, ?, ?, ?)", + (ts, method, path, body_json, run_id), + ) + return cur.lastrowid + + def query( + self, + *, + run_id: str, + method: str | None = None, + ) -> list[dict]: + """Return all captures for a run, oldest first. Optional method filter.""" + sql = "SELECT id, ts, method, path, body_json, run_id FROM captures WHERE run_id = ?" + args: list[Any] = [run_id] + if method is not None: + sql += " AND method = ?" + args.append(method) + sql += " ORDER BY id ASC" + with sqlite3.connect(self.path) as con: + rows = con.execute(sql, args).fetchall() + return [ + { + "id": r[0], + "ts": r[1], + "method": r[2], + "path": r[3], + "body": json.loads(r[4]) if r[4] is not None else None, + "run_id": r[5], + } + for r in rows + ] +``` + +- [ ] **Step 3.4: Run tests; expect all green** + +Run: `python -m pytest tests/test_plex_mock_store.py -v` + +Expected: all 10 tests pass. + +- [ ] **Step 3.5: Commit** + +```bash +git add tools/plex_mock/store.py tests/test_plex_mock_store.py +git commit -m "feat(plex-mock): SQLite capture store (#92)" +``` + +--- + +## Task 4: Snapshot capture script + +One-off CLI that hits real Plex and writes the GET snapshots we'll serve from the mock. Read-only against live Plex — safe. + +**Files:** +- Create: `tools/plex_mock/capture_snapshots.py` +- Will produce: `tools/plex_mock/snapshots/supply_items_list.json`, `tools/plex_mock/snapshots/workcenters_list.json` + +No unit tests in this task — it's an I/O-bound one-off. Tests for the snapshot-serving path live in Task 5. + +- [ ] **Step 4.1: Implement the capture CLI** + +Create `tools/plex_mock/capture_snapshots.py`: + +```python +""" +One-off: hit real connect.plex.com and persist GET responses for the two +endpoints the mock needs to serve. Commit the output files. + +Run with credentials loaded the usual way (.env.local + bootstrap.py): + + python -m tools.plex_mock.capture_snapshots + +Refresh when the Plex shape changes. This script only GETs — safe to +run any time without the PLEX_ALLOW_WRITES guard. 
+""" +from __future__ import annotations + +import json +import sys +from pathlib import Path + +from plex_api import API_KEY, API_SECRET, TENANT_ID, USE_TEST, PlexClient + + +SNAPSHOTS_DIR = Path(__file__).parent / "snapshots" + + +def capture(client: PlexClient, collection: str, version: str, resource: str, outfile: str) -> int: + env = client.get_envelope(collection, version, resource) + if not env["ok"]: + print(f" FAILED {collection}/{version}/{resource}: HTTP {env['status']}", file=sys.stderr) + return 1 + data = env["body"] + out = SNAPSHOTS_DIR / outfile + out.write_text(json.dumps(data, indent=2, sort_keys=True), encoding="utf-8") + count = len(data) if isinstance(data, list) else 1 + print(f" wrote {out.relative_to(Path.cwd())} ({count} records, {out.stat().st_size} bytes)") + return 0 + + +def main() -> int: + if not API_KEY: + print("PLEX_API_KEY is not set; can't capture snapshots.", file=sys.stderr) + return 2 + client = PlexClient(API_KEY, API_SECRET, TENANT_ID, use_test=USE_TEST) + rc = 0 + rc |= capture(client, "inventory", "v1", "inventory-definitions/supply-items", + "supply_items_list.json") + rc |= capture(client, "production", "v1", "production-definitions/workcenters", + "workcenters_list.json") + return rc + + +if __name__ == "__main__": + raise SystemExit(main()) +``` + +- [ ] **Step 4.2: Sanity-run the script locally** + +Run: `python -m tools.plex_mock.capture_snapshots` + +Expected (with live creds in `.env.local`): two files written under `tools/plex_mock/snapshots/`, roughly 30 KB and ~10 KB respectively, printed confirmation lines. + +Expected (no creds): exit code 2 and error message about `PLEX_API_KEY`. + +- [ ] **Step 4.3: Review the captured files** + +Run: `wc -l tools/plex_mock/snapshots/*.json` + +Open each file and eyeball: the arrays should have the expected record counts (supply-items ~2,500; workcenters ~143 per `docs/BRIEFING.md`). + +- [ ] **Step 4.4: Commit the script + captured snapshots** + +```bash +git add tools/plex_mock/capture_snapshots.py \ + tools/plex_mock/snapshots/supply_items_list.json \ + tools/plex_mock/snapshots/workcenters_list.json +git commit -m "feat(plex-mock): snapshot capture CLI + initial snapshots (#92)" +``` + +*If credentials aren't available in the execution environment, commit just the script and flag to Shane that the snapshots need to be captured on a VM that has live-Plex creds. Task 5 can proceed with stub snapshots in the meantime.* + +--- + +## Task 5: Flask mock server — GET handlers (TDD) + +GET routes serve the captured snapshots. Tests use Flask's test client against fixture JSON; no real HTTP server started. 
+ +**Files:** +- Create: `tools/plex_mock/server.py` +- Create: `tests/test_plex_mock_server.py` + +- [ ] **Step 5.1: Write failing tests** + +Create `tests/test_plex_mock_server.py`: + +```python +"""Tests for the Plex-mock Flask server.""" +import json +from pathlib import Path + +import pytest + +from tools.plex_mock.server import create_app + + +@pytest.fixture +def snapshots_dir(tmp_path: Path) -> Path: + d = tmp_path / "snapshots" + d.mkdir() + supply = [ + {"id": "11111111-1111-1111-1111-111111111111", "supplyItemNumber": "ABC-1", + "description": "Test tool", "category": "Tools & Inserts", + "group": "Machining - End Mills", "inventoryUnit": "Ea", "type": "SUPPLY"}, + {"id": "22222222-2222-2222-2222-222222222222", "supplyItemNumber": "ABC-2", + "description": "Test tool 2", "category": "Tools & Inserts", + "group": "Machining - Drills", "inventoryUnit": "Ea", "type": "SUPPLY"}, + ] + workcenters = [ + {"workcenterId": "0b6cf62b-2809-4d3d-ab24-369cd0171f62", + "workcenterCode": "879", "name": "Brother Speedio 879", + "workcenterGroup": "MILLS"}, + ] + (d / "supply_items_list.json").write_text(json.dumps(supply)) + (d / "workcenters_list.json").write_text(json.dumps(workcenters)) + return d + + +@pytest.fixture +def client(tmp_path: Path, snapshots_dir: Path): + app = create_app(snapshots_dir=snapshots_dir, db_path=tmp_path / "captures.db", run_id="test-run") + return app.test_client() + + +class TestSupplyItemsGetList: + def test_returns_200(self, client): + rv = client.get("/inventory/v1/inventory-definitions/supply-items") + assert rv.status_code == 200 + + def test_returns_snapshot_body(self, client): + rv = client.get("/inventory/v1/inventory-definitions/supply-items") + body = rv.get_json() + assert isinstance(body, list) + assert len(body) == 2 + assert body[0]["supplyItemNumber"] == "ABC-1" + + +class TestSupplyItemsGetById: + def test_returns_200_when_found(self, client): + rv = client.get("/inventory/v1/inventory-definitions/supply-items/11111111-1111-1111-1111-111111111111") + assert rv.status_code == 200 + assert rv.get_json()["supplyItemNumber"] == "ABC-1" + + def test_returns_404_when_unknown(self, client): + rv = client.get("/inventory/v1/inventory-definitions/supply-items/does-not-exist") + assert rv.status_code == 404 + + +class TestWorkcentersGet: + def test_returns_200_list(self, client): + rv = client.get("/production/v1/production-definitions/workcenters") + assert rv.status_code == 200 + assert len(rv.get_json()) == 1 + + def test_returns_200_by_id(self, client): + rv = client.get("/production/v1/production-definitions/workcenters/0b6cf62b-2809-4d3d-ab24-369cd0171f62") + assert rv.status_code == 200 + assert rv.get_json()["workcenterCode"] == "879" + + def test_returns_404_for_unknown_workcenter(self, client): + rv = client.get("/production/v1/production-definitions/workcenters/nope") + assert rv.status_code == 404 + + +class TestHealth: + def test_health_endpoint(self, client): + rv = client.get("/healthz") + assert rv.status_code == 200 + assert rv.get_json() == {"ok": True} +``` + +- [ ] **Step 5.2: Run tests; expect import failure** + +Run: `python -m pytest tests/test_plex_mock_server.py -v` + +Expected: `ModuleNotFoundError: No module named 'tools.plex_mock.server'`. + +- [ ] **Step 5.3: Implement `create_app` with GET routes** + +Create `tools/plex_mock/server.py`: + +```python +""" +Flask app mimicking the Plex REST endpoints the sync writes to. 
+GETs serve canned snapshots from disk; POST/PUT/PATCH handlers land
+in Task 6 (this file grows, the tests drive the shape).
+
+Bound to 127.0.0.1 by the systemd unit — never expose publicly.
+Issue: #92.
+"""
+from __future__ import annotations
+
+import json
+from pathlib import Path
+
+from flask import Flask, abort, jsonify
+
+from tools.plex_mock.store import CaptureStore
+
+
+def _load_snapshot(snapshots_dir: Path, name: str) -> list[dict]:
+    path = snapshots_dir / name
+    if not path.exists():
+        return []
+    return json.loads(path.read_text(encoding="utf-8"))
+
+
+def create_app(
+    *,
+    snapshots_dir: Path,
+    db_path: Path,
+    run_id: str,
+) -> Flask:
+    app = Flask(__name__)
+    app.config["PLEX_MOCK_SNAPSHOTS_DIR"] = snapshots_dir
+    app.config["PLEX_MOCK_STORE"] = CaptureStore(db_path)
+    app.config["PLEX_MOCK_RUN_ID"] = run_id
+
+    supply_items = _load_snapshot(snapshots_dir, "supply_items_list.json")
+    workcenters = _load_snapshot(snapshots_dir, "workcenters_list.json")
+    supply_by_id = {rec["id"]: rec for rec in supply_items}
+    workcenter_by_id = {rec["workcenterId"]: rec for rec in workcenters}
+
+    @app.get("/healthz")
+    def healthz():
+        return jsonify({"ok": True})
+
+    @app.get("/inventory/v1/inventory-definitions/supply-items")
+    def supply_items_list():
+        return jsonify(supply_items)
+
+    @app.get("/inventory/v1/inventory-definitions/supply-items/<item_id>")
+    def supply_items_get(item_id: str):
+        rec = supply_by_id.get(item_id)
+        if rec is None:
+            abort(404)
+        return jsonify(rec)
+
+    @app.get("/production/v1/production-definitions/workcenters")
+    def workcenters_list():
+        return jsonify(workcenters)
+
+    @app.get("/production/v1/production-definitions/workcenters/<wc_id>")
+    def workcenter_get(wc_id: str):
+        rec = workcenter_by_id.get(wc_id)
+        if rec is None:
+            abort(404)
+        return jsonify(rec)
+
+    return app
+
+
+def main() -> int:
+    """Console-script entry (datum-plex-mock-serve)."""
+    import argparse
+    import uuid
+
+    ap = argparse.ArgumentParser(description="Plex-mimic mock server")
+    ap.add_argument("--host", default="127.0.0.1")
+    ap.add_argument("--port", type=int, default=8080)
+    ap.add_argument("--snapshots", default=Path(__file__).parent / "snapshots")
+    ap.add_argument("--db", default=Path(__file__).parent / "captures.db")
+    ap.add_argument("--run-id", default=None, help="Override run_id (default: random uuid4)")
+    args = ap.parse_args()
+
+    app = create_app(
+        snapshots_dir=Path(args.snapshots),
+        db_path=Path(args.db),
+        run_id=args.run_id or str(uuid.uuid4()),
+    )
+    print(f"plex-mock serving on http://{args.host}:{args.port} run_id={app.config['PLEX_MOCK_RUN_ID']}")
+    app.run(host=args.host, port=args.port, debug=False)
+    return 0
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())
+```
+
+- [ ] **Step 5.4: Run tests; expect all green**
+
+Run: `python -m pytest tests/test_plex_mock_server.py -v`
+
+Expected: all 8 tests pass.
+
+- [ ] **Step 5.5: Commit**
+
+```bash
+git add tools/plex_mock/server.py tests/test_plex_mock_server.py
+git commit -m "feat(plex-mock): Flask server + GET snapshot handlers (#92)"
+```
+
+---
+
+## Task 6: POST/PUT/PATCH capture handlers (TDD)
+
+Writes to the mock are captured to SQLite and answered with a Plex-shaped response carrying a synthetic UUID. No state mutates between requests — every POST "succeeds" with a fresh UUID; every PUT/PATCH echoes the snapshot record merged with the request body, with the id from the URL authoritative.
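+
+One nuance worth spelling out: dedup only ever checks the committed snapshot, never earlier mock writes, so two identical POSTs in one run both succeed with distinct UUIDs. A sketch against the test client (payload values illustrative):
+
+```python
+payload = {"supplyItemNumber": "NEW-9", "description": "x"}
+first = client.post("/inventory/v1/inventory-definitions/supply-items", json=payload)
+second = client.post("/inventory/v1/inventory-definitions/supply-items", json=payload)
+
+# Both 201: the snapshot never mutates, so the second POST is not a 409.
+# Idempotency is checked later by comparing capture sets, not by the mock.
+assert first.status_code == second.status_code == 201
+assert first.get_json()["id"] != second.get_json()["id"]
+```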
+ +**Files:** +- Modify: `tools/plex_mock/server.py` — add write handlers +- Modify: `tests/test_plex_mock_server.py` — add write-handler tests + +- [ ] **Step 6.1: Write failing tests** + +Append to `tests/test_plex_mock_server.py`: + +```python +import uuid + + +class TestSupplyItemsPost: + def test_post_returns_201_with_synthetic_id(self, client): + payload = {"supplyItemNumber": "NEW-1", "description": "New tool", + "category": "Tools & Inserts", "group": "Machining - End Mills", + "inventoryUnit": "Ea", "type": "SUPPLY"} + rv = client.post("/inventory/v1/inventory-definitions/supply-items", json=payload) + assert rv.status_code == 201 + body = rv.get_json() + assert "id" in body + uuid.UUID(body["id"]) # valid uuid4 + assert body["supplyItemNumber"] == "NEW-1" + + def test_post_echoes_payload_fields(self, client): + payload = {"supplyItemNumber": "NEW-2", "description": "x", + "group": "Machining - Drills", "inventoryUnit": "Ea", + "category": "Tools & Inserts", "type": "SUPPLY"} + rv = client.post("/inventory/v1/inventory-definitions/supply-items", json=payload) + body = rv.get_json() + for k, v in payload.items(): + assert body[k] == v + + def test_post_persists_to_capture_store(self, client): + from tools.plex_mock.store import CaptureStore + payload = {"supplyItemNumber": "NEW-3"} + client.post("/inventory/v1/inventory-definitions/supply-items", json=payload) + store: CaptureStore = client.application.config["PLEX_MOCK_STORE"] + rows = store.query(run_id=client.application.config["PLEX_MOCK_RUN_ID"]) + assert len(rows) == 1 + assert rows[0]["method"] == "POST" + assert rows[0]["path"].endswith("/supply-items") + assert rows[0]["body"]["supplyItemNumber"] == "NEW-3" + + def test_post_409_on_duplicate_supply_item_number(self, client): + # Snapshot already has "ABC-1" — mock should treat that as a conflict + payload = {"supplyItemNumber": "ABC-1", "description": "dup"} + rv = client.post("/inventory/v1/inventory-definitions/supply-items", json=payload) + assert rv.status_code == 409 + + +class TestSupplyItemsPut: + def test_put_200_and_captured(self, client): + payload = {"description": "updated description"} + rv = client.put( + "/inventory/v1/inventory-definitions/supply-items/11111111-1111-1111-1111-111111111111", + json=payload, + ) + assert rv.status_code == 200 + assert rv.get_json()["description"] == "updated description" + + store = client.application.config["PLEX_MOCK_STORE"] + rows = store.query(run_id=client.application.config["PLEX_MOCK_RUN_ID"], method="PUT") + assert len(rows) == 1 + + def test_put_404_on_unknown_id(self, client): + rv = client.put( + "/inventory/v1/inventory-definitions/supply-items/not-a-real-id", + json={"description": "x"}, + ) + assert rv.status_code == 404 + + +class TestWorkcenterWrites: + def test_put_workcenter_captured(self, client): + # #6 probe — we don't yet know the body shape, just confirm the mock + # records whatever we send it. 
+        payload = {"unknownFieldForProbe": True}
+        rv = client.put(
+            "/production/v1/production-definitions/workcenters/0b6cf62b-2809-4d3d-ab24-369cd0171f62",
+            json=payload,
+        )
+        assert rv.status_code == 200
+        store = client.application.config["PLEX_MOCK_STORE"]
+        rows = store.query(run_id=client.application.config["PLEX_MOCK_RUN_ID"], method="PUT")
+        assert any(r["body"].get("unknownFieldForProbe") is True for r in rows)
+
+    def test_patch_workcenter_captured(self, client):
+        rv = client.patch(
+            "/production/v1/production-definitions/workcenters/0b6cf62b-2809-4d3d-ab24-369cd0171f62",
+            json={"note": "patched"},
+        )
+        assert rv.status_code == 200
+        store = client.application.config["PLEX_MOCK_STORE"]
+        rows = store.query(run_id=client.application.config["PLEX_MOCK_RUN_ID"], method="PATCH")
+        assert len(rows) == 1
+```
+
+- [ ] **Step 6.2: Run tests; expect failures**
+
+Run: `python -m pytest tests/test_plex_mock_server.py -v`
+
+Expected: 8 new tests fail with 405 (Method Not Allowed) or similar — the Flask app doesn't register POST/PUT/PATCH routes yet.
+
+- [ ] **Step 6.3: Implement the write handlers**
+
+In `tools/plex_mock/server.py`, inside `create_app()` just before `return app`, add:
+
+```python
+    @app.post("/inventory/v1/inventory-definitions/supply-items")
+    def supply_items_post():
+        from flask import request
+        payload = request.get_json(silent=True) or {}
+        store: CaptureStore = app.config["PLEX_MOCK_STORE"]
+        store.append(
+            method="POST",
+            path=request.path,
+            body=payload,
+            run_id=app.config["PLEX_MOCK_RUN_ID"],
+        )
+        # Dedup by supplyItemNumber against the snapshot — Plex returns 409
+        sin = payload.get("supplyItemNumber")
+        if sin and any(rec.get("supplyItemNumber") == sin for rec in supply_items):
+            return jsonify({"error": "duplicate supplyItemNumber", "supplyItemNumber": sin}), 409
+        import uuid as _uuid
+        resp = dict(payload)
+        resp["id"] = str(_uuid.uuid4())
+        return jsonify(resp), 201
+
+    @app.put("/inventory/v1/inventory-definitions/supply-items/<item_id>")
+    def supply_items_put(item_id: str):
+        from flask import request
+        if item_id not in supply_by_id:
+            abort(404)
+        payload = request.get_json(silent=True) or {}
+        store: CaptureStore = app.config["PLEX_MOCK_STORE"]
+        store.append(
+            method="PUT",
+            path=request.path,
+            body=payload,
+            run_id=app.config["PLEX_MOCK_RUN_ID"],
+        )
+        merged = {**supply_by_id[item_id], **payload, "id": item_id}
+        return jsonify(merged), 200
+
+    @app.route(
+        "/production/v1/production-definitions/workcenters/<wc_id>",
+        methods=["PUT", "PATCH"],
+    )
+    def workcenter_write(wc_id: str):
+        from flask import request
+        if wc_id not in workcenter_by_id:
+            abort(404)
+        payload = request.get_json(silent=True) or {}
+        store: CaptureStore = app.config["PLEX_MOCK_STORE"]
+        store.append(
+            method=request.method,
+            path=request.path,
+            body=payload,
+            run_id=app.config["PLEX_MOCK_RUN_ID"],
+        )
+        merged = {**workcenter_by_id[wc_id], **payload, "workcenterId": wc_id}
+        return jsonify(merged), 200
+```
+
+- [ ] **Step 6.4: Run the full test file; expect all green**
+
+Run: `python -m pytest tests/test_plex_mock_server.py -v`
+
+Expected: 16 tests pass (8 from Task 5 + 8 new).
+
+- [ ] **Step 6.5: Commit**
+
+```bash
+git add tools/plex_mock/server.py tests/test_plex_mock_server.py
+git commit -m "feat(plex-mock): POST/PUT/PATCH capture handlers (#92)"
+```
+
+---
+
+## Task 7: End-to-end rehearsal — `datum-sync` against the mock
+
+Run the real sync binary with `PLEX_BASE_URL=http://localhost:8080` + `PLEX_ALLOW_WRITES=1` and confirm captures land. 
No new code — this is a validation step that writes nothing but a log file. + +- [ ] **Step 7.1: Start the mock locally** + +```bash +python -m tools.plex_mock.server --run-id rehearsal-1 --db /tmp/plex-mock-rehearsal.db & +sleep 2 +curl -sf http://127.0.0.1:8080/healthz | python -m json.tool +``` + +Expected: `{"ok": true}`. + +- [ ] **Step 7.2: Run a dry-run sync pointed at the mock** + +```bash +PLEX_BASE_URL=http://127.0.0.1:8080 \ + datum-sync --dry-run 2>&1 | tail -30 +``` + +(`datum-sync` is the console script registered at `pyproject.toml:21` — `datum-sync = "sync:cli"`.) + +Expected: sync reaches the Plex-read phase, receives the snapshot, exits cleanly. No captures yet because `--dry-run`. + +- [ ] **Step 7.3: Run a real (guarded) sync against the mock** + +```bash +PLEX_BASE_URL=http://127.0.0.1:8080 \ +PLEX_ALLOW_WRITES=1 \ + datum-sync 2>&1 | tail -30 +``` + +Expected: sync completes; mock captures N POSTs in `/tmp/plex-mock-rehearsal.db`. + +- [ ] **Step 7.4: Inspect the captures** + +```bash +sqlite3 /tmp/plex-mock-rehearsal.db \ + "SELECT method, path, substr(body_json, 1, 80) FROM captures WHERE run_id='rehearsal-1' LIMIT 5" +``` + +Expected: rows showing the POST method, supply-items path, and the first 80 chars of each payload. + +- [ ] **Step 7.5: Stop the mock + document what worked** + +```bash +kill %1 +``` + +Write findings to `tools/plex_mock/REHEARSAL_NOTES.md` (gitignored or committed, as preferred) covering: +- Actual N of captures vs expected N (rows in `plex_supply_items WHERE plex_id IS NULL`) +- Any errors in the sync log +- Any unexpected payload shapes + +- [ ] **Step 7.6: Commit the rehearsal doc if valuable** + +```bash +git add tools/plex_mock/REHEARSAL_NOTES.md +git commit -m "docs(plex-mock): record first rehearsal findings (#92)" +``` + +--- + +## Task 8: Diff CLI (TDD) + +Compares a run's captures against an expected-payload fixture. Flags drift: missing fields, extra fields, type mismatches, count mismatches. 
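+
+For reference, a drifting run should report roughly like this (run id, paths, and issue rows illustrative; the format matches the `main()` implemented in Step 8.4):
+
+```
+$ python -m tools.plex_mock.diff --run-id rehearsal-1 \
+    --db /tmp/plex-mock-rehearsal.db \
+    --expected tests/fixtures/plex_mock/expected_supply_items.json
+plex-mock diff: DRIFT (run_id=rehearsal-1, 2 issues)
+  row 3: missing required field 'category'
+  row 7: forbidden field 'id' present
+```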
+ +**Files:** +- Create: `tools/plex_mock/diff.py` +- Create: `tests/test_plex_mock_diff.py` +- Create: `tests/fixtures/plex_mock/expected_supply_items.json` + +- [ ] **Step 8.1: Write the expected-payload fixture** + +Create `tests/fixtures/plex_mock/expected_supply_items.json`: + +```json +{ + "supply_items_post_shape": { + "required_fields": [ + "category", + "description", + "group", + "inventoryUnit", + "supplyItemNumber", + "type" + ], + "forbidden_fields": [ + "id", + "posted_to_plex_at" + ], + "field_types": { + "category": "str", + "description": "str", + "group": "str", + "inventoryUnit": "str", + "supplyItemNumber": "str", + "type": "str" + } + } +} +``` + +- [ ] **Step 8.2: Write failing tests** + +Create `tests/test_plex_mock_diff.py`: + +```python +"""Tests for the Plex-mock diff CLI.""" +import json +from pathlib import Path + +import pytest + +from tools.plex_mock.diff import diff_run, DiffResult +from tools.plex_mock.store import CaptureStore + + +FIXTURE = Path(__file__).parent / "fixtures" / "plex_mock" / "expected_supply_items.json" + + +@pytest.fixture +def store(tmp_path: Path) -> CaptureStore: + return CaptureStore(tmp_path / "captures.db") + + +@pytest.fixture +def expected() -> dict: + return json.loads(FIXTURE.read_text()) + + +class TestDiffRun: + def test_clean_run_returns_no_issues(self, store: CaptureStore, expected: dict): + store.append( + method="POST", + path="/inventory/v1/inventory-definitions/supply-items", + body={ + "category": "Tools & Inserts", "description": "x", + "group": "Machining - End Mills", "inventoryUnit": "Ea", + "supplyItemNumber": "ABC-1", "type": "SUPPLY", + }, + run_id="r1", + ) + result = diff_run(store=store, run_id="r1", expected=expected) + assert isinstance(result, DiffResult) + assert result.issues == [] + assert result.ok is True + + def test_missing_required_field_flagged(self, store: CaptureStore, expected: dict): + store.append( + method="POST", + path="/inventory/v1/inventory-definitions/supply-items", + body={"supplyItemNumber": "ABC-1"}, # missing everything else + run_id="r1", + ) + result = diff_run(store=store, run_id="r1", expected=expected) + assert result.ok is False + msgs = " ".join(result.issues) + assert "missing" in msgs.lower() + assert "category" in msgs + + def test_forbidden_field_flagged(self, store: CaptureStore, expected: dict): + store.append( + method="POST", + path="/inventory/v1/inventory-definitions/supply-items", + body={ + "category": "Tools & Inserts", "description": "x", + "group": "Machining - End Mills", "inventoryUnit": "Ea", + "supplyItemNumber": "ABC-1", "type": "SUPPLY", + "id": "client-should-not-send-this", + }, + run_id="r1", + ) + result = diff_run(store=store, run_id="r1", expected=expected) + assert result.ok is False + assert any("forbidden" in m.lower() and "id" in m for m in result.issues) + + def test_wrong_field_type_flagged(self, store: CaptureStore, expected: dict): + store.append( + method="POST", + path="/inventory/v1/inventory-definitions/supply-items", + body={ + "category": "Tools & Inserts", "description": 42, # should be str + "group": "Machining - End Mills", "inventoryUnit": "Ea", + "supplyItemNumber": "ABC-1", "type": "SUPPLY", + }, + run_id="r1", + ) + result = diff_run(store=store, run_id="r1", expected=expected) + assert result.ok is False + assert any("description" in m and "str" in m for m in result.issues) +``` + +- [ ] **Step 8.3: Run tests; expect import failure** + +Run: `python -m pytest tests/test_plex_mock_diff.py -v` + +Expected: `ModuleNotFoundError: No 
module named 'tools.plex_mock.diff'`.
+
+- [ ] **Step 8.4: Implement the diff module**
+
+Create `tools/plex_mock/diff.py`:
+
+```python
+"""
+Diff captured Plex-mock POSTs against an expected-payload fixture.
+
+Checks each supply-items POST for:
+  - required fields present
+  - forbidden fields absent (things the client shouldn't send)
+  - field types match the fixture
+
+Exit code 0 on clean, 1 on drift. Usage:
+
+    python -m tools.plex_mock.diff --run-id <run-id> --db <captures.db> --expected <expected.json>
+"""
+from __future__ import annotations
+
+import argparse
+import json
+import sys
+from dataclasses import dataclass, field
+from pathlib import Path
+
+from tools.plex_mock.store import CaptureStore
+
+
+TYPE_MAP = {"str": str, "int": int, "float": float, "bool": bool, "list": list, "dict": dict}
+
+
+@dataclass
+class DiffResult:
+    issues: list[str] = field(default_factory=list)
+
+    @property
+    def ok(self) -> bool:
+        return not self.issues
+
+
+def _check_supply_item_post(body: dict, shape: dict, row_id: int) -> list[str]:
+    issues: list[str] = []
+    for f in shape["required_fields"]:
+        if f not in body:
+            issues.append(f"row {row_id}: missing required field '{f}'")
+    for f in shape["forbidden_fields"]:
+        if f in body:
+            issues.append(f"row {row_id}: forbidden field '{f}' present")
+    for f, t in shape["field_types"].items():
+        if f in body:
+            expected_t = TYPE_MAP.get(t)
+            if expected_t and not isinstance(body[f], expected_t):
+                actual = type(body[f]).__name__
+                issues.append(f"row {row_id}: field '{f}' expected {t}, got {actual}")
+    return issues
+
+
+def diff_run(*, store: CaptureStore, run_id: str, expected: dict) -> DiffResult:
+    result = DiffResult()
+    shape = expected.get("supply_items_post_shape")
+    if not shape:
+        result.issues.append("fixture missing 'supply_items_post_shape'")
+        return result
+
+    for row in store.query(run_id=run_id, method="POST"):
+        if not row["path"].endswith("/supply-items"):
+            continue
+        body = row["body"] or {}
+        result.issues.extend(_check_supply_item_post(body, shape, row["id"]))
+    return result
+
+
+def main() -> int:
+    ap = argparse.ArgumentParser(description="Plex-mock capture diff")
+    ap.add_argument("--run-id", required=True)
+    ap.add_argument("--db", required=True, type=Path)
+    ap.add_argument("--expected", required=True, type=Path)
+    args = ap.parse_args()
+
+    if not args.db.exists():
+        print(f"DB not found: {args.db}", file=sys.stderr)
+        return 2
+    if not args.expected.exists():
+        print(f"Expected fixture not found: {args.expected}", file=sys.stderr)
+        return 2
+
+    store = CaptureStore(args.db)
+    expected = json.loads(args.expected.read_text())
+    result = diff_run(store=store, run_id=args.run_id, expected=expected)
+    if result.ok:
+        print(f"plex-mock diff: CLEAN (run_id={args.run_id})")
+        return 0
+    print(f"plex-mock diff: DRIFT (run_id={args.run_id}, {len(result.issues)} issues)")
+    for issue in result.issues:
+        print(f"  {issue}")
+    return 1
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())
+```
+
+- [ ] **Step 8.5: Run tests; expect all green**
+
+Run: `python -m pytest tests/test_plex_mock_diff.py -v`
+
+Expected: all 4 tests pass.
+
+- [ ] **Step 8.6: Commit**
+
+```bash
+git add tools/plex_mock/diff.py tests/test_plex_mock_diff.py \
+    tests/fixtures/plex_mock/expected_supply_items.json
+git commit -m "feat(plex-mock): capture-diff CLI with payload-shape fixture (#92)"
+```
+
+---
+
+## Task 9: Console scripts, docs, final README
+
+Wire the three CLIs into `pyproject.toml` and flesh out the user-facing docs.
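+
+All three CLIs already follow the convention console scripts need: a `main() -> int` whose return value becomes the process exit code. The pattern in miniature:
+
+```python
+def main() -> int:
+    # parse args, do the work ...
+    return 0  # the console-script shim turns this into the process exit code
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())  # identical behavior under `python -m ...`
+```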
+
+**Files:**
+- Modify: `pyproject.toml` — three new console scripts
+- Modify: `tools/plex_mock/README.md` — replace stub with full usage
+- Modify: `docs/Plex_API_Reference.md` — add `PLEX_BASE_URL` paragraph
+
+- [ ] **Step 9.1: Add console scripts to `pyproject.toml`**
+
+The `[project.scripts]` table exists at `pyproject.toml:20` alongside `datum-sync`, `datum-sync-inventory`, and `datum-populate-supply-items`. Append three new entries in the same block:
+
+```toml
+datum-plex-mock-serve = "tools.plex_mock.server:main"
+datum-plex-mock-snapshot = "tools.plex_mock.capture_snapshots:main"
+datum-plex-mock-diff = "tools.plex_mock.diff:main"
+```
+
+- [ ] **Step 9.2: Replace the `tools/plex_mock/README.md` stub**
+
+Replace the entire file with (note: outer fence uses four backticks because the README embeds a bash block):
+
+````markdown
+# Plex-Mimic Mock
+
+Local HTTP server mirroring the Plex REST surface for write-pipeline
+validation. Tracked in [#92](https://github.com/grace-shane/Datum/issues/92);
+blocks [#3](https://github.com/grace-shane/Datum/issues/3) and
+[#6](https://github.com/grace-shane/Datum/issues/6).
+
+## Quick start
+
+```bash
+# Refresh snapshots from real Plex (read-only; safe to re-run)
+python -m tools.plex_mock.capture_snapshots
+
+# Start the mock on localhost:8080
+python -m tools.plex_mock.server --run-id $(date +%Y%m%d-%H%M%S)
+
+# In another shell: point the sync at it
+PLEX_BASE_URL=http://127.0.0.1:8080 \
+PLEX_ALLOW_WRITES=1 \
+  datum-sync
+
+# After the run: diff captures against the expected payload shape
+python -m tools.plex_mock.diff \
+  --run-id <run-id> \
+  --db tools/plex_mock/captures.db \
+  --expected tests/fixtures/plex_mock/expected_supply_items.json
+```
+
+## What it serves
+
+| Endpoint | Behavior |
+|---|---|
+| `GET /healthz` | liveness probe, returns `{"ok": true}` |
+| `GET /inventory/v1/inventory-definitions/supply-items` | serves `snapshots/supply_items_list.json` |
+| `GET /inventory/v1/inventory-definitions/supply-items/{id}` | one record from the snapshot; 404 if unknown |
+| `POST /inventory/v1/inventory-definitions/supply-items` | captures body, returns 201 with synthetic UUID; 409 if `supplyItemNumber` collides with snapshot |
+| `PUT /inventory/v1/inventory-definitions/supply-items/{id}` | captures body, merges over snapshot record, returns 200; 404 if unknown |
+| `GET /production/v1/production-definitions/workcenters` | serves `snapshots/workcenters_list.json` |
+| `GET /production/v1/production-definitions/workcenters/{id}` | one record; 404 if unknown |
+| `PUT/PATCH /production/v1/production-definitions/workcenters/{id}` | captures body, returns merged record (the #6 probe path) |
+
+Every write lands in `captures.db` keyed by `run_id` for later diffing.
+
+## Validation-window protocol
+
+Before we flip `PLEX_ALLOW_WRITES=1` against real `connect.plex.com`:
+
+1. Three consecutive `datum-sync` runs against the mock produce identical capture sets (same count, same payload shapes).
+2. `datum-plex-mock-diff` reports CLEAN against `expected_supply_items.json` for all three runs.
+3. Rehearsal notes in `tools/plex_mock/REHEARSAL_NOTES.md` document at least one full mock-sync cycle end-to-end.
+4. Only then: PR that enables writes to real Plex, and only with explicit Shane approval in the PR description.
+
+The mock is the validation surface. `test.connect.plex.com` (`PLEX_USE_TEST=1`) is not — the Datum Consumer Key only authenticates against production (see `docs/BRIEFING.md`).
+ +## Deploy on `datum-runtime` + +See `tools/plex_mock/systemd/datum-plex-mock.service`. Copy into +`/etc/systemd/system/`, `systemctl daemon-reload && systemctl enable --now datum-plex-mock`. +Bound to `127.0.0.1:8080` — no external exposure, no TLS needed. +```` + +- [ ] **Step 9.3: Update `docs/Plex_API_Reference.md`** + +Find the section on URL routing / environments and append: + +```markdown +### `PLEX_BASE_URL` override + +`plex_api.py` honors a `PLEX_BASE_URL` environment variable that overrides +both `BASE_URL` and `PLEX_USE_TEST`. Used by the write-validation +workflow in [#92](https://github.com/grace-shane/Datum/issues/92) to +point `datum-sync` at the local Plex-mimic mock +(`tools/plex_mock/server.py`) instead of `connect.plex.com`. Unset in +normal production operation. + +Resolution order (first match wins): + +1. Explicit `base_url=` kwarg to `PlexClient()` — tests and ad-hoc scripts +2. `PLEX_BASE_URL` env var — deployment-time override (the mock) +3. `PLEX_USE_TEST=1` → `test.connect.plex.com` +4. Default → `connect.plex.com` +``` + +- [ ] **Step 9.4: Verify pyproject console scripts install cleanly** + +Run: `pip install -e . && datum-plex-mock-serve --help` + +Expected: argparse help for the mock server CLI. + +- [ ] **Step 9.5: Commit** + +```bash +git add pyproject.toml tools/plex_mock/README.md docs/Plex_API_Reference.md +git commit -m "docs(plex-mock): console scripts, README, PLEX_BASE_URL reference (#92)" +``` + +--- + +## Task 10: systemd unit + deploy to `datum-runtime` + +Persistent mock service on the runtime VM. Localhost-bound, free-tier friendly. + +**Files:** +- Create: `tools/plex_mock/systemd/datum-plex-mock.service` + +- [ ] **Step 10.1: Write the unit file** + +Create `tools/plex_mock/systemd/datum-plex-mock.service`: + +```ini +[Unit] +Description=Datum Plex-Mimic Mock HTTP Server +After=network.target +Documentation=https://github.com/grace-shane/Datum/issues/92 + +[Service] +Type=simple +User=datum +Group=datum +WorkingDirectory=/opt/datum +EnvironmentFile=/opt/datum/.env.local +ExecStart=/opt/datum/.venv/bin/datum-plex-mock-serve \ + --host 127.0.0.1 \ + --port 8080 \ + --snapshots /opt/datum/tools/plex_mock/snapshots \ + --db /var/lib/datum/plex-mock-captures.db +Restart=on-failure +RestartSec=5 +# Hardening — mock has no reason to touch anything outside its data dir +ReadWritePaths=/var/lib/datum +PrivateTmp=true +ProtectSystem=strict +ProtectHome=true +NoNewPrivileges=true + +[Install] +WantedBy=multi-user.target +``` + +- [ ] **Step 10.2: Document deploy steps in the unit-file directory** + +Create `tools/plex_mock/systemd/README.md` (outer fence uses four backticks because the README embeds a bash block): + +````markdown +# Deploy `datum-plex-mock` on `datum-runtime` + +Assumes the Datum repo is at `/opt/datum` and a virtualenv at +`/opt/datum/.venv` with `pip install -e .` having registered +`datum-plex-mock-serve`. 
+ +```bash +# SSH in via IAP +gcloud compute ssh datum-runtime --zone=us-central1-a --tunnel-through-iap \ + --project=$PROJECT_ID + +# On the VM: +sudo mkdir -p /var/lib/datum +sudo chown datum:datum /var/lib/datum +sudo cp /opt/datum/tools/plex_mock/systemd/datum-plex-mock.service \ + /etc/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl enable --now datum-plex-mock +sudo systemctl status datum-plex-mock +curl -sf http://127.0.0.1:8080/healthz +``` + +Troubleshooting: +- Logs: `journalctl -u datum-plex-mock -f` +- Stop: `sudo systemctl stop datum-plex-mock` +- Test snapshot refresh from the VM: `cd /opt/datum && /opt/datum/.venv/bin/datum-plex-mock-snapshot` +```` + +- [ ] **Step 10.3: Commit** + +```bash +git add tools/plex_mock/systemd/ +git commit -m "feat(plex-mock): systemd unit + datum-runtime deploy doc (#92)" +``` + +- [ ] **Step 10.4: Open the deploy PR** + +Deploy to `datum-runtime` happens **after PR merge** — it's an out-of-repo step that Shane runs manually. PR description should include the deploy checklist verbatim from Step 10.2 so Shane can cross-check. + +--- + +## Out-of-scope follow-ups (tracked separately) + +- Pre-commit hook or CI check that runs `datum-plex-mock-diff` against a golden capture fixture every PR — cheap regression net once we have the tooling. +- Capture-replay feature: re-drive historical mock captures against a new code path to detect payload-shape drift. +- Multi-run comparison (N runs vs N runs): the current diff is fixture-vs-run; a runs-vs-runs variant would catch nondeterminism in payload generation. +- Extend the mock surface if #4 / #5 ever unblock (Tool Assemblies, Routings). + +Do not roll these into #92 — each earns its own issue. diff --git a/docs/validate_library_spec.md b/docs/validate_library_spec.md new file mode 100644 index 0000000..0e0139a --- /dev/null +++ b/docs/validate_library_spec.md @@ -0,0 +1,455 @@ +# `validate_library.py` — Full Design Spec + +**Project:** Fusion 360 → Plex tooling sync — Grace Engineering +**Repo:** https://github.com/grace-shane/Datum +**Status:** Implemented — landed in [PR #28](https://github.com/grace-shane/Datum/pull/28) (2026-04-08), closing issue [#25](https://github.com/grace-shane/Datum/issues/25). This document is retained as the design reference. + +--- + +## Purpose + +Pre-sync validation gate for Fusion 360 tool library JSON files. Runs before +any data touches Plex. Three entry points share one validation engine. A FAIL +aborts the sync. WARNs are surfaced in verbose/debug modes and the Flask UI. + +--- + +## File Location + +``` +plex-api/ + validate_library.py ← new file + tool_library_loader.py ← calls validate_library as pre-sync gate + app.py ← adds /api/fusion/validate endpoint +``` + +--- + +## Output Modes + +| Mode | Trigger | Shows | +|---|---|---| +| Production | default / no flags | PASS or FAIL + failing rules only | +| Verbose | `--verbose` / `-v` | PASS / FAIL + WARN + failing rules | +| Debug | `--debug` / `-d` | Everything — every field checked, full supplier list on vendor lookup | + +The Flask endpoint always behaves as **verbose** since a human is reading it. 
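+
+A sketch of how the modes are expected to gate reporting (illustrative only; `ValidationMode` and the issue type are specified under Core Data Structures below):
+
+```python
+from validate_library import ValidationMode
+
+def visible_issues(issues: list, mode: ValidationMode) -> list:
+    """PRODUCTION reports FAILs only; VERBOSE and DEBUG add the WARNs.
+
+    (DEBUG additionally emits the field trace, which is handled
+    separately from issue filtering.)
+    """
+    if mode is ValidationMode.PRODUCTION:
+        return [i for i in issues if i.severity == "FAIL"]
+    return issues
+```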
+ +--- + +## Entry Points + +### 1 — CLI + +```bash +# Production default — PASS/FAIL only +python validate_library.py + +# Specific file +python validate_library.py --file "BROTHER SPEEDIO ALUMINUM.json" + +# Verbose — adds WARNs +python validate_library.py --verbose + +# Debug — full field trace + supplier list +python validate_library.py --debug + +# Skip the live Plex supplier lookup (offline mode) +python validate_library.py --no-api +``` + +**Exit codes:** + +| Code | Meaning | +|---|---| +| `0` | PASS | +| `1` | FAIL | +| `2` | Script / environment error (missing file, no API creds, etc.) | + +--- + +### 2 — Programmatic (called from `tool_library_loader.py`) + +```python +from validate_library import validate_library, ValidationMode + +result = validate_library( + tools=raw_tool_list, # list[dict] already loaded by load_library() + library_name="BROTHER SPEEDIO ALUMINUM", + mode=ValidationMode.PRODUCTION, + use_api=False, # False to skip supplier lookup +) + +if not result.passed: + log.error("Validation failed — aborting sync") + log.error(result.summary()) + return None # tool_library_loader returns None, sync aborts +``` + +`tool_library_loader.py` calls this **after** `load_library()` succeeds but +**before** returning tools to the sync layer. Stale/locked file errors are +still caught by the existing loader guards — validation only runs on +successfully parsed data. + +`use_api=False` is the default in the loader to keep it fast and offline-safe. +API vendor validation is an explicit opt-in from the CLI or Flask. + +--- + +### 3 — Flask Endpoint + +``` +GET /api/fusion/validate +POST /api/fusion/validate +``` + +- **GET** — validates live files from the ADC network share +- **POST** — validates an uploaded JSON file without touching the share + (same multipart upload shape as `/api/fusion/libraries`) + +Always runs in verbose mode. No `--debug` toggle from the UI in the initial +implementation (can be added later as a query param). + +**Response shape:** + +```json +{ + "library_name": "BROTHER SPEEDIO ALUMINUM", + "passed": false, + "tool_count": 28, + "sync_candidate_count": 21, + "issues": [ + { + "severity": "FAIL", + "rule": "REQUIRED_FIELD", + "tool_index": 4, + "tool_description": "5/8x4x1-3/4 in SQ. END", + "field": "product-id", + "message": "Missing required field 'product-id' — this tool cannot be deduped in Plex" + }, + { + "severity": "WARN", + "rule": "VENDOR_NOT_IN_PLEX", + "tool_index": 7, + "tool_description": "1/4 BALL END MILL", + "field": "vendor", + "value": "GARR TOOL", + "message": "Vendor 'GARR TOOL' not found in mdm/v1/suppliers — will fail on sync" + } + ], + "debug_trace": null +} +``` + +`debug_trace` is `null` unless `debug=True` is passed programmatically or a +future `?debug=1` query param is supported. 
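+
+A hedged sketch of consuming the endpoint from a script; field names follow the response shape above, and a single-library response plus the app's usual local port are assumed:
+
+```python
+import requests
+
+# Assumes the Flask app from app.py is running locally.
+resp = requests.get("http://localhost:5000/api/fusion/validate", timeout=30)
+resp.raise_for_status()
+report = resp.json()
+
+if not report["passed"]:
+    for issue in report["issues"]:
+        print(f"[{issue['severity']}] {issue['rule']}: {issue['message']}")
+```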
+ +--- + +## Core Data Structures + +### `ValidationMode` enum + +```python +class ValidationMode(Enum): + PRODUCTION = "production" # PASS/FAIL only + VERBOSE = "verbose" # + WARNs + DEBUG = "debug" # + field trace +``` + +### `ValidationIssue` dataclass + +```python +@dataclass +class ValidationIssue: + severity: Literal["FAIL", "WARN"] + rule: str # machine-readable rule ID (see Rule Table) + tool_index: int | None + tool_description: str | None + field: str | None + value: Any + message: str # human-readable +``` + +### `ValidationResult` dataclass + +```python +@dataclass +class ValidationResult: + library_name: str + passed: bool + tool_count: int + sync_candidate_count: int # count after filtering holders + probes + issues: list[ValidationIssue] + debug_trace: list[str] | None + + def summary(self) -> str: ... # single-line human-readable string + def to_dict(self) -> dict: ... # Flask JSON response +``` + +--- + +## Constants Block + +Define at the top of `validate_library.py`: + +```python +# ── Known tool types ───────────────────────────────────────────────────────── +KNOWN_TOOL_TYPES = { + "flat end mill", "bull nose end mill", "drill", + "face mill", "form mill", "slot mill", + "holder", "probe" +} + +# Types excluded from sync — identity only, not purchasable consumables +NON_SYNC_TYPES = {"holder", "probe"} + +# ── Geometry bounds ─────────────────────────────────────────────────────────── +# TODO (issue #XX): confirm real shop floor bounds with Shane before enabling +# range WARN rules. The nonpositive FAIL rules (DC <= 0, NOF <= 0) are always +# active regardless of these values. +DC_MIN = None # cutting diameter min, inches +DC_MAX = None # cutting diameter max, inches +OAL_MIN = None # overall length min, inches +OAL_MAX = None # overall length max, inches +NOF_MIN = None # number of flutes min +NOF_MAX = None # number of flutes max +``` + +When a bound is `None`, the corresponding range check is skipped entirely. +In debug mode, skipped checks are logged: +``` +[DEBUG] GEOMETRY_DC_RANGE skipped — DC_MIN/DC_MAX not set +``` + +--- + +## Rule Table + +Rules run in this order. **Library-level rules run first.** If any library-level +rule FAILs, per-tool rules are skipped — there is nothing safe to iterate. 
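+
+The gate in miniature, as a runnable toy: plain dicts stand in for `ValidationIssue`, and only one rule per phase is shown:
+
+```python
+NON_SYNC_TYPES = {"holder", "probe"}
+
+def run_rules(data: list[dict]) -> list[dict]:
+    # Phase 1: library-level. Any FAIL here short-circuits phase 2.
+    if not data:
+        return [{"severity": "FAIL", "rule": "STRUCT_EMPTY"}]
+    # Phase 2: per-tool, sync candidates only.
+    issues: list[dict] = []
+    for idx, tool in enumerate(data):
+        if tool.get("type") in NON_SYNC_TYPES:
+            continue  # holders/probes are silently skipped
+        if not tool.get("product-id"):
+            issues.append({"severity": "FAIL", "rule": "REQUIRED_FIELD",
+                           "tool_index": idx, "field": "product-id"})
+    return issues
+```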
+ +--- + +### Library-Level Rules + +| Rule ID | Severity | Condition | Message | +|---|---|---|---| +| `STRUCT_ROOT_KEY` | FAIL | `"data"` key missing from root | Root `"data"` key missing — not a valid Fusion tool library | +| `STRUCT_DATA_LIST` | FAIL | `data` is not a list | Root `"data"` is not a list | +| `STRUCT_EMPTY` | FAIL | `data` list is empty | Library contains zero entries | +| `SYNC_CANDIDATES_ZERO` | FAIL | After filtering `NON_SYNC_TYPES`, zero tools remain | No syncable tools after filtering — check type values | +| `DUPLICATE_GUID` | FAIL | Two entries share the same `guid` | Duplicate guid `{guid}` at indexes {i} and {j} | +| `DUPLICATE_PRODUCT_ID` | FAIL | Two sync-candidate entries share `product-id` | Duplicate product-id `{id}` at indexes {i} and {j} — upsert will collide | +| `CROSS_LIBRARY_DUPLICATE` | WARN | `product-id` already seen in a previously validated library (multi-library runs only) | product-id `{id}` also exists in `{other_library}` — check for cross-library collision | +| `UNKNOWN_TYPE_PRESENT` | WARN | A `type` value is not in `KNOWN_TOOL_TYPES` | Unknown type `"{type}"` at index {i} — will be included in sync unless filter is updated | + +--- + +### Per-Tool Rules + +Runs on sync candidates only (entries where `type` is not in `NON_SYNC_TYPES`). +Holders and probes are silently skipped. + +#### Required Field Rules + +| Rule ID | Severity | Field | Condition | +|---|---|---|---| +| `REQUIRED_FIELD` | FAIL | `guid` | missing or empty string | +| `REQUIRED_FIELD` | FAIL | `type` | missing or empty string | +| `REQUIRED_FIELD` | FAIL | `description` | missing or empty string | +| `REQUIRED_FIELD` | FAIL | `product-id` | missing or empty string | + +#### Vendor Rules + +| Rule ID | Severity | Condition | Message | +|---|---|---|---| +| `VENDOR_MISSING` | WARN | `vendor` key missing or empty string | Tool has no vendor — supplier linkage will fail on sync | +| `VENDOR_NOT_IN_PLEX` | WARN | vendor present but not matched in `mdm/v1/suppliers` (only when `use_api=True`) | Vendor `"{vendor}"` not found in Plex supplier master — will fail at sync time | + +In debug mode, `VENDOR_NOT_IN_PLEX` additionally logs: +- The full supplier name list it matched against +- The 3 closest names by edit distance (to catch `"GARR TOOL"` vs `"Garr Tool"`, + trailing spaces, abbreviation differences, etc.) + +#### Geometry Rules + +All geometry lives under `tool["geometry"]`. If `geometry` is absent entirely +on a sync candidate, that is its own WARN before any field-level checks run. 
+ +| Rule ID | Severity | Field | Condition | Note | +|---|---|---|---|---| +| `GEOMETRY_MISSING` | WARN | `geometry` | key absent on sync candidate | Logged once per tool; field checks below are skipped | +| `GEOMETRY_DC_MISSING` | WARN | `geometry.DC` | key absent | | +| `GEOMETRY_DC_NONPOSITIVE` | FAIL | `geometry.DC` | `<= 0` | Hard rule — always active | +| `GEOMETRY_DC_RANGE` | WARN | `geometry.DC` | outside `[DC_MIN, DC_MAX]` | Skipped when either bound is `None` | +| `GEOMETRY_OAL_MISSING` | WARN | `geometry.OAL` | key absent | | +| `GEOMETRY_OAL_SHORTER_THAN_DC` | WARN | `geometry.OAL` vs `geometry.DC` | `OAL < DC` | Physically implausible — always active when both fields present | +| `GEOMETRY_OAL_RANGE` | WARN | `geometry.OAL` | outside `[OAL_MIN, OAL_MAX]` | Skipped when either bound is `None` | +| `GEOMETRY_NOF_MISSING` | WARN | `geometry.NOF` | key absent | | +| `GEOMETRY_NOF_NONPOSITIVE` | FAIL | `geometry.NOF` | `<= 0` | Hard rule — always active | +| `GEOMETRY_NOF_RANGE` | WARN | `geometry.NOF` | outside `[NOF_MIN, NOF_MAX]` | Skipped when either bound is `None` | + +#### Post-Process Rules + +| Rule ID | Severity | Field | Condition | +|---|---|---|---| +| `POSTPROCESS_NUMBER_MISSING` | WARN | `post-process.number` | `post-process` object absent or `number` key absent | +| `POSTPROCESS_NUMBER_NONPOSITIVE` | WARN | `post-process.number` | `<= 0` | + +--- + +## Supplier Lookup + +Implemented as a module-level cached function — hits the API once per process +run regardless of how many libraries are validated. + +```python +_supplier_cache: list[str] | None = None + +def _get_supplier_names(client: PlexClient, debug: bool = False) -> list[str]: + """ + Fetch supplier names from mdm/v1/suppliers. + Cached after first call. Returns empty list on API failure (non-fatal — + VENDOR_NOT_IN_PLEX checks are skipped if the supplier list cannot be loaded). + """ +``` + +**Matching strategy:** +1. Case-insensitive exact match +2. If no match and debug mode: log the 3 closest names by edit distance + +API failure during supplier fetch is non-fatal. Log a warning that vendor +checks were skipped and continue. Never abort the validation run because +the supplier endpoint was unreachable. + +--- + +## CLI Output Examples + +### Production — PASS + +``` +✓ BROTHER SPEEDIO ALUMINUM — 21 tools valid, ready to sync +``` + +### Production — FAIL + +``` +✗ BROTHER SPEEDIO ALUMINUM — FAILED (2 errors) + + [FAIL] REQUIRED_FIELD — tool 4 "5/8x4x1-3/4 in SQ. END" + Field: product-id + Missing required field 'product-id' — this tool cannot be deduped in Plex + + [FAIL] GEOMETRY_DC_NONPOSITIVE — tool 11 "1/4 BALL END MILL" + Field: geometry.DC + Cutting diameter must be > 0 (got 0.0) +``` + +### Verbose — FAIL with WARNs + +``` +✗ BROTHER SPEEDIO ALUMINUM — FAILED (2 errors, 1 warning) + + [FAIL] REQUIRED_FIELD — tool 4 "5/8x4x1-3/4 in SQ. END" + Field: product-id + Missing required field 'product-id' — this tool cannot be deduped in Plex + + [FAIL] GEOMETRY_DC_NONPOSITIVE — tool 11 "1/4 BALL END MILL" + Field: geometry.DC + Cutting diameter must be > 0 (got 0.0) + + [WARN] VENDOR_NOT_IN_PLEX — tool 7 "1/4 BALL END MILL" + Field: vendor + Value: "GARR TOOL" + Vendor not found in Plex supplier master — will fail at sync time +``` + +### Debug — adds full field trace per tool (truncated) + +``` + [DEBUG] tool 0 "5/8x4x1-3/4 in SQ. END" + guid .................. OK (a3f1...) + type .................. OK (flat end mill) + description ........... OK + product-id ............ 
OK (990910) + vendor ................ OK — matched "Harvey Tool" in supplier master + geometry.DC ........... OK (0.625) — range check skipped (bounds not set) + geometry.OAL .......... OK (4.0) + geometry.NOF .......... OK (4) + post-process.number ... OK (1) +``` + +In debug mode, vendor mismatch additionally prints: + +``` + [DEBUG] Supplier master (47 records): + Closest matches to "GARR TOOL": + 1. "Garr Tool Co." (edit distance 5) + 2. "GARR TOOLING INC" (edit distance 7) + 3. "GARR" (edit distance 9) +``` + +--- + +## Integration Into `tool_library_loader.py` + +Add a single call in `load_library()` after successful JSON parse, before +`return tools`: + +```python +# After: tools = raw.get("data") passes all existing checks +# Before: return tools + +from validate_library import validate_library, ValidationMode + +validation = validate_library( + tools=tools, + library_name=path.stem, + mode=ValidationMode.PRODUCTION, + use_api=False, # keep loader fast and offline-safe +) +if not validation.passed: + log.error("Validation failed for %s — sync aborted", path.name) + log.error(validation.summary()) + return None +``` + +--- + +## Integration Into `app.py` + +Add two routes: + +```python +@app.route('/api/fusion/validate', methods=['GET', 'POST']) +def api_fusion_validate(): + """ + GET — validate live files from ADC share (same source as /api/fusion/libraries) + POST — validate uploaded JSON file(s) without touching the share + + Always runs in VERBOSE mode (human is reading the response). + Returns ValidationResult.to_dict() per library. + """ +``` + +--- + +## GitHub Issues to Open + +| Issue | Title | +|---|---| +| #XX | `validate_library.py` — implement core engine + CLI | +| #XX | Geometry bounds — confirm DC / OAL / NOF ranges, enable range WARN rules | +| #XX | Vendor fuzzy matching — promote near-miss debug output to verbose mode | + +--- + +## Open Decisions + +| # | Decision | Status | +|---|---|---| +| 1 | Geometry bounds (DC / OAL / NOF) | **Blocked on Shane** — constants stubbed as `None` | +| 2 | Fuzzy vendor matching in verbose vs debug only | Debug only for now; revisit after first real sync run | +| 3 | `?debug=1` query param on Flask endpoint | Deferred — not in initial implementation | diff --git a/enrich.py b/enrich.py new file mode 100644 index 0000000..8e0c693 --- /dev/null +++ b/enrich.py @@ -0,0 +1,321 @@ +#!/usr/bin/env python +""" +enrich.py +Cross-reference shop tools against the vendor reference catalog +Grace Engineering — Datum project +============================================================= +Matches tools in the ``tools`` table that have empty/missing +``product_id`` against the ``reference_catalog`` table by +geometry fingerprint (type + cutting diameter + flute count). 
+
+Usage
+-----
+    # Preview matches (no writes)
+    py enrich.py --dry-run
+
+    # Apply matches — writes product_id + vendor back to tools table
+    py enrich.py
+
+    # Verbose logging
+    py enrich.py -v
+"""
+from __future__ import annotations
+
+import argparse
+import logging
+import os
+import sys
+import time
+from pathlib import Path
+
+_PROJECT_ROOT = Path(__file__).resolve().parent
+os.chdir(_PROJECT_ROOT)
+if str(_PROJECT_ROOT) not in sys.path:
+    sys.path.insert(0, str(_PROJECT_ROOT))
+
+import bootstrap  # noqa: E402, F401
+
+from supabase_client import SupabaseClient  # noqa: E402
+
+log = logging.getLogger("datum.enrich")
+
+# Geometry tolerance for floating-point matching (mm)
+DC_TOLERANCE = 0.01   # 0.01mm ~ 0.0004"
+NOF_TOLERANCE = 0.5   # flute count is integer, but stored as float
+
+
+def find_tools_missing_product_id(client: SupabaseClient) -> list[dict]:
+    """Fetch tools where product_id is empty or blank."""
+    resp = client.select(
+        "tools",
+        columns="id,fusion_guid,type,description,geo_dc,geo_nof,geo_oal,vendor,product_id,library_id",
+        filters={"or": "(product_id.eq.,product_id.is.null)"},
+    )
+    return resp
+
+
+def find_reference_match(
+    client: SupabaseClient,
+    tool_type: str,
+    geo_dc: float | None,
+    geo_nof: float | None,
+) -> dict | None:
+    """
+    Find the best match in reference_catalog by (type, DC, NOF).
+    Returns the first match or None.
+
+    A PostgREST range query needs the same column key twice
+    (``geo_dc=gte.X`` and ``geo_dc=lte.Y``), which a Python filters
+    dict cannot express: duplicate literal keys silently collapse to
+    the last value, dropping the lower bounds. Rather than issue that
+    half-filtered query, delegate to ``find_reference_match_raw``,
+    which builds the request directly.
+    """
+    if geo_dc is None or geo_nof is None:
+        return None
+    return find_reference_match_raw(client, tool_type, geo_dc, geo_nof)
+
+
+def find_reference_match_raw(
+    client: SupabaseClient,
+    tool_type: str,
+    geo_dc: float | None,
+    geo_nof: float | None,
+) -> dict | None:
+    """
+    Find the best match using direct PostgREST range query.
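+
+    Built as an explicit request because a PostgREST range filter
+    repeats the same key (``geo_dc=gte.X`` and ``geo_dc=lte.Y``) and a
+    plain filters dict keeps only one of the two. This implementation
+    sends the lower DC bound server-side, applies the upper bound
+    client-side on the returned rows, and matches NOF as a rounded
+    exact value.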
+    """
+    if geo_dc is None or geo_nof is None:
+        return None
+
+    dc_lo = geo_dc - DC_TOLERANCE
+    dc_hi = geo_dc + DC_TOLERANCE
+    nof_val = round(geo_nof)
+
+    # Build the query params manually for the range filters
+    url = client._table_url("reference_catalog")
+    params = {
+        "select": "vendor,product_id,description,catalog_name,geo_dc,geo_nof,geo_oal",
+        "type": f"eq.{tool_type}",
+        "geo_dc": f"gte.{dc_lo}",
+        "geo_nof": f"eq.{nof_val}",
+        "limit": "5",
+        "order": "geo_dc.asc",
+    }
+
+    resp = client._session.get(
+        url,
+        params=params,
+        headers={
+            **client._session.headers,
+            "Range": "0-4",
+        },
+        timeout=client.timeout,
+    )
+
+    if not resp.ok:
+        return None
+
+    results = resp.json()
+    if not results:
+        # No match at this flute count. A DC-only fallback could go
+        # here (some tools carry a wrong NOF in Fusion); for now a
+        # miss is a miss.
+        return None
+
+    # Filter by DC upper bound (params only had gte, add lte check here)
+    filtered = [r for r in results if r.get("geo_dc", 0) <= dc_hi]
+    return filtered[0] if filtered else None
+
+
+# ─────────────────────────────────────────────
+# Upstream enrichment (raw Fusion JSON, pre-validation)
+# ─────────────────────────────────────────────
+INCHES_TO_MM = 25.4
+
+
+def enrich_raw_tools(
+    tools: list[dict],
+    client: SupabaseClient,
+) -> dict[str, int]:
+    """
+    Enrich raw Fusion tool dicts in-place before validation.
+
+    For each tool missing ``product-id``, queries the reference_catalog
+    by (type, DC, NOF) geometry match and fills in ``product-id`` and
+    ``vendor`` from the best match.
+
+    Parameters
+    ----------
+    tools : list[dict]
+        Raw Fusion JSON tool dicts (the "data" array). Modified in-place.
+    client : SupabaseClient
+        Client with access to reference_catalog table.
+
+    Returns
+    -------
+    dict
+        ``{"enriched": N, "skipped": M}`` counts.
+    """
+    enriched = 0
+    skipped = 0
+
+    for t in tools:
+        # Skip holders/probes and tools that already have product-id
+        if t.get("type") in ("holder", "probe"):
+            continue
+        pid = t.get("product-id", "")
+        if pid and str(pid).strip():
+            continue
+
+        # Normalize DC to mm for reference catalog lookup
+        geo = t.get("geometry") or {}
+        dc_raw = geo.get("DC")
+        nof_raw = geo.get("NOF")
+        unit = t.get("unit", "inches")
+
+        if dc_raw is None or nof_raw is None:
+            skipped += 1
+            continue
+
+        try:
+            dc_mm = float(dc_raw)
+            if isinstance(unit, str) and unit.lower() == "inches":
+                dc_mm *= INCHES_TO_MM
+            nof = float(nof_raw)
+        except (TypeError, ValueError):
+            skipped += 1
+            continue
+
+        ref = find_reference_match_raw(client, t.get("type", ""), dc_mm, nof)
+        if ref:
+            t["product-id"] = ref["product_id"]
+            if ref.get("vendor") and not t.get("vendor"):
+                t["vendor"] = ref["vendor"]
+            enriched += 1
+            log.info(
+                "  ENRICH: %s -> %s %s",
+                t.get("description", t.get("type", "")),
+                ref["vendor"],
+                ref["product_id"],
+            )
+        else:
+            skipped += 1
+
+    return {"enriched": enriched, "skipped": skipped}
+
+
+def enrich_tools(
+    client: SupabaseClient,
+    *,
+    dry_run: bool = False,
+) -> dict[str, int]:
+    """
+    Find tools missing product_id and try to match them against
+    the reference catalog. Returns counts.
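+
+    With ``dry_run=True``, matches are logged exactly as in a live run
+    but no UPDATE is issued against the ``tools`` table.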
+ """ + missing = find_tools_missing_product_id(client) + log.info("Found %d tools with missing product_id", len(missing)) + + matched = 0 + unmatched = 0 + + for tool in missing: + tool_type = tool.get("type", "") + geo_dc = tool.get("geo_dc") + geo_nof = tool.get("geo_nof") + desc = tool.get("description", "") + tool_id = tool["id"] + + ref = find_reference_match_raw(client, tool_type, geo_dc, geo_nof) + + if ref: + matched += 1 + log.info( + "MATCH: %s (DC=%.2f NOF=%s) -> %s %s (%s)", + desc or tool_type, + geo_dc or 0, + int(geo_nof) if geo_nof else "?", + ref["vendor"], + ref["product_id"], + ref["catalog_name"], + ) + + if not dry_run: + client.update( + "tools", + {"product_id": ref["product_id"], "vendor": ref["vendor"]}, + filters={"id": f"eq.{tool_id}"}, + ) + else: + unmatched += 1 + log.info( + "NO MATCH: %s (DC=%.2f NOF=%s)", + desc or tool_type, + geo_dc or 0, + int(geo_nof) if geo_nof else "?", + ) + + return {"matched": matched, "unmatched": unmatched, "total": len(missing)} + + +def main(argv: list[str] | None = None) -> int: + parser = argparse.ArgumentParser( + description="Enrich shop tools with product IDs from vendor reference catalog", + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Preview matches only, no writes to tools table", + ) + parser.add_argument( + "-v", "--verbose", + action="store_true", + help="Debug-level logging", + ) + args = parser.parse_args(argv) + + logging.basicConfig( + level=logging.DEBUG if args.verbose else logging.INFO, + format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + + client = SupabaseClient() + start = time.monotonic() + + counts = enrich_tools(client, dry_run=args.dry_run) + + elapsed = time.monotonic() - start + log.info("=" * 60) + log.info( + "Enrichment %s", + "preview (dry-run)" if args.dry_run else "complete", + ) + log.info( + " %d matched, %d unmatched out of %d", + counts["matched"], + counts["unmatched"], + counts["total"], + ) + log.info(" Elapsed: %.1fs", elapsed) + log.info("=" * 60) + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/ingest_reference.py b/ingest_reference.py new file mode 100644 index 0000000..3206024 --- /dev/null +++ b/ingest_reference.py @@ -0,0 +1,293 @@ +#!/usr/bin/env python +""" +ingest_reference.py +Load vendor catalog JSON files into the reference_catalog table +Grace Engineering — Datum project +============================================================= +Reads Fusion 360 tool library JSON files (the large hsmtools +vendor catalogs) and upserts them into the ``reference_catalog`` +Supabase table for geometry-based cross-referencing. 
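+
+Rows are keyed on ``(catalog_name, product_id)``, so re-running an
+ingest over the same files is an idempotent refresh rather than a
+duplicate insert.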
+ +Usage +----- + # Ingest all catalogs from a directory + py ingest_reference.py C:\\Users\\shanewaid\\Downloads + + # Ingest specific files + py ingest_reference.py "Harvey Tool-End Mills (1).json" "Garr Tool-Garr Tool.json" + + # Dry run — parse and count, no Supabase writes + py ingest_reference.py --dry-run C:\\Users\\shanewaid\\Downloads +""" +from __future__ import annotations + +import argparse +import json +import logging +import os +import sys +import time +from pathlib import Path + +_PROJECT_ROOT = Path(__file__).resolve().parent +os.chdir(_PROJECT_ROOT) +if str(_PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(_PROJECT_ROOT)) + +import bootstrap # noqa: E402, F401 + +from supabase_client import SupabaseClient # noqa: E402 +from sync_supabase import INCHES_TO_MM, EXCLUDED_TYPES # noqa: E402 + +log = logging.getLogger("datum.ingest_reference") + +# Minimum file size to consider (skip empty / tiny files) +MIN_FILE_SIZE = 1024 # 1 KB + +# Batch size for Supabase upserts (PostgREST has payload limits) +BATCH_SIZE = 500 + + +def _normalize_dc(value, unit: str) -> float | None: + """Normalize cutting diameter to mm.""" + if value is None: + return None + try: + v = float(value) + except (TypeError, ValueError): + return None + if unit.lower() == "inches": + v *= INCHES_TO_MM + return round(v, 6) + + +def _maybe_float(value) -> float | None: + if value is None: + return None + try: + return float(value) + except (TypeError, ValueError): + return None + + +def build_reference_rows( + catalog_name: str, + tools: list[dict], + unit_default: str = "inches", +) -> list[dict]: + """ + Convert raw Fusion tool dicts into reference_catalog rows. + Skips holders, probes, and tools without product-id. + """ + rows = [] + for t in tools: + tool_type = t.get("type", "") + if tool_type in EXCLUDED_TYPES: + continue + + pid = t.get("product-id", "") + if not pid or not str(pid).strip(): + continue # can't be a reference without a product-id + + unit = t.get("unit") or unit_default + geo = t.get("geometry") or {} + is_inches = isinstance(unit, str) and unit.lower() == "inches" + + row = { + "catalog_name": catalog_name, + "vendor": (t.get("vendor") or "").strip(), + "product_id": str(pid).strip(), + "description": (t.get("description") or "").strip(), + "type": tool_type, + "geo_dc": _normalize_dc(geo.get("DC"), unit) if is_inches + else _maybe_float(geo.get("DC")), + "geo_nof": _maybe_float(geo.get("NOF")), + "geo_oal": _normalize_dc(geo.get("OAL"), unit) if is_inches + else _maybe_float(geo.get("OAL")), + "geo_lcf": _normalize_dc(geo.get("LCF"), unit) if is_inches + else _maybe_float(geo.get("LCF")), + "geo_sig": _maybe_float(geo.get("SIG")), + "unit_original": unit, + } + rows.append(row) + + return rows + + +def ingest_catalog_file( + path: Path, + *, + client: SupabaseClient | None = None, + dry_run: bool = False, +) -> dict[str, int]: + """ + Load one vendor catalog JSON and upsert into reference_catalog. + + Returns {"tools": N, "skipped": M} counts. 
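+
+    In dry-run mode ``client`` may be None: counts are computed and
+    returned before any upsert is attempted, so no Supabase connection
+    is required.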
+ """ + catalog_name = path.stem + # Strip trailing copy numbers: "Harvey Tool-End Mills (1)" -> "Harvey Tool-End Mills" + for suffix in (" (1)", " (2)", " (3)"): + if catalog_name.endswith(suffix): + catalog_name = catalog_name[: -len(suffix)] + break + + with open(path, "r", encoding="utf-8") as f: + raw = json.load(f) + + tools = raw.get("data", []) + if not isinstance(tools, list): + log.error("No 'data' array in %s", path.name) + return {"tools": 0, "skipped": 0} + + rows = build_reference_rows(catalog_name, tools) + skipped = len(tools) - len(rows) + + log.info( + "%s: %d tools -> %d reference rows (%d skipped)", + catalog_name, + len(tools), + len(rows), + skipped, + ) + + if dry_run or not rows: + return {"tools": len(rows), "skipped": skipped} + + # Upsert in batches + total_upserted = 0 + for i in range(0, len(rows), BATCH_SIZE): + batch = rows[i : i + BATCH_SIZE] + client.upsert( + "reference_catalog", + batch, + on_conflict="catalog_name,product_id", + ) + total_upserted += len(batch) + if i + BATCH_SIZE < len(rows): + log.info(" ... %d / %d upserted", total_upserted, len(rows)) + + return {"tools": total_upserted, "skipped": skipped} + + +def find_catalog_files(paths: list[str]) -> list[Path]: + """ + Resolve CLI arguments to a list of JSON files. + Accepts files or directories (scans for large .json files). + """ + result = [] + for p in paths: + path = Path(p) + if path.is_file() and path.suffix == ".json": + if path.stat().st_size >= MIN_FILE_SIZE: + result.append(path) + elif path.is_dir(): + for f in sorted(path.glob("*.json")): + if f.stat().st_size >= MIN_FILE_SIZE: + result.append(f) + return result + + +# Known hsmtools vendor catalog patterns (to filter out shop-specific files) +VENDOR_CATALOG_PATTERNS = [ + "Harvey Tool", + "Helical Solutions", + "Garr Tool", + "Guhring", + "Sandvik", + "Delta Tools", + "XEBEC", + "Kennametal", + "OSG", + "Dormer", + "Iscar", + "Mitsubishi", + "Walter", + "Seco", + "YG-1", + "Kyocera", + "Micro 100", + "Widia", + "Emuge", + "Nachi", + "Multi_Vendor", +] + + +def is_vendor_catalog(path: Path) -> bool: + """Check if a file looks like a vendor catalog (not a shop-specific library).""" + name = path.stem + return any(pat.lower() in name.lower() for pat in VENDOR_CATALOG_PATTERNS) + + +def main(argv: list[str] | None = None) -> int: + parser = argparse.ArgumentParser( + description="Ingest vendor tool catalogs into Supabase reference_catalog", + ) + parser.add_argument( + "paths", + nargs="+", + help="JSON files or directories to ingest", + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Parse and count only, no Supabase writes", + ) + parser.add_argument( + "--all", + action="store_true", + help="Include all JSON files (not just recognized vendor catalogs)", + ) + parser.add_argument( + "-v", "--verbose", + action="store_true", + help="Debug-level logging", + ) + args = parser.parse_args(argv) + + logging.basicConfig( + level=logging.DEBUG if args.verbose else logging.INFO, + format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + ) + + files = find_catalog_files(args.paths) + + if not args.all: + files = [f for f in files if is_vendor_catalog(f)] + + if not files: + log.error("No catalog files found in %s", args.paths) + return 2 + + log.info("Found %d catalog file(s) to ingest", len(files)) + + client = None if args.dry_run else SupabaseClient() + start = time.monotonic() + + total_tools = 0 + total_skipped = 0 + errors = 0 + + for f in files: + try: + counts = 
ingest_catalog_file(f, client=client, dry_run=args.dry_run) + total_tools += counts["tools"] + total_skipped += counts["skipped"] + except Exception as e: + log.error("Failed to ingest %s: %s", f.name, e) + errors += 1 + + elapsed = time.monotonic() - start + log.info("=" * 60) + log.info("Reference catalog ingest complete") + log.info(" %d tools ingested, %d skipped, %d errors", total_tools, total_skipped, errors) + log.info(" Elapsed: %.1fs", elapsed) + log.info("=" * 60) + + return 1 if errors else 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/plex_api.py b/plex_api.py index 38276f2..21f419c 100644 --- a/plex_api.py +++ b/plex_api.py @@ -7,6 +7,12 @@ Rate: 200 calls/minute """ +# bootstrap MUST be imported before anything reads PLEX_API_KEY/SECRET from +# os.environ — it injects values from .env.local (if present) so the dev +# loop doesn't require setting env vars in every shell. Real shell env +# always wins via setdefault semantics. +import bootstrap # noqa: F401 + import requests import json import csv @@ -15,13 +21,42 @@ from datetime import datetime # ───────────────────────────────────────────── -# CONFIGURATION — fill these in +# CONFIGURATION # ───────────────────────────────────────────── -API_KEY = "k3SmLW3y3mhqJiG6osixbYUmiPsHfB51" # from developers.plex.com → My Apps -TENANT_ID = "a6af9c99-bce5-4938-a007-364dc5603d08" # leave blank for default tenant (your PCN) -BASE_URL = "https://connect.plex.com" -TEST_URL = "https://test.connect.plex.com" -USE_TEST = False # flip to True to hit test environment first +# All values come from environment variables (loaded via bootstrap.py +# from .env.local). Credentials are never hardcoded or committed. +# +# PLEX_API_KEY — Consumer Key from the Plex Developer Portal +# PLEX_API_SECRET — Consumer Secret (currently optional — Plex +# gateway authenticates on key alone) +# PLEX_TENANT_ID — Target tenant UUID. Default is the Grace +# Engineering production tenant. Tenant IDs +# are not secrets, safe to commit as defaults. +# PLEX_USE_TEST — "1" to hit test.connect.plex.com instead of +# connect.plex.com (production). Default is False +# because the current Fusion2Plex app only exists +# in the production environment. +# +# History note: an earlier version of this file hardcoded an old +# Consumer Key and the wrong Grace UUID (a6af9c99-...). Both are dead. +# The verified-working configuration is what's defaulted below. +GRACE_TENANT_ID = "58f781ba-1691-4f32-b1db-381cdb21300c" + +API_KEY = os.environ.get("PLEX_API_KEY", "") +API_SECRET = os.environ.get("PLEX_API_SECRET", "") +TENANT_ID = os.environ.get("PLEX_TENANT_ID", GRACE_TENANT_ID) + +BASE_URL = "https://connect.plex.com" +TEST_URL = "https://test.connect.plex.com" +# PLEX_BASE_URL — explicit override for the Plex base URL (e.g. the local +# mock at tools/plex_mock/server.py running on localhost:8080). Empty +# string means "no override"; BASE_URL / TEST_URL selection applies. +# Used by the write-validation workflow in issue #92 so the sync can +# dress-rehearse against a fake-Plex without touching connect.plex.com. 
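+# Example (illustrative): with the mock from tools/plex_mock running
+# locally, setting PLEX_BASE_URL=http://localhost:8080 in .env.local
+# (or the shell) routes every PlexClient request to it.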
+OVERRIDE_URL = os.environ.get("PLEX_BASE_URL", "").strip() +USE_TEST = os.environ.get("PLEX_USE_TEST", "").strip().lower() in ( + "1", "true", "yes", "on", "enabled", +) OUTPUT_DIR = "C:/projects/plex-api/outputs" TOOL_LIB_DIR = "Z:\\Engineering\\Tooling\\Fusion_Libraries" # Mapped drive path containing JSON files @@ -30,13 +65,25 @@ # BASE CLIENT # ───────────────────────────────────────────── class PlexClient: - def __init__(self, api_key, tenant_id="", use_test=False): - self.base = TEST_URL if use_test else BASE_URL + def __init__(self, api_key, api_secret="", tenant_id="", use_test=False, base_url=None): + # Resolution order: + # 1. explicit base_url kwarg (tests, ad-hoc scripts) + # 2. PLEX_BASE_URL env var (deployment-time override — the mock) + # 3. TEST_URL if use_test else BASE_URL (original behavior) + explicit = (base_url or "").strip() + if explicit: + self.base = explicit + elif OVERRIDE_URL: + self.base = OVERRIDE_URL + else: + self.base = TEST_URL if use_test else BASE_URL self.headers = { "X-Plex-Connect-Api-Key": api_key, "Content-Type": "application/json", "Accept": "application/json", } + if api_secret: + self.headers["X-Plex-Connect-Api-Secret"] = api_secret if tenant_id: self.headers["X-Plex-Connect-Tenant-Id"] = tenant_id @@ -58,20 +105,87 @@ def _throttle(self): self._window_start = time.time() def get(self, collection, version, resource, params=None): - """GET request with auto-throttling and error handling""" + """ + GET request with auto-throttling. + + Returns the parsed JSON body on success, or None on any failure. + Backward-compatible legacy interface — callers that need to know + WHY a request failed (auth error vs network error vs 404 vs JSON + parse failure) should use ``get_envelope()`` instead. + """ + env = self.get_envelope(collection, version, resource, params) + if not env["ok"]: + # Preserve the historical "log to stdout" behaviour for the + # legacy callers, then collapse to None. + print(f" HTTP Error {env['status']}: {env['url']}") + if env["body"] is not None: + snippet = str(env["body"])[:300] + print(f" Response: {snippet}") + return None + return env["body"] + + def get_envelope(self, collection, version, resource, params=None): + """ + GET request returning a structured envelope. + + Unlike ``get()`` (which returns parsed JSON on success and None on + any failure), this method returns a dict so callers can distinguish: + + - successful empty / null responses + - authentication errors (401, 403) + - other HTTP errors (404, 5xx, ...) + - network failures (DNS, timeout, connection refused, ...) 
+ - JSON parse failures (response was text/html instead of JSON) + + Returns + ------- + dict + { + "ok": bool, # True iff response was 2xx + "status": int, # HTTP status; 0 if no response + "reason": str, # HTTP reason phrase or + # exception class name + "body": Any, # parsed JSON if possible, + # else text, else None + "elapsed_ms": int, + "url": str, + "error": str | None, # human-readable error if not ok + } + """ self._throttle() url = f"{self.base}/{collection}/{version}/{resource}" + started = time.perf_counter() + try: r = requests.get(url, headers=self.headers, params=params, timeout=30) - r.raise_for_status() - return r.json() - except requests.exceptions.HTTPError as e: - print(f" HTTP Error {r.status_code}: {url}") - print(f" Response: {r.text[:300]}") - return None - except Exception as e: - print(f" Error: {e}") - return None + except requests.exceptions.RequestException as e: + return { + "ok": False, + "status": 0, + "reason": e.__class__.__name__, + "body": None, + "elapsed_ms": int((time.perf_counter() - started) * 1000), + "url": url, + "error": str(e), + } + + elapsed_ms = int((time.perf_counter() - started) * 1000) + + # Try JSON first; fall back to text; fall back to None. + try: + body = r.json() + except ValueError: + body = r.text or None + + return { + "ok": r.ok, + "status": r.status_code, + "reason": r.reason or "", + "body": body, + "elapsed_ms": elapsed_ms, + "url": r.url, + "error": None if r.ok else f"HTTP {r.status_code} {r.reason}".strip(), + } def get_paginated(self, collection, version, resource, params=None, limit=100): """GET all pages of a paginated endpoint""" @@ -161,7 +275,7 @@ def extract_purchase_orders(client, supplier=None, date_from=None): if results: out = os.path.join(OUTPUT_DIR, "plex_purchase_orders.csv") write_csv(results, out) - print(f" Saved {len(results)} POs → {out}") + print(f" Saved {len(results)} POs -> {out}") return results @@ -180,7 +294,7 @@ def extract_parts(client, part_type=None): if results: out = os.path.join(OUTPUT_DIR, "plex_parts.csv") write_csv(results, out) - print(f" Saved {len(results)} parts → {out}") + print(f" Saved {len(results)} parts -> {out}") return results @@ -195,7 +309,7 @@ def extract_workcenters(client): if results: out = os.path.join(OUTPUT_DIR, "plex_workcenters.csv") write_csv(results, out) - print(f" Saved {len(results)} workcenters → {out}") + print(f" Saved {len(results)} workcenters -> {out}") return results @@ -210,10 +324,81 @@ def extract_operations(client): if results: out = os.path.join(OUTPUT_DIR, "plex_operations.csv") write_csv(results, out) - print(f" Saved {len(results)} operations → {out}") + print(f" Saved {len(results)} operations -> {out}") return results +# Category strings used by Plex Grace's supply-items records to identify +# cutting tools and inserts. Verified empirically against the live API +# on 2026-04-07. There are 1,109 such records on the Grace tenant. +TOOLING_CATEGORY = "Tools & Inserts" + + +def extract_supply_items(client, category=None): + """ + Pull supply-items from Plex and (by default) filter to cutting tools. + + Issue #2 — read the baseline tooling inventory from Plex. + + Plex Grace stores cutting tools and inserts as ``supply-items`` under + ``inventory/v1/inventory-definitions/supply-items``, NOT under + ``mdm/v1/parts`` (which is finished products). The schema is + identity-only — vendor part number, description, category, group — + no geometry. Geometry stays in Fusion as the source of truth. 
+ + Server-side query filters on this endpoint are silently ignored, so + we always pull the full set (~614 KB / 2,516 records on Grace) and + filter client-side. + + Parameters + ---------- + client : PlexClient + category : str | None + If provided, keep only records whose ``category`` matches. + Defaults to ``"Tools & Inserts"``. Pass ``""`` (empty string) + to disable the filter and return everything. + + Returns + ------- + list[dict] | None + The matching supply-item records, or None on a network/auth + failure. Records are also written to + ``outputs/plex_supply_items.csv`` for diff/snapshot use. + """ + if category is None: + category = TOOLING_CATEGORY + + print("\nExtracting Supply Items...") + raw = client.get("inventory", "v1", "inventory-definitions/supply-items") + + if raw is None: + print(" No response — credentials, network, or subscription issue") + return None + + # The response shape is a bare list of dicts (verified empirically) + if isinstance(raw, dict): + records = raw.get("data") or raw.get("items") or raw.get("rows") or [] + else: + records = raw or [] + + total = len(records) + + # Client-side filter + if category: + records = [r for r in records if r.get("category") == category] + print(f" Pulled {total} supply-items, filtered to {len(records)} " + f"with category={category!r}") + else: + print(f" Pulled {total} supply-items (no filter)") + + if records: + out = os.path.join(OUTPUT_DIR, "plex_supply_items.csv") + write_csv(records, out) + print(f" Saved {len(records)} supply-items -> {out}") + + return records + + # ───────────────────────────────────────────── # UTILITY # ───────────────────────────────────────────── @@ -271,18 +456,18 @@ def discover_all(client): status = r.status_code note = "" if status == 200: - note = "✅ Available" + note = "[OK] Available" elif status == 401: - note = "❌ Auth error" + note = "[ERR] Auth error" elif status == 403: - note = "🔒 Not subscribed" + note = "[LOCK] Not subscribed" elif status == 404: - note = "❓ Not found" + note = "[?] Not found" else: - note = f"⚠️ HTTP {status}" + note = f"[!] HTTP {status}" except Exception as e: status = 0 - note = f"❌ Exception: {e}" + note = f"[ERR] Exception: {e}" print(f" {note:25s} {collection}/{version}/{resource}") report.append({ @@ -296,7 +481,7 @@ def discover_all(client): out = os.path.join(OUTPUT_DIR, "plex_api_discovery.csv") write_csv(report, out) - print(f"\nDiscovery report saved → {out}") + print(f"\nDiscovery report saved -> {out}") return report @@ -335,24 +520,38 @@ def explore_parts(client): os.makedirs(os.path.dirname(out), exist_ok=True) with open(out, "w", encoding="utf-8") as f: json.dump(data, f, indent=2) - print(f"\n Full response saved → {out}") + print(f"\n Full response saved -> {out}") return data if __name__ == "__main__": + if not API_KEY: + raise SystemExit( + "Missing PLEX_API_KEY. Set it in the environment or in .env.local." + ) + client = PlexClient( api_key=API_KEY, + api_secret=API_SECRET, tenant_id=TENANT_ID, use_test=USE_TEST, ) print(f"Plex API Client — {'TEST' if USE_TEST else 'PRODUCTION'}") print(f"Base URL: {client.base}") - print(f"Key: {API_KEY[:8]}{'*' * 20}") + print(f"Tenant: {TENANT_ID or '(default)'}") + print(f"Key: {API_KEY[:8]}{'*' * 20}") + print(f"Secret: {'set' if API_SECRET else '(unset — Plex authenticates on key alone)'}") + + if not USE_TEST: + print() + print("WARNING: Connected to PRODUCTION Plex environment.") + print(" Reads are safe. 
Writes are blocked at the proxy unless") + print(" PLEX_ALLOW_WRITES=1 is also set in the environment.") # ── Focus: Parts endpoint exploration - explore_parts(client) + # explore_parts(client) # NOTE: pulls 19 MB unfiltered — leave commented # ── Other exploration (uncomment as needed) # discover_all(client) diff --git a/plex_diagnostics.py b/plex_diagnostics.py new file mode 100644 index 0000000..f787079 --- /dev/null +++ b/plex_diagnostics.py @@ -0,0 +1,254 @@ +""" +plex_diagnostics.py +Plex Connect — diagnostic checks +================================ +Small suite of read-only checks against the Plex API to verify connectivity, +authentication, and tenant routing. Used as a sanity layer before any sync +work and as the visible "is the right tenant connected?" indicator in the UI. + +All functions are read-only and safe to run against any tenant — including +G5, where we have read access only. +""" + +from typing import Any + +# ───────────────────────────────────────────── +# Known tenants +# Tenant IDs are not secrets — committing them is fine. These labels are +# used to make the whoami report human-readable. +# +# History: an earlier version of BRIEFING.md listed a different Grace UUID +# (a6af9c99-bce5-4938-a007-364dc5603d08). That value is dead — verified +# empirically against the live API. The real Grace tenant ID is the one +# below, which the Plex API itself returns when you GET mdm/v1/tenants +# with the Fusion2Plex Consumer Key. The old UUID is kept here labeled +# "Grace (stale)" so anyone hitting it gets a clear signal. +# ───────────────────────────────────────────── +GRACE_TENANT_ID = "58f781ba-1691-4f32-b1db-381cdb21300c" # verified Apr 2026 +GRACE_OLD_TENANT_ID = "a6af9c99-bce5-4938-a007-364dc5603d08" # dead, kept for diagnostics +G5_TENANT_ID = "b406c8c4-cef0-4d62-862c-1758b702cd02" + +KNOWN_TENANTS = { + GRACE_TENANT_ID: "Grace Engineering", + GRACE_OLD_TENANT_ID: "Grace (stale UUID — replace with verified one)", + G5_TENANT_ID: "G5", +} + + +# ───────────────────────────────────────────── +# Raw endpoint wrappers +# ───────────────────────────────────────────── +def list_tenants(client) -> Any: + """ + GET /mdm/v1/tenants + + Returns the list of tenants visible to the active credential. + For a correctly-scoped credential this is typically a single tenant + (the one your API key is bound to). Useful for confirming which + tenant the credential actually lands on. + """ + return client.get("mdm", "v1", "tenants") + + +def get_tenant(client, tenant_id: str) -> Any: + """ + GET /mdm/v1/tenants/{id} + + Returns the full record for a specific tenant. 404 if the tenant + does not exist or is not visible to the credential. + """ + return client.get("mdm", "v1", f"tenants/{tenant_id}") + + +# ───────────────────────────────────────────── +# Composite check — the main diagnostic +# ───────────────────────────────────────────── +def tenant_whoami(client, configured_tenant_id: str = "") -> dict: + """ + Composite tenant diagnostic. + + Calls list_tenants() and (if a configured ID is provided) get_tenant(), + then compares the visible tenant(s) against the known Grace and G5 UUIDs + so the UI can show a clear "is this the right tenant?" status. + + Uses ``client.get_envelope()`` so HTTP errors (401, 403, 404, 5xx) and + network failures surface as distinct ``match`` values instead of being + swallowed into ``no_data``. 
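+
+    A caller typically branches on ``match`` (sketch):
+
+        report = tenant_whoami(client, TENANT_ID)
+        if report["match"] in ("auth_failed", "request_failed"):
+            print(report["summary"])  # stop before any sync work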
+ + Returns a structured report: + { + "configured_tenant_id": "", + "configured_tenant_label": "Grace Engineering" | "G5" | "unknown", + "visible_tenants": [{id, code, name, label}, ...], + "list_tenants_raw": , + "list_tenants_envelope": {ok, status, reason, elapsed_ms, error}, + "get_tenant_raw": , + "match": "grace" | "g5" | "configured" | + "other" | "no_data" | + "auth_failed" | "request_failed", + "summary": "", + } + """ + report: dict = { + "configured_tenant_id": configured_tenant_id or "", + "configured_tenant_label": KNOWN_TENANTS.get(configured_tenant_id, "unknown"), + "visible_tenants": [], + "list_tenants_raw": None, + "list_tenants_envelope": None, + "get_tenant_raw": None, + "match": "no_data", + "summary": "", + } + + # ── Step 1: list_tenants via get_envelope so HTTP errors surface ──── + list_env = client.get_envelope("mdm", "v1", "tenants") + report["list_tenants_envelope"] = { + "ok": list_env["ok"], + "status": list_env["status"], + "reason": list_env["reason"], + "elapsed_ms": list_env["elapsed_ms"], + "error": list_env["error"], + } + report["list_tenants_raw"] = list_env["body"] + + if not list_env["ok"]: + status = list_env["status"] + if status in (401, 403): + report["match"] = "auth_failed" + report["summary"] = ( + f"[ERROR] list_tenants returned HTTP {status} {list_env['reason']}. " + f"Check that PLEX_API_KEY and PLEX_API_SECRET are valid in .env.local " + f"or your shell environment. Underlying error: {list_env['error']}" + ) + elif status == 0: + report["match"] = "request_failed" + report["summary"] = ( + f"[ERROR] list_tenants could not reach Plex: {list_env['error']}. " + f"Check network connectivity and that {client.base} is reachable." + ) + else: + report["match"] = "request_failed" + report["summary"] = ( + f"[ERROR] list_tenants returned HTTP {status} {list_env['reason']}: " + f"{list_env['error']}" + ) + return report + + listed = list_env["body"] + + # Normalize the response. Plex sometimes wraps lists in {data|items|rows}. + if isinstance(listed, list): + items = listed + elif isinstance(listed, dict): + items = ( + listed.get("items") + or listed.get("data") + or listed.get("rows") + or [listed] # single tenant returned as a bare object + ) + else: + items = [] + + visible: list[dict] = [] + for t in items: + if not isinstance(t, dict): + continue + tid = t.get("id") or t.get("tenantId") or t.get("Id") + visible.append({ + "id": tid, + "code": t.get("code") or t.get("Code"), + "name": t.get("name") or t.get("Name"), + "label": KNOWN_TENANTS.get(tid, "unknown"), + }) + report["visible_tenants"] = visible + + # ── Step 2: get_tenant for the configured ID ──────────────── + if configured_tenant_id: + report["get_tenant_raw"] = get_tenant(client, configured_tenant_id) + + # ── Step 3: match logic ───────────────────── + visible_ids = {t["id"] for t in visible if t.get("id")} + + if not visible_ids: + report["match"] = "no_data" + report["summary"] = ( + "list_tenants returned no data — the response was empty or " + "contained no parseable tenant IDs. Check the raw response " + "in this report." + ) + return report + + if GRACE_TENANT_ID in visible_ids: + report["match"] = "grace" + report["summary"] = ( + "[OK] Connected to Grace Engineering. Tenant routing is resolved — " + "you may flip TENANT_ID in plex_api.py to the Grace UUID and " + "begin write-path testing." + ) + return report + + if G5_TENANT_ID in visible_ids: + report["match"] = "g5" + report["summary"] = ( + "[WARN] Connected to G5 (read-only, another company's data). 
" + "Awaiting IT (Courtney) to complete tenant routing for Grace. " + "All writes are prohibited until this resolves — see issue #1." + ) + return report + + if configured_tenant_id and configured_tenant_id in visible_ids: + report["match"] = "configured" + report["summary"] = ( + f"Connected to the configured tenant " + f"({report['configured_tenant_label']}), which is neither " + f"Grace nor G5. Verify this is intentional." + ) + return report + + report["match"] = "other" + report["summary"] = ( + "Connected to an unrecognized tenant. Inspect visible_tenants in " + "this report and confirm the credential routing is what you expect." + ) + return report + + +# ───────────────────────────────────────────── +# Standalone test +# ───────────────────────────────────────────── +if __name__ == "__main__": + import json + import sys + + # Force UTF-8 stdout so em-dashes / brackets in summary strings don't + # blow up on a Windows cp1252 console. + try: + sys.stdout.reconfigure(encoding="utf-8") + except Exception: + pass + + from plex_api import PlexClient, API_KEY, API_SECRET, TENANT_ID, USE_TEST + + if not API_KEY or not API_SECRET: + raise SystemExit( + "Missing credentials. Set PLEX_API_KEY and PLEX_API_SECRET " + "environment variables before running this diagnostic." + ) + + client = PlexClient( + api_key=API_KEY, + api_secret=API_SECRET, + tenant_id=TENANT_ID, + use_test=USE_TEST, + ) + + print(f"Plex Diagnostics — {'TEST' if USE_TEST else 'PRODUCTION'}") + print(f"Base URL: {client.base}") + print(f"Configured TENANT_ID: {TENANT_ID}\n") + + report = tenant_whoami(client, TENANT_ID) + + print("─" * 60) + print(report["summary"]) + print("─" * 60) + print(json.dumps(report, indent=2, default=str)) diff --git a/populate_supply_items.py b/populate_supply_items.py new file mode 100644 index 0000000..7a65d78 --- /dev/null +++ b/populate_supply_items.py @@ -0,0 +1,285 @@ +#!/usr/bin/env python +""" +populate_supply_items.py +Compute Plex supply-item payloads from ``tools`` and stage into +``plex_supply_items``. +Grace Engineering -- Datum project -- Issue #79 +============================================================= +For every row in ``tools`` with a non-empty ``product_id`` (the eventual +``supplyItemNumber`` on the Plex wire), compute the 6-field payload and +upsert into ``plex_supply_items``. **No Plex HTTP calls** — this is +pure Fusion → Supabase staging. + +The three DB-defaulted columns (``category``, ``inventory_unit``, +``item_type``) are omitted from the upsert so the migration defaults +govern. Only the three derived columns are written: + + description <- tools.description + item_group <- mapped from tools.type (default "Machining") + supply_item_number <- tools.product_id + +``plex_id`` and ``posted_to_plex_at`` stay NULL — the writeback worker +(#3) fills those after a successful POST to Plex. + +Usage +----- + py populate_supply_items.py # run the populate + py populate_supply_items.py --dry-run # compute, no writes + py populate_supply_items.py -v # debug logging + +Exit codes +---------- + 0 All eligible tools staged + 1 One or more rows failed (partial) + 2 Fatal: config missing, no tools, etc. 
+""" +from __future__ import annotations + +import argparse +import logging +import sys +import time +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any + +_PROJECT_ROOT = Path(__file__).resolve().parent +if str(_PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(_PROJECT_ROOT)) + +import bootstrap # noqa: E402, F401 -- loads .env.local + +from supabase_client import SupabaseClient # noqa: E402 + +log = logging.getLogger("datum.populate_supply_items") + +# --------------------------------------------------------------- +# Type → Plex group mapping +# --------------------------------------------------------------- +# Grace's Plex tenant has two supply-item groups for tooling: +# "Machining" (1,039 items) — cutting tools, inserts, drills, etc. +# "Tool Room" (104 items) — holders, collets, fixtures, etc. +# All Fusion tool types that survive the holder/probe exclusion filter +# in sync_supabase.py are cutting tools, so "Machining" is the +# universal default. Override per-type if needed later. +TYPE_TO_GROUP: dict[str, str] = { + # Every Fusion tool type maps to "Machining" today. + # Add overrides here when needed, e.g.: + # "holder": "Tool Room", +} + +DEFAULT_GROUP = "Machining" + + +def tool_type_to_group(tool_type: str | None) -> str: + """Map a ``tools.type`` value to a Plex supply-item group name.""" + if not tool_type: + return DEFAULT_GROUP + return TYPE_TO_GROUP.get(tool_type.lower(), DEFAULT_GROUP) + + +# --------------------------------------------------------------- +# Payload builder (pure — no I/O) +# --------------------------------------------------------------- +def build_supply_item_row(tool: dict[str, Any]) -> dict[str, Any]: + """Build a ``plex_supply_items`` row dict from a ``tools`` row. + + Only includes the three derived columns plus ``fusion_guid`` (the PK). + The three defaulted columns (``category``, ``inventory_unit``, + ``item_type``) are omitted so the DB defaults apply on INSERT, and + are left untouched on conflict-merge UPDATE. + + Parameters + ---------- + tool : dict + A ``tools`` row with at least ``fusion_guid``, ``product_id``, + ``description``, and ``type``. + + Returns + ------- + dict + Row suitable for ``SupabaseClient.upsert("plex_supply_items", ...)``. 
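+        For example (illustrative values):
+        ``{"fusion_guid": "a3f1...", "description": "1/4 BALL END MILL",
+        "item_group": "Machining", "supply_item_number": "990910"}``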
+ """ + return { + "fusion_guid": tool["fusion_guid"], + "description": tool.get("description") or "", + "item_group": tool_type_to_group(tool.get("type")), + "supply_item_number": tool.get("product_id") or "", + } + + +# --------------------------------------------------------------- +# Result tracking +# --------------------------------------------------------------- +@dataclass +class RowResult: + fusion_guid: str + status: str # "staged" | "skipped" | "fail" + message: str = "" + + +@dataclass +class PopulateReport: + results: list[RowResult] = field(default_factory=list) + start_time: float = 0.0 + end_time: float = 0.0 + + @property + def staged(self) -> list[RowResult]: + return [r for r in self.results if r.status == "staged"] + + @property + def skipped(self) -> list[RowResult]: + return [r for r in self.results if r.status == "skipped"] + + @property + def failed(self) -> list[RowResult]: + return [r for r in self.results if r.status == "fail"] + + @property + def elapsed(self) -> float: + return self.end_time - self.start_time + + def print_summary(self) -> None: + log.info("=" * 60) + log.info("Supply-item staging complete") + log.info( + " %d staged, %d skipped (no product_id), %d failed", + len(self.staged), + len(self.skipped), + len(self.failed), + ) + log.info(" Elapsed: %.1fs", self.elapsed) + log.info("=" * 60) + + +# --------------------------------------------------------------- +# Main populate +# --------------------------------------------------------------- +def populate_supply_items( + sb: SupabaseClient, + *, + dry_run: bool = False, +) -> PopulateReport: + """Read ``tools``, compute payloads, upsert into ``plex_supply_items``. + + Tools with an empty ``product_id`` are skipped — the eventual + ``supplyItemNumber`` would be blank, which Plex rejects. + + Returns a PopulateReport with per-row results. + """ + report = PopulateReport(start_time=time.monotonic()) + + # 1. Fetch all tools + tools = sb.select( + "tools", + columns="fusion_guid,description,product_id,type", + ) + log.info("Found %d tool(s) in Supabase", len(tools)) + + if not tools: + report.end_time = time.monotonic() + return report + + # 2. Build rows, skipping tools without a product_id + rows_to_upsert: list[dict[str, Any]] = [] + for tool in tools: + fusion_guid = tool["fusion_guid"] + product_id = (tool.get("product_id") or "").strip() + + if not product_id: + report.results.append(RowResult(fusion_guid, "skipped", "no product_id")) + log.debug(" SKIP %s: no product_id", fusion_guid) + continue + + row = build_supply_item_row(tool) + rows_to_upsert.append(row) + report.results.append(RowResult(fusion_guid, "staged")) + + log.info( + " %d eligible, %d skipped (no product_id)", + len(rows_to_upsert), + len(report.skipped), + ) + + if not rows_to_upsert: + report.end_time = time.monotonic() + return report + + if dry_run: + log.info(" DRY-RUN: would upsert %d row(s)", len(rows_to_upsert)) + report.end_time = time.monotonic() + return report + + # 3. 
Batch upsert + try: + sb.upsert("plex_supply_items", rows_to_upsert, on_conflict="fusion_guid") + log.info(" Upserted %d row(s) to plex_supply_items", len(rows_to_upsert)) + except Exception as e: + log.error(" Supabase upsert failed: %s", e) + # Mark all staged rows as failed + for r in report.results: + if r.status == "staged": + r.status = "fail" + r.message = str(e) + + report.end_time = time.monotonic() + return report + + +# --------------------------------------------------------------- +# CLI +# --------------------------------------------------------------- +def main(argv: list[str] | None = None) -> int: + parser = argparse.ArgumentParser( + description="Datum -- populate plex_supply_items staging table from tools", + ) + parser.add_argument("--dry-run", action="store_true", + help="Compute payloads but do not write to Supabase") + parser.add_argument("-v", "--verbose", action="store_true", + help="Enable debug-level logging") + parser.add_argument("--log-file", type=str, default=None, + help="Append logs to this file (in addition to stdout)") + args = parser.parse_args(argv) + + level = logging.DEBUG if args.verbose else logging.INFO + handlers: list[logging.Handler] = [logging.StreamHandler()] + if args.log_file: + handlers.append(logging.FileHandler(args.log_file, encoding="utf-8")) + logging.basicConfig( + level=level, + format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=handlers, + ) + + log.info("Supply-item staging starting%s", " (dry-run)" if args.dry_run else "") + + try: + sb = SupabaseClient() + except Exception as e: + log.critical("Config error: %s", e) + return 2 + + try: + report = populate_supply_items(sb, dry_run=args.dry_run) + except Exception as e: + log.critical("Fatal error: %s", e) + return 2 + + report.print_summary() + + if not report.results: + log.warning("No tools found in Supabase") + return 2 + + return 1 if report.failed else 0 + + +def cli() -> None: + """Console-script entry point (``datum-populate-supply-items``).""" + sys.exit(main()) + + +if __name__ == "__main__": + cli() diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..bdc4ef3 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,51 @@ +[build-system] +requires = ["setuptools>=68.0"] +build-backend = "setuptools.build_meta" + +[project] +name = "datum" +version = "0.1.0" +description = "Fusion 360 tool library sync pipeline — Grace Engineering" +requires-python = ">=3.11" +dependencies = [ + "flask>=3.0", + "requests>=2.31", +] + +[project.optional-dependencies] +dev = [ + "pytest>=8.0", +] + +[project.scripts] +datum-sync = "sync:cli" +datum-sync-inventory = "sync_tool_inventory:cli" +datum-ingest-reference = "ingest_reference:main" +datum-enrich = "enrich:main" +datum-populate-supply-items = "populate_supply_items:cli" +datum-plex-mock-serve = "tools.plex_mock.server:main" +datum-plex-mock-snapshot = "tools.plex_mock.capture_snapshots:main" +datum-plex-mock-diff = "tools.plex_mock.diff:main" + +[tool.setuptools] +# Flat layout — all modules live at the repo root, no src/ directory. +py-modules = [ + "app", + "aps_client", + "bootstrap", + "build_supply_item_payload", + "populate_supply_items", + "plex_api", + "plex_diagnostics", + "supabase_client", + "sync", + "sync_supabase", + "sync_tool_inventory", + "tool_library_loader", + "validate_library", + "enrich", + "ingest_reference", +] +# Explicit package list — flat root modules use py-modules above; +# sub-packages with __init__.py are listed here. 
+packages = ["tools", "tools.plex_mock"] diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..ee23f6c --- /dev/null +++ b/pytest.ini @@ -0,0 +1,8 @@ +[pytest] +testpaths = tests +python_files = test_*.py +python_classes = Test* +python_functions = test_* +addopts = -v --tb=short +filterwarnings = + ignore::DeprecationWarning diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..a266747 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,2 @@ +-r requirements.txt +pytest>=8.0 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..a0d407c --- /dev/null +++ b/requirements.txt @@ -0,0 +1,2 @@ +flask>=3.0 +requests>=2.31 diff --git a/run_dev.py b/run_dev.py new file mode 100644 index 0000000..ae9899c --- /dev/null +++ b/run_dev.py @@ -0,0 +1,118 @@ +""" +run_dev.py +Local development launcher for app.py. +====================================== + +Forces .env.local values to OVERRIDE shell environment variables, then +runs app.py as if it were the main entry point. + +Why this exists +--------------- +bootstrap.py uses os.environ.setdefault() so that real shell env vars +always win over .env.local. That's correct for production deployment, +where credentials should come from the host's secure environment, not +a file. But it's wrong for local dev where: + + - A stale shell env var (e.g. an old credential set via setx in the + Windows registry years ago) silently shadows .env.local + - Debugging "why isn't it working" wastes hours + +This launcher forces the override for local dev only. Production +deployments still use `py app.py` directly, which respects +bootstrap.setdefault() and lets the host shell env take precedence. + +Usage +----- + py run_dev.py + +Or via Claude Preview — .claude/launch.json points here. +""" +import os +import sys +from pathlib import Path + +# Force stdout to UTF-8 before importing app — same reason as in app.py: +# Windows cp1252 console can't encode em-dashes / arrows / etc., and a +# print() failure mid-Flask-request turns into a 500. +try: + sys.stdout.reconfigure(encoding="utf-8") +except Exception: + pass + +PROJECT_ROOT = Path(__file__).resolve().parent +DEFAULT_ENV_FILE = PROJECT_ROOT / ".env.local" + + +def force_override_from_env_local(path: Path | str | None = None) -> int: + """ + Read .env.local and write each KEY=VALUE pair into os.environ via + direct assignment (NOT setdefault). Existing shell env vars with + the same name are OVERRIDDEN. + + Parameters + ---------- + path : Path | str | None + Override the file path. Defaults to ``/.env.local``. + + Returns + ------- + int + The number of os.environ keys that were either added or + actually changed (entries already at the desired value count + as zero). 
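+
+        For example, if the shell already exports PLEX_USE_TEST=0 but
+        .env.local says PLEX_USE_TEST=1, the entry is rewritten to "1"
+        and counted as one change.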
+    """
+    if path is None:
+        path = DEFAULT_ENV_FILE
+    else:
+        path = Path(path)
+
+    if not path.exists():
+        return 0
+
+    changed = 0
+    for line in path.read_text(encoding="utf-8").splitlines():
+        line = line.strip()
+        if not line or line.startswith("#") or "=" not in line:
+            continue
+
+        key, _, value = line.partition("=")
+        key = key.strip()
+        value = value.strip()
+
+        # Strip matched surrounding quotes (' or ")
+        if len(value) >= 2 and value[0] == value[-1] and value[0] in ("'", '"'):
+            value = value[1:-1]
+
+        if not key:
+            continue
+
+        # Direct assignment — OVERRIDE the shell value if it exists
+        if os.environ.get(key) != value:
+            os.environ[key] = value
+            changed += 1
+
+    return changed
+
+
+def main() -> None:
+    n = force_override_from_env_local()
+    if n:
+        print(
+            f"[run_dev] Loaded {n} env var(s) from .env.local "
+            f"(overriding any shell-level values)"
+        )
+    elif not DEFAULT_ENV_FILE.exists():
+        print(
+            f"[run_dev] WARNING: {DEFAULT_ENV_FILE.name} not found. "
+            f"Falling back to shell env vars only."
+        )
+
+    # Re-execute app.py as __main__ so its existing startup banner +
+    # app.run() block fires correctly. Using runpy keeps the executed
+    # module's __name__ == '__main__'.
+    import runpy
+    runpy.run_path(str(PROJECT_ROOT / "app.py"), run_name="__main__")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/gcp/00-provision.sh b/scripts/gcp/00-provision.sh
new file mode 100755
index 0000000..32aa3a3
--- /dev/null
+++ b/scripts/gcp/00-provision.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# 00-provision.sh — end-to-end Datum GCP provisioning wrapper.
+#
+# Usage:
+#   export PROJECT_ID=your-project-id
+#   export BILLING_ACCOUNT=XXXXXX-XXXXXX-XXXXXX
+#   gcloud auth login                # browser flow; do this first
+#   gcloud config set project "$PROJECT_ID"
+#   gcloud beta billing projects link "$PROJECT_ID" --billing-account="$BILLING_ACCOUNT"
+#   bash scripts/gcp/00-provision.sh
+#
+# The script is idempotent. Re-running after a partial failure picks up
+# where it left off — every resource is checked before it is created.
+#
+# To tear down: bash scripts/gcp/99-teardown.sh
+set -euo pipefail
+
+HERE="$(cd "$(dirname "$0")" && pwd)"
+source "$HERE/env.sh"
+
+say "Datum GCP provisioning → project $PROJECT_ID in $REGION"
+echo
+
+for phase in 01-apis 02-vpc 03-psc-sql 04-service-accounts 05-secrets 06-cloud-sql 07-vms; do
+  echo
+  say "Phase: $phase"
+  bash "$HERE/${phase}.sh"
+done
+
+echo
+ok "Provisioning complete."
+echo
+echo "Next steps:"
+echo "  1. Populate secret values:"
+for s in "${SECRETS[@]}"; do
+  if [[ "$s" != "db-url" ]]; then
+    echo "     echo -n 'VALUE' | gcloud secrets versions add $s --data-file=- --project=$PROJECT_ID"
+  fi
+done
+echo "  2. SSH to a VM and confirm connectivity:"
+echo "     gcloud compute ssh $RUNTIME_VM --zone=$ZONE --project=$PROJECT_ID --tunnel-through-iap"
+echo "  3. Verify Cloud SQL reachable from datum-runtime:"
+echo "     gcloud compute ssh $RUNTIME_VM --zone=$ZONE --tunnel-through-iap -- \\"
+echo "       'sudo apt-get update && sudo apt-get install -y postgresql-client && \\"
+echo "        psql \"\$(gcloud secrets versions access latest --secret=db-url)\" -c \"select 1;\"'"
diff --git a/scripts/gcp/01-apis.sh b/scripts/gcp/01-apis.sh
new file mode 100755
index 0000000..20ed945
--- /dev/null
+++ b/scripts/gcp/01-apis.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+# 01-apis.sh — enable the GCP APIs Datum depends on.
+# Idempotent: `gcloud services enable` is a no-op if the API is already on.
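+#
+# Optional check afterwards (assumes gcloud is authed to the project):
+#   gcloud services list --enabled --project="$PROJECT_ID" | grep sqladmin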
+set -euo pipefail +source "$(dirname "$0")/env.sh" + +say "Enabling APIs on project $PROJECT_ID" + +APIS=( + compute.googleapis.com + sqladmin.googleapis.com + secretmanager.googleapis.com + iap.googleapis.com + servicenetworking.googleapis.com + cloudscheduler.googleapis.com + cloudresourcemanager.googleapis.com + iam.googleapis.com + logging.googleapis.com +) + +gcloud services enable "${APIS[@]}" --project="$PROJECT_ID" +ok "APIs enabled (${#APIS[@]} services)" diff --git a/scripts/gcp/02-vpc.sh b/scripts/gcp/02-vpc.sh new file mode 100755 index 0000000..0478656 --- /dev/null +++ b/scripts/gcp/02-vpc.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +# 02-vpc.sh — custom-mode VPC, primary subnet with secondary ranges reserved +# for future GKE, Cloud Router + Cloud NAT for private egress, and firewall +# rules for IAP SSH and internal traffic. +set -euo pipefail +source "$(dirname "$0")/env.sh" + +# ── VPC ──────────────────────────────────────────────────────────────────── +ensure \ + "gcloud compute networks describe $VPC_NAME --project=$PROJECT_ID" \ + "gcloud compute networks create $VPC_NAME \ + --project=$PROJECT_ID \ + --subnet-mode=custom \ + --bgp-routing-mode=regional" \ + "VPC $VPC_NAME" + +# ── Subnet (primary + secondary ranges for future GKE) ───────────────────── +ensure \ + "gcloud compute networks subnets describe $SUBNET_NAME \ + --region=$REGION --project=$PROJECT_ID" \ + "gcloud compute networks subnets create $SUBNET_NAME \ + --project=$PROJECT_ID \ + --network=$VPC_NAME \ + --region=$REGION \ + --range=$SUBNET_PRIMARY_RANGE \ + --secondary-range=pods=$SUBNET_SECONDARY_PODS,services=$SUBNET_SECONDARY_SVC \ + --enable-private-ip-google-access" \ + "subnet $SUBNET_NAME" + +# ── Cloud Router (prerequisite for Cloud NAT) ────────────────────────────── +ensure \ + "gcloud compute routers describe $ROUTER_NAME \ + --region=$REGION --project=$PROJECT_ID" \ + "gcloud compute routers create $ROUTER_NAME \ + --project=$PROJECT_ID \ + --network=$VPC_NAME \ + --region=$REGION" \ + "Cloud Router $ROUTER_NAME" + +# ── Cloud NAT — outbound internet for private VMs ────────────────────────── +ensure \ + "gcloud compute routers nats describe $NAT_NAME \ + --router=$ROUTER_NAME --region=$REGION --project=$PROJECT_ID" \ + "gcloud compute routers nats create $NAT_NAME \ + --project=$PROJECT_ID \ + --router=$ROUTER_NAME \ + --region=$REGION \ + --nat-all-subnet-ip-ranges \ + --auto-allocate-nat-external-ips" \ + "Cloud NAT $NAT_NAME" + +# ── Firewall rule: allow SSH from IAP range only ─────────────────────────── +ensure \ + "gcloud compute firewall-rules describe datum-allow-iap-ssh --project=$PROJECT_ID" \ + "gcloud compute firewall-rules create datum-allow-iap-ssh \ + --project=$PROJECT_ID \ + --network=$VPC_NAME \ + --direction=INGRESS \ + --action=ALLOW \ + --rules=tcp:22 \ + --source-ranges=$IAP_SOURCE_RANGE \ + --target-tags=ssh-access \ + --description='Allow SSH from Google IAP forwarders only'" \ + "firewall rule datum-allow-iap-ssh" + +# ── Firewall rule: allow all internal VPC traffic ────────────────────────── +ensure \ + "gcloud compute firewall-rules describe datum-allow-internal --project=$PROJECT_ID" \ + "gcloud compute firewall-rules create datum-allow-internal \ + --project=$PROJECT_ID \ + --network=$VPC_NAME \ + --direction=INGRESS \ + --action=ALLOW \ + --rules=all \ + --source-ranges=$SUBNET_PRIMARY_RANGE \ + --description='Allow all internal traffic within the primary subnet'" \ + "firewall rule datum-allow-internal" + +ok "VPC ready" diff --git 
a/scripts/gcp/03-psc-sql.sh b/scripts/gcp/03-psc-sql.sh new file mode 100755 index 0000000..b008ffd --- /dev/null +++ b/scripts/gcp/03-psc-sql.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# 03-psc-sql.sh — Private Service Connection for Cloud SQL. +# Reserves an IP range on the VPC and establishes peering with +# servicenetworking.googleapis.com so managed services (Cloud SQL) can +# allocate private IPs inside the VPC. +set -euo pipefail +source "$(dirname "$0")/env.sh" + +# ── Reserve the PSC IP range ─────────────────────────────────────────────── +ensure \ + "gcloud compute addresses describe $PSC_RANGE_NAME \ + --global --project=$PROJECT_ID" \ + "gcloud compute addresses create $PSC_RANGE_NAME \ + --project=$PROJECT_ID \ + --global \ + --purpose=VPC_PEERING \ + --addresses=$PSC_RANGE_START \ + --prefix-length=$PSC_RANGE_PREFIX \ + --network=$VPC_NAME \ + --description='Reserved range for Cloud SQL private IP'" \ + "PSC IP range $PSC_RANGE_NAME" + +# ── Establish VPC peering with servicenetworking ─────────────────────────── +# `vpc-peerings connect` is not describe-compatible in the same way, so check +# by listing existing peerings on the network. +if gcloud services vpc-peerings list \ + --network="$VPC_NAME" \ + --service=servicenetworking.googleapis.com \ + --project="$PROJECT_ID" \ + --format="value(reservedPeeringRanges)" 2>/dev/null | grep -q "$PSC_RANGE_NAME"; then + skip "VPC peering with servicenetworking" +else + say "creating VPC peering with servicenetworking" + gcloud services vpc-peerings connect \ + --project="$PROJECT_ID" \ + --service=servicenetworking.googleapis.com \ + --ranges="$PSC_RANGE_NAME" \ + --network="$VPC_NAME" + ok "VPC peering with servicenetworking" +fi + +ok "PSC ready for Cloud SQL" diff --git a/scripts/gcp/04-service-accounts.sh b/scripts/gcp/04-service-accounts.sh new file mode 100755 index 0000000..0a6a338 --- /dev/null +++ b/scripts/gcp/04-service-accounts.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# 04-service-accounts.sh — per-VM service accounts and IAM bindings. +# datum-runtime-sa runs the nightly sync and Flask API +# datum-dev-sa used when Shane SSHes into datum-dev +# +# Project-level roles bound here: Cloud SQL Client, Log Writer. +# Secret Accessor is bound PER-SECRET in 05-secrets.sh so datum-dev-sa can +# be denied access to aps-refresh-token (runtime-only). +set -euo pipefail +source "$(dirname "$0")/env.sh" + +RUNTIME_EMAIL="${RUNTIME_SA}@${PROJECT_ID}.iam.gserviceaccount.com" +DEV_EMAIL="${DEV_SA}@${PROJECT_ID}.iam.gserviceaccount.com" + +# ── Create service accounts ──────────────────────────────────────────────── +ensure \ + "gcloud iam service-accounts describe $RUNTIME_EMAIL --project=$PROJECT_ID" \ + "gcloud iam service-accounts create $RUNTIME_SA \ + --project=$PROJECT_ID \ + --display-name='Datum runtime (sync + API)'" \ + "service account $RUNTIME_SA" + +ensure \ + "gcloud iam service-accounts describe $DEV_EMAIL --project=$PROJECT_ID" \ + "gcloud iam service-accounts create $DEV_SA \ + --project=$PROJECT_ID \ + --display-name='Datum dev VM'" \ + "service account $DEV_SA" + +# Wait for IAM propagation before binding roles. SA creation returns +# immediately but the SA isn't always visible to add-iam-policy-binding +# for ~30s. A missing SA surfaces as a confusing "Policy modification +# failed" error with a misleading lint-condition hint. 
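+# (A describe-and-retry loop would be tighter than a fixed sleep; the
+# flat 30s keeps the script simple.)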
+say "waiting for SA propagation (30s)" +sleep 30 + +# ── Project-level role bindings ──────────────────────────────────────────── +# add-iam-policy-binding is effectively idempotent (no-op if the binding +# already exists). +say "binding project-level IAM roles" + +for ROLE in roles/cloudsql.client roles/logging.logWriter; do + gcloud projects add-iam-policy-binding "$PROJECT_ID" \ + --member="serviceAccount:$RUNTIME_EMAIL" \ + --role="$ROLE" \ + --condition=None \ + --quiet >/dev/null + gcloud projects add-iam-policy-binding "$PROJECT_ID" \ + --member="serviceAccount:$DEV_EMAIL" \ + --role="$ROLE" \ + --condition=None \ + --quiet >/dev/null +done + +ok "IAM bindings applied" diff --git a/scripts/gcp/05-secrets.sh b/scripts/gcp/05-secrets.sh new file mode 100755 index 0000000..f0eba90 --- /dev/null +++ b/scripts/gcp/05-secrets.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +# 05-secrets.sh — create Secret Manager slots (empty) and grant access. +# Values are populated later via `gcloud secrets versions add`. +# datum-dev-sa does not get aps-refresh-token (token rotation belongs to the +# runtime SA; dev shouldn't contend). +set -euo pipefail +source "$(dirname "$0")/env.sh" + +RUNTIME_EMAIL="${RUNTIME_SA}@${PROJECT_ID}.iam.gserviceaccount.com" +DEV_EMAIL="${DEV_SA}@${PROJECT_ID}.iam.gserviceaccount.com" + +# Secrets that dev should NOT see. Runtime gets access to everything. +DEV_DENIED=(aps-refresh-token) + +is_dev_denied() { + local s="$1" + for denied in "${DEV_DENIED[@]}"; do + [[ "$s" == "$denied" ]] && return 0 + done + return 1 +} + +for SECRET in "${SECRETS[@]}"; do + ensure \ + "gcloud secrets describe $SECRET --project=$PROJECT_ID" \ + "gcloud secrets create $SECRET \ + --project=$PROJECT_ID \ + --replication-policy=automatic" \ + "secret $SECRET" + + # Runtime SA gets access to every secret. + gcloud secrets add-iam-policy-binding "$SECRET" \ + --project="$PROJECT_ID" \ + --member="serviceAccount:$RUNTIME_EMAIL" \ + --role="roles/secretmanager.secretAccessor" \ + --condition=None \ + --quiet >/dev/null + + # Dev SA gets access unless the secret is on the denied list. + if is_dev_denied "$SECRET"; then + skip "dev-sa access to $SECRET (denied by policy)" + else + gcloud secrets add-iam-policy-binding "$SECRET" \ + --project="$PROJECT_ID" \ + --member="serviceAccount:$DEV_EMAIL" \ + --role="roles/secretmanager.secretAccessor" \ + --condition=None \ + --quiet >/dev/null + fi +done + +ok "Secret Manager slots ready (${#SECRETS[@]} secrets)" diff --git a/scripts/gcp/06-cloud-sql.sh b/scripts/gcp/06-cloud-sql.sh new file mode 100755 index 0000000..a4f177c --- /dev/null +++ b/scripts/gcp/06-cloud-sql.sh @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +# 06-cloud-sql.sh — create the Cloud SQL Postgres instance with private IP, +# add the `datum` database, create the application user, and push the +# connection string into Secret Manager `db-url`. +# +# Provisioning takes 10–15 minutes. The script blocks until the instance is +# RUNNABLE before creating the database and user. 
+set -euo pipefail +source "$(dirname "$0")/env.sh" + +VPC_SELF_LINK="projects/${PROJECT_ID}/global/networks/${VPC_NAME}" + +# ── Instance ─────────────────────────────────────────────────────────────── +ensure \ + "gcloud sql instances describe $SQL_INSTANCE --project=$PROJECT_ID" \ + "gcloud sql instances create $SQL_INSTANCE \ + --project=$PROJECT_ID \ + --database-version=$SQL_VERSION \ + --tier=$SQL_TIER \ + --region=$REGION \ + --network=$VPC_SELF_LINK \ + --no-assign-ip \ + --storage-size=$SQL_STORAGE_GB \ + --storage-type=SSD \ + --backup-start-time=03:00 \ + --maintenance-window-day=SUN \ + --maintenance-window-hour=04" \ + "Cloud SQL instance $SQL_INSTANCE" + +# Wait for the instance to reach RUNNABLE before doing anything else. +say "waiting for $SQL_INSTANCE to become RUNNABLE (may take several minutes)" +until [[ "$(gcloud sql instances describe "$SQL_INSTANCE" \ + --project="$PROJECT_ID" --format='value(state)')" == "RUNNABLE" ]]; do + printf '.'; sleep 15 +done +echo +ok "$SQL_INSTANCE is RUNNABLE" + +# ── Database ─────────────────────────────────────────────────────────────── +ensure \ + "gcloud sql databases describe $SQL_DATABASE \ + --instance=$SQL_INSTANCE --project=$PROJECT_ID" \ + "gcloud sql databases create $SQL_DATABASE \ + --instance=$SQL_INSTANCE \ + --project=$PROJECT_ID" \ + "database $SQL_DATABASE" + +# ── User ─────────────────────────────────────────────────────────────────── +if gcloud sql users list --instance="$SQL_INSTANCE" --project="$PROJECT_ID" \ + --format='value(name)' | grep -qx "$SQL_USER"; then + skip "user $SQL_USER" + SQL_PASSWORD="" # unknown; existing password not retrievable +else + say "creating user $SQL_USER with generated password" + SQL_PASSWORD="$(openssl rand -base64 24 | tr -d '=+/' | head -c 32)" + gcloud sql users create "$SQL_USER" \ + --instance="$SQL_INSTANCE" \ + --project="$PROJECT_ID" \ + --password="$SQL_PASSWORD" >/dev/null + ok "user $SQL_USER" +fi + +# ── Store connection string in Secret Manager ────────────────────────────── +# Only push a new version when we know the password (i.e. just created the +# user). Re-runs on an existing user leave the existing secret in place. +if [[ -n "${SQL_PASSWORD:-}" ]]; then + PRIVATE_IP="$(gcloud sql instances describe "$SQL_INSTANCE" \ + --project="$PROJECT_ID" \ + --format='value(ipAddresses[0].ipAddress)')" + DB_URL="postgresql://${SQL_USER}:${SQL_PASSWORD}@${PRIVATE_IP}:5432/${SQL_DATABASE}" + echo -n "$DB_URL" | gcloud secrets versions add db-url \ + --project="$PROJECT_ID" \ + --data-file=- >/dev/null + ok "db-url secret populated with new connection string" +else + skip "db-url secret (user pre-existed; secret left as-is)" +fi + +ok "Cloud SQL ready" diff --git a/scripts/gcp/07-vms.sh b/scripts/gcp/07-vms.sh new file mode 100755 index 0000000..709db24 --- /dev/null +++ b/scripts/gcp/07-vms.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# 07-vms.sh — create datum-runtime (e2-micro, always-on) and datum-dev +# (e2-standard-2, scheduled start/stop in a later phase). +# Both are Ubuntu 24.04 LTS, no public IP, IAP-only SSH, attached to the +# custom subnet with their own service accounts. 
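+#
+# Post-create sanity check: EXTERNAL_IP should be empty for both VMs.
+# (Sketch; assumes stock gcloud output columns.)
+#   gcloud compute instances list --project="$PROJECT_ID"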
+set -euo pipefail +source "$(dirname "$0")/env.sh" + +RUNTIME_EMAIL="${RUNTIME_SA}@${PROJECT_ID}.iam.gserviceaccount.com" +DEV_EMAIL="${DEV_SA}@${PROJECT_ID}.iam.gserviceaccount.com" + +# ── datum-runtime ────────────────────────────────────────────────────────── +ensure \ + "gcloud compute instances describe $RUNTIME_VM \ + --zone=$ZONE --project=$PROJECT_ID" \ + "gcloud compute instances create $RUNTIME_VM \ + --project=$PROJECT_ID \ + --zone=$ZONE \ + --machine-type=$RUNTIME_MACHINE_TYPE \ + --image-family=$VM_IMAGE_FAMILY \ + --image-project=$VM_IMAGE_PROJECT \ + --subnet=$SUBNET_NAME \ + --no-address \ + --service-account=$RUNTIME_EMAIL \ + --scopes=cloud-platform \ + --tags=ssh-access \ + --boot-disk-size=30GB \ + --boot-disk-type=pd-standard \ + --metadata=enable-oslogin=TRUE" \ + "VM $RUNTIME_VM" + +# ── datum-dev ────────────────────────────────────────────────────────────── +ensure \ + "gcloud compute instances describe $DEV_VM \ + --zone=$ZONE --project=$PROJECT_ID" \ + "gcloud compute instances create $DEV_VM \ + --project=$PROJECT_ID \ + --zone=$ZONE \ + --machine-type=$DEV_MACHINE_TYPE \ + --image-family=$VM_IMAGE_FAMILY \ + --image-project=$VM_IMAGE_PROJECT \ + --subnet=$SUBNET_NAME \ + --no-address \ + --service-account=$DEV_EMAIL \ + --scopes=cloud-platform \ + --tags=ssh-access,dev-schedule \ + --boot-disk-size=50GB \ + --boot-disk-type=pd-balanced \ + --metadata=enable-oslogin=TRUE" \ + "VM $DEV_VM" + +ok "VMs ready" +echo +echo "SSH via IAP:" +echo " gcloud compute ssh $RUNTIME_VM --zone=$ZONE --project=$PROJECT_ID --tunnel-through-iap" +echo " gcloud compute ssh $DEV_VM --zone=$ZONE --project=$PROJECT_ID --tunnel-through-iap" diff --git a/scripts/gcp/08-scheduler.sh b/scripts/gcp/08-scheduler.sh new file mode 100755 index 0000000..97db51f --- /dev/null +++ b/scripts/gcp/08-scheduler.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +# 08-scheduler.sh — Cloud Scheduler jobs to start/stop datum-dev on a +# weekday-only schedule (07:00 / 19:00 America/Chicago, Mon–Fri). +# +# Why: datum-dev is an e2-standard-2 — on at all times it's ~$50/mo. +# Running it only during weekday business hours cuts that to ~$15/mo +# and forces us to keep VM setup scripted (state rots if we leave it +# on for weeks). +# +# Mechanism: two HTTP-target Cloud Scheduler jobs hitting the Compute +# Engine REST API (.../instances//start and .../stop), authenticating +# with OAuth using the runtime service account. +# +# IAM: the runtime SA is granted roles/compute.instanceAdmin.v1 scoped +# to the datum-dev instance only (not project-level), so it can still +# only touch that one VM. +set -euo pipefail +source "$(dirname "$0")/env.sh" + +RUNTIME_EMAIL="${RUNTIME_SA}@${PROJECT_ID}.iam.gserviceaccount.com" +SCHEDULE_TZ="America/Chicago" +START_JOB="datum-dev-start" +STOP_JOB="datum-dev-stop" +START_SCHEDULE="0 7 * * 1-5" +STOP_SCHEDULE="0 19 * * 1-5" +DEV_VM_URI="https://compute.googleapis.com/compute/v1/projects/${PROJECT_ID}/zones/${ZONE}/instances/${DEV_VM}" + +# ── Instance-level IAM for the runtime SA ────────────────────────────────── +# add-iam-policy-binding is idempotent — re-running is a no-op if the +# binding already exists. 
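+# To inspect the resulting instance policy afterwards (assumes default
+# YAML output):
+#   gcloud compute instances get-iam-policy "$DEV_VM" \
+#     --zone="$ZONE" --project="$PROJECT_ID"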
+say "granting $RUNTIME_SA start/stop on instance $DEV_VM" +gcloud compute instances add-iam-policy-binding "$DEV_VM" \ + --project="$PROJECT_ID" \ + --zone="$ZONE" \ + --member="serviceAccount:$RUNTIME_EMAIL" \ + --role="roles/compute.instanceAdmin.v1" \ + --condition=None \ + --quiet >/dev/null +ok "instance IAM binding applied" + +# ── Start job (weekdays 07:00 America/Chicago) ───────────────────────────── +ensure \ + "gcloud scheduler jobs describe $START_JOB --location=$REGION --project=$PROJECT_ID" \ + "gcloud scheduler jobs create http $START_JOB \ + --project=$PROJECT_ID \ + --location=$REGION \ + --schedule='$START_SCHEDULE' \ + --time-zone='$SCHEDULE_TZ' \ + --uri='${DEV_VM_URI}/start' \ + --http-method=POST \ + --oauth-service-account-email=$RUNTIME_EMAIL \ + --description='Start $DEV_VM weekday mornings (07:00 CT)'" \ + "scheduler job $START_JOB" + +# ── Stop job (weekdays 19:00 America/Chicago) ────────────────────────────── +ensure \ + "gcloud scheduler jobs describe $STOP_JOB --location=$REGION --project=$PROJECT_ID" \ + "gcloud scheduler jobs create http $STOP_JOB \ + --project=$PROJECT_ID \ + --location=$REGION \ + --schedule='$STOP_SCHEDULE' \ + --time-zone='$SCHEDULE_TZ' \ + --uri='${DEV_VM_URI}/stop' \ + --http-method=POST \ + --oauth-service-account-email=$RUNTIME_EMAIL \ + --description='Stop $DEV_VM weekday evenings (19:00 CT)'" \ + "scheduler job $STOP_JOB" + +ok "scheduler jobs ready" +echo +echo "Manual trigger (useful for testing or a one-off late night):" +echo " gcloud scheduler jobs run $START_JOB --location=$REGION --project=$PROJECT_ID" +echo " gcloud scheduler jobs run $STOP_JOB --location=$REGION --project=$PROJECT_ID" diff --git a/scripts/gcp/10-populate-secrets.sh b/scripts/gcp/10-populate-secrets.sh new file mode 100755 index 0000000..3e4ef30 --- /dev/null +++ b/scripts/gcp/10-populate-secrets.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash +# 10-populate-secrets.sh — interactively populate Secret Manager slots +# created by 05-secrets.sh. +# +# Run this from a machine that has gcloud auth'd against the target project +# (Shane's Legion laptop as of 2026-04-17). Prompts for each empty slot. +# Skips slots that already have a version unless --force is passed. +# Always skips aps-refresh-token (rotated by the runtime SA, not populated +# manually). +# +# Designed for partial success: a failure on one secret does not abort the +# rest. A summary prints at the end. + +set -uo pipefail +source "$(dirname "$0")/env.sh" + +FORCE=0 +for arg in "$@"; do + case "$arg" in + --force|-f) FORCE=1 ;; + -h|--help) + cat </dev/null | grep -q . +} + +added=() +skipped=() +failed=() + +say "populating secrets in project ${PROJECT_ID} (force=${FORCE})" +echo + +for SECRET in "${SECRETS[@]}"; do + if is_runtime_rotated "$SECRET"; then + skip "$SECRET (runtime-rotated, not populated here)" + skipped+=("$SECRET") + continue + fi + + if has_version "$SECRET" && [[ $FORCE -eq 0 ]]; then + skip "$SECRET (already has a version — pass --force to overwrite)" + skipped+=("$SECRET") + continue + fi + + printf "\033[1;36m==> value for %s (empty to skip): \033[0m" "$SECRET" + # -s hides input; -r avoids backslash interpretation. 
+ read -r -s value + echo + + if [[ -z "$value" ]]; then + skip "$SECRET (empty input)" + skipped+=("$SECRET") + continue + fi + + if printf '%s' "$value" | gcloud secrets versions add "$SECRET" \ + --project="$PROJECT_ID" \ + --data-file=- >/dev/null 2>&1; then + ok "$SECRET" + added+=("$SECRET") + else + printf "\033[1;31m ✗ %s (gcloud error — run again or check access)\033[0m\n" "$SECRET" + failed+=("$SECRET") + fi + + unset value +done + +echo +say "summary" +printf " added: %s\n" "${added[*]:-(none)}" +printf " skipped: %s\n" "${skipped[*]:-(none)}" +printf " failed: %s\n" "${failed[*]:-(none)}" + +# Non-zero exit only if something failed. Missing values (skipped) are fine. +[[ ${#failed[@]} -eq 0 ]] || exit 1 diff --git a/scripts/gcp/99-teardown.sh b/scripts/gcp/99-teardown.sh new file mode 100755 index 0000000..7b542e1 --- /dev/null +++ b/scripts/gcp/99-teardown.sh @@ -0,0 +1,95 @@ +#!/usr/bin/env bash +# 99-teardown.sh — delete everything 00-provision.sh created, in reverse +# dependency order. APIs are left enabled (disabling them is cheap to skip +# and risks knock-on effects on other projects). +# +# REQUIRES EXPLICIT CONFIRMATION. Does not run unless you type the project ID. +set -euo pipefail +source "$(dirname "$0")/env.sh" + +echo "This will PERMANENTLY DELETE Datum infrastructure from project: $PROJECT_ID" +echo " - VMs: $RUNTIME_VM, $DEV_VM" +echo " - Cloud SQL: $SQL_INSTANCE (all data + backups destroyed)" +echo " - Secrets: ${SECRETS[*]}" +echo " - Service accounts: $RUNTIME_SA, $DEV_SA" +echo " - VPC: $VPC_NAME (subnets, NAT, router, firewall, PSC)" +echo +read -r -p "Type the project ID to confirm: " CONFIRM +[[ "$CONFIRM" == "$PROJECT_ID" ]] || die "Confirmation did not match — aborting" + +RUNTIME_EMAIL="${RUNTIME_SA}@${PROJECT_ID}.iam.gserviceaccount.com" +DEV_EMAIL="${DEV_SA}@${PROJECT_ID}.iam.gserviceaccount.com" + +# Best-effort deletes: don't bail if a resource is already gone. 
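+# Usage: safe_delete <label> <command...> runs the command and turns any
+# failure into a skip. Hypothetical example:
+#   safe_delete "disk scratch-1" \
+#     gcloud compute disks delete scratch-1 --zone="$ZONE" --project="$PROJECT_ID" --quiet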
+safe_delete() { + local label="$1"; shift + if "$@" &>/dev/null; then + ok "deleted $label" + else + skip "$label (already gone or delete failed harmlessly)" + fi +} + +# ── VMs ──────────────────────────────────────────────────────────────────── +for VM in "$RUNTIME_VM" "$DEV_VM"; do + safe_delete "VM $VM" \ + gcloud compute instances delete "$VM" \ + --zone="$ZONE" --project="$PROJECT_ID" --quiet +done + +# ── Cloud SQL ────────────────────────────────────────────────────────────── +safe_delete "Cloud SQL $SQL_INSTANCE" \ + gcloud sql instances delete "$SQL_INSTANCE" \ + --project="$PROJECT_ID" --quiet + +# ── Secrets ──────────────────────────────────────────────────────────────── +for SECRET in "${SECRETS[@]}"; do + safe_delete "secret $SECRET" \ + gcloud secrets delete "$SECRET" --project="$PROJECT_ID" --quiet +done + +# ── Service accounts ────────────────────────────────────────────────────── +for EMAIL in "$RUNTIME_EMAIL" "$DEV_EMAIL"; do + safe_delete "service account $EMAIL" \ + gcloud iam service-accounts delete "$EMAIL" \ + --project="$PROJECT_ID" --quiet +done + +# ── VPC peering ──────────────────────────────────────────────────────────── +safe_delete "VPC peering with servicenetworking" \ + gcloud services vpc-peerings delete \ + --service=servicenetworking.googleapis.com \ + --network="$VPC_NAME" \ + --project="$PROJECT_ID" --quiet + +safe_delete "PSC range $PSC_RANGE_NAME" \ + gcloud compute addresses delete "$PSC_RANGE_NAME" \ + --global --project="$PROJECT_ID" --quiet + +# ── Firewall rules ──────────────────────────────────────────────────────── +for RULE in datum-allow-iap-ssh datum-allow-internal; do + safe_delete "firewall rule $RULE" \ + gcloud compute firewall-rules delete "$RULE" \ + --project="$PROJECT_ID" --quiet +done + +# ── NAT + router ────────────────────────────────────────────────────────── +safe_delete "Cloud NAT $NAT_NAME" \ + gcloud compute routers nats delete "$NAT_NAME" \ + --router="$ROUTER_NAME" --region="$REGION" --project="$PROJECT_ID" --quiet + +safe_delete "Cloud Router $ROUTER_NAME" \ + gcloud compute routers delete "$ROUTER_NAME" \ + --region="$REGION" --project="$PROJECT_ID" --quiet + +# ── Subnet + VPC ────────────────────────────────────────────────────────── +safe_delete "subnet $SUBNET_NAME" \ + gcloud compute networks subnets delete "$SUBNET_NAME" \ + --region="$REGION" --project="$PROJECT_ID" --quiet + +safe_delete "VPC $VPC_NAME" \ + gcloud compute networks delete "$VPC_NAME" \ + --project="$PROJECT_ID" --quiet + +echo +ok "Teardown complete." diff --git a/scripts/gcp/env.sh b/scripts/gcp/env.sh new file mode 100755 index 0000000..1e0e8f5 --- /dev/null +++ b/scripts/gcp/env.sh @@ -0,0 +1,80 @@ +# scripts/gcp/env.sh — configuration for the Datum GCP provisioning scripts. +# Source this before running any 0N-*.sh script, or run via 00-provision.sh +# which sources it automatically. +# +# Edit these values before each run. The two that change between personal +# (dry-run) and Grace (production) accounts are PROJECT_ID and BILLING_ACCOUNT. + +# ── Account / project ────────────────────────────────────────────────────── +: "${PROJECT_ID:?PROJECT_ID must be set (e.g. export PROJECT_ID=datum-dev-shane)}" +# BILLING_ACCOUNT is documented in 00-provision.sh for the one-time billing +# link step. It's optional at script-run time (billing just needs to be +# linked by the time APIs get enabled). 
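+# For reference, the one-time link (assumes Billing Account User on it):
+#   gcloud billing projects link "$PROJECT_ID" --billing-account="$BILLING_ACCOUNT"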
+BILLING_ACCOUNT="${BILLING_ACCOUNT:-}" + +# ── Region / zone ────────────────────────────────────────────────────────── +REGION="${REGION:-us-central1}" +ZONE="${ZONE:-us-central1-a}" + +# ── VPC / networking ─────────────────────────────────────────────────────── +VPC_NAME="datum-vpc" +SUBNET_NAME="datum-${REGION}" +SUBNET_PRIMARY_RANGE="10.10.0.0/20" +SUBNET_SECONDARY_PODS="10.20.0.0/16" +SUBNET_SECONDARY_SVC="10.30.0.0/20" +PSC_RANGE_NAME="datum-psc-range" +PSC_RANGE_START="10.40.0.0" +PSC_RANGE_PREFIX="20" +ROUTER_NAME="datum-router" +NAT_NAME="datum-nat" +IAP_SOURCE_RANGE="35.235.240.0/20" + +# ── Service accounts ─────────────────────────────────────────────────────── +RUNTIME_SA="datum-runtime-sa" +DEV_SA="datum-dev-sa" + +# ── Cloud SQL ────────────────────────────────────────────────────────────── +SQL_INSTANCE="datum-db" +SQL_DATABASE="datum" +SQL_USER="datum_app" +SQL_TIER="db-f1-micro" +SQL_VERSION="POSTGRES_15" +SQL_STORAGE_GB="10" + +# ── VMs ──────────────────────────────────────────────────────────────────── +RUNTIME_VM="datum-runtime" +DEV_VM="datum-dev" +VM_IMAGE_FAMILY="ubuntu-2404-lts-amd64" +VM_IMAGE_PROJECT="ubuntu-os-cloud" +RUNTIME_MACHINE_TYPE="e2-micro" +DEV_MACHINE_TYPE="e2-standard-2" + +# ── Secrets (created as empty slots; values populated later) ─────────────── +SECRETS=( + plex-api-key + plex-api-secret + plex-tenant-id + db-url + aps-client-id + aps-client-secret + aps-refresh-token +) + +# ── Helpers ──────────────────────────────────────────────────────────────── +# Colored status prints. Keep noise low so piped output stays readable. +say() { printf "\033[1;36m==> %s\033[0m\n" "$*"; } +ok() { printf "\033[1;32m ✓ %s\033[0m\n" "$*"; } +skip() { printf "\033[0;33m - %s (exists, skipping)\033[0m\n" "$*"; } +die() { printf "\033[1;31m ✗ %s\033[0m\n" "$*" >&2; exit 1; } + +# Idempotency helper: run $2 if $1 returns non-zero (i.e. resource missing). +ensure() { + local check_cmd="$1"; local create_cmd="$2"; local label="$3" + if eval "$check_cmd" &>/dev/null; then + skip "$label" + else + say "creating $label" + eval "$create_cmd" || die "failed to create $label" + ok "$label" + fi +} diff --git a/scripts/load_sample.py b/scripts/load_sample.py new file mode 100644 index 0000000..ecd687d --- /dev/null +++ b/scripts/load_sample.py @@ -0,0 +1,96 @@ +""" +scripts/load_sample.py +Smoke test — ingest BROTHER SPEEDIO ALUMINUM.json into Supabase +================================================================ +Loads the committed sample Fusion library file into the Datum +``libraries``/``tools``/``cutting_presets`` tables and prints a +row-count report. + +Run from the repo root:: + + py scripts/load_sample.py + +Expected output with a fresh database (28 total entries in the sample, +minus 6 holders + 1 probe = 21 tools, ~25 presets):: + + Library upserted: BROTHER SPEEDIO ALUMINUM → id=... + Tools upserted: 21 + Presets inserted: 25 + +Re-running the script should be idempotent — tool counts stay the +same, presets are flushed and re-inserted. Requires SUPABASE_URL +and SUPABASE_SERVICE_ROLE_KEY in .env.local (or the shell env). +""" +from __future__ import annotations + +import logging +import sys +from pathlib import Path + +# Make the repo root importable when running ``py scripts/load_sample.py``. 
+ROOT = Path(__file__).resolve().parent.parent
+if str(ROOT) not in sys.path:
+    sys.path.insert(0, str(ROOT))
+
+from supabase_client import SupabaseClient, SupabaseConfigError  # noqa: E402
+from sync_supabase import hash_file, sync_library  # noqa: E402
+
+
+SAMPLE_FILE = ROOT / "BROTHER SPEEDIO ALUMINUM.json"
+LIBRARY_NAME = "BROTHER SPEEDIO ALUMINUM"
+
+
+def main() -> int:
+    logging.basicConfig(
+        level=logging.INFO,
+        format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
+        datefmt="%H:%M:%S",
+    )
+
+    if not SAMPLE_FILE.exists():
+        print(f"ERROR: sample file not found at {SAMPLE_FILE}", file=sys.stderr)
+        return 2
+
+    # Load tools from disk without going through the stale-file guard —
+    # the committed sample is always older than 25h, but we still want to
+    # exercise the ingest pipeline against it.
+    import json
+
+    with open(SAMPLE_FILE, "r", encoding="utf-8") as f:
+        raw = json.load(f)
+    tools = raw.get("data", [])
+    print(f"Loaded {len(tools)} entries from {SAMPLE_FILE.name}")
+
+    try:
+        client = SupabaseClient()
+    except SupabaseConfigError as e:
+        print(f"ERROR: {e}", file=sys.stderr)
+        print(
+            "\nAdd these to .env.local:\n"
+            "  SUPABASE_URL=https://<project>.supabase.co\n"
+            "  SUPABASE_SERVICE_ROLE_KEY=<service-role-jwt>\n",
+            file=sys.stderr,
+        )
+        return 3
+
+    file_hash = hash_file(SAMPLE_FILE)
+    print(f"File hash (sha256): {file_hash[:16]}...")
+
+    result = sync_library(
+        LIBRARY_NAME,
+        tools,
+        client=client,
+        file_path=str(SAMPLE_FILE),
+        file_hash=file_hash,
+    )
+
+    print()
+    print("=" * 56)
+    print(f"  Tools upserted:     {result['tools']:4d}")
+    print(f"  Presets inserted:   {result['presets']:4d}")
+    print("=" * 56)
+    return 0
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())
diff --git a/static/css/style.css b/static/css/style.css
new file mode 100644
index 0000000..c61891e
--- /dev/null
+++ b/static/css/style.css
@@ -0,0 +1,590 @@
+/*
+ * plex-api · endpoint tester
+ * Flat, neutral, no gradients, no glass, no glow.
+ * Single blue accent. Semantic color only for status.
+ */ + +:root { + /* surface */ + --bg-0: #0b0b0d; /* base */ + --bg-1: #111115; /* panel */ + --bg-2: #17171c; /* panel hover / input */ + --bg-3: #1d1d23; /* chip */ + --border: #24242b; + --border-strong: #2e2e36; + + /* text */ + --fg-0: #f2f2f3; + --fg-1: #c9c9cf; + --fg-2: #8a8a94; + --fg-3: #55555e; + + /* accents (solid, single hue) */ + --accent: #3b82f6; + --accent-hover: #2563eb; + --accent-fg: #ffffff; + + /* semantic */ + --ok: #22c55e; + --warn: #eab308; + --err: #ef4444; + --info: #38bdf8; + + /* http methods */ + --get: #22c55e; + --post: #eab308; + --put: #38bdf8; + --patch: #a855f7; + --delete: #ef4444; + --internal: #8a8a94; + + /* metrics */ + --rail-w: 280px; + --radius: 4px; + --radius-lg: 6px; + + /* fonts */ + --font-ui: ui-sans-serif, system-ui, -apple-system, "Segoe UI", Roboto, sans-serif; + --font-mono: ui-monospace, SFMono-Regular, "SF Mono", Menlo, Consolas, "Liberation Mono", monospace; +} + +* { box-sizing: border-box; margin: 0; padding: 0; } + +html, body { + height: 100%; +} + +body { + background: var(--bg-0); + color: var(--fg-0); + font-family: var(--font-ui); + font-size: 13px; + line-height: 1.5; + -webkit-font-smoothing: antialiased; + overflow: hidden; +} + +button, input, select, textarea { + font: inherit; + color: inherit; +} + +button { cursor: pointer; } + +/* ───────────────────────────────── + Layout + ───────────────────────────────── */ +.app { + display: grid; + grid-template-columns: var(--rail-w) 1fr; + height: 100vh; +} + +/* ───────────────────────────────── + Left rail + ───────────────────────────────── */ +.rail { + background: var(--bg-1); + border-right: 1px solid var(--border); + display: flex; + flex-direction: column; + overflow: hidden; +} + +.rail-header { + padding: 14px 16px; + display: flex; + align-items: center; + justify-content: space-between; + border-bottom: 1px solid var(--border); + flex-shrink: 0; +} + +.brand { + font-size: 13px; + font-weight: 600; + letter-spacing: -0.01em; + color: var(--fg-0); +} + +.env-chip { + font-family: var(--font-mono); + font-size: 10px; + padding: 3px 7px; + border-radius: var(--radius); + background: var(--bg-3); + color: var(--fg-2); + border: 1px solid var(--border); + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.env-chip.test { color: var(--warn); border-color: rgba(234, 179, 8, 0.3); } +.env-chip.prod { + color: var(--err); + border-color: rgba(239, 68, 68, 0.5); + background: rgba(239, 68, 68, 0.08); + font-weight: 600; +} + +.env-chips { + display: flex; + align-items: center; + gap: 4px; +} + +.writes-chip.hidden { display: none; } +.writes-chip.allowed { + color: var(--err); + border-color: rgba(239, 68, 68, 0.5); + background: rgba(239, 68, 68, 0.08); +} +.writes-chip.blocked { + color: var(--ok); + border-color: rgba(34, 197, 94, 0.4); + background: rgba(34, 197, 94, 0.06); +} + +.rail-section { + padding: 12px 12px 16px; + border-bottom: 1px solid var(--border); +} + +.rail-section.rail-history { + border-bottom: none; + flex: 1; + min-height: 0; + display: flex; + flex-direction: column; +} + +.rail-label { + font-size: 10px; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.08em; + color: var(--fg-2); + padding: 4px 4px 8px; + display: flex; + align-items: center; + justify-content: space-between; +} + +.rail-sub { + display: flex; + gap: 6px; + margin-top: 6px; + padding: 0 2px; +} + +.preset-list { + list-style: none; + display: flex; + flex-direction: column; + gap: 1px; +} + +.preset { + width: 100%; + display: flex; + align-items: 
center; + gap: 8px; + padding: 6px 8px; + background: transparent; + border: 1px solid transparent; + border-radius: var(--radius); + text-align: left; + color: var(--fg-1); + font-family: var(--font-mono); + font-size: 11.5px; + transition: background 0.08s ease, border-color 0.08s ease; +} + +.preset:hover { + background: var(--bg-2); +} + +.preset:focus-visible { + outline: none; + border-color: var(--accent); +} + +.preset .m { + flex-shrink: 0; + font-size: 9.5px; + font-weight: 700; + letter-spacing: 0.02em; + padding: 2px 5px; + border-radius: 3px; + background: var(--bg-3); + color: var(--fg-2); + min-width: 34px; + text-align: center; +} + +.preset .m-get { color: var(--get); } +.preset .m-post { color: var(--post); } +.preset .m-put { color: var(--put); } +.preset .m-patch { color: var(--patch); } +.preset .m-delete { color: var(--delete); } +.preset .m-int { color: var(--internal); } + +.preset .p { + flex: 1; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + color: var(--fg-1); +} + +.preset .tag { + font-size: 9px; + font-weight: 600; + padding: 1px 5px; + border-radius: 3px; + background: rgba(239, 68, 68, 0.1); + color: var(--err); + border: 1px solid rgba(239, 68, 68, 0.2); +} + +/* history list */ +.history-list { + list-style: none; + display: flex; + flex-direction: column; + gap: 1px; + overflow-y: auto; + flex: 1; + min-height: 0; +} + +.history-empty { + padding: 10px; + color: var(--fg-3); + font-size: 11px; + text-align: center; +} + +.history-item { + width: 100%; + display: flex; + align-items: center; + gap: 6px; + padding: 5px 8px; + background: transparent; + border: 1px solid transparent; + border-radius: var(--radius); + font-family: var(--font-mono); + font-size: 11px; + color: var(--fg-1); + text-align: left; +} + +.history-item:hover { background: var(--bg-2); } + +.history-item .h-status { + flex-shrink: 0; + font-weight: 600; + font-size: 10px; + min-width: 28px; +} +.history-item.ok .h-status { color: var(--ok); } +.history-item.warn .h-status { color: var(--warn); } +.history-item.err .h-status { color: var(--err); } + +.history-item .h-path { + flex: 1; + white-space: nowrap; + overflow: hidden; + text-overflow: ellipsis; + color: var(--fg-2); +} + +.history-item .h-time { + color: var(--fg-3); + font-size: 10px; + flex-shrink: 0; +} + +/* ───────────────────────────────── + Main + ───────────────────────────────── */ +.main { + display: flex; + flex-direction: column; + overflow: hidden; + background: var(--bg-0); +} + +/* URL bar */ +.url-bar { + display: flex; + align-items: stretch; + gap: 8px; + padding: 14px 16px 8px; + flex-shrink: 0; +} + +.method-select { + appearance: none; + -webkit-appearance: none; + background: var(--bg-1); + border: 1px solid var(--border); + color: var(--fg-0); + padding: 0 28px 0 12px; + border-radius: var(--radius); + font-family: var(--font-mono); + font-size: 12px; + font-weight: 600; + height: 34px; + background-image: url("data:image/svg+xml;utf8,"); + background-repeat: no-repeat; + background-position: right 10px center; +} + +.method-select:focus { + outline: none; + border-color: var(--accent); +} + +.url-host { + display: flex; + align-items: center; + padding: 0 10px; + background: var(--bg-1); + border: 1px solid var(--border); + border-right: none; + border-radius: var(--radius) 0 0 var(--radius); + font-family: var(--font-mono); + font-size: 12px; + color: var(--fg-2); + white-space: nowrap; + height: 34px; +} + +.path-input { + flex: 1; + background: var(--bg-1); + border: 1px 
solid var(--border); + border-left: none; + border-radius: 0 var(--radius) var(--radius) 0; + padding: 0 12px; + font-family: var(--font-mono); + font-size: 12px; + color: var(--fg-0); + height: 34px; + min-width: 0; +} + +.path-input::placeholder { color: var(--fg-3); } + +.path-input:focus, +.url-host:has(+ .path-input:focus) { + outline: none; + border-color: var(--accent); +} + +.btn-primary { + background: var(--accent); + color: var(--accent-fg); + border: 1px solid var(--accent); + padding: 0 18px; + border-radius: var(--radius); + font-size: 12px; + font-weight: 600; + height: 34px; + white-space: nowrap; + transition: background 0.1s ease; +} + +.btn-primary:hover { background: var(--accent-hover); border-color: var(--accent-hover); } +.btn-primary:active { transform: none; } +.btn-primary:disabled { opacity: 0.5; cursor: not-allowed; } + +/* Params row */ +.params-row { + display: flex; + align-items: center; + gap: 8px; + padding: 0 16px 12px; + flex-shrink: 0; +} + +.params-label { + font-family: var(--font-mono); + font-size: 10px; + font-weight: 600; + letter-spacing: 0.06em; + color: var(--fg-2); + text-transform: uppercase; + padding-left: 4px; + min-width: 48px; +} + +.params-input { + flex: 1; + background: var(--bg-1); + border: 1px solid var(--border); + border-radius: var(--radius); + padding: 0 12px; + font-family: var(--font-mono); + font-size: 12px; + color: var(--fg-0); + height: 30px; +} + +.params-input::placeholder { color: var(--fg-3); } + +.params-input:focus { + outline: none; + border-color: var(--accent); +} + +/* Status strip */ +.status-strip { + padding: 10px 16px; + border-top: 1px solid var(--border); + border-bottom: 1px solid var(--border); + background: var(--bg-1); + display: flex; + align-items: center; + gap: 14px; + font-family: var(--font-mono); + font-size: 11px; + min-height: 38px; + flex-shrink: 0; +} + +.ss-idle { color: var(--fg-3); } +.ss-loading { color: var(--fg-2); } + +.ss-item { + display: flex; + align-items: center; + gap: 5px; + color: var(--fg-2); +} + +.ss-item .k { + color: var(--fg-3); + font-size: 10px; + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.ss-item .v { color: var(--fg-1); font-weight: 600; } + +.ss-status { + font-weight: 700; + padding: 2px 8px; + border-radius: var(--radius); + font-size: 11px; +} + +.ss-status.ok { color: var(--ok); background: rgba(34, 197, 94, 0.1); } +.ss-status.warn { color: var(--warn); background: rgba(234, 179, 8, 0.1); } +.ss-status.err { color: var(--err); background: rgba(239, 68, 68, 0.1); } +.ss-status.info { color: var(--info); background: rgba(56, 189, 248, 0.1); } + +/* Response tabs */ +.resp-tabs { + display: flex; + align-items: center; + gap: 2px; + padding: 0 12px; + background: var(--bg-1); + border-bottom: 1px solid var(--border); + flex-shrink: 0; +} + +.tab { + background: transparent; + border: none; + border-bottom: 2px solid transparent; + padding: 9px 12px; + font-size: 11.5px; + font-weight: 500; + color: var(--fg-2); + margin-bottom: -1px; +} + +.tab:hover { color: var(--fg-0); } +.tab.active { color: var(--fg-0); border-bottom-color: var(--accent); } + +.tab-spacer { flex: 1; } + +/* Response body */ +.resp-body { + flex: 1; + overflow: auto; + background: var(--bg-0); + min-height: 0; +} + +.resp-pre { + padding: 14px 16px; + font-family: var(--font-mono); + font-size: 12px; + line-height: 1.55; + color: var(--fg-1); + white-space: pre; + tab-size: 2; +} + +.resp-pre.empty { color: var(--fg-3); } + +/* JSON syntax coloring (set by JS via classed 
spans) */ +.json-key { color: var(--accent); } +.json-str { color: var(--ok); } +.json-num { color: var(--warn); } +.json-bool { color: var(--patch); } +.json-null { color: var(--err); } + +/* ───────────────────────────────── + Generic buttons + ───────────────────────────────── */ +.btn-ghost { + background: transparent; + border: 1px solid var(--border); + color: var(--fg-2); + padding: 4px 10px; + border-radius: var(--radius); + font-size: 11px; + transition: background 0.08s, color 0.08s, border-color 0.08s; +} + +.btn-ghost:hover { + color: var(--fg-0); + background: var(--bg-2); + border-color: var(--border-strong); +} + +.btn-xs { + padding: 2px 8px; + font-size: 10px; +} + +/* ───────────────────────────────── + Scrollbars (minimal) + ───────────────────────────────── */ +::-webkit-scrollbar { + width: 10px; + height: 10px; +} +::-webkit-scrollbar-track { background: transparent; } +::-webkit-scrollbar-thumb { + background: var(--border); + border: 2px solid var(--bg-0); + border-radius: 10px; +} +::-webkit-scrollbar-thumb:hover { background: var(--border-strong); } + +/* ───────────────────────────────── + Focus + ───────────────────────────────── */ +:focus-visible { + outline: 2px solid var(--accent); + outline-offset: 1px; +} + +button:focus:not(:focus-visible) { outline: none; } diff --git a/static/js/script.js b/static/js/script.js new file mode 100644 index 0000000..6016ccc --- /dev/null +++ b/static/js/script.js @@ -0,0 +1,485 @@ +/* + * plex-api · endpoint tester + * Minimal, no framework. Vanilla DOM. + */ +(() => { + "use strict"; + + // ── DOM ───────────────────────────────────────── + const $ = (sel) => document.querySelector(sel); + const $$ = (sel) => document.querySelectorAll(sel); + + const methodEl = $("#method"); + const pathEl = $("#path-input"); + const paramsEl = $("#params-input"); + const urlHostEl = $("#url-host"); + const sendBtn = $("#btn-send"); + const envChipEl = $("#env-chip"); + const writesChipEl = $("#writes-chip"); + + const statusStripEl = $("#status-strip"); + const respPre = $("#resp-pre"); + const tabsEl = $$(".tab"); + const copyBtn = $("#btn-copy"); + const clearBtn = $("#btn-clear"); + const clearHistBtn = $("#btn-clear-history"); + const historyListEl = $("#history-list"); + + const btnPickFiles = $("#btn-pick-files"); + const btnPickDir = $("#btn-pick-dir"); + const fileInput = $("#fusion-file-input"); + const dirInput = $("#fusion-dir-input"); + + // ── State ─────────────────────────────────────── + const state = { + activeTab: "body", + lastResponse: null, // { body, headers, raw, http_status, elapsed_ms, size_bytes, method, url } + history: [], + maxHistory: 20, + }; + + // ── Boot ──────────────────────────────────────── + loadConfig(); + wireEvents(); + renderHistory(); + + async function loadConfig() { + try { + const r = await fetch("/api/config"); + const cfg = await r.json(); + urlHostEl.textContent = `${cfg.base_url}/`; + + // Environment chip + envChipEl.textContent = cfg.environment === "production" ? "PROD" : "TEST"; + envChipEl.classList.remove("test", "prod"); + envChipEl.classList.add(cfg.is_production ? "prod" : "test"); + envChipEl.title = + `Tenant ${cfg.tenant_id || "(default)"} · ` + + `key:${cfg.has_key ? "✓" : "✗"} ` + + `secret:${cfg.has_secret ? 
"✓" : "✗"}`; + + // Writes chip — only meaningful in production + if (cfg.is_production) { + writesChipEl.classList.remove("hidden"); + if (cfg.writes_allowed) { + writesChipEl.textContent = "WRITES ON"; + writesChipEl.classList.remove("blocked"); + writesChipEl.classList.add("allowed"); + writesChipEl.title = + "PLEX_ALLOW_WRITES is set. POST/PUT/PATCH/DELETE to " + + "production are ENABLED. Every mutating call hits real " + + "Grace Engineering production data."; + } else { + writesChipEl.textContent = "READ ONLY"; + writesChipEl.classList.remove("allowed"); + writesChipEl.classList.add("blocked"); + writesChipEl.title = + "Production write guard active. POST/PUT/PATCH/DELETE " + + "to production are blocked at the proxy. To enable, set " + + "PLEX_ALLOW_WRITES=1 in the environment and restart."; + } + } else { + writesChipEl.classList.add("hidden"); + } + } catch (e) { + envChipEl.textContent = "offline"; + } + } + + function wireEvents() { + // Send button + sendBtn.addEventListener("click", send); + + // Ctrl/Cmd+Enter to send + document.addEventListener("keydown", (e) => { + if ((e.ctrlKey || e.metaKey) && e.key === "Enter") { + e.preventDefault(); + send(); + } + }); + + // Presets + $$(".preset").forEach((btn) => { + btn.addEventListener("click", () => { + const internal = btn.getAttribute("data-internal"); + if (internal) { + runInternal(internal, btn.querySelector(".p")?.textContent || internal); + return; + } + const m = btn.getAttribute("data-method") || "GET"; + const p = btn.getAttribute("data-path") || ""; + methodEl.value = m; + pathEl.value = p; + pathEl.focus(); + }); + }); + + // Tabs + tabsEl.forEach((tab) => { + tab.addEventListener("click", () => { + tabsEl.forEach((t) => t.classList.remove("active")); + tab.classList.add("active"); + state.activeTab = tab.getAttribute("data-tab"); + renderResponseTab(); + }); + }); + + copyBtn.addEventListener("click", copyResponse); + clearBtn.addEventListener("click", clearResponse); + clearHistBtn.addEventListener("click", () => { + state.history = []; + renderHistory(); + }); + + // Fusion local uploads + if (btnPickFiles && fileInput) { + btnPickFiles.addEventListener("click", () => fileInput.click()); + fileInput.addEventListener("change", handleFileSelect); + } + if (btnPickDir && dirInput) { + btnPickDir.addEventListener("click", () => dirInput.click()); + dirInput.addEventListener("change", handleFileSelect); + } + } + + // ── Core: send via proxy ──────────────────────── + async function send() { + const path = pathEl.value.trim().replace(/^\/+/, ""); + const method = methodEl.value; + if (!path) { + setStatusStrip({ error: "Missing path" }); + pathEl.focus(); + return; + } + + const qs = new URLSearchParams(); + qs.set("path", path); + + if (paramsEl.value.trim()) { + const extra = parseParams(paramsEl.value.trim()); + for (const [k, v] of extra) qs.append(k, v); + } + + const url = `/api/plex/raw?${qs.toString()}`; + + setLoading(true, `${method} ${path}`); + const started = performance.now(); + try { + const r = await fetch(url, { method }); + const data = await r.json(); + const elapsed = Math.round(performance.now() - started); + + const resp = { + method, + path, + http_status: data.http_status ?? 0, + http_reason: data.http_reason || "", + elapsed_ms: data.elapsed_ms ?? elapsed, + size_bytes: data.size_bytes ?? 0, + url: data.url || "", + headers: data.headers || {}, + body: data.body ?? 
data,
+        raw: data,
+      };
+      state.lastResponse = resp;
+      setStatusStripFromResponse(resp);
+      renderResponseTab();
+      pushHistory(resp);
+    } catch (err) {
+      state.lastResponse = {
+        error: err.message,
+        raw: { error: err.message },
+        headers: {},
+        body: null,
+      };
+      setStatusStrip({ error: err.message });
+      respPre.textContent = `// fetch failed\n${err.message}`;
+    } finally {
+      setLoading(false);
+    }
+  }
+
+  // ── Internal (non-proxy) endpoints ──────────────
+  async function runInternal(endpoint, label) {
+    setLoading(true, `RUN ${label}`);
+    const started = performance.now();
+    try {
+      const r = await fetch(endpoint);
+      const data = await r.json();
+      const elapsed = Math.round(performance.now() - started);
+      const text = JSON.stringify(data, null, 2);
+
+      const resp = {
+        method: "RUN",
+        path: endpoint,
+        http_status: r.status,
+        http_reason: r.statusText,
+        elapsed_ms: elapsed,
+        size_bytes: new Blob([text]).size,
+        url: endpoint,
+        headers: Object.fromEntries(r.headers.entries()),
+        body: data,
+        raw: data,
+      };
+      state.lastResponse = resp;
+      setStatusStripFromResponse(resp);
+      renderResponseTab();
+      pushHistory(resp);
+    } catch (err) {
+      setStatusStrip({ error: err.message });
+      respPre.textContent = `// fetch failed\n${err.message}`;
+    } finally {
+      setLoading(false);
+    }
+  }
+
+  // ── Fusion file upload ──────────────────────────
+  async function handleFileSelect(e) {
+    const files = e.target.files;
+    if (!files || files.length === 0) return;
+
+    const fd = new FormData();
+    let added = 0;
+    for (let i = 0; i < files.length; i++) {
+      if (files[i].name.toLowerCase().endsWith(".json")) {
+        fd.append(`file_${i}`, files[i]);
+        added++;
+      }
+    }
+    if (added === 0) {
+      setStatusStrip({ error: "No .json files in selection" });
+      return;
+    }
+
+    setLoading(true, `UPLOAD ${added} file${added === 1 ? "" : "s"}`);
+    const started = performance.now();
+    try {
+      const r = await fetch("/api/fusion/tools", { method: "POST", body: fd });
+      const data = await r.json();
+      const elapsed = Math.round(performance.now() - started);
+      const text = JSON.stringify(data, null, 2);
+
+      const resp = {
+        method: "POST",
+        path: "/api/fusion/tools",
+        http_status: r.status,
+        http_reason: r.statusText,
+        elapsed_ms: elapsed,
+        size_bytes: new Blob([text]).size,
+        url: "/api/fusion/tools",
+        headers: Object.fromEntries(r.headers.entries()),
+        body: data,
+        raw: data,
+      };
+      state.lastResponse = resp;
+      setStatusStripFromResponse(resp);
+      renderResponseTab();
+      pushHistory(resp);
+    } catch (err) {
+      setStatusStrip({ error: err.message });
+      respPre.textContent = `// upload failed\n${err.message}`;
+    } finally {
+      setLoading(false);
+      e.target.value = "";
+    }
+  }
+
+  // ── Status strip ────────────────────────────────
+  function setStatusStrip({ error } = {}) {
+    if (error) {
+      statusStripEl.innerHTML =
+        `<span class="ss-status err">ERROR</span>` +
+        `<span class="ss-item">${escapeHtml(error)}</span>`;
+      return;
+    }
+    statusStripEl.innerHTML = `<span class="ss-idle">Ready · Ctrl+Enter to send</span>`;
+  }
+
+  function setStatusStripFromResponse(r) {
+    const status = r.http_status;
+    let cls = "info";
+    if (status >= 200 && status < 300) cls = "ok";
+    else if (status >= 300 && status < 400) cls = "warn";
+    else if (status >= 400) cls = "err";
+    else if (status === 0) cls = "err";
+
+    const label = status ? `${status} ${r.http_reason || ""}`.trim() : "NO RESP";
+
+    statusStripEl.innerHTML = `
+      <span class="ss-status ${cls}">${escapeHtml(label)}</span>
+      <span class="ss-item"><span class="k">time</span><span class="v">${r.elapsed_ms}ms</span></span>
+      <span class="ss-item"><span class="k">size</span><span class="v">${formatBytes(r.size_bytes)}</span></span>
+      <span class="ss-item"><span class="k">${r.method}</span><span class="v">${escapeHtml(r.path)}</span></span>
+    `;
+  }
+
+  function setLoading(isLoading, label) {
+    sendBtn.disabled = isLoading;
+    if (isLoading) {
+      statusStripEl.innerHTML = `<span class="ss-loading">… ${escapeHtml(label || "sending")}</span>`;
+      respPre.classList.add("empty");
+      respPre.textContent = "// waiting for response";
+    }
+  }
+
+  // ── Response rendering ──────────────────────────
+  function renderResponseTab() {
+    const r = state.lastResponse;
+    if (!r) {
+      respPre.classList.add("empty");
+      respPre.textContent = "// Response will appear here";
+      return;
+    }
+    respPre.classList.remove("empty");
+
+    if (state.activeTab === "headers") {
+      const lines = Object.entries(r.headers || {})
+        .map(([k, v]) => `${k}: ${v}`)
+        .join("\n");
+      respPre.textContent = lines || "// no headers";
+      return;
+    }
+
+    if (state.activeTab === "raw") {
+      respPre.textContent = JSON.stringify(r.raw, null, 2);
+      return;
+    }
+
+    // body tab — try to render just the body nicely
+    const body = r.body;
+    if (body == null) {
+      respPre.textContent = "// empty body";
+      return;
+    }
+    if (typeof body === "string") {
+      respPre.textContent = body;
+      return;
+    }
+    try {
+      respPre.innerHTML = syntaxHighlight(JSON.stringify(body, null, 2));
+    } catch {
+      respPre.textContent = String(body);
+    }
+  }
+
+  function syntaxHighlight(json) {
+    const esc = escapeHtml(json);
+    return esc.replace(
+      /(&quot;(\\.|[^&])*?&quot;)(\s*:)?|\b(true|false|null)\b|-?\d+(?:\.\d+)?(?:[eE][+-]?\d+)?/g,
+      (match, strMatch, _c, colon) => {
+        if (strMatch !== undefined) {
+          return colon
+            ? `<span class="json-key">${strMatch}</span>${colon}`
+            : `<span class="json-str">${strMatch}</span>`;
+        }
+        if (match === "true" || match === "false") return `<span class="json-bool">${match}</span>`;
+        if (match === "null") return `<span class="json-null">${match}</span>`;
+        return `<span class="json-num">${match}</span>`;
+      }
+    );
+  }
+
+  // ── Copy / clear ────────────────────────────────
+  async function copyResponse() {
+    const txt = respPre.textContent || "";
+    try {
+      await navigator.clipboard.writeText(txt);
+      flashBtn(copyBtn, "Copied");
+    } catch {
+      flashBtn(copyBtn, "Fail");
+    }
+  }
+
+  function clearResponse() {
+    state.lastResponse = null;
+    respPre.classList.add("empty");
+    respPre.textContent = "// Response will appear here";
+    setStatusStrip();
+  }
+
+  function flashBtn(btn, text) {
+    const prev = btn.textContent;
+    btn.textContent = text;
+    setTimeout(() => (btn.textContent = prev), 900);
+  }
+
+  // ── History ─────────────────────────────────────
+  function pushHistory(r) {
+    const item = {
+      method: r.method,
+      path: r.path,
+      http_status: r.http_status,
+      elapsed_ms: r.elapsed_ms,
+      ts: Date.now(),
+      snapshot: r,
+    };
+    state.history.unshift(item);
+    state.history = state.history.slice(0, state.maxHistory);
+    renderHistory();
+  }
+
+  function renderHistory() {
+    historyListEl.innerHTML = "";
+    if (state.history.length === 0) {
+      const li = document.createElement("li");
+      li.className = "history-empty";
+      li.textContent = "No requests yet";
+      historyListEl.appendChild(li);
+      return;
+    }
+    state.history.forEach((item, idx) => {
+      const li = document.createElement("li");
+      const btn = document.createElement("button");
+      let cls = "history-item";
+      if (item.http_status >= 200 && item.http_status < 300) cls += " ok";
+      else if (item.http_status >= 300 && item.http_status < 400) cls += " warn";
+      else cls += " err";
+      btn.className = cls;
+      btn.innerHTML = `
+        <span class="h-status">${item.http_status || "—"}</span>
+        <span class="h-path">${escapeHtml(item.path)}</span>
+        <span class="h-time">${item.elapsed_ms}ms</span>
+      `;
+      btn.addEventListener("click", () => {
+        state.lastResponse = item.snapshot;
+        setStatusStripFromResponse(item.snapshot);
+        renderResponseTab();
+      });
+      li.appendChild(btn);
+      historyListEl.appendChild(li);
+    });
+  }
+
+  // ── Helpers ─────────────────────────────────────
+  function parseParams(s) {
+    // Accept "k=v&k2=v2" or one-per-line
+    const out = [];
+    const chunks = s.split(/[&\n]/);
+    for (const chunk of chunks) {
+      const t = chunk.trim();
+      if (!t) continue;
+      const i = t.indexOf("=");
+      if (i === -1) out.push([t, ""]);
+      else out.push([t.slice(0, i).trim(), t.slice(i + 1).trim()]);
+    }
+    return out;
+  }
+
+  function escapeHtml(s) {
+    return String(s)
+      .replace(/&/g, "&amp;")
+      .replace(/</g, "&lt;")
+      .replace(/>/g, "&gt;")
+      .replace(/"/g, "&quot;")
+      .replace(/'/g, "&#39;");
+  }
+
+  function formatBytes(n) {
+    if (!n) return "0 B";
+    const units = ["B", "KB", "MB", "GB"];
+    let i = 0;
+    while (n >= 1024 && i < units.length - 1) {
+      n /= 1024;
+      i++;
+    }
+    return `${n.toFixed(n >= 10 || i === 0 ? 0 : 1)} ${units[i]}`;
+  }
+})();
diff --git a/supabase_client.py b/supabase_client.py
new file mode 100644
index 0000000..41c377a
--- /dev/null
+++ b/supabase_client.py
@@ -0,0 +1,269 @@
+"""
+supabase_client.py
+Thin Supabase REST client for the Datum ingest layer
+Grace Engineering — Datum project
+=============================================================
+Minimal PostgREST wrapper that talks to Supabase over plain HTTP.
+Deliberately avoids the `supabase-py` SDK because its transitive
+dependency tree (pyiceberg etc.) requires MSVC on Windows + Python
+3.14 and is overkill for the three tables we touch.
+
+Why a hand-rolled client
+------------------------
+- Same HTTP pattern as ``plex_api.py`` — one library to understand
+- No compiled deps, installs cleanly on any platform
+- Easy to stub in tests (patch ``requests.Session``)
+- We only need five verbs: select, insert, upsert, update, delete (no RPC)
+
+Credentials come from environment variables loaded via ``bootstrap.py``:
+
+    SUPABASE_URL               e.g. ``https://uhmpkprcxrajbtkvqmwg.supabase.co``
+    SUPABASE_SERVICE_ROLE_KEY  service role JWT (bypasses RLS — server-side only)
+
+**Never ship the service role key to a browser.** It is intended for
+back-end ingest scripts and should never leave the server.
+"""
+from __future__ import annotations
+
+import json
+import os
+from typing import Any, Iterable, Mapping
+
+import requests
+
+import bootstrap  # noqa: F401 — loads .env.local into os.environ on import
+
+DEFAULT_TIMEOUT = 30  # seconds
+
+
+class SupabaseConfigError(RuntimeError):
+    """Raised when SUPABASE_URL or SUPABASE_SERVICE_ROLE_KEY is missing."""
+
+
+class SupabaseHTTPError(RuntimeError):
+    """Raised when PostgREST returns a non-2xx response."""
+
+    def __init__(self, status: int, body: Any, url: str):
+        self.status = status
+        self.body = body
+        self.url = url
+        super().__init__(f"Supabase {status} on {url}: {body}")
+
+
+class SupabaseClient:
+    """
+    Minimal PostgREST client.
+
+    Parameters
+    ----------
+    url : str | None
+        Supabase project URL, e.g. ``https://<project>.supabase.co``. Defaults
+        to the ``SUPABASE_URL`` env var.
+    service_role_key : str | None
+        Service role JWT. Defaults to ``SUPABASE_SERVICE_ROLE_KEY``.
+    timeout : int
+        Per-request timeout in seconds. Defaults to 30.
+
+    The service role key bypasses RLS. Do not pass it to the browser.
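+
+    Example (hypothetical table and filter)::
+
+        client = SupabaseClient()
+        rows = client.select("tools", filters={"library_id": "eq.abc-123"}, limit=5)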
+    """
+
+    def __init__(
+        self,
+        url: str | None = None,
+        service_role_key: str | None = None,
+        timeout: int = DEFAULT_TIMEOUT,
+    ):
+        self.url = (url or os.environ.get("SUPABASE_URL", "")).rstrip("/")
+        self.key = service_role_key or os.environ.get("SUPABASE_SERVICE_ROLE_KEY", "")
+        self.timeout = timeout
+
+        if not self.url:
+            raise SupabaseConfigError(
+                "SUPABASE_URL is not set. Add it to .env.local or the shell env."
+            )
+        if not self.key:
+            raise SupabaseConfigError(
+                "SUPABASE_SERVICE_ROLE_KEY is not set. Add it to .env.local "
+                "(the service role key is server-side only — never ship it "
+                "to the browser)."
+            )
+
+        self._session = requests.Session()
+        self._session.headers.update(
+            {
+                "apikey": self.key,
+                "Authorization": f"Bearer {self.key}",
+                "Content-Type": "application/json",
+                "Accept": "application/json",
+            }
+        )
+
+    # ─────────────────────────────────────────────
+    # URL building
+    # ─────────────────────────────────────────────
+    def _table_url(self, table: str) -> str:
+        return f"{self.url}/rest/v1/{table}"
+
+    # ─────────────────────────────────────────────
+    # Response handling
+    # ─────────────────────────────────────────────
+    def _handle(self, response: requests.Response) -> Any:
+        if not response.ok:
+            try:
+                body = response.json()
+            except ValueError:
+                body = response.text
+            raise SupabaseHTTPError(response.status_code, body, response.url)
+
+        # 204 No Content (e.g. delete with no return) → empty list
+        if not response.content:
+            return []
+
+        try:
+            return response.json()
+        except ValueError:
+            return response.text
+
+    # ─────────────────────────────────────────────
+    # Operations
+    # ─────────────────────────────────────────────
+    def select(
+        self,
+        table: str,
+        *,
+        columns: str = "*",
+        filters: Mapping[str, str] | None = None,
+        limit: int | None = None,
+    ) -> list[dict]:
+        """
+        GET /rest/v1/{table}?select=...&<filters>&limit=...
+
+        ``filters`` is a mapping of PostgREST filter clauses, e.g.
+        ``{"library_id": "eq.abc-123"}``.
+        """
+        params: dict[str, str] = {"select": columns}
+        if filters:
+            params.update(filters)
+        if limit is not None:
+            params["limit"] = str(limit)
+
+        response = self._session.get(
+            self._table_url(table), params=params, timeout=self.timeout
+        )
+        return self._handle(response) or []
+
+    def insert(
+        self,
+        table: str,
+        rows: Mapping[str, Any] | Iterable[Mapping[str, Any]],
+        *,
+        returning: str = "representation",
+    ) -> list[dict]:
+        """
+        POST /rest/v1/{table} — insert one row or many.
+
+        ``returning`` is passed through as the ``Prefer: return=<mode>``
+        header. Defaults to "representation" (return inserted rows).
+        """
+        if isinstance(rows, Mapping):
+            body = [dict(rows)]
+        else:
+            body = [dict(r) for r in rows]
+
+        headers = {"Prefer": f"return={returning}"}
+        response = self._session.post(
+            self._table_url(table),
+            data=json.dumps(body),
+            headers=headers,
+            timeout=self.timeout,
+        )
+        return self._handle(response) or []
+
+    def upsert(
+        self,
+        table: str,
+        rows: Mapping[str, Any] | Iterable[Mapping[str, Any]],
+        *,
+        on_conflict: str,
+        returning: str = "representation",
+    ) -> list[dict]:
+        """
+        POST with ``Prefer: resolution=merge-duplicates``.
+
+        Parameters
+        ----------
+        on_conflict : str
+            Column name (or comma-separated columns) that backs a UNIQUE
+            constraint to resolve against, e.g. ``"fusion_guid"``.
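+
+        Example (hypothetical row)::
+
+            client.upsert(
+                "tools",
+                {"fusion_guid": "abc-123", "description": "6mm endmill"},
+                on_conflict="fusion_guid",
+            )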
+        """
+        if isinstance(rows, Mapping):
+            body = [dict(rows)]
+        else:
+            body = [dict(r) for r in rows]
+
+        headers = {
+            "Prefer": f"resolution=merge-duplicates,return={returning}",
+        }
+        params = {"on_conflict": on_conflict}
+        response = self._session.post(
+            self._table_url(table),
+            data=json.dumps(body),
+            headers=headers,
+            params=params,
+            timeout=self.timeout,
+        )
+        return self._handle(response) or []
+
+    def delete(
+        self,
+        table: str,
+        *,
+        filters: Mapping[str, str],
+    ) -> list[dict]:
+        """
+        DELETE /rest/v1/{table}?<filters>
+
+        ``filters`` is REQUIRED — PostgREST refuses unfiltered deletes by
+        default and we want to keep it that way to avoid wiping tables.
+        """
+        if not filters:
+            raise ValueError(
+                "delete() requires at least one filter — refusing to "
+                "issue an unfiltered DELETE."
+            )
+        headers = {"Prefer": "return=representation"}
+        response = self._session.delete(
+            self._table_url(table),
+            params=dict(filters),
+            headers=headers,
+            timeout=self.timeout,
+        )
+        return self._handle(response) or []
+
+    def update(
+        self,
+        table: str,
+        values: Mapping[str, Any],
+        *,
+        filters: Mapping[str, str],
+    ) -> list[dict]:
+        """
+        PATCH /rest/v1/{table}?<filters> — update matching rows.
+
+        ``filters`` is REQUIRED to prevent accidental full-table updates.
+        ``values`` is the dict of columns to set.
+        """
+        if not filters:
+            raise ValueError(
+                "update() requires at least one filter — refusing to "
+                "issue an unfiltered PATCH."
+            )
+        headers = {"Prefer": "return=representation"}
+        response = self._session.patch(
+            self._table_url(table),
+            data=json.dumps(dict(values)),
+            params=dict(filters),
+            headers=headers,
+            timeout=self.timeout,
+        )
+        return self._handle(response) or []
diff --git a/sync.py b/sync.py
new file mode 100644
index 0000000..99e651d
--- /dev/null
+++ b/sync.py
@@ -0,0 +1,461 @@
+#!/usr/bin/env python
+"""
+sync.py
+Nightly sync CLI — APS cloud-first, local ADC fallback
+Grace Engineering — Datum project
+=============================================================
+Downloads Fusion 360 tool libraries from the Autodesk cloud
+(APS Data Management API) and upserts them into Supabase.
+Falls back to local ADC-synced files when APS OAuth is
+unavailable.
+
+Usage
+-----
+    # Full sync (APS cloud → Supabase)
+    py sync.py
+
+    # Dry run — download + validate only, no Supabase writes
+    py sync.py --dry-run
+
+    # Force local ADC fallback (skip APS entirely)
+    py sync.py --local
+
+    # Verbose logging
+    py sync.py -v
+
+Exit codes
+----------
+    0   All libraries synced (or validated, in dry-run mode)
+    1   One or more libraries failed validation or sync
+    2   Fatal error (no source available, config missing, etc.)
+
+Scheduling
+----------
+    # Windows Task Scheduler (daily at 02:00)
+    schtasks /create /tn "Datum Nightly Sync" ^
+        /tr "py C:\\projects\\Datum\\sync.py" /sc daily /st 02:00
+
+    # Linux cron (daily at 02:00)
+    0 2 * * * /opt/datum/sync.py >> /var/log/datum-sync.log 2>&1
+"""
+from __future__ import annotations
+
+import argparse
+import logging
+import os
+import sys
+import time
+from dataclasses import dataclass, field
+from pathlib import Path
+
+# ── Anchor working directory to the project root ──────────
+# Task Scheduler / cron may launch from any CWD. All local
+# imports (bootstrap, aps_client, etc.) and .env.local rely
+# on CWD being the project root.
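+# (Under systemd a WorkingDirectory= setting would make this chdir
+# redundant; Task Scheduler and cron give no such guarantee.)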
+_PROJECT_ROOT = Path(__file__).resolve().parent +os.chdir(_PROJECT_ROOT) +if str(_PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(_PROJECT_ROOT)) + +import bootstrap # noqa: E402, F401 — loads .env.local into os.environ + +from aps_client import APSClient, APSAuthError, APSConfigError, APSHTTPError # noqa: E402 +from supabase_client import SupabaseClient # noqa: E402 +from sync_supabase import sync_library, hash_file # noqa: E402 +from tool_library_loader import load_all_libraries, CAM_TOOLS_DIR # noqa: E402 +from enrich import enrich_raw_tools # noqa: E402 +from populate_supply_items import populate_supply_items # noqa: E402 +from validate_library import validate_library, ValidationMode # noqa: E402 + +log = logging.getLogger("datum.sync") + +# Known XWERKS hub IDs (see memory: project_aps_cloud_integration.md) +PROJECT_ID = "a.YnVzaW5lc3M6Z3JhY2Vlbmc0I0QyMDI0MTIyMDg0OTIxNzc3Ng" +CAM_TOOLS_FOLDER = "urn:adsk.wipprod:fs.folder:co.C0zYkNP4TOexre_-hWRhRA" + + +# ───────────────────────────────────────────── +# Result tracking +# ───────────────────────────────────────────── +@dataclass +class LibraryResult: + name: str + status: str # "success" | "skipped" | "fail" + tools: int = 0 + presets: int = 0 + message: str = "" + + +@dataclass +class SyncReport: + source: str # "aps" | "local" + results: list[LibraryResult] = field(default_factory=list) + start_time: float = 0.0 + end_time: float = 0.0 + + @property + def succeeded(self) -> list[LibraryResult]: + return [r for r in self.results if r.status == "success"] + + @property + def failed(self) -> list[LibraryResult]: + return [r for r in self.results if r.status == "fail"] + + @property + def skipped(self) -> list[LibraryResult]: + return [r for r in self.results if r.status == "skipped"] + + @property + def total_tools(self) -> int: + return sum(r.tools for r in self.results) + + @property + def total_presets(self) -> int: + return sum(r.presets for r in self.results) + + @property + def elapsed(self) -> float: + return self.end_time - self.start_time + + def print_summary(self) -> None: + log.info("=" * 60) + log.info("Sync complete — source: %s", self.source) + log.info( + " %d succeeded, %d skipped, %d failed", + len(self.succeeded), + len(self.skipped), + len(self.failed), + ) + log.info( + " Totals: %d tools, %d presets", + self.total_tools, + self.total_presets, + ) + log.info(" Elapsed: %.1fs", self.elapsed) + log.info("=" * 60) + + +# ───────────────────────────────────────────── +# APS cloud sync +# ───────────────────────────────────────────── +def sync_from_aps(*, dry_run: bool = False) -> SyncReport: + """ + Download all cloud tool libraries from APS and sync into Supabase. + Same pipeline as /api/aps/sync but callable without Flask. 
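+
+    Illustrative use (fields as defined on ``SyncReport`` above; the
+    values depend on the live APS folder)::
+
+        report = sync_from_aps(dry_run=True)
+        report.print_summary()
+        for r in report.failed:
+            log.error("%s: %s", r.name, r.message)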
+ """ + report = SyncReport(source="aps", start_time=time.monotonic()) + + aps = APSClient() + aps._require_config() + aps._ensure_token() + + # Supabase client needed for both enrichment (read) and sync (write) + sb = SupabaseClient() + + contents = aps.get_folder_contents(PROJECT_ID, CAM_TOOLS_FOLDER) + + for item in contents: + if item.get("type") != "items": + continue + name = item.get("attributes", {}).get("displayName", "") + if not name.endswith(".json"): + continue + + library_name = name.replace(".json", "") + log.info("── %s ──", library_name) + + # Get storage URN from the tip + try: + item_id = item["id"] + tip = aps.get_item_tip(PROJECT_ID, item_id) + storage_urn = ( + tip.get("relationships", {}) + .get("storage", {}) + .get("data", {}) + .get("id", "") + ) + source_modified_at = ( + tip.get("attributes", {}).get("lastModifiedTime", "") + ) or None + if not storage_urn: + report.results.append(LibraryResult( + library_name, "fail", message="No storage URN in tip", + )) + log.error(" FAIL: no storage URN in tip") + continue + + # Download and parse + tools = aps.download_tool_library(storage_urn) + except (APSHTTPError, Exception) as e: + report.results.append(LibraryResult( + library_name, "fail", message=str(e), + )) + log.error(" FAIL download: %s", e) + continue + + if not tools: + report.results.append(LibraryResult( + library_name, "skipped", message="Empty library", + )) + log.info(" SKIP: empty library") + continue + + # Enrich tools missing product-id from reference catalog + try: + ec = enrich_raw_tools(tools, sb) + if ec["enriched"]: + log.info(" Enriched %d tools from reference catalog", ec["enriched"]) + except Exception as e: + log.warning(" Enrichment failed (non-fatal): %s", e) + + # Validation gate + vr = validate_library( + tools=tools, + library_name=library_name, + mode=ValidationMode.PRODUCTION, + use_api=False, + ) + if not vr.passed: + report.results.append(LibraryResult( + library_name, "fail", + message=f"Validation failed: {len(vr.fails)} issue(s)", + )) + log.error(" FAIL validation: %s", vr.summary()) + for issue in vr.fails: + log.error(" %s: %s", issue.rule, issue.message) + continue + + log.info(" Validated: %s", vr.summary()) + + if dry_run: + report.results.append(LibraryResult( + library_name, "success", + tools=vr.sync_candidate_count, + message="dry-run — validated OK, no write", + )) + log.info(" DRY-RUN: %d tools validated, skipping write", vr.sync_candidate_count) + continue + + # Sync to Supabase + try: + counts = sync_library( + library_name, + tools, + client=sb, + file_path=f"aps://{item_id}", + source_modified_at=source_modified_at, + ) + report.results.append(LibraryResult( + library_name, "success", + tools=counts["tools"], + presets=counts["presets"], + )) + log.info(" OK: %d tools, %d presets", counts["tools"], counts["presets"]) + except Exception as e: + report.results.append(LibraryResult( + library_name, "fail", message=str(e), + )) + log.error(" FAIL sync: %s", e) + + report.end_time = time.monotonic() + return report + + +# ───────────────────────────────────────────── +# Local ADC fallback +# ───────────────────────────────────────────── +def sync_from_local(*, dry_run: bool = False) -> SyncReport: + """ + Load tool libraries from the local ADC sync path and sync into Supabase. + Fallback when APS OAuth is unavailable. 
+ """ + report = SyncReport(source="local", start_time=time.monotonic()) + + log.info("Loading libraries from local path: %s", CAM_TOOLS_DIR) + + if not CAM_TOOLS_DIR.exists(): + log.error("CAMTools directory not found: %s", CAM_TOOLS_DIR) + report.end_time = time.monotonic() + return report + + libraries = load_all_libraries( + CAM_TOOLS_DIR, + abort_on_stale=False, + validate=False, # We run validation ourselves below + ) + + if not libraries: + log.error("No libraries loaded from %s", CAM_TOOLS_DIR) + report.end_time = time.monotonic() + return report + + sb = SupabaseClient() + + for library_name, tools in libraries.items(): + log.info("── %s ──", library_name) + + # Enrich tools missing product-id from reference catalog + try: + ec = enrich_raw_tools(tools, sb) + if ec["enriched"]: + log.info(" Enriched %d tools from reference catalog", ec["enriched"]) + except Exception as e: + log.warning(" Enrichment failed (non-fatal): %s", e) + + # Validation gate + vr = validate_library( + tools=tools, + library_name=library_name, + mode=ValidationMode.PRODUCTION, + use_api=False, + ) + if not vr.passed: + report.results.append(LibraryResult( + library_name, "fail", + message=f"Validation failed: {len(vr.fails)} issue(s)", + )) + log.error(" FAIL validation: %s", vr.summary()) + for issue in vr.fails: + log.error(" %s: %s", issue.rule, issue.message) + continue + + log.info(" Validated: %s", vr.summary()) + + if dry_run: + report.results.append(LibraryResult( + library_name, "success", + tools=vr.sync_candidate_count, + message="dry-run — validated OK, no write", + )) + log.info(" DRY-RUN: %d tools validated, skipping write", vr.sync_candidate_count) + continue + + # Sync to Supabase + try: + file_path = CAM_TOOLS_DIR / f"{library_name}.json" + fh = hash_file(file_path) if file_path.exists() else None + counts = sync_library( + library_name, + tools, + client=sb, + file_path=str(file_path), + file_hash=fh, + ) + report.results.append(LibraryResult( + library_name, "success", + tools=counts["tools"], + presets=counts["presets"], + )) + log.info(" OK: %d tools, %d presets", counts["tools"], counts["presets"]) + except Exception as e: + report.results.append(LibraryResult( + library_name, "fail", message=str(e), + )) + log.error(" FAIL sync: %s", e) + + report.end_time = time.monotonic() + return report + + +# ───────────────────────────────────────────── +# Main +# ───────────────────────────────────────────── +def main(argv: list[str] | None = None) -> int: + parser = argparse.ArgumentParser( + description="Datum nightly sync -- Fusion tool libraries to Supabase", + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Download and validate only, no Supabase writes", + ) + parser.add_argument( + "--local", + action="store_true", + help="Force local ADC fallback (skip APS cloud)", + ) + parser.add_argument( + "-v", "--verbose", + action="store_true", + help="Enable debug-level logging", + ) + parser.add_argument( + "--log-file", + type=str, + default=None, + help="Append logs to this file (in addition to stdout)", + ) + args = parser.parse_args(argv) + + # Logging setup + level = logging.DEBUG if args.verbose else logging.INFO + handlers: list[logging.Handler] = [logging.StreamHandler()] + if args.log_file: + fh = logging.FileHandler(args.log_file, encoding="utf-8") + handlers.append(fh) + logging.basicConfig( + level=level, + format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=handlers, + ) + + log.info("Datum sync starting%s", " 
(dry-run)" if args.dry_run else "") + + report: SyncReport | None = None + + if not args.local: + # Try APS cloud first + try: + log.info("Attempting APS cloud sync...") + report = sync_from_aps(dry_run=args.dry_run) + except (APSConfigError, APSAuthError) as e: + log.warning("APS unavailable: %s — falling back to local", e) + except Exception as e: + log.warning("APS error: %s — falling back to local", e) + + if report is None: + # Fallback to local ADC + log.info("Using local ADC path...") + try: + report = sync_from_local(dry_run=args.dry_run) + except Exception as e: + log.critical("Local sync failed: %s", e) + return 2 + + report.print_summary() + + # Post-sync: refresh plex_supply_items staging table (#80). + # Non-fatal — a failure here should not change the sync exit code. + if not args.dry_run and report.succeeded: + try: + sb = SupabaseClient() + pop = populate_supply_items(sb) + log.info( + "Supply-item staging: %d staged, %d skipped, %d failed", + len(pop.staged), len(pop.skipped), len(pop.failed), + ) + except Exception as e: + log.warning("Supply-item staging failed (non-fatal): %s", e) + + if not report.results: + log.error("No libraries processed from any source") + return 2 + + if report.failed: + log.error( + "Failed libraries: %s", + ", ".join(r.name for r in report.failed), + ) + return 1 + + return 0 + + +def cli() -> None: + """Console-script entry point (called by ``datum-sync`` after pip install).""" + sys.exit(main()) + + +if __name__ == "__main__": + cli() diff --git a/sync_supabase.py b/sync_supabase.py new file mode 100644 index 0000000..0f0bd3e --- /dev/null +++ b/sync_supabase.py @@ -0,0 +1,525 @@ +""" +sync_supabase.py +Fusion 360 JSON → Supabase ingest +Grace Engineering — Datum project +============================================================= +Reads Fusion 360 tool-library JSON files, applies the eight +normalization rules documented in the Supabase Schema Design +(Notion · 2026-04-08), and upserts the three core tables +(``libraries``, ``tools``, ``cutting_presets``) in the dedicated +``datum`` Supabase project. + +Pipeline +-------- +1. Load a library (filename → list of raw tool dicts) with + ``tool_library_loader.load_library``. +2. Filter out holders and probes (Rule 6). +3. For each remaining tool, build a normalized row: + - Unit convert inches → mm on all FLOAT geometry (Rule 1). + - Strip leading/trailing whitespace on ``product_id``; preserve + internal characters (Rule 2). + - Carry JSON nulls through as SQL NULL (Rule 5). + - Carry ``shaft.segments`` as JSONB passthrough, NULL if absent + (Rule 7). + - Use ``.get("comment")`` for post-process comment (Rule 8). +4. Upsert the library row on ``library_name``, capture its id. +5. Upsert all tool rows on ``fusion_guid`` in one batch, capture ids + keyed by fusion_guid. +6. For each tool, flush its existing presets (DELETE WHERE tool_id), + then bulk-insert the freshly normalized preset rows. This is a + cleaner model than trying to upsert per-preset when the vendor + doesn't provide a stable preset identity. + +The module is pure data — it does not touch Plex. Downstream, +``build_supply_item_payload`` (#3) will read normalized rows from +the ``tools`` table and push them to Plex. 
+""" +from __future__ import annotations + +import hashlib +import logging +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + +from supabase_client import SupabaseClient + +log = logging.getLogger(__name__) + +# ───────────────────────────────────────────── +# Constants +# ───────────────────────────────────────────── +INCHES_TO_MM = 25.4 + +EXCLUDED_TYPES = frozenset({"holder", "probe"}) + +# Fusion 360 uses legacy type names that don't match modern shop terminology. +# Map lowercased Fusion type → corrected display name. +TYPE_RENAMES: dict[str, str] = { + "slot mill": "slitting saw", +} + +# Geometry fields that are dimensional (convert inches → mm) vs dimensionless +# (counts, flags, angles — carry as-is). +GEOMETRY_LENGTH_FIELDS = { + "DC": "geo_dc", + "OAL": "geo_oal", + "LCF": "geo_lcf", + "LB": "geo_lb", + "SFDM": "geo_sfdm", + "RE": "geo_re", + "tip-diameter": "geo_tip_diameter", + "tip-length": "geo_tip_length", + "tip-offset": "geo_tip_offset", + "assemblyGaugeLength": "geo_assembly_gauge_length", + "shoulder-diameter": "geo_shoulder_diameter", + "shoulder-length": "geo_shoulder_length", +} + +# Dimensionless geometry fields (counts, angles, booleans, etc.) — never +# scaled by unit conversion. +GEOMETRY_DIMENSIONLESS_FIELDS = { + "NOF": "geo_nof", + "SIG": "geo_sig", # point angle in degrees + "NT": "geo_nt", + "TA": "geo_ta", # taper angle in degrees + "TA2": "geo_ta2", + "TP": "geo_tp", + "thread-profile-angle": "geo_thread_profile_angle", +} + +GEOMETRY_BOOL_FIELDS = { + "HAND": "geo_hand", + "CSP": "geo_csp", +} + +# Post-process field map (int/bool/text, never unit-scaled). +POST_PROCESS_INT_FIELDS = { + "number": "pp_number", + "turret": "pp_turret", + "diameter-offset": "pp_diameter_offset", + "length-offset": "pp_length_offset", +} + +POST_PROCESS_BOOL_FIELDS = { + "live": "pp_live", + "break-control": "pp_break_control", + "manual-tool-change": "pp_manual_tool_change", +} + +# Preset FLOAT fields — all nullable, carry JSON null through unchanged. +# Keys are Fusion JSON field names, values are Supabase column names. +PRESET_FLOAT_FIELDS = { + "v_c": "v_c", + "v_f": "v_f", + "f_z": "f_z", + "f_n": "f_n", + "n": "n", + "n_ramp": "n_ramp", + "ramp-angle": "ramp_angle", + "v_f_plunge": "v_f_plunge", + "v_f_ramp": "v_f_ramp", + "v_f_leadIn": "v_f_lead_in", + "v_f_leadOut": "v_f_lead_out", + "v_f_retract": "v_f_retract", + "v_f_transition": "v_f_transition", +} + +PRESET_BOOL_FIELDS = { + "use-feed-per-revolution": "use_feed_per_revolution", + "use-stepdown": "use_stepdown", + "use-stepover": "use_stepover", +} + + +# ───────────────────────────────────────────── +# Normalization primitives +# ───────────────────────────────────────────── +def normalize_product_id(raw: Any) -> str | None: + """ + Rule 2 — strip leading/trailing whitespace only. Never strip + internal characters. Sandvik ships ``"RA216.33-0845-CK04P 1640"`` + with a real internal space that must be preserved. + """ + if raw is None: + return None + if not isinstance(raw, str): + raw = str(raw) + stripped = raw.strip() + return stripped or None + + +def normalize_preset_guid(raw: Any) -> str | None: + """ + Rule 3 — strip surrounding curly braces from Sandvik preset GUIDs: + ``"{6a2d224-...}"`` → ``"6a2d224-..."``. Leave everything else + alone. 
+ """ + if raw is None: + return None + if not isinstance(raw, str): + raw = str(raw) + s = raw.strip() + if len(s) >= 2 and s.startswith("{") and s.endswith("}"): + s = s[1:-1] + return s or None + + +def unit_scale(value: Any, is_inches: bool) -> float | None: + """ + Rule 1 — multiply dimensional values by 25.4 when the library + declares ``unit == "inches"``. Pass JSON nulls through unchanged. + Booleans and non-numeric strings return None (not a dimensional + value). + """ + if value is None: + return None + if isinstance(value, bool): + # bool is a subclass of int in Python — reject explicitly. + return None + try: + as_float = float(value) + except (TypeError, ValueError): + return None + if is_inches: + return as_float * INCHES_TO_MM + return as_float + + +def _maybe_float(value: Any) -> float | None: + """Coerce to float or return None (for dimensionless geometry + presets).""" + if value is None: + return None + if isinstance(value, bool): + return None + try: + return float(value) + except (TypeError, ValueError): + return None + + +def _maybe_int(value: Any) -> int | None: + if value is None: + return None + if isinstance(value, bool): + return None + try: + return int(value) + except (TypeError, ValueError): + return None + + +def _maybe_bool(value: Any) -> bool | None: + if value is None: + return None + if isinstance(value, bool): + return value + return None + + +def _maybe_str(value: Any) -> str | None: + if value is None: + return None + if not isinstance(value, str): + return str(value) + return value + + +# ───────────────────────────────────────────── +# Tool row builder +# ───────────────────────────────────────────── +def build_tool_row(tool: dict) -> dict: + """ + Map one raw Fusion tool dict to a ``tools`` row dict. + Does NOT include ``library_id`` — caller fills that in after the + library row has been upserted and has a real id. + + Applies Rules 1, 2, 7, 8. Rules 5 and 6 are applied at the batch + level (see ``sync_library``). + """ + unit_raw = tool.get("unit") + is_inches = isinstance(unit_raw, str) and unit_raw.lower() == "inches" + + row: dict[str, Any] = { + "fusion_guid": _maybe_str(tool.get("guid")), + "vendor": _maybe_str(tool.get("vendor")) or "", + "product_id": normalize_product_id(tool.get("product-id")) or "", + "description": _maybe_str(tool.get("description")) or "", + "type": TYPE_RENAMES.get( + (_maybe_str(tool.get("type")) or "").lower(), + _maybe_str(tool.get("type")) or "", + ), + "bmc": _maybe_str(tool.get("BMC")), + "grade": _maybe_str(tool.get("GRADE")), + # reference_guid is observed as integer 0 in Harvey/Helical — store as string + "reference_guid": ( + str(tool["reference_guid"]) if "reference_guid" in tool else None + ), + "unit_original": _maybe_str(unit_raw), + "product_link": _maybe_str(tool.get("product-link")), + "tapered_type": _maybe_str(tool.get("tapered-type")), + } + + # Geometry — length fields go through unit_scale; dimensionless pass through. + geometry = tool.get("geometry") or {} + for fusion_key, col in GEOMETRY_LENGTH_FIELDS.items(): + row[col] = unit_scale(geometry.get(fusion_key), is_inches) + for fusion_key, col in GEOMETRY_DIMENSIONLESS_FIELDS.items(): + row[col] = _maybe_float(geometry.get(fusion_key)) + for fusion_key, col in GEOMETRY_BOOL_FIELDS.items(): + row[col] = _maybe_bool(geometry.get(fusion_key)) + + # Post-process — Rule 8: use .get("comment"), not direct key access. 
+ pp = tool.get("post-process") or {} + for fusion_key, col in POST_PROCESS_INT_FIELDS.items(): + row[col] = _maybe_int(pp.get(fusion_key)) + for fusion_key, col in POST_PROCESS_BOOL_FIELDS.items(): + row[col] = _maybe_bool(pp.get(fusion_key)) + row["pp_comment"] = _maybe_str(pp.get("comment")) + + # Rule 7 — shaft.segments as JSONB passthrough, NULL if absent. Do not error. + shaft = tool.get("shaft") + if isinstance(shaft, dict) and "segments" in shaft: + row["shaft_segments"] = shaft["segments"] + else: + row["shaft_segments"] = None + + return row + + +# ───────────────────────────────────────────── +# Preset row builder +# ───────────────────────────────────────────── +def build_preset_rows(tool: dict, tool_id: str) -> list[dict]: + """ + Map ``tool.start-values.presets`` → list of preset row dicts. + ``tool_id`` is the Supabase UUID of the parent tool row. + + Applies Rules 3 (brace strip) and 5 (JSON nulls). + """ + start_values = tool.get("start-values") or {} + presets = start_values.get("presets") or [] + if not isinstance(presets, list): + return [] + + rows: list[dict] = [] + for raw in presets: + if not isinstance(raw, dict): + continue + material = raw.get("material") or {} + if not isinstance(material, dict): + material = {} + + # Preset GUID can appear under either 'guid' or 'presetGuid' across vendors. + preset_guid = raw.get("presetGuid") or raw.get("guid") + + row: dict[str, Any] = { + "tool_id": tool_id, + "preset_guid": normalize_preset_guid(preset_guid), + "name": _maybe_str(raw.get("name")), + "description": _maybe_str(raw.get("description")), + "material_category": _maybe_str(material.get("category")), + "material_query": _maybe_str(material.get("query")), + "material_use_hardness": _maybe_bool(material.get("useHardness")), + "tool_coolant": _maybe_str(raw.get("tool-coolant")), + } + + for fusion_key, col in PRESET_FLOAT_FIELDS.items(): + # Rule 5 — preserve explicit JSON null, do not substitute 0. + if fusion_key in raw: + row[col] = _maybe_float(raw.get(fusion_key)) + else: + row[col] = None + + for fusion_key, col in PRESET_BOOL_FIELDS.items(): + if fusion_key in raw: + row[col] = _maybe_bool(raw.get(fusion_key)) + else: + row[col] = None + + rows.append(row) + + return rows + + +# ───────────────────────────────────────────── +# File hashing +# ───────────────────────────────────────────── +def hash_file(path: Path) -> str: + """SHA-256 of file contents, used for change detection on libraries.""" + h = hashlib.sha256() + with open(path, "rb") as f: + for chunk in iter(lambda: f.read(65536), b""): + h.update(chunk) + return h.hexdigest() + + +# ───────────────────────────────────────────── +# Top-level ingest +# ───────────────────────────────────────────── +def _pick_vendor(tools: list[dict]) -> str | None: + """ + Rule 4 — preserve raw vendor casing. Most libraries are single-vendor, + so we take the first tool's vendor string as-is. Job-specific libraries + (e.g. BROTHER SPEEDIO ALUMINUM) may be mixed; that's fine — the + library-level ``vendor`` is a hint, the per-tool ``vendor`` column + is the source of truth. 
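+
+    Illustrative: the first non-empty vendor string wins::
+
+        _pick_vendor([{"vendor": ""}, {"vendor": "HARVEY TOOL"}])  # → "HARVEY TOOL"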
+ """ + for t in tools: + v = t.get("vendor") + if isinstance(v, str) and v.strip(): + return v + return None + + +def _pick_unit_original(tools: list[dict]) -> str | None: + """First declared unit wins for the library metadata.""" + for t in tools: + u = t.get("unit") + if isinstance(u, str) and u.strip(): + return u + return None + + +def sync_library( + library_name: str, + tools: list[dict], + *, + client: SupabaseClient, + file_path: str | None = None, + file_hash: str | None = None, + source_modified_at: str | None = None, +) -> dict[str, int]: + """ + Upsert one library worth of Fusion tools into Supabase. + + Parameters + ---------- + library_name : str + Filename stem or other unique name. UPSERT key on libraries. + tools : list[dict] + Raw ``data`` array from a Fusion JSON file. Holders/probes are + filtered out inside this function (Rule 6). + client : SupabaseClient + Configured Supabase client. + file_path : str | None + Full on-disk path. Stored for audit; not required. + file_hash : str | None + SHA-256 of the source file. Stored on the library row so future + runs can skip unchanged files. + source_modified_at : str | None + ISO 8601 timestamp from APS ``lastModifiedTime``. When the + library file was last saved in Fusion Hub. + + Returns + ------- + dict + ``{"tools": , "presets": }``. + """ + # Rule 6 — sync filter. + filtered = [t for t in tools if t.get("type") not in EXCLUDED_TYPES] + + vendor = _pick_vendor(filtered) + unit_original = _pick_unit_original(filtered) + + # ── 1. Library row upsert ────────────────────────────────────── + library_row = { + "library_name": library_name, + "vendor": vendor, + "file_path": file_path, + "file_hash": file_hash, + "tool_count": len(filtered), + "unit_original": unit_original, + "ingested_at": datetime.now(timezone.utc).isoformat(), + } + if source_modified_at: + library_row["source_modified_at"] = source_modified_at + lib_result = client.upsert( + "libraries", + library_row, + on_conflict="library_name", + ) + if not lib_result: + raise RuntimeError(f"Library upsert returned no rows for {library_name!r}") + library_id = lib_result[0]["id"] + log.info( + "Library upserted: %s → id=%s (%d tools after filter)", + library_name, + library_id, + len(filtered), + ) + + # ── 2. Tool rows upsert ──────────────────────────────────────── + tool_rows: list[dict] = [] + for raw in filtered: + row = build_tool_row(raw) + if not row.get("fusion_guid"): + log.warning( + "Skipping tool with no guid in %s: %s", + library_name, + row.get("product_id") or row.get("description") or "", + ) + continue + row["library_id"] = library_id + tool_rows.append(row) + + if not tool_rows: + log.info("No tool rows to upsert for %s", library_name) + return {"tools": 0, "presets": 0} + + tools_result = client.upsert( + "tools", + tool_rows, + on_conflict="fusion_guid", + ) + # Build a guid → db id lookup for preset parenting. + guid_to_id = {r["fusion_guid"]: r["id"] for r in tools_result} + log.info("Tools upserted: %d rows for %s", len(tools_result), library_name) + + # ── 3. Presets: flush + bulk insert per tool ─────────────────── + total_presets = 0 + for raw in filtered: + guid = raw.get("guid") + if guid not in guid_to_id: + continue + tool_id = guid_to_id[guid] + # Flush existing presets for this tool so a re-sync never double-inserts. 
+ client.delete( + "cutting_presets", + filters={"tool_id": f"eq.{tool_id}"}, + ) + preset_rows = build_preset_rows(raw, tool_id=tool_id) + if preset_rows: + client.insert("cutting_presets", preset_rows) + total_presets += len(preset_rows) + + log.info("Presets inserted: %d rows for %s", total_presets, library_name) + return {"tools": len(tools_result), "presets": total_presets} + + +def sync_library_file( + path: Path, + *, + client: SupabaseClient, + library_name: str | None = None, +) -> dict[str, int]: + """ + Convenience — load a single ``.json`` file from disk, apply the + stale-file guard via ``tool_library_loader.load_library``, and + sync it into Supabase. + """ + from tool_library_loader import load_library + + tools = load_library(path) + if tools is None: + raise RuntimeError(f"load_library returned None for {path}") + + name = library_name or path.stem + file_hash = hash_file(path) + return sync_library( + name, + tools, + client=client, + file_path=str(path), + file_hash=file_hash, + ) diff --git a/sync_tool_inventory.py b/sync_tool_inventory.py new file mode 100644 index 0000000..2b18a65 --- /dev/null +++ b/sync_tool_inventory.py @@ -0,0 +1,342 @@ +#!/usr/bin/env python +""" +sync_tool_inventory.py +Plex -> Supabase nightly sync of tool on-hand quantities. +Grace Engineering -- Datum project -- Issue #75 +============================================================= +For every row in ``tools`` with a non-NULL ``plex_supply_item_id``, call +``inventory/v1-beta1/inventory-history/item-adjustments`` and update: + + qty_on_hand -- sum of adjustment quantities (quantity is pre-signed by Plex) + qty_tracked -- TRUE iff Plex returned >=1 adjustment record + qty_synced_at -- now() + +See docs/Plex_API_Reference.md Section 3.6 for the transactionType sign +table. The contract: ``quantity`` is delivered pre-signed, so we sum it +directly -- no lookup from transactionType required. + +Usage +----- + py sync_tool_inventory.py # run the sync + py sync_tool_inventory.py --dry-run # fetch + compute, no Supabase writes + py sync_tool_inventory.py -v # debug logging + py sync_tool_inventory.py --log-file f.log + +Exit codes +---------- + 0 All linked tools synced + 1 One or more tools failed (partial) + 2 Fatal: config missing, no tools linked, etc. +""" +from __future__ import annotations + +import argparse +import logging +import os +import sys +import time +from dataclasses import dataclass, field +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + +_PROJECT_ROOT = Path(__file__).resolve().parent +if str(_PROJECT_ROOT) not in sys.path: + sys.path.insert(0, str(_PROJECT_ROOT)) + +import bootstrap # noqa: E402, F401 -- loads .env.local + +from plex_api import PlexClient, API_KEY, API_SECRET, TENANT_ID, USE_TEST # noqa: E402 +from supabase_client import SupabaseClient # noqa: E402 + +log = logging.getLogger("datum.sync_tool_inventory") + +# History window. 2015-01-01 predates Grace's Plex go-live; using a wide +# window means we capture the full running balance, not just recent deltas. +# Plex requires full ISO with Z suffix -- plain dates return 400. +DEFAULT_START = "2015-01-01T00:00:00Z" + +# Known transactionType values as of 2026-04-15 probe. Unknown values are +# still summed (quantity is pre-signed) but we log a warning so new types +# can be reviewed and added to the docs. 
+KNOWN_TRANSACTION_TYPES = frozenset({ + "PO Receipt", + "Checkout", + "Correction", + "Check In", +}) + + +# --------------------------------------------------------------- +# Result tracking +# --------------------------------------------------------------- +@dataclass +class ToolResult: + fusion_guid: str + plex_supply_item_id: str + status: str # "success" | "fail" + qty_on_hand: float | None = None + qty_tracked: bool | None = None + n_records: int = 0 + message: str = "" + + +@dataclass +class SyncReport: + results: list[ToolResult] = field(default_factory=list) + unknown_transaction_types: set[str] = field(default_factory=set) + start_time: float = 0.0 + end_time: float = 0.0 + + @property + def succeeded(self) -> list[ToolResult]: + return [r for r in self.results if r.status == "success"] + + @property + def failed(self) -> list[ToolResult]: + return [r for r in self.results if r.status == "fail"] + + @property + def tracked(self) -> list[ToolResult]: + return [r for r in self.succeeded if r.qty_tracked] + + @property + def elapsed(self) -> float: + return self.end_time - self.start_time + + def print_summary(self) -> None: + log.info("=" * 60) + log.info("Tool inventory sync complete") + log.info( + " %d succeeded (%d with history, %d empty), %d failed", + len(self.succeeded), + len(self.tracked), + len(self.succeeded) - len(self.tracked), + len(self.failed), + ) + log.info(" Elapsed: %.1fs", self.elapsed) + if self.unknown_transaction_types: + log.warning( + " Unknown transactionType values encountered: %s " + "-- review and update docs/Plex_API_Reference.md Section 3.6", + sorted(self.unknown_transaction_types), + ) + log.info("=" * 60) + + +# --------------------------------------------------------------- +# Pure helpers (easy to unit-test) +# --------------------------------------------------------------- +def compute_qty(records: list[dict]) -> tuple[float, bool]: + """Return (qty_on_hand, qty_tracked) for a list of adjustment records. + + ``quantity`` is delivered pre-signed by Plex (positive for receipts/ + check-ins, negative for checkouts), so we sum directly. Missing or + non-numeric quantities are skipped silently. + + qty_tracked is TRUE iff ``records`` is non-empty -- a linked tool with + zero history is a valid, distinct state from "not linked". + """ + total = 0.0 + for r in records: + q = r.get("quantity") + if q is None: + continue + try: + total += float(q) + except (TypeError, ValueError): + continue + return total, len(records) > 0 + + +def collect_unknown_types(records: list[dict]) -> set[str]: + """Return the set of transactionType values not in KNOWN_TRANSACTION_TYPES.""" + unknown = set() + for r in records: + tt = r.get("transactionType") + if tt is not None and tt not in KNOWN_TRANSACTION_TYPES: + unknown.add(tt) + return unknown + + +def _unwrap_records(body: Any) -> list[dict]: + """Plex inventory-history returns either a bare list or {data: [...]}.""" + if isinstance(body, list): + return body + if isinstance(body, dict): + data = body.get("data") + if isinstance(data, list): + return data + return [] + + +# --------------------------------------------------------------- +# Main sync +# --------------------------------------------------------------- +def sync_tool_inventory( + plex: PlexClient, + sb: SupabaseClient, + *, + start_date: str = DEFAULT_START, + end_date: str | None = None, + dry_run: bool = False, +) -> SyncReport: + """Sync qty_on_hand / qty_tracked / qty_synced_at from Plex to Supabase. 
+ + Fetches every ``tools`` row with a non-NULL ``plex_supply_item_id``, + calls ``inventory/v1-beta1/inventory-history/item-adjustments`` for + each, and writes the computed totals back. Returns a SyncReport. + """ + report = SyncReport(start_time=time.monotonic()) + + if end_date is None: + end_date = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + + # 1. Fetch linked tools from Supabase + linked = sb.select( + "tools", + columns="fusion_guid,plex_supply_item_id", + filters={"plex_supply_item_id": "not.is.null"}, + ) + log.info("Found %d linked tool(s) in Supabase", len(linked)) + + if not linked: + report.end_time = time.monotonic() + return report + + # 2. For each, fetch adjustments and update Supabase + for tool in linked: + fusion_guid = tool["fusion_guid"] + plex_id = tool["plex_supply_item_id"] + + env = plex.get_envelope( + "inventory", "v1-beta1", "inventory-history/item-adjustments", + params={"ItemId": plex_id, "StartDate": start_date, "EndDate": end_date}, + ) + + if not env["ok"]: + report.results.append(ToolResult( + fusion_guid=fusion_guid, + plex_supply_item_id=plex_id, + status="fail", + message=f"Plex {env['status']}: {env.get('error') or env.get('body')}", + )) + log.error(" FAIL %s: Plex HTTP %s", fusion_guid, env["status"]) + continue + + records = _unwrap_records(env["body"]) + qty_on_hand, qty_tracked = compute_qty(records) + unknown = collect_unknown_types(records) + if unknown: + report.unknown_transaction_types.update(unknown) + log.warning( + " %s: unknown transactionType(s) %s (still summing; pre-signed quantity)", + fusion_guid, sorted(unknown), + ) + + result = ToolResult( + fusion_guid=fusion_guid, + plex_supply_item_id=plex_id, + status="success", + qty_on_hand=qty_on_hand, + qty_tracked=qty_tracked, + n_records=len(records), + ) + + if dry_run: + log.info( + " DRY-RUN %s: qty=%s tracked=%s n=%d", + fusion_guid, qty_on_hand, qty_tracked, len(records), + ) + report.results.append(result) + continue + + # 3. 
Write back to Supabase + try: + sb.update( + "tools", + { + "qty_on_hand": qty_on_hand, + "qty_tracked": qty_tracked, + "qty_synced_at": datetime.now(timezone.utc).isoformat(), + }, + filters={"fusion_guid": f"eq.{fusion_guid}"}, + ) + log.info( + " OK %s: qty=%s tracked=%s n=%d", + fusion_guid, qty_on_hand, qty_tracked, len(records), + ) + report.results.append(result) + except Exception as e: + result.status = "fail" + result.message = f"Supabase update: {e}" + log.error(" FAIL %s: Supabase update: %s", fusion_guid, e) + report.results.append(result) + + report.end_time = time.monotonic() + return report + + +# --------------------------------------------------------------- +# CLI +# --------------------------------------------------------------- +def main(argv: list[str] | None = None) -> int: + parser = argparse.ArgumentParser( + description="Datum -- sync tool on-hand qty from Plex to Supabase", + ) + parser.add_argument("--dry-run", action="store_true", + help="Fetch and compute, but do not write to Supabase") + parser.add_argument("-v", "--verbose", action="store_true", + help="Enable debug-level logging") + parser.add_argument("--log-file", type=str, default=None, + help="Append logs to this file (in addition to stdout)") + parser.add_argument("--start-date", type=str, default=DEFAULT_START, + help=f"ISO start date (default: {DEFAULT_START})") + args = parser.parse_args(argv) + + level = logging.DEBUG if args.verbose else logging.INFO + handlers: list[logging.Handler] = [logging.StreamHandler()] + if args.log_file: + handlers.append(logging.FileHandler(args.log_file, encoding="utf-8")) + logging.basicConfig( + level=level, + format="%(asctime)s [%(levelname)s] %(name)s: %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=handlers, + ) + + log.info("Tool inventory sync starting%s", " (dry-run)" if args.dry_run else "") + + try: + plex = PlexClient(API_KEY, API_SECRET, TENANT_ID, use_test=USE_TEST) + sb = SupabaseClient() + except Exception as e: + log.critical("Config error: %s", e) + return 2 + + try: + report = sync_tool_inventory( + plex, sb, + start_date=args.start_date, + dry_run=args.dry_run, + ) + except Exception as e: + log.critical("Fatal sync error: %s", e) + return 2 + + report.print_summary() + + if not report.results: + log.warning("No linked tools to sync -- populate tools.plex_supply_item_id first") + return 2 + + return 1 if report.failed else 0 + + +def cli() -> None: + """Console-script entry point (``datum-sync-inventory``).""" + sys.exit(main()) + + +if __name__ == "__main__": + cli() diff --git a/templates/index.html b/templates/index.html new file mode 100644 index 0000000..79ae2fa --- /dev/null +++ b/templates/index.html @@ -0,0 +1,142 @@ + + + + + + plex-api · endpoint tester + + + +
+  <!-- [index.html body: the markup was lost in extraction (HTML tags
+       stripped). Surviving text nodes, in order: the base-URL prefix
+       "https://test.connect.plex.com/", the status bar "Ready ·
+       Ctrl+Enter to send", and the response pane placeholder
+       "// Response will appear here".] -->
+ + + + diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..805c46c --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,121 @@ +""" +Shared pytest fixtures and setup for the plex-api test suite. + +Sets PLEX_API_KEY and PLEX_API_SECRET to dummy values BEFORE any test +imports app.py — otherwise the import-time guard at the bottom of +plex_api.py will reject empty credentials and break test collection. + +Tests must NEVER hit the real Plex API. All requests should be patched +or routed through fake clients. +""" +import os +import sys +from pathlib import Path + +# Make the project root importable so `import plex_api` works regardless +# of where pytest is invoked from. +ROOT = Path(__file__).resolve().parent.parent +if str(ROOT) not in sys.path: + sys.path.insert(0, str(ROOT)) + +# Inject dummy credentials before any module-level reads happen. +os.environ.setdefault("PLEX_API_KEY", "test-key-do-not-use") +os.environ.setdefault("PLEX_API_SECRET", "test-secret-do-not-use") + +# Dummy Supabase credentials so supabase_client imports cleanly under pytest. +# Tests NEVER hit the real Supabase project — they use FakeSupabaseClient or +# patch ``requests.Session``. +os.environ.setdefault("SUPABASE_URL", "https://test.supabase.co") +os.environ.setdefault("SUPABASE_SERVICE_ROLE_KEY", "test-supabase-key-do-not-use") + +# Dummy APS credentials so aps_client imports cleanly. Tests patch +# requests.Session — no real Autodesk calls are made. +os.environ.setdefault("APS_CLIENT_ID", "test-aps-client-id") +os.environ.setdefault("APS_CLIENT_SECRET", "test-aps-client-secret") + + +# ───────────────────────────────────────────── +# Shared fixtures +# ───────────────────────────────────────────── +import pytest + + +class FakePlexClient: + """ + Drop-in replacement for plex_api.PlexClient that records calls + and returns canned responses without ever touching the network. + + Two parallel canned-response stores: + - ``set_response(resource, body)`` — body returned by both ``get()`` + and ``get_envelope()`` (the latter wraps the body in a synthetic + 200 OK envelope). + - ``set_envelope(resource, envelope)`` — full envelope dict returned + by ``get_envelope()`` only. Use this to test error branches like + 401/403/network failure. + + If both are set for the same resource, ``set_envelope`` wins for + ``get_envelope()`` calls and ``set_response`` is used for ``get()``. 
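+
+    Illustrative use (resource and body values are made up)::
+
+        fake = FakePlexClient()
+        fake.set_response("parts", {"items": []})
+        fake.get("mdm", "v1", "parts")           # → {"items": []}
+        fake.get_envelope("mdm", "v1", "parts")  # → synthetic 200 envelope
+        fake.calls                               # → [("mdm", "v1", "parts", None), ...]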
+ """ + + def __init__(self, base="https://test.connect.plex.com"): + self.base = base + self.headers = { + "X-Plex-Connect-Api-Key": "test-key", + "X-Plex-Connect-Api-Secret": "test-secret", + "Content-Type": "application/json", + "Accept": "application/json", + } + self.calls = [] + self._responses = {} + self._envelopes = {} + self._default = None + + def set_response(self, resource, payload): + """Canned body for a specific resource string (last segment).""" + self._responses[resource] = payload + + def set_envelope(self, resource, envelope): + """Canned full envelope (overrides set_response for get_envelope).""" + self._envelopes[resource] = envelope + + def set_default(self, payload): + """Canned body for any resource not explicitly set.""" + self._default = payload + + def _lookup_body(self, resource): + if resource in self._responses: + return self._responses[resource] + head = resource.split("/")[0] + if head in self._responses: + return self._responses[head] + return self._default + + def get(self, collection, version, resource, params=None): + self.calls.append((collection, version, resource, params)) + return self._lookup_body(resource) + + def get_envelope(self, collection, version, resource, params=None): + self.calls.append((collection, version, resource, params)) + # Explicit envelope override wins + if resource in self._envelopes: + return self._envelopes[resource] + head = resource.split("/")[0] + if head in self._envelopes: + return self._envelopes[head] + # Otherwise synthesize a 200 OK envelope wrapping the canned body + body = self._lookup_body(resource) + return { + "ok": True, + "status": 200, + "reason": "OK", + "body": body, + "elapsed_ms": 0, + "url": f"{self.base}/{collection}/{version}/{resource}", + "error": None, + } + + +@pytest.fixture +def fake_client(): + """A fresh FakePlexClient for each test.""" + return FakePlexClient() diff --git a/tests/fixtures/plex_mock/expected_supply_items.json b/tests/fixtures/plex_mock/expected_supply_items.json new file mode 100644 index 0000000..7e0b583 --- /dev/null +++ b/tests/fixtures/plex_mock/expected_supply_items.json @@ -0,0 +1,24 @@ +{ + "supply_items_post_shape": { + "required_fields": [ + "category", + "description", + "group", + "inventoryUnit", + "supplyItemNumber", + "type" + ], + "forbidden_fields": [ + "id", + "posted_to_plex_at" + ], + "field_types": { + "category": "str", + "description": "str", + "group": "str", + "inventoryUnit": "str", + "supplyItemNumber": "str", + "type": "str" + } + } +} diff --git a/tests/test_app_routes.py b/tests/test_app_routes.py new file mode 100644 index 0000000..36db3b3 --- /dev/null +++ b/tests/test_app_routes.py @@ -0,0 +1,557 @@ +""" +Tests for the Flask routes in app.py. + +These are smoke tests — they verify that each route registers, responds +with the right shape, and doesn't blow up. The actual Plex client and +diagnostics are mocked so no real network calls happen. 
+""" +from unittest.mock import patch, MagicMock + +import pytest + +# conftest.py has already injected dummy PLEX_API_KEY/SECRET into env +import app as app_module + + +@pytest.fixture +def client(): + """Flask test client.""" + app_module.app.config["TESTING"] = True + return app_module.app.test_client() + + +# ───────────────────────────────────────────── +# Index +# ───────────────────────────────────────────── +class TestIndex: + def test_index_returns_html(self, client): + rv = client.get("/") + assert rv.status_code == 200 + assert b"" in rv.data + assert b"plex-api" in rv.data + + +# ───────────────────────────────────────────── +# /api/config +# ───────────────────────────────────────────── +class TestConfig: + def test_config_returns_expected_keys(self, client): + rv = client.get("/api/config") + assert rv.status_code == 200 + body = rv.get_json() + for key in ("base_url", "environment", "tenant_id", "has_key", "has_secret"): + assert key in body + + def test_config_environment_is_test_or_prod(self, client): + rv = client.get("/api/config") + body = rv.get_json() + assert body["environment"] in ("test", "production") + + def test_config_reports_credentials_present(self, client): + rv = client.get("/api/config") + body = rv.get_json() + # conftest.py injects dummy values, so both should be True + assert body["has_key"] is True + assert body["has_secret"] is True + + +# ───────────────────────────────────────────── +# /api/diagnostics/tenant +# ───────────────────────────────────────────── +class TestDiagnosticsTenant: + def test_returns_success_envelope(self, client): + with patch.object(app_module, "tenant_whoami") as mock_whoami: + mock_whoami.return_value = { + "match": "g5", + "summary": "test summary", + "configured_tenant_label": "G5", + } + rv = client.get("/api/diagnostics/tenant") + assert rv.status_code == 200 + body = rv.get_json() + assert body["status"] == "success" + assert body["data"]["match"] == "g5" + assert body["data"]["summary"] == "test summary" + + def test_passes_configured_tenant_id_to_whoami(self, client): + with patch.object(app_module, "tenant_whoami") as mock_whoami: + mock_whoami.return_value = {"match": "g5", "summary": ""} + client.get("/api/diagnostics/tenant") + mock_whoami.assert_called_once() + # Second positional arg is the configured tenant ID + call_args = mock_whoami.call_args + assert call_args[0][1] == app_module.TENANT_ID + + def test_returns_500_on_exception(self, client): + with patch.object(app_module, "tenant_whoami", side_effect=RuntimeError("boom")): + rv = client.get("/api/diagnostics/tenant") + assert rv.status_code == 500 + body = rv.get_json() + assert body["status"] == "error" + assert "boom" in body["message"] + + +# ───────────────────────────────────────────── +# /api/diagnostics/tenants/list +# ───────────────────────────────────────────── +class TestDiagnosticsTenantsList: + def test_returns_list_payload(self, client): + with patch.object(app_module, "list_tenants") as mock_list: + mock_list.return_value = [{"id": "abc", "code": "TEST"}] + rv = client.get("/api/diagnostics/tenants/list") + assert rv.status_code == 200 + body = rv.get_json() + assert body["status"] == "success" + assert body["data"] == [{"id": "abc", "code": "TEST"}] + + +# ───────────────────────────────────────────── +# /api/diagnostics/tenants/ +# ───────────────────────────────────────────── +class TestDiagnosticsTenantById: + def test_passes_id_to_get_tenant(self, client): + with patch.object(app_module, "get_tenant") as mock_get: + 
mock_get.return_value = {"id": "abc-123", "name": "Test"} + rv = client.get("/api/diagnostics/tenants/abc-123") + assert rv.status_code == 200 + mock_get.assert_called_once() + assert mock_get.call_args[0][1] == "abc-123" + + +# ───────────────────────────────────────────── +# /api/plex/raw — proxy +# ───────────────────────────────────────────── +class TestPlexRawProxy: + def test_missing_path_returns_400(self, client): + rv = client.get("/api/plex/raw") + assert rv.status_code == 400 + body = rv.get_json() + assert "Missing required" in body["message"] + + def test_forwards_get_to_plex(self, client): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.reason = "OK" + mock_response.ok = True + mock_response.content = b'{"items":[]}' + mock_response.json.return_value = {"items": []} + mock_response.headers = {"Content-Type": "application/json"} + mock_response.url = "https://test.connect.plex.com/mdm/v1/parts" + + with patch.object(app_module.requests, "request", return_value=mock_response) as mock_req: + rv = client.get("/api/plex/raw?path=mdm/v1/parts") + assert rv.status_code == 200 + body = rv.get_json() + assert body["status"] == "success" + assert body["http_status"] == 200 + assert body["method"] == "GET" + assert body["body"] == {"items": []} + + # Verify the proxy actually forwarded to the right URL with the + # client's auth headers + mock_req.assert_called_once() + call_kwargs = mock_req.call_args.kwargs + assert "mdm/v1/parts" in call_kwargs["url"] + assert "X-Plex-Connect-Api-Key" in call_kwargs["headers"] + + def test_strips_path_query_param_from_forwarded_params(self, client): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.reason = "OK" + mock_response.ok = True + mock_response.content = b"{}" + mock_response.json.return_value = {} + mock_response.headers = {} + mock_response.url = "https://test.connect.plex.com/mdm/v1/parts" + + with patch.object(app_module.requests, "request", return_value=mock_response) as mock_req: + client.get("/api/plex/raw?path=mdm/v1/parts&limit=5&status=Active") + forwarded = mock_req.call_args.kwargs["params"] + assert "path" not in forwarded + assert forwarded["limit"] == "5" + assert forwarded["status"] == "Active" + + def test_error_response_propagates_status(self, client): + mock_response = MagicMock() + mock_response.status_code = 403 + mock_response.reason = "Forbidden" + mock_response.ok = False + mock_response.content = b'{"error":"forbidden"}' + mock_response.json.return_value = {"error": "forbidden"} + mock_response.headers = {} + mock_response.url = "https://test.connect.plex.com/tooling/v1/tools" + + with patch.object(app_module.requests, "request", return_value=mock_response): + rv = client.get("/api/plex/raw?path=tooling/v1/tools") + assert rv.status_code == 200 # envelope status, not the inner one + body = rv.get_json() + assert body["status"] == "error" + assert body["http_status"] == 403 + + +# ───────────────────────────────────────────── +# /api/plex/discover +# ───────────────────────────────────────────── +class TestDiscover: + def test_calls_discover_all(self, client): + with patch.object(app_module, "discover_all") as mock_discover: + mock_discover.return_value = [{"endpoint": "x", "status": 200}] + rv = client.get("/api/plex/discover") + assert rv.status_code == 200 + body = rv.get_json() + assert body["status"] == "success" + assert body["data"] == [{"endpoint": "x", "status": 200}] + + +# ───────────────────────────────────────────── +# Production write guard +# 
───────────────────────────────────────────── +class TestProductionWriteGuard: + """ + The /api/plex/raw proxy must refuse mutating methods (POST/PUT/PATCH/ + DELETE) when running against a production Plex environment unless + PLEX_ALLOW_WRITES is explicitly enabled. + + These tests temporarily flip the module-level IS_PRODUCTION and + WRITES_ALLOWED constants since they're computed at import time from + env vars (which conftest.py has already locked in). + """ + + def test_get_always_allowed_in_production(self, client, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", True) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", False) + + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.reason = "OK" + mock_response.ok = True + mock_response.content = b"{}" + mock_response.json.return_value = {} + mock_response.headers = {} + mock_response.url = "https://connect.plex.com/mdm/v1/tenants" + + with patch.object(app_module.requests, "request", return_value=mock_response): + rv = client.get("/api/plex/raw?path=mdm/v1/tenants") + assert rv.status_code == 200 + assert rv.get_json()["status"] == "success" + + def test_post_blocked_in_production_without_writes_allowed(self, client, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", True) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", False) + + rv = client.post("/api/plex/raw?path=mdm/v1/parts", json={"foo": "bar"}) + assert rv.status_code == 403 + body = rv.get_json() + assert body["status"] == "error" + assert body["guard"] == "PLEX_ALLOW_WRITES" + assert body["is_production"] is True + assert body["writes_allowed"] is False + assert "PLEX_ALLOW_WRITES" in body["message"] + assert "POST" in body["message"] + + def test_put_blocked_in_production_without_writes_allowed(self, client, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", True) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", False) + + rv = client.put("/api/plex/raw?path=mdm/v1/parts/x", json={"foo": "bar"}) + assert rv.status_code == 403 + assert rv.get_json()["guard"] == "PLEX_ALLOW_WRITES" + + def test_patch_blocked_in_production_without_writes_allowed(self, client, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", True) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", False) + + rv = client.patch("/api/plex/raw?path=mdm/v1/parts/x", json={"foo": "bar"}) + assert rv.status_code == 403 + + def test_delete_blocked_in_production_without_writes_allowed(self, client, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", True) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", False) + + rv = client.delete("/api/plex/raw?path=mdm/v1/parts/x") + assert rv.status_code == 403 + + def test_post_allowed_in_production_when_writes_enabled(self, client, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", True) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", True) + + mock_response = MagicMock() + mock_response.status_code = 201 + mock_response.reason = "Created" + mock_response.ok = True + mock_response.content = b'{"id":"new"}' + mock_response.json.return_value = {"id": "new"} + mock_response.headers = {} + mock_response.url = "https://connect.plex.com/mdm/v1/parts" + + with patch.object(app_module.requests, "request", return_value=mock_response): + rv = client.post("/api/plex/raw?path=mdm/v1/parts", json={"foo": "bar"}) + assert rv.status_code == 200 # envelope is 200; inner http_status is 201 + body = rv.get_json() + assert body["status"] == "success" + assert 
body["http_status"] == 201 + + def test_post_allowed_in_test_environment_regardless(self, client, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", False) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", False) + + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.reason = "OK" + mock_response.ok = True + mock_response.content = b"{}" + mock_response.json.return_value = {} + mock_response.headers = {} + mock_response.url = "https://test.connect.plex.com/mdm/v1/parts" + + with patch.object(app_module.requests, "request", return_value=mock_response): + rv = client.post("/api/plex/raw?path=mdm/v1/parts", json={"foo": "bar"}) + assert rv.status_code == 200 + + def test_config_endpoint_exposes_guard_state(self, client): + rv = client.get("/api/config") + body = rv.get_json() + assert "is_production" in body + assert "writes_allowed" in body + assert isinstance(body["is_production"], bool) + assert isinstance(body["writes_allowed"], bool) + + +# ───────────────────────────────────────────── +# Helper function _is_write_blocked +# ───────────────────────────────────────────── +class TestIsWriteBlocked: + def test_get_never_blocked_in_production(self, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", True) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", False) + blocked, reason = app_module._is_write_blocked("GET") + assert blocked is False + assert reason == "" + + def test_get_never_blocked_in_test(self, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", False) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", False) + blocked, reason = app_module._is_write_blocked("GET") + assert blocked is False + + def test_post_blocked_in_production_default(self, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", True) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", False) + blocked, reason = app_module._is_write_blocked("POST") + assert blocked is True + assert "PLEX_ALLOW_WRITES" in reason + + def test_post_unblocked_in_test(self, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", False) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", False) + blocked, reason = app_module._is_write_blocked("POST") + assert blocked is False + + def test_post_unblocked_when_writes_enabled(self, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", True) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", True) + blocked, reason = app_module._is_write_blocked("POST") + assert blocked is False + + def test_method_case_insensitive(self, monkeypatch): + monkeypatch.setattr(app_module, "IS_PRODUCTION", True) + monkeypatch.setattr(app_module, "WRITES_ALLOWED", False) + blocked, _ = app_module._is_write_blocked("post") + assert blocked is True + blocked, _ = app_module._is_write_blocked("Delete") + assert blocked is True + + +# ───────────────────────────────────────────── +# Helper function _is_production_base +# ───────────────────────────────────────────── +class TestIsProductionBase: + """Guard resolves IS_PRODUCTION via exact match, not substring. + + Historical heuristic ``"test." not in client.base`` was bypassable once + PLEX_BASE_URL (PR #96) let operators set client.base to any string — + a URL containing "test." silently disarmed the guard. The fix requires + an exact match against PLEX_PROD_URL, so unrecognised URLs fail closed. 
+    """
+
+    def test_production_url_matches(self):
+        assert app_module._is_production_base("https://connect.plex.com") is True
+
+    def test_production_url_trailing_slash_matches(self):
+        assert app_module._is_production_base("https://connect.plex.com/") is True
+
+    def test_production_url_uppercase_matches(self):
+        assert app_module._is_production_base("https://CONNECT.PLEX.COM") is True
+
+    def test_test_environment_is_not_production(self):
+        assert app_module._is_production_base("https://test.connect.plex.com") is False
+
+    def test_mock_url_is_not_production(self):
+        assert app_module._is_production_base("http://127.0.0.1:8080") is False
+
+    def test_url_containing_test_substring_not_production(self):
+        # "my-test-host" contains "test" but not "test.", so the old
+        # substring heuristic classified this local mock as PRODUCTION and
+        # blocked writes against it: a safe but annoying false positive.
+        # Exact matching gets it right for the right reason: it simply is
+        # not connect.plex.com, so it is not production.
+        assert app_module._is_production_base("http://my-test-host:8080") is False
+
+    def test_adversarial_base_not_matching_prod_fails_closed(self):
+        # An operator-controlled URL that neither is prod nor contains
+        # "test." must still be treated as non-production (writes allowed
+        # only when PLEX_ALLOW_WRITES=1 elsewhere in the stack). The guard
+        # no longer lies about this class of URL.
+        for base in [
+            "https://evil.example.com",
+            "https://connect.plex.com.attacker.example",  # suffix trick
+            "https://connect-plex-com",
+            "",
+        ]:
+            assert app_module._is_production_base(base) is False
+
+
+# ─────────────────────────────────────────────
+# /api/plex/supply_items (issue #2)
+# ─────────────────────────────────────────────
+class TestSupplyItemsExtractor:
+    def test_route_calls_extract_supply_items(self, client):
+        with patch.object(app_module, "extract_supply_items") as mock_extract:
+            mock_extract.return_value = [
+                {"category": "Tools & Inserts", "supplyItemNumber": "990910"},
+                {"category": "Tools & Inserts", "supplyItemNumber": "ABC123"},
+            ]
+            rv = client.get("/api/plex/supply_items")
+            assert rv.status_code == 200
+            body = rv.get_json()
+            assert body["status"] == "success"
+            assert body["count"] == 2
+            assert len(body["data"]) == 2
+            mock_extract.assert_called_once()
+
+    def test_route_returns_none_safely_when_extractor_returns_none(self, client):
+        with patch.object(app_module, "extract_supply_items", return_value=None):
+            rv = client.get("/api/plex/supply_items")
+            assert rv.status_code == 200
+            body = rv.get_json()
+            assert body["count"] == 0
+            assert body["data"] == []
+
+
+# ─────────────────────────────────────────────
+# /api/fusion/tools/stats (testing harness)
+# ─────────────────────────────────────────────
+class TestFusionToolsStats:
+    SAMPLE_LIBS = {
+        "BROTHER 879": [
+            {"type": "flat end mill", "vendor": "HARVEY TOOL"},
+            {"type": "flat end mill", "vendor": "Garr Tool"},
+            {"type": "drill", "vendor": "OSG"},
+            {"type": "holder", "vendor": "Big Daishowa"},
+            {"type": "probe", "vendor": "Renishaw"},
+        ],
+        "BROTHER 880": [
+            {"type": "bull nose end mill", "vendor": "HARVEY TOOL"},
+            {"type": "holder", "vendor": "Big Daishowa"},
+        ],
+    }
+
+    def test_stats_aggregates_across_libraries(self, client):
+        with patch.object(app_module, "load_all_libraries", return_value=self.SAMPLE_LIBS):
+            rv = client.get("/api/fusion/tools/stats")
+            assert rv.status_code == 200
+            body = rv.get_json()
+            assert body["status"] == "success"
+            assert body["library_count"] == 2
+            assert body["total_records"] == 7
+            # 2 holders + 1 probe = 3 non-consumable; 4 consumables
+            assert body["consumable_count"] == 4
+            assert body["non_consumable_count"] == 3
+            assert body["global_type_counts"]["flat end mill"] == 2
+            assert body["global_type_counts"]["holder"] == 2
+            assert body["global_type_counts"]["probe"] == 1
+            assert body["global_vendor_counts"]["HARVEY TOOL"] == 2
+
+    def test_stats_handles_empty_libraries(self, client):
+        with patch.object(app_module, "load_all_libraries", return_value={}):
+            rv = client.get("/api/fusion/tools/stats")
+            assert rv.status_code == 200
+            body = rv.get_json()
+            assert body["library_count"] == 0
+            assert body["total_records"] == 0
+            assert body["consumable_count"] == 0
+
+
+# ─────────────────────────────────────────────
+# /api/fusion/tools/consumables (testing harness)
+# ─────────────────────────────────────────────
+class TestFusionToolsConsumables:
+    def test_excludes_holders_and_probes(self, client):
+        libs = {
+            "lib1": [
+                {"guid": "g1", "type": "flat end mill", "vendor": "HARVEY TOOL", "product-id": "990910", "description": "5/8 SQ"},
+                {"guid": "g2", "type": "drill", "vendor": "OSG", "product-id": "OSG-1234", "description": "1/4 drill"},
+                {"guid": "g3", "type": "holder", "vendor": "Big Daishowa", "product-id": "BIG-1", "description": "BT30"},
+                {"guid": "g4", "type": "probe", "vendor": "Renishaw", "product-id": "RNS-1", "description": "Probe"},
+            ],
+        }
+        with patch.object(app_module, "load_all_libraries", return_value=libs):
+            rv = client.get("/api/fusion/tools/consumables")
+            assert rv.status_code == 200
+            body = rv.get_json()
+            assert body["status"] == "success"
+            assert body["count"] == 2
+            guids = {c["guid"] for c in body["data"]}
+            assert guids == {"g1", "g2"}
+
+    def test_normalizes_field_names_to_snake_case(self, client):
+        libs = {
+            "lib1": [
+                {"guid": "g1", "type": "drill", "vendor": "OSG", "product-id": "X-1", "description": "drill"},
+            ],
+        }
+        with patch.object(app_module, "load_all_libraries", return_value=libs):
+            rv = client.get("/api/fusion/tools/consumables")
+            body = rv.get_json()
+            assert "product_id" in body["data"][0]  # NOT product-id
+            assert body["data"][0]["product_id"] == "X-1"
+
+
+# ─────────────────────────────────────────────
+# Stdout encoding regression test
+# ─────────────────────────────────────────────
+class TestStdoutEncoding:
+    """
+    Pin down the fix for the cp1252 print() bug.
+
+    Without sys.stdout.reconfigure(encoding='utf-8') at startup, any
+    print() containing a non-ASCII character (like → or —) inside a
+    Flask request handler raises UnicodeEncodeError on a Windows
+    cp1252 console and turns into a 500 from the route's exception
+    handler. The fix lives at the top of app.py.
+
+    These tests verify both the reconfigure call and that
+    plex_api.py's extract_* functions no longer print Unicode
+    arrows in their summary lines.
+    """
+
+    def test_app_module_attempts_stdout_reconfigure(self):
+        # The reconfigure call is wrapped in try/except so it can't
+        # raise even on Python builds that don't expose the method,
+        # but the call itself should be present in the source.
+        import inspect
+        src = inspect.getsource(app_module)
+        assert "sys.stdout.reconfigure" in src
+        assert 'encoding="utf-8"' in src or "encoding='utf-8'" in src
+
+    def test_no_unicode_arrows_in_plex_api_print_statements(self):
+        import plex_api
+        import inspect
+        src = inspect.getsource(plex_api)
+        # The Unicode right-arrow → (U+2192) crashes Windows cp1252.
+        # Use ASCII -> instead. This is a belt-and-suspenders check
+        # in addition to the stdout reconfigure.
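+        # Assumed shape of the bug and its fix (path/status are hypothetical
+        # names, for context only):
+        #     print(f"{path} → {status}")   # raises UnicodeEncodeError on cp1252
+        #     print(f"{path} -> {status}")  # safe on any console encoding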
+        assert "\u2192" not in src, (
+            "plex_api.py contains a Unicode arrow (U+2192) which will "
+            "raise UnicodeEncodeError on Windows cp1252 stdout. Replace "
+            "with ASCII '->'."
+        )
diff --git a/tests/test_aps_client.py b/tests/test_aps_client.py
new file mode 100644
index 0000000..4809930
--- /dev/null
+++ b/tests/test_aps_client.py
@@ -0,0 +1,414 @@
+"""
+Tests for aps_client.py — APS OAuth + Data Management client.
+
+Focuses on:
+  - Config errors when env vars are missing
+  - OAuth URL generation
+  - Token exchange and refresh mechanics
+  - Token persistence to file
+  - Hub/project/folder traversal (mocked HTTP)
+  - .tools file download via signed S3 URL
+  - Storage ID parsing (URN and URL formats)
+  - Auto-refresh on expired tokens
+
+All HTTP traffic is patched — no real network calls.
+"""
+from __future__ import annotations
+
+import io
+import json
+import time
+import zipfile
+from pathlib import Path
+from unittest.mock import MagicMock, patch, call
+
+import pytest
+
+from aps_client import (
+    APSClient,
+    APSConfigError,
+    APSAuthError,
+    APSHTTPError,
+    TokenStore,
+)
+
+
+# ─────────────────────────────────────────────
+# Token store — in-memory (path=None)
+# ─────────────────────────────────────────────
+class TestTokenStore:
+    def test_initially_invalid(self):
+        store = TokenStore(path=None)
+        assert not store.is_valid
+
+    def test_update_makes_valid(self):
+        store = TokenStore(path=None)
+        store.update({
+            "access_token": "tok123",
+            "refresh_token": "ref456",
+            "expires_in": 3600,
+        })
+        assert store.is_valid
+        assert store.access_token == "tok123"
+        assert store.refresh_token == "ref456"
+
+    def test_expired_token_is_invalid(self):
+        store = TokenStore(path=None)
+        store.update({
+            "access_token": "tok",
+            "expires_in": 0,  # already expired (minus 60s buffer)
+        })
+        assert not store.is_valid
+
+    def test_clear_invalidates(self):
+        store = TokenStore(path=None)
+        store.update({"access_token": "tok", "expires_in": 3600})
+        store.clear()
+        assert not store.is_valid
+
+
+# ─────────────────────────────────────────────
+# Token store — file persistence
+# ─────────────────────────────────────────────
+class TestTokenPersistence:
+    def test_save_and_load(self, tmp_path):
+        token_file = tmp_path / ".aps_tokens.json"
+
+        # Save tokens
+        store1 = TokenStore(path=token_file)
+        store1.update({
+            "access_token": "persisted-at",
+            "refresh_token": "persisted-rt",
+            "expires_in": 3600,
+        })
+        assert token_file.exists()
+
+        # Load into a new store
+        store2 = TokenStore(path=token_file)
+        assert store2.access_token == "persisted-at"
+        assert store2.refresh_token == "persisted-rt"
+        assert store2.is_valid
+
+    def test_clear_deletes_file(self, tmp_path):
+        token_file = tmp_path / ".aps_tokens.json"
+        store = TokenStore(path=token_file)
+        store.update({"access_token": "tok", "expires_in": 3600})
+        assert token_file.exists()
+        store.clear()
+        assert not token_file.exists()
+
+    def test_missing_file_is_ok(self, tmp_path):
+        token_file = tmp_path / "nonexistent.json"
+        store = TokenStore(path=token_file)
+        assert not store.is_valid
+
+    def test_corrupt_file_is_ok(self, tmp_path):
+        token_file = tmp_path / ".aps_tokens.json"
+        token_file.write_text("not json", encoding="utf-8")
+        store = TokenStore(path=token_file)
+        assert not store.is_valid
+
+
+# ─────────────────────────────────────────────
+# Config errors
+# ─────────────────────────────────────────────
+class TestConfigErrors:
+    def test_missing_client_id_raises(self, monkeypatch):
+        monkeypatch.delenv("APS_CLIENT_ID", raising=False)
+        monkeypatch.delenv("APS_CLIENT_SECRET", raising=False)
+        monkeypatch.setattr("aps_client.APS_CLIENT_ID", "")
+        monkeypatch.setattr("aps_client.APS_CLIENT_SECRET", "s")
+        client = APSClient(client_id="", client_secret="s", token_path=None)
+        with pytest.raises(APSConfigError, match="APS_CLIENT_ID"):
+            client._require_config()
+
+    def test_missing_client_secret_raises(self, monkeypatch):
+        monkeypatch.setattr("aps_client.APS_CLIENT_ID", "id")
+        monkeypatch.setattr("aps_client.APS_CLIENT_SECRET", "")
+        client = APSClient(client_id="id", client_secret="", token_path=None)
+        with pytest.raises(APSConfigError, match="APS_CLIENT_SECRET"):
+            client._require_config()
+
+    def test_explicit_args_work(self):
+        client = APSClient(
+            client_id="my-id",
+            client_secret="my-secret",
+            callback_url="http://localhost:9999/cb",
+            token_path=None,
+        )
+        assert client.client_id == "my-id"
+        assert client.client_secret == "my-secret"
+        assert client.callback_url == "http://localhost:9999/cb"
+
+
+# ─────────────────────────────────────────────
+# OAuth URL generation
+# ─────────────────────────────────────────────
+class TestAuthorizeURL:
+    def test_url_contains_client_id(self):
+        client = APSClient(client_id="test-id", client_secret="test-secret", token_path=None)
+        url = client.get_authorize_url()
+        assert "client_id=test-id" in url
+        assert "response_type=code" in url
+        assert "scope=data%3Aread" in url
+
+    def test_url_contains_callback(self):
+        client = APSClient(
+            client_id="id",
+            client_secret="s",
+            callback_url="http://example.com/cb",
+            token_path=None,
+        )
+        url = client.get_authorize_url()
+        assert "redirect_uri=http%3A%2F%2Fexample.com%2Fcb" in url
+
+
+# ─────────────────────────────────────────────
+# Token exchange
+# ─────────────────────────────────────────────
+class TestExchangeCode:
+    def test_successful_exchange(self):
+        client = APSClient(client_id="id", client_secret="secret", token_path=None)
+        mock_resp = MagicMock()
+        mock_resp.ok = True
+        mock_resp.json.return_value = {
+            "access_token": "at",
+            "refresh_token": "rt",
+            "expires_in": 3600,
+        }
+
+        with patch.object(client._session, "post", return_value=mock_resp):
+            data = client.exchange_code("authcode123")
+
+        assert data["access_token"] == "at"
+        assert client.tokens.is_valid
+
+    def test_failed_exchange_raises(self):
+        client = APSClient(client_id="id", client_secret="secret", token_path=None)
+        mock_resp = MagicMock()
+        mock_resp.ok = False
+        mock_resp.status_code = 401
+        mock_resp.text = "invalid_grant"
+
+        with patch.object(client._session, "post", return_value=mock_resp):
+            with pytest.raises(APSAuthError, match="401"):
+                client.exchange_code("badcode")
+
+
+class TestRefreshToken:
+    def test_refresh_updates_tokens(self):
+        client = APSClient(client_id="id", client_secret="secret", token_path=None)
+        client.tokens.refresh_token = "old-rt"
+
+        mock_resp = MagicMock()
+        mock_resp.ok = True
+        mock_resp.json.return_value = {
+            "access_token": "new-at",
+            "refresh_token": "new-rt",
+            "expires_in": 3600,
+        }
+
+        with patch.object(client._session, "post", return_value=mock_resp):
+            client.refresh_access_token()
+
+        assert client.tokens.access_token == "new-at"
+        assert client.tokens.refresh_token == "new-rt"
+
+    def test_no_refresh_token_raises(self):
+        client = APSClient(client_id="id", client_secret="secret", token_path=None)
+        with pytest.raises(APSAuthError, match="No refresh token"):
+            client.refresh_access_token()
+
+
+# ─────────────────────────────────────────────
+# Data Management API calls
+# ─────────────────────────────────────────────
+def _authed_client() -> APSClient:
+    """Return a client with a valid (fake) token, no file persistence."""
+    client = APSClient(client_id="id", client_secret="secret", token_path=None)
+    client.tokens.update({
+        "access_token": "valid-token",
+        "expires_in": 3600,
+    })
+    return client
+
+
+class TestGetHubs:
+    def test_returns_hub_list(self):
+        client = _authed_client()
+        mock_resp = MagicMock()
+        mock_resp.ok = True
+        mock_resp.json.return_value = {
+            "data": [{"id": "hub1", "type": "hubs"}]
+        }
+
+        with patch.object(client._session, "get", return_value=mock_resp):
+            hubs = client.get_hubs()
+
+        assert len(hubs) == 1
+        assert hubs[0]["id"] == "hub1"
+
+
+class TestGetProjects:
+    def test_returns_project_list(self):
+        client = _authed_client()
+        mock_resp = MagicMock()
+        mock_resp.ok = True
+        mock_resp.json.return_value = {
+            "data": [{"id": "proj1", "type": "projects"}]
+        }
+
+        with patch.object(client._session, "get", return_value=mock_resp):
+            projects = client.get_projects("hub1")
+
+        assert len(projects) == 1
+        assert projects[0]["id"] == "proj1"
+
+
+class TestHTTPErrors:
+    def test_non_2xx_raises_aps_http_error(self):
+        client = _authed_client()
+        mock_resp = MagicMock()
+        mock_resp.ok = False
+        mock_resp.status_code = 403
+        mock_resp.json.return_value = {"reason": "Forbidden"}
+        mock_resp.url = "https://developer.api.autodesk.com/project/v1/hubs"
+
+        with patch.object(client._session, "get", return_value=mock_resp):
+            with pytest.raises(APSHTTPError) as exc_info:
+                client.get_hubs()
+        assert exc_info.value.status == 403
+
+
+# ─────────────────────────────────────────────
+# Storage ID parsing
+# ─────────────────────────────────────────────
+class TestParseStorageId:
+    def test_urn_format(self):
+        bucket, key = APSClient._parse_storage_id(
+            "urn:adsk.objects:os.object:wip.dm.prod/abc-123.json"
+        )
+        assert bucket == "wip.dm.prod"
+        assert key == "abc-123.json"
+
+    def test_url_format(self):
+        bucket, key = APSClient._parse_storage_id(
+            "https://developer.api.autodesk.com/oss/v2/buckets/wip.dm.prod/objects/abc-123.json?scopes=global"
+        )
+        assert bucket == "wip.dm.prod"
+        assert key == "abc-123.json"
+
+    def test_unknown_format_returns_empty(self):
+        bucket, key = APSClient._parse_storage_id("something-else")
+        assert bucket == ""
+        assert key == ""
+
+
+# ─────────────────────────────────────────────
+# Tool library download + parsing (signed S3)
+# ─────────────────────────────────────────────
+class TestDownloadToolLibrary:
+    def _make_tools_zip(self, tools: list[dict]) -> bytes:
+        """Create a fake .tools ZIP containing a JSON file."""
+        buf = io.BytesIO()
+        with zipfile.ZipFile(buf, "w") as zf:
+            zf.writestr("library.json", json.dumps({"data": tools}))
+        return buf.getvalue()
+
+    def _mock_signed_download(self, client, content_bytes):
+        """
+        Mock the two-step signed S3 download:
+          1. GET /oss/v2/buckets/.../signeds3download → {"url": "https://s3..."}
+          2. GET https://s3... → raw bytes
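+
+        The side_effect below keys on a URL substring: any request whose
+        URL contains "signeds3download" gets the signing response, and
+        everything else gets the raw bytes.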
+        """
+        sign_resp = MagicMock()
+        sign_resp.ok = True
+        sign_resp.json.return_value = {"url": "https://s3.signed.example.com/file"}
+
+        dl_resp = MagicMock()
+        dl_resp.ok = True
+        dl_resp.content = content_bytes
+
+        def side_effect(url, **kwargs):
+            if "signeds3download" in url:
+                return sign_resp
+            return dl_resp
+
+        return patch.object(client._session, "get", side_effect=side_effect)
+
+    def test_zip_extraction(self):
+        client = _authed_client()
+        tools_data = [
+            {"guid": "a", "type": "flat end mill", "vendor": "Acme"},
+            {"guid": "b", "type": "ball end mill", "vendor": "Acme"},
+        ]
+        zip_bytes = self._make_tools_zip(tools_data)
+
+        with self._mock_signed_download(client, zip_bytes):
+            result = client.download_tool_library(
+                "urn:adsk.objects:os.object:wip.dm.prod/lib.json"
+            )
+
+        assert len(result) == 2
+        assert result[0]["guid"] == "a"
+
+    def test_raw_json_fallback(self):
+        """If the file isn't a ZIP, try parsing as raw JSON."""
+        client = _authed_client()
+        raw = json.dumps({"data": [{"guid": "c", "type": "drill"}]}).encode()
+
+        with self._mock_signed_download(client, raw):
+            result = client.download_tool_library(
+                "urn:adsk.objects:os.object:wip.dm.prod/lib.json"
+            )
+
+        assert len(result) == 1
+        assert result[0]["type"] == "drill"
+
+    def test_empty_data_returns_empty_list(self):
+        client = _authed_client()
+        raw = json.dumps({"version": 1}).encode()  # no "data" key
+
+        with self._mock_signed_download(client, raw):
+            result = client.download_tool_library(
+                "urn:adsk.objects:os.object:wip.dm.prod/lib.json"
+            )
+
+        assert result == []
+
+    def test_unparseable_storage_ref_raises(self):
+        client = _authed_client()
+        with pytest.raises(APSHTTPError, match="Cannot parse"):
+            client.download_tool_library("not-a-valid-ref")
+
+
+# ─────────────────────────────────────────────
+# Auto-refresh
+# ─────────────────────────────────────────────
+class TestAutoRefresh:
+    def test_ensure_token_refreshes_when_expired(self):
+        client = APSClient(client_id="id", client_secret="secret", token_path=None)
+        client.tokens.access_token = "old"
+        client.tokens.refresh_token = "rt"
+        client.tokens.expires_at = time.time() - 100
+
+        mock_resp = MagicMock()
+        mock_resp.ok = True
+        mock_resp.json.return_value = {
+            "access_token": "refreshed",
+            "refresh_token": "new-rt",
+            "expires_in": 3600,
+        }
+
+        with patch.object(client._session, "post", return_value=mock_resp):
+            client._ensure_token()
+
+        assert client.tokens.access_token == "refreshed"
+
+    def test_ensure_token_raises_when_no_refresh(self):
+        client = APSClient(client_id="id", client_secret="secret", token_path=None)
+        client.tokens.access_token = "old"
+        client.tokens.expires_at = time.time() - 100
+
+        with pytest.raises(APSAuthError, match="expired"):
+            client._ensure_token()
diff --git a/tests/test_bootstrap.py b/tests/test_bootstrap.py
new file mode 100644
index 0000000..b2795a8
--- /dev/null
+++ b/tests/test_bootstrap.py
@@ -0,0 +1,253 @@
+"""
+Tests for bootstrap.py — .env.local loader.
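+
+The parsing core is assumed to look roughly like this (illustrative sketch;
+_strip_quotes is a hypothetical helper standing in for the quote rule):
+
+    for raw in text.splitlines():
+        line = raw.strip()
+        if not line or line.startswith("#") or "=" not in line:
+            continue
+        key, _, value = line.partition("=")
+        os.environ.setdefault(key.strip(), _strip_quotes(value.strip()))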
+
+Verifies the contract:
+  - missing file is a no-op
+  - KEY=VALUE pairs are parsed and injected via setdefault
+  - existing env vars are NEVER overridden
+  - blank lines and # comments are skipped
+  - matched surrounding quotes (single or double) are stripped
+  - returns the count of injected variables
+  - walk-up finds .env.local in parent/grandparent directories
+  - explicit path= arg wins over walk-up
+"""
+import os
+from unittest.mock import patch
+
+import pytest
+
+from bootstrap import load_env_local, _find_env_local
+
+
+# ─────────────────────────────────────────────
+# Missing file behavior
+# ─────────────────────────────────────────────
+class TestMissingFile:
+    def test_missing_file_is_noop(self, tmp_path):
+        missing = tmp_path / "does-not-exist.env"
+        result = load_env_local(missing)
+        assert result == 0
+
+    def test_missing_file_does_not_raise(self, tmp_path):
+        # Should not raise even if directory itself does not exist
+        nowhere = tmp_path / "nope" / "alsonope" / ".env.local"
+        load_env_local(nowhere)  # no exception
+
+
+# ─────────────────────────────────────────────
+# Basic parsing
+# ─────────────────────────────────────────────
+class TestBasicParsing:
+    def test_simple_key_value(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text("FOO=bar\n")
+        monkeypatch.delenv("FOO", raising=False)
+        injected = load_env_local(f)
+        assert injected == 1
+        assert os.environ["FOO"] == "bar"
+
+    def test_multiple_pairs(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text("FOO=bar\nBAZ=qux\nHELLO=world\n")
+        for k in ("FOO", "BAZ", "HELLO"):
+            monkeypatch.delenv(k, raising=False)
+        injected = load_env_local(f)
+        assert injected == 3
+        assert os.environ["FOO"] == "bar"
+        assert os.environ["BAZ"] == "qux"
+        assert os.environ["HELLO"] == "world"
+
+    def test_value_can_contain_equals(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text("URL=https://example.com/?a=1&b=2\n")
+        monkeypatch.delenv("URL", raising=False)
+        load_env_local(f)
+        assert os.environ["URL"] == "https://example.com/?a=1&b=2"
+
+
+# ─────────────────────────────────────────────
+# Comments and blank lines
+# ─────────────────────────────────────────────
+class TestCommentsAndBlanks:
+    def test_skips_comments(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text("# this is a comment\nFOO=bar\n# another comment\n")
+        monkeypatch.delenv("FOO", raising=False)
+        injected = load_env_local(f)
+        assert injected == 1
+        assert os.environ["FOO"] == "bar"
+
+    def test_skips_blank_lines(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text("\n\nFOO=bar\n\n\nBAZ=qux\n")
+        for k in ("FOO", "BAZ"):
+            monkeypatch.delenv(k, raising=False)
+        injected = load_env_local(f)
+        assert injected == 2
+
+    def test_skips_lines_without_equals(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text("not-a-pair\nFOO=bar\nalso-not-a-pair\n")
+        monkeypatch.delenv("FOO", raising=False)
+        injected = load_env_local(f)
+        assert injected == 1
+
+
+# ─────────────────────────────────────────────
+# Quote stripping
+# ─────────────────────────────────────────────
+class TestQuoteStripping:
+    def test_strips_double_quotes(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text('FOO="bar baz"\n')
+        monkeypatch.delenv("FOO", raising=False)
+        load_env_local(f)
+        assert os.environ["FOO"] == "bar baz"
+
+    def test_strips_single_quotes(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text("FOO='bar baz'\n")
+        monkeypatch.delenv("FOO", raising=False)
+        load_env_local(f)
+        assert os.environ["FOO"] == "bar baz"
+
+    def test_does_not_strip_mismatched_quotes(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text("FOO=\"bar'\n")
+        monkeypatch.delenv("FOO", raising=False)
+        load_env_local(f)
+        assert os.environ["FOO"] == "\"bar'"
+
+    def test_preserves_internal_quotes(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text('FOO=bar"baz\n')
+        monkeypatch.delenv("FOO", raising=False)
+        load_env_local(f)
+        assert os.environ["FOO"] == 'bar"baz'
+
+
+# ─────────────────────────────────────────────
+# setdefault semantics — real env always wins
+# ─────────────────────────────────────────────
+class TestSetdefaultBehavior:
+    def test_existing_env_var_is_not_overridden(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text("FOO=from-file\n")
+        monkeypatch.setenv("FOO", "from-shell")
+        injected = load_env_local(f)
+        # Was already set, so injected count is 0
+        assert injected == 0
+        assert os.environ["FOO"] == "from-shell"
+
+    def test_partial_override_only_sets_missing(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text("FOO=from-file\nBAZ=from-file\n")
+        monkeypatch.setenv("FOO", "from-shell")
+        monkeypatch.delenv("BAZ", raising=False)
+        injected = load_env_local(f)
+        assert injected == 1
+        assert os.environ["FOO"] == "from-shell"
+        assert os.environ["BAZ"] == "from-file"
+
+
+# ─────────────────────────────────────────────
+# Whitespace handling
+# ─────────────────────────────────────────────
+class TestWhitespace:
+    def test_strips_whitespace_around_key_and_value(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_text(" FOO = bar \n")
+        monkeypatch.delenv("FOO", raising=False)
+        load_env_local(f)
+        assert os.environ["FOO"] == "bar"
+
+    def test_handles_crlf_line_endings(self, tmp_path, monkeypatch):
+        f = tmp_path / ".env"
+        f.write_bytes(b"FOO=bar\r\nBAZ=qux\r\n")
+        monkeypatch.delenv("FOO", raising=False)
+        monkeypatch.delenv("BAZ", raising=False)
+        injected = load_env_local(f)
+        assert injected == 2
+        assert os.environ["FOO"] == "bar"
+        assert os.environ["BAZ"] == "qux"
+
+
+# ─────────────────────────────────────────────
+# Walk-up directory search for .env.local
+# ─────────────────────────────────────────────
+class TestWalkUp:
+    def test_finds_env_local_in_parent(self, tmp_path):
+        """Simulates a worktree at tmp/child/ with .env.local at tmp/."""
+        (tmp_path / ".env.local").write_text("X=1\n")
+        child = tmp_path / "child"
+        child.mkdir()
+
+        with patch("bootstrap._PROJECT_ROOT", child):
+            found = _find_env_local()
+        assert found == tmp_path / ".env.local"
+
+    def test_finds_env_local_in_grandparent(self, tmp_path):
+        """Simulates .claude/worktrees/foo with .env.local two levels up."""
+        (tmp_path / ".env.local").write_text("X=1\n")
+        deep = tmp_path / "a" / "b"
+        deep.mkdir(parents=True)
+
+        with patch("bootstrap._PROJECT_ROOT", deep):
+            found = _find_env_local()
+        assert found == tmp_path / ".env.local"
+
+    def test_returns_none_when_nothing_in_chain(self, tmp_path):
+        """No .env.local anywhere — should return None, not raise."""
+        empty = tmp_path / "nowhere"
+        empty.mkdir()
+
+        with patch("bootstrap._PROJECT_ROOT", empty):
+            found = _find_env_local()
+        assert found is None
+
+    def test_prefers_closest_ancestor(self, tmp_path):
+        """If both parent/ and grandparent/ have .env.local, pick closest."""
+        (tmp_path / ".env.local").write_text("LEVEL=root\n")
+        mid = tmp_path / "mid"
+        mid.mkdir()
+        (mid / ".env.local").write_text("LEVEL=mid\n")
+        child = mid / "child"
"child" + child.mkdir() + + with patch("bootstrap._PROJECT_ROOT", child): + found = _find_env_local() + assert found == mid / ".env.local" + + def test_explicit_path_wins_over_walkup(self, tmp_path, monkeypatch): + """An explicit path= argument bypasses the walk-up entirely.""" + # Put a .env.local in the walk-up chain + (tmp_path / ".env.local").write_text("FROM_WALKUP=yes\n") + child = tmp_path / "child" + child.mkdir() + + # But pass an explicit file with different content + explicit = tmp_path / "custom.env" + explicit.write_text("FROM_EXPLICIT=yes\n") + + monkeypatch.delenv("FROM_WALKUP", raising=False) + monkeypatch.delenv("FROM_EXPLICIT", raising=False) + + with patch("bootstrap._PROJECT_ROOT", child): + injected = load_env_local(path=explicit) + + assert injected == 1 + assert os.environ.get("FROM_EXPLICIT") == "yes" + assert "FROM_WALKUP" not in os.environ + + def test_walkup_default_loads_vars(self, tmp_path, monkeypatch): + """load_env_local() with no args uses walk-up and loads vars.""" + (tmp_path / ".env.local").write_text("WALKUP_TEST=hello\n") + child = tmp_path / "child" + child.mkdir() + monkeypatch.delenv("WALKUP_TEST", raising=False) + + with patch("bootstrap._PROJECT_ROOT", child): + injected = load_env_local() + + assert injected == 1 + assert os.environ["WALKUP_TEST"] == "hello" diff --git a/tests/test_enrich.py b/tests/test_enrich.py new file mode 100644 index 0000000..da5a111 --- /dev/null +++ b/tests/test_enrich.py @@ -0,0 +1,121 @@ +""" +Tests for enrich.py — geometry-based tool enrichment from reference catalog. + +Covers: + - find_tools_missing_product_id query + - find_reference_match_raw matching + - enrich_tools dry-run and live update +""" +from __future__ import annotations + +from unittest.mock import MagicMock, patch, call +import json + +import pytest + +from enrich import ( + enrich_tools, + find_tools_missing_product_id, +) + + +def _mock_client(): + client = MagicMock() + return client + + +class TestFindToolsMissing: + def test_calls_select_with_filter(self): + client = _mock_client() + client.select.return_value = [ + {"id": "t1", "type": "drill", "product_id": "", "geo_dc": 3.45, "geo_nof": 2}, + ] + result = find_tools_missing_product_id(client) + assert len(result) == 1 + client.select.assert_called_once() + call_kwargs = client.select.call_args + assert "tools" in call_kwargs[0] + + +class TestEnrichTools: + def test_dry_run_no_update(self): + client = _mock_client() + client.select.return_value = [ + {"id": "t1", "type": "drill", "description": "#29 drill", + "geo_dc": 3.45, "geo_nof": 2, "vendor": "", "product_id": ""}, + ] + + # Mock the raw HTTP call for reference lookup + mock_resp = MagicMock() + mock_resp.ok = True + mock_resp.json.return_value = [ + {"vendor": "Garr Tool", "product_id": "19230", + "description": "#29", "catalog_name": "Garr Tool", "geo_dc": 3.45, + "geo_nof": 2, "geo_oal": None}, + ] + client._session.get.return_value = mock_resp + client._table_url.return_value = "http://test/rest/v1/reference_catalog" + + counts = enrich_tools(client, dry_run=True) + + assert counts["matched"] == 1 + assert counts["unmatched"] == 0 + # update() should NOT have been called in dry-run + client.update.assert_not_called() + + def test_live_update(self): + client = _mock_client() + client.select.return_value = [ + {"id": "t1", "type": "flat end mill", "description": "1/4 endmill", + "geo_dc": 6.35, "geo_nof": 3, "vendor": "", "product_id": ""}, + ] + + mock_resp = MagicMock() + mock_resp.ok = True + mock_resp.json.return_value = [ + 
{"vendor": "Harvey Tool", "product_id": "978412", + "description": "1/4 flat", "catalog_name": "Harvey", "geo_dc": 6.35, + "geo_nof": 3, "geo_oal": None}, + ] + client._session.get.return_value = mock_resp + client._table_url.return_value = "http://test/rest/v1/reference_catalog" + + counts = enrich_tools(client, dry_run=False) + + assert counts["matched"] == 1 + client.update.assert_called_once_with( + "tools", + {"product_id": "978412", "vendor": "Harvey Tool"}, + filters={"id": "eq.t1"}, + ) + + def test_no_match_returns_unmatched(self): + client = _mock_client() + client.select.return_value = [ + {"id": "t1", "type": "tap right hand", "description": "#8-32", + "geo_dc": 4.16, "geo_nof": 2, "vendor": "", "product_id": ""}, + ] + + mock_resp = MagicMock() + mock_resp.ok = True + mock_resp.json.return_value = [] + client._session.get.return_value = mock_resp + client._table_url.return_value = "http://test/rest/v1/reference_catalog" + + counts = enrich_tools(client, dry_run=True) + + assert counts["matched"] == 0 + assert counts["unmatched"] == 1 + + def test_none_geometry_skipped(self): + client = _mock_client() + client.select.return_value = [ + {"id": "t1", "type": "drill", "description": "mystery", + "geo_dc": None, "geo_nof": None, "vendor": "", "product_id": ""}, + ] + + counts = enrich_tools(client, dry_run=True) + + assert counts["unmatched"] == 1 + # Should not have tried the HTTP lookup + client._session.get.assert_not_called() diff --git a/tests/test_ingest_reference.py b/tests/test_ingest_reference.py new file mode 100644 index 0000000..c228290 --- /dev/null +++ b/tests/test_ingest_reference.py @@ -0,0 +1,128 @@ +""" +Tests for ingest_reference.py — vendor catalog ingest into reference_catalog. + +Covers: + - build_reference_rows: normalization, filtering, unit conversion + - is_vendor_catalog: pattern matching on filenames + - ingest_catalog_file: dry-run and live upsert (mocked) +""" +from __future__ import annotations + +from pathlib import Path +from unittest.mock import MagicMock, patch + +import pytest + +from ingest_reference import ( + build_reference_rows, + is_vendor_catalog, + ingest_catalog_file, + INCHES_TO_MM, +) + + +# ───────────────────────────────────────────── +# build_reference_rows +# ───────────────────────────────────────────── +class TestBuildReferenceRows: + def test_basic_tool(self): + tools = [{ + "type": "flat end mill", + "vendor": "Harvey Tool", + "product-id": "978412", + "description": "1/4 endmill", + "unit": "inches", + "geometry": {"DC": 0.25, "NOF": 3, "OAL": 2.5, "LCF": 0.7}, + }] + rows = build_reference_rows("Test Catalog", tools) + assert len(rows) == 1 + assert rows[0]["catalog_name"] == "Test Catalog" + assert rows[0]["vendor"] == "Harvey Tool" + assert rows[0]["product_id"] == "978412" + assert rows[0]["type"] == "flat end mill" + assert rows[0]["geo_dc"] == pytest.approx(0.25 * INCHES_TO_MM, abs=0.001) + assert rows[0]["geo_nof"] == 3.0 + + def test_skips_holders(self): + tools = [ + {"type": "holder", "product-id": "H123", "vendor": "X"}, + {"type": "flat end mill", "product-id": "E456", "vendor": "X", + "geometry": {"DC": 0.5, "NOF": 4}, "unit": "inches"}, + ] + rows = build_reference_rows("Cat", tools) + assert len(rows) == 1 + assert rows[0]["product_id"] == "E456" + + def test_skips_no_product_id(self): + tools = [ + {"type": "drill", "vendor": "X", "geometry": {"DC": 0.1, "NOF": 2}, + "unit": "inches"}, + {"type": "drill", "product-id": "", "vendor": "X", + "geometry": {"DC": 0.2, "NOF": 2}, "unit": "inches"}, + ] + rows = 
+        assert len(rows) == 0
+
+    def test_mm_units_no_conversion(self):
+        tools = [{
+            "type": "drill",
+            "vendor": "Guhring",
+            "product-id": "9005",
+            "unit": "millimeters",
+            "geometry": {"DC": 6.35, "NOF": 2},
+        }]
+        rows = build_reference_rows("Cat", tools)
+        assert rows[0]["geo_dc"] == pytest.approx(6.35)
+
+
+# ─────────────────────────────────────────────
+# is_vendor_catalog
+# ─────────────────────────────────────────────
+class TestIsVendorCatalog:
+    def test_recognized_vendors(self):
+        assert is_vendor_catalog(Path("Harvey Tool-End Mills.json"))
+        assert is_vendor_catalog(Path("Guhring-Solid Hole Making (1).json"))
+        assert is_vendor_catalog(Path("Garr Tool-Garr Tool.json"))
+        assert is_vendor_catalog(Path("Sandvik Coromant-Solid End Mills.json"))
+
+    def test_shop_specific_rejected(self):
+        assert not is_vendor_catalog(Path("848 (HAAS VF2SSYT).json"))
+        assert not is_vendor_catalog(Path("BROTHER SPEEDIO ALUMINUM.json"))
+        assert not is_vendor_catalog(Path("MAZAK C600.json"))
+
+
+# ─────────────────────────────────────────────
+# ingest_catalog_file
+# ─────────────────────────────────────────────
+class TestIngestCatalogFile:
+    def test_dry_run_no_client(self, tmp_path):
+        import json
+        f = tmp_path / "Harvey Tool-End Mills (1).json"
+        f.write_text(json.dumps({"data": [
+            {"type": "flat end mill", "vendor": "Harvey", "product-id": "123",
+             "unit": "inches", "geometry": {"DC": 0.25, "NOF": 3}},
+            {"type": "holder", "vendor": "Harvey", "product-id": "H1"},
+        ]}))
+
+        counts = ingest_catalog_file(f, dry_run=True)
+        assert counts["tools"] == 1
+        assert counts["skipped"] == 1
+
+    def test_strips_copy_suffix_from_catalog_name(self, tmp_path):
+        import json
+        f = tmp_path / "Garr Tool-Garr Tool (2).json"
+        f.write_text(json.dumps({"data": [
+            {"type": "drill", "vendor": "Garr", "product-id": "19230",
+             "unit": "inches", "geometry": {"DC": 0.136, "NOF": 2}},
+        ]}))
+
+        with patch("ingest_reference.SupabaseClient") as MockSB:
+            client = MockSB.return_value
+            client.upsert.return_value = [{"id": "abc"}]
+            counts = ingest_catalog_file(f, client=client, dry_run=False)
+
+        assert counts["tools"] == 1
+        # Verify catalog_name had suffix stripped
+        call_args = client.upsert.call_args
+        rows = call_args[0][1]
+        assert rows[0]["catalog_name"] == "Garr Tool-Garr Tool"
diff --git a/tests/test_plex_api.py b/tests/test_plex_api.py
new file mode 100644
index 0000000..c591f4b
--- /dev/null
+++ b/tests/test_plex_api.py
@@ -0,0 +1,429 @@
+"""
+Tests for plex_api.PlexClient — header construction, configuration,
+and the get_envelope() method.
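+
+The envelope shape these tests assume get_envelope() returns:
+
+    {"ok": bool, "status": int, "reason": str, "body": object | None,
+     "elapsed_ms": int, "url": str, "error": str | None}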
+"""
+import importlib
+import os
+from unittest.mock import MagicMock, patch
+
+import pytest
+import requests
+
+import plex_api
+from plex_api import (
+    PlexClient,
+    BASE_URL,
+    TEST_URL,
+    GRACE_TENANT_ID,
+    extract_supply_items,
+    TOOLING_CATEGORY,
+)
+
+
+# ─────────────────────────────────────────────
+# Header construction
+# ─────────────────────────────────────────────
+class TestPlexClientHeaders:
+    def test_sets_api_key_header(self):
+        c = PlexClient(api_key="my-key")
+        assert c.headers["X-Plex-Connect-Api-Key"] == "my-key"
+
+    def test_sets_api_secret_header_when_provided(self):
+        c = PlexClient(api_key="k", api_secret="my-secret")
+        assert c.headers["X-Plex-Connect-Api-Secret"] == "my-secret"
+
+    def test_omits_api_secret_header_when_empty(self):
+        c = PlexClient(api_key="k", api_secret="")
+        assert "X-Plex-Connect-Api-Secret" not in c.headers
+
+    def test_omits_api_secret_header_by_default(self):
+        c = PlexClient(api_key="k")
+        assert "X-Plex-Connect-Api-Secret" not in c.headers
+
+    def test_sets_tenant_id_header_when_provided(self):
+        c = PlexClient(api_key="k", tenant_id="abc-123")
+        assert c.headers["X-Plex-Connect-Tenant-Id"] == "abc-123"
+
+    def test_omits_tenant_id_header_when_empty(self):
+        c = PlexClient(api_key="k", tenant_id="")
+        assert "X-Plex-Connect-Tenant-Id" not in c.headers
+
+    def test_sets_content_type_and_accept_headers(self):
+        c = PlexClient(api_key="k")
+        assert c.headers["Content-Type"] == "application/json"
+        assert c.headers["Accept"] == "application/json"
+
+    def test_all_three_auth_headers_when_full_credentials(self):
+        c = PlexClient(api_key="k", api_secret="s", tenant_id="t")
+        assert c.headers["X-Plex-Connect-Api-Key"] == "k"
+        assert c.headers["X-Plex-Connect-Api-Secret"] == "s"
+        assert c.headers["X-Plex-Connect-Tenant-Id"] == "t"
+
+
+# ─────────────────────────────────────────────
+# Environment routing
+# ─────────────────────────────────────────────
+class TestPlexClientEnvironment:
+    @pytest.fixture(autouse=True)
+    def _no_base_url_override(self, monkeypatch):
+        """Ensure PLEX_BASE_URL is unset + module reloaded so OVERRIDE_URL == ''."""
+        monkeypatch.delenv("PLEX_BASE_URL", raising=False)
+        import importlib
+        importlib.reload(plex_api)
+        yield
+        importlib.reload(plex_api)
+
+    def test_use_test_true_uses_test_url(self):
+        c = PlexClient(api_key="k", use_test=True)
+        assert c.base == TEST_URL
+        assert "test." in c.base
+
+    def test_use_test_false_uses_prod_url(self):
+        c = PlexClient(api_key="k", use_test=False)
+        assert c.base == BASE_URL
+        assert "test." not in c.base
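+
+    # Reminder: plex_api computes USE_TEST / OVERRIDE_URL at import time from
+    # env vars, so env changes only take effect after importlib.reload(plex_api).
+    # That is why the fixture above and TestModuleDefaults both reload the module.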
+
+    def test_use_test_default_is_prod(self):
+        # Default constructor arg is use_test=False
+        c = PlexClient(api_key="k")
+        assert c.base == BASE_URL
+
+    def test_explicit_base_url_arg_wins(self):
+        c = PlexClient(api_key="k", base_url="http://localhost:8080")
+        assert c.base == "http://localhost:8080"
+
+    def test_explicit_base_url_arg_wins_even_over_use_test(self):
+        c = PlexClient(api_key="k", use_test=True, base_url="http://localhost:8080")
+        assert c.base == "http://localhost:8080"
+
+    def test_empty_base_url_falls_through_to_default(self, monkeypatch):
+        monkeypatch.delenv("PLEX_BASE_URL", raising=False)
+        import importlib
+        importlib.reload(plex_api)
+        c = plex_api.PlexClient(api_key="k", base_url="")
+        assert c.base == plex_api.BASE_URL
+        c = plex_api.PlexClient(api_key="k", base_url=" ")
+        assert c.base == plex_api.BASE_URL
+        importlib.reload(plex_api)
+
+
+# ─────────────────────────────────────────────
+# Throttle initialization
+# ─────────────────────────────────────────────
+class TestPlexClientThrottle:
+    def test_throttle_state_initialized(self):
+        c = PlexClient(api_key="k")
+        assert c._call_count == 0
+        assert c._window_start > 0
+
+    def test_throttle_increments_call_count(self):
+        c = PlexClient(api_key="k")
+        c._throttle()
+        assert c._call_count == 1
+        c._throttle()
+        assert c._call_count == 2
+
+
+# ─────────────────────────────────────────────
+# Module-level config: env-var driven defaults
+# ─────────────────────────────────────────────
+class TestModuleDefaults:
+    def test_grace_tenant_id_constant_is_verified_uuid(self):
+        # The verified Grace tenant ID returned by the live API on 2026-04-07
+        assert GRACE_TENANT_ID == "58f781ba-1691-4f32-b1db-381cdb21300c"
+
+    def test_tenant_id_defaults_to_grace_when_env_unset(self, monkeypatch):
+        monkeypatch.delenv("PLEX_TENANT_ID", raising=False)
+        importlib.reload(plex_api)
+        assert plex_api.TENANT_ID == GRACE_TENANT_ID
+        # Restore for downstream tests
+        importlib.reload(plex_api)
+
+    def test_tenant_id_uses_env_var_when_set(self, monkeypatch):
+        monkeypatch.setenv("PLEX_TENANT_ID", "custom-tenant-uuid")
+        importlib.reload(plex_api)
+        assert plex_api.TENANT_ID == "custom-tenant-uuid"
+        importlib.reload(plex_api)
+
+    def test_use_test_defaults_false(self, monkeypatch):
+        monkeypatch.delenv("PLEX_USE_TEST", raising=False)
+        importlib.reload(plex_api)
+        assert plex_api.USE_TEST is False
+        importlib.reload(plex_api)
+
+    def test_use_test_true_when_env_var_is_1(self, monkeypatch):
+        monkeypatch.setenv("PLEX_USE_TEST", "1")
+        importlib.reload(plex_api)
+        assert plex_api.USE_TEST is True
+        importlib.reload(plex_api)
+
+    def test_use_test_true_when_env_var_is_true(self, monkeypatch):
+        monkeypatch.setenv("PLEX_USE_TEST", "true")
+        importlib.reload(plex_api)
+        assert plex_api.USE_TEST is True
+        importlib.reload(plex_api)
+
+    def test_use_test_false_when_env_var_is_garbage(self, monkeypatch):
+        monkeypatch.setenv("PLEX_USE_TEST", "nope")
+        importlib.reload(plex_api)
+        assert plex_api.USE_TEST is False
+        importlib.reload(plex_api)
+
+    def test_override_url_empty_when_env_unset(self, monkeypatch):
+        monkeypatch.delenv("PLEX_BASE_URL", raising=False)
+        importlib.reload(plex_api)
+        assert plex_api.OVERRIDE_URL == ""
+        importlib.reload(plex_api)
+
+    def test_override_url_set_from_env(self, monkeypatch):
+        monkeypatch.setenv("PLEX_BASE_URL", "http://localhost:8080")
+        importlib.reload(plex_api)
+        assert plex_api.OVERRIDE_URL == "http://localhost:8080"
+        importlib.reload(plex_api)
+
+    def test_client_uses_override_url_when_env_set(self, monkeypatch):
+        monkeypatch.setenv("PLEX_BASE_URL", "http://localhost:8080")
+        importlib.reload(plex_api)
+        c = plex_api.PlexClient(api_key="k")
+        assert c.base == "http://localhost:8080"
+        importlib.reload(plex_api)
+
+    def test_client_override_url_wins_over_use_test(self, monkeypatch):
+        monkeypatch.setenv("PLEX_BASE_URL", "http://localhost:8080")
+        importlib.reload(plex_api)
+        c = plex_api.PlexClient(api_key="k", use_test=True)
+        assert c.base == "http://localhost:8080"
+        importlib.reload(plex_api)
+
+    def test_client_unchanged_when_override_unset(self, monkeypatch):
+        monkeypatch.delenv("PLEX_BASE_URL", raising=False)
+        importlib.reload(plex_api)
+        c = plex_api.PlexClient(api_key="k")
+        assert c.base == plex_api.BASE_URL
+        importlib.reload(plex_api)
+
+
+# ─────────────────────────────────────────────
+# get_envelope() — structured success/error envelope
+# ─────────────────────────────────────────────
+def _mock_response(status, json_body=None, text="", reason="", url=""):
+    """Build a MagicMock that mimics a requests.Response."""
+    r = MagicMock(spec=requests.Response)
+    r.status_code = status
+    r.reason = reason or {200: "OK", 401: "Unauthorized", 403: "Forbidden",
+                          404: "Not Found", 500: "Internal Server Error"}.get(status, "")
+    r.ok = 200 <= status < 300
+    r.text = text
+    r.url = url or "https://test.connect.plex.com/mdm/v1/x"
+    if json_body is not None:
+        r.json.return_value = json_body
+    else:
+        r.json.side_effect = ValueError("no json")
+    return r
+
+
+class TestGetEnvelopeSuccess:
+    def test_returns_ok_envelope_for_200(self):
+        c = PlexClient(api_key="k", api_secret="s", use_test=True)
+        with patch("plex_api.requests.get", return_value=_mock_response(
+            200, json_body=[{"id": "abc", "code": "G5"}]
+        )):
+            env = c.get_envelope("mdm", "v1", "tenants")
+        assert env["ok"] is True
+        assert env["status"] == 200
+        assert env["reason"] == "OK"
+        assert env["body"] == [{"id": "abc", "code": "G5"}]
+        assert env["error"] is None
+        assert env["elapsed_ms"] >= 0
+
+    def test_envelope_contains_url(self):
+        c = PlexClient(api_key="k", use_test=True)
+        with patch("plex_api.requests.get", return_value=_mock_response(
+            200, json_body={}, url="https://test.connect.plex.com/mdm/v1/parts"
+        )):
+            env = c.get_envelope("mdm", "v1", "parts")
+        assert "mdm/v1/parts" in env["url"]
+
+    def test_text_body_when_json_parse_fails(self):
+        c = PlexClient(api_key="k", use_test=True)
+        with patch("plex_api.requests.get", return_value=_mock_response(
+            200, json_body=None, text="not json"
+        )):
+            env = c.get_envelope("mdm", "v1", "tenants")
+        assert env["ok"] is True
+        assert env["body"] == "not json"
+
+    def test_none_body_when_text_empty_and_no_json(self):
+        c = PlexClient(api_key="k", use_test=True)
+        with patch("plex_api.requests.get", return_value=_mock_response(
+            200, json_body=None, text=""
+        )):
+            env = c.get_envelope("mdm", "v1", "tenants")
+        assert env["body"] is None
+
+
+class TestGetEnvelopeHttpErrors:
+    def test_401_returns_error_envelope(self):
+        c = PlexClient(api_key="k", api_secret="s", use_test=True)
+        with patch("plex_api.requests.get", return_value=_mock_response(
+            401, json_body={"code": "REQUEST_NOT_AUTHENTICATED"}
+        )):
+            env = c.get_envelope("mdm", "v1", "tenants")
+        assert env["ok"] is False
+        assert env["status"] == 401
+        assert env["reason"] == "Unauthorized"
+        assert env["body"] == {"code": "REQUEST_NOT_AUTHENTICATED"}
+        assert "401" in env["error"]
+        assert "Unauthorized" in env["error"]
+
+    def test_403_returns_error_envelope(self):
+        c = PlexClient(api_key="k", use_test=True)
+        with patch("plex_api.requests.get", return_value=_mock_response(403, json_body={})):
+            env = c.get_envelope("tooling", "v1", "tools")
+        assert env["ok"] is False
+        assert env["status"] == 403
+
+    def test_404_returns_error_envelope(self):
+        c = PlexClient(api_key="k", use_test=True)
+        with patch("plex_api.requests.get", return_value=_mock_response(404, json_body={})):
+            env = c.get_envelope("mdm", "v1", "tenants/nonexistent")
+        assert env["ok"] is False
+        assert env["status"] == 404
+
+    def test_500_returns_error_envelope(self):
+        c = PlexClient(api_key="k", use_test=True)
+        with patch("plex_api.requests.get", return_value=_mock_response(500, json_body={})):
+            env = c.get_envelope("mdm", "v1", "tenants")
+        assert env["ok"] is False
+        assert env["status"] == 500
+
+
+class TestGetEnvelopeNetworkErrors:
+    def test_connection_error_returns_status_zero(self):
+        c = PlexClient(api_key="k", use_test=True)
+        with patch("plex_api.requests.get", side_effect=requests.exceptions.ConnectionError("refused")):
+            env = c.get_envelope("mdm", "v1", "tenants")
+        assert env["ok"] is False
+        assert env["status"] == 0
+        assert env["reason"] == "ConnectionError"
+        assert env["body"] is None
+        assert "refused" in env["error"]
+
+    def test_timeout_returns_status_zero(self):
+        c = PlexClient(api_key="k", use_test=True)
+        with patch("plex_api.requests.get", side_effect=requests.exceptions.Timeout("timed out")):
+            env = c.get_envelope("mdm", "v1", "tenants")
+        assert env["ok"] is False
+        assert env["status"] == 0
+        assert env["reason"] == "Timeout"
+
+    def test_dns_failure_returns_status_zero(self):
+        c = PlexClient(api_key="k", use_test=True)
+        with patch("plex_api.requests.get", side_effect=requests.exceptions.ConnectionError("dns")):
+            env = c.get_envelope("mdm", "v1", "tenants")
+        assert env["status"] == 0
+
+
+# ─────────────────────────────────────────────
+# get() (legacy) — verify backward compat after refactor
+# ─────────────────────────────────────────────
+class TestGetLegacy:
+    def test_get_returns_body_on_success(self):
+        c = PlexClient(api_key="k", use_test=True)
+        with patch("plex_api.requests.get", return_value=_mock_response(
+            200, json_body={"items": [1, 2, 3]}
+        )):
+            result = c.get("mdm", "v1", "tenants")
+        assert result == {"items": [1, 2, 3]}
+
+    def test_get_returns_none_on_4xx(self, capsys):
+        c = PlexClient(api_key="k", use_test=True)
+        with patch("plex_api.requests.get", return_value=_mock_response(401, json_body={"code": "X"})):
+            result = c.get("mdm", "v1", "tenants")
+        assert result is None
+        # Legacy stdout logging is preserved
+        captured = capsys.readouterr()
+        assert "401" in captured.out
+
+    def test_get_returns_none_on_network_error(self):
+        c = PlexClient(api_key="k", use_test=True)
+        with patch("plex_api.requests.get", side_effect=requests.exceptions.ConnectionError("x")):
+            result = c.get("mdm", "v1", "tenants")
+        assert result is None
+
+
+# ─────────────────────────────────────────────
+# extract_supply_items — issue #2
+# ─────────────────────────────────────────────
+class TestExtractSupplyItems:
+    SAMPLE_TOOLS_AND_INSERTS = [
+        {"category": "Tools & Inserts", "supplyItemNumber": "990910", "description": "5/8 SQ END", "group": "Machining", "id": "u1", "inventoryUnit": "Each", "type": "SUPPLY"},
+        {"category": "Tools & Inserts", "supplyItemNumber": "ABC123", "description": "1/4 drill", "group": "Tool Room", "id": "u2", "inventoryUnit": "Each", "type": "SUPPLY"},
+    ]
+    SAMPLE_OFFICE = [
{"category": "Office Supplies", "supplyItemNumber": "PEN-01", "description": "Blue pen", "group": "Office", "id": "u3", "inventoryUnit": "Each", "type": "OFFICE"}, + ] + + def _full_set(self): + return self.SAMPLE_TOOLS_AND_INSERTS + self.SAMPLE_OFFICE + + def test_default_filters_to_tools_and_inserts(self, fake_client, tmp_path, monkeypatch): + # Redirect OUTPUT_DIR so the CSV write goes to tmp + monkeypatch.setattr(plex_api, "OUTPUT_DIR", str(tmp_path)) + fake_client.set_response("inventory-definitions/supply-items", self._full_set()) + result = extract_supply_items(fake_client) + assert result is not None + assert len(result) == 2 + for r in result: + assert r["category"] == TOOLING_CATEGORY + + def test_filter_can_be_disabled_with_empty_string(self, fake_client, tmp_path, monkeypatch): + monkeypatch.setattr(plex_api, "OUTPUT_DIR", str(tmp_path)) + fake_client.set_response("inventory-definitions/supply-items", self._full_set()) + result = extract_supply_items(fake_client, category="") + # All 3 records returned, no filter + assert len(result) == 3 + + def test_filter_can_be_overridden(self, fake_client, tmp_path, monkeypatch): + monkeypatch.setattr(plex_api, "OUTPUT_DIR", str(tmp_path)) + fake_client.set_response("inventory-definitions/supply-items", self._full_set()) + result = extract_supply_items(fake_client, category="Office Supplies") + assert len(result) == 1 + assert result[0]["category"] == "Office Supplies" + + def test_returns_none_on_network_error(self, fake_client): + # No response set on the fake client → get returns None + result = extract_supply_items(fake_client) + assert result is None + + def test_calls_correct_endpoint(self, fake_client, tmp_path, monkeypatch): + monkeypatch.setattr(plex_api, "OUTPUT_DIR", str(tmp_path)) + fake_client.set_response("inventory-definitions/supply-items", []) + extract_supply_items(fake_client) + # The fake client should have recorded a call to inventory/v1/inventory-definitions/supply-items + calls = [c for c in fake_client.calls if c[0] == "inventory" and c[1] == "v1"] + assert len(calls) == 1 + assert calls[0][2] == "inventory-definitions/supply-items" + + def test_normalizes_dict_data_wrapper(self, fake_client, tmp_path, monkeypatch): + # Some Plex endpoints wrap the list in a dict — extract_supply_items + # should handle either shape gracefully + monkeypatch.setattr(plex_api, "OUTPUT_DIR", str(tmp_path)) + fake_client.set_response( + "inventory-definitions/supply-items", + {"data": self.SAMPLE_TOOLS_AND_INSERTS}, + ) + result = extract_supply_items(fake_client) + assert len(result) == 2 + + def test_writes_csv_snapshot(self, fake_client, tmp_path, monkeypatch): + monkeypatch.setattr(plex_api, "OUTPUT_DIR", str(tmp_path)) + fake_client.set_response("inventory-definitions/supply-items", self._full_set()) + extract_supply_items(fake_client) + csv_path = tmp_path / "plex_supply_items.csv" + assert csv_path.exists() + content = csv_path.read_text(encoding="utf-8") + # Should have the 2 tools-and-inserts records, not the office one + assert "990910" in content + assert "ABC123" in content + assert "PEN-01" not in content diff --git a/tests/test_plex_diagnostics.py b/tests/test_plex_diagnostics.py new file mode 100644 index 0000000..90a16cc --- /dev/null +++ b/tests/test_plex_diagnostics.py @@ -0,0 +1,328 @@ +""" +Tests for plex_diagnostics — tenant_whoami composite check. + +Verifies all 6 logic branches: + 1. Connected to Grace + 2. Connected to G5 + 3. Connected to a configured-but-unknown tenant + 4. 
+  5. list_tenants returns None (auth failure)
+  6. list_tenants returns empty list / no parseable IDs
+
+Plus normalization of dict-wrapped responses (Plex sometimes returns
+{"items": [...]}, {"data": [...]}, or a bare list).
+"""
+import pytest
+
+from plex_diagnostics import (
+    GRACE_TENANT_ID,
+    GRACE_OLD_TENANT_ID,
+    G5_TENANT_ID,
+    KNOWN_TENANTS,
+    list_tenants,
+    get_tenant,
+    tenant_whoami,
+)
+
+
+# ─────────────────────────────────────────────
+# Constants sanity
+# ─────────────────────────────────────────────
+class TestKnownTenants:
+    def test_grace_tenant_id_is_verified_uuid(self):
+        # Verified empirically against the live API on 2026-04-07
+        assert GRACE_TENANT_ID == "58f781ba-1691-4f32-b1db-381cdb21300c"
+
+    def test_grace_tenant_id_in_known(self):
+        assert GRACE_TENANT_ID in KNOWN_TENANTS
+        assert KNOWN_TENANTS[GRACE_TENANT_ID] == "Grace Engineering"
+
+    def test_grace_old_tenant_id_kept_as_stale(self):
+        # The wrong UUID from earlier docs is preserved with a "stale" label
+        # so anyone hitting it gets a clear signal instead of "unknown"
+        assert GRACE_OLD_TENANT_ID == "a6af9c99-bce5-4938-a007-364dc5603d08"
+        assert GRACE_OLD_TENANT_ID in KNOWN_TENANTS
+        assert "stale" in KNOWN_TENANTS[GRACE_OLD_TENANT_ID].lower()
+
+    def test_g5_tenant_id_in_known(self):
+        assert G5_TENANT_ID in KNOWN_TENANTS
+        assert KNOWN_TENANTS[G5_TENANT_ID] == "G5"
+
+    def test_all_known_tenants_are_distinct(self):
+        ids = [GRACE_TENANT_ID, GRACE_OLD_TENANT_ID, G5_TENANT_ID]
+        assert len(set(ids)) == len(ids), "tenant IDs must be unique"
+
+
+# ─────────────────────────────────────────────
+# Raw wrappers — verify they call client.get with the right path
+# ─────────────────────────────────────────────
+class TestRawWrappers:
+    def test_list_tenants_calls_correct_endpoint(self, fake_client):
+        fake_client.set_response("tenants", [])
+        list_tenants(fake_client)
+        assert fake_client.calls[0][:3] == ("mdm", "v1", "tenants")
+
+    def test_get_tenant_calls_correct_endpoint(self, fake_client):
+        fake_client.set_default({"id": "abc"})
+        get_tenant(fake_client, "abc-123")
+        assert fake_client.calls[0][:3] == ("mdm", "v1", "tenants/abc-123")
+
+
+# ─────────────────────────────────────────────
+# tenant_whoami — match logic
+# ─────────────────────────────────────────────
+class TestTenantWhoami:
+    def test_grace_match(self, fake_client):
+        fake_client.set_response("tenants", [
+            {"id": GRACE_TENANT_ID, "code": "GRACE", "name": "Grace Engineering"}
+        ])
+        report = tenant_whoami(fake_client, GRACE_TENANT_ID)
+        assert report["match"] == "grace"
+        assert "Grace Engineering" in report["summary"]
+        assert "[OK]" in report["summary"]
+
+    def test_g5_match(self, fake_client):
+        fake_client.set_response("tenants", [
+            {"id": G5_TENANT_ID, "code": "G5", "name": "G5 Manufacturing"}
+        ])
+        report = tenant_whoami(fake_client, G5_TENANT_ID)
+        assert report["match"] == "g5"
+        assert "G5" in report["summary"]
+        assert "[WARN]" in report["summary"]
+
+    def test_no_data_when_list_returns_none(self, fake_client):
+        # No set_response → FakePlexClient.get_envelope synthesizes a 200 OK
+        # with body=None → tenant_whoami should still report no_data because
+        # there are no parseable IDs to work with.
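+        # (A genuine auth failure is different: it surfaces as an ok=False
+        # envelope and maps to "auth_failed", covered in TestAuthFailureBranch.)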
+        report = tenant_whoami(fake_client, G5_TENANT_ID)
+        assert report["match"] == "no_data"
+        assert "no data" in report["summary"].lower()
+
+    def test_no_data_when_list_returns_empty(self, fake_client):
+        fake_client.set_response("tenants", [])
+        report = tenant_whoami(fake_client, G5_TENANT_ID)
+        assert report["match"] == "no_data"
+        assert "no data" in report["summary"].lower()
+
+    def test_unknown_tenant_match(self, fake_client):
+        unknown_id = "11111111-2222-3333-4444-555555555555"
+        fake_client.set_response("tenants", [
+            {"id": unknown_id, "code": "UNK", "name": "Unknown Co"}
+        ])
+        report = tenant_whoami(fake_client, unknown_id)
+        assert report["match"] == "configured"
+        assert "Verify this is intentional" in report["summary"]
+
+    def test_other_match_when_visible_unrecognized_and_no_config(self, fake_client):
+        unknown_id = "11111111-2222-3333-4444-555555555555"
+        fake_client.set_response("tenants", [
+            {"id": unknown_id, "code": "UNK"}
+        ])
+        report = tenant_whoami(fake_client, "")
+        assert report["match"] == "other"
+
+    def test_grace_takes_priority_over_configured_g5(self, fake_client):
+        # Edge case: visible tenants include Grace, but TENANT_ID is still G5.
+        # The match should be "grace" because the routing has actually landed.
+        fake_client.set_response("tenants", [
+            {"id": GRACE_TENANT_ID, "code": "GRACE"}
+        ])
+        report = tenant_whoami(fake_client, G5_TENANT_ID)
+        assert report["match"] == "grace"
+
+
+# ─────────────────────────────────────────────
+# Response shape normalization
+# ─────────────────────────────────────────────
+class TestResponseNormalization:
+    def test_handles_bare_list_response(self, fake_client):
+        fake_client.set_response("tenants", [
+            {"id": G5_TENANT_ID, "code": "G5"}
+        ])
+        report = tenant_whoami(fake_client, G5_TENANT_ID)
+        assert len(report["visible_tenants"]) == 1
+
+    def test_handles_dict_data_wrapper(self, fake_client):
+        fake_client.set_response("tenants", {
+            "data": [{"id": G5_TENANT_ID, "code": "G5"}]
+        })
+        report = tenant_whoami(fake_client, G5_TENANT_ID)
+        assert len(report["visible_tenants"]) == 1
+        assert report["match"] == "g5"
+
+    def test_handles_dict_items_wrapper(self, fake_client):
+        fake_client.set_response("tenants", {
+            "items": [{"id": G5_TENANT_ID, "code": "G5"}]
+        })
+        report = tenant_whoami(fake_client, G5_TENANT_ID)
+        assert len(report["visible_tenants"]) == 1
+
+    def test_handles_dict_rows_wrapper(self, fake_client):
+        fake_client.set_response("tenants", {
+            "rows": [{"id": G5_TENANT_ID, "code": "G5"}]
+        })
+        report = tenant_whoami(fake_client, G5_TENANT_ID)
+        assert len(report["visible_tenants"]) == 1
+
+    def test_handles_single_object_response(self, fake_client):
+        # Some endpoints return a bare object instead of a list
+        fake_client.set_response("tenants", {
+            "id": G5_TENANT_ID, "code": "G5"
+        })
+        report = tenant_whoami(fake_client, G5_TENANT_ID)
+        assert len(report["visible_tenants"]) == 1
+        assert report["match"] == "g5"
+
+
+# ─────────────────────────────────────────────
+# Report structure
+# ─────────────────────────────────────────────
+class TestReportStructure:
+    def test_report_has_required_keys(self, fake_client):
+        fake_client.set_response("tenants", [{"id": G5_TENANT_ID}])
+        report = tenant_whoami(fake_client, G5_TENANT_ID)
+        for key in (
+            "configured_tenant_id",
+            "configured_tenant_label",
+            "visible_tenants",
+            "list_tenants_raw",
+            "get_tenant_raw",
+            "match",
+            "summary",
+        ):
+            assert key in report
+
+    def test_report_records_configured_label(self, fake_client):
fake_client.set_response("tenants", [{"id": G5_TENANT_ID}]) + report = tenant_whoami(fake_client, G5_TENANT_ID) + assert report["configured_tenant_label"] == "G5" + + def test_report_records_unknown_label_for_unknown_id(self, fake_client): + unknown = "deadbeef-dead-beef-dead-beefdeadbeef" + fake_client.set_response("tenants", [{"id": unknown}]) + report = tenant_whoami(fake_client, unknown) + assert report["configured_tenant_label"] == "unknown" + + def test_get_tenant_called_when_configured_id_provided(self, fake_client): + fake_client.set_response("tenants", [{"id": G5_TENANT_ID}]) + fake_client.set_response(f"tenants/{G5_TENANT_ID}", {"id": G5_TENANT_ID, "name": "G5 Detail"}) + report = tenant_whoami(fake_client, G5_TENANT_ID) + assert report["get_tenant_raw"] is not None + # Two calls should have been made: list + get + assert any(c[2] == "tenants" for c in fake_client.calls) + assert any(c[2] == f"tenants/{G5_TENANT_ID}" for c in fake_client.calls) + + def test_get_tenant_skipped_when_no_configured_id(self, fake_client): + fake_client.set_response("tenants", [{"id": G5_TENANT_ID}]) + report = tenant_whoami(fake_client, "") + assert report["get_tenant_raw"] is None + + def test_report_includes_envelope_metadata(self, fake_client): + fake_client.set_response("tenants", [{"id": G5_TENANT_ID}]) + report = tenant_whoami(fake_client, G5_TENANT_ID) + env = report["list_tenants_envelope"] + assert env is not None + assert env["ok"] is True + assert env["status"] == 200 + assert env["error"] is None + + +# ───────────────────────────────────────────── +# HTTP error visibility — the whole reason for this PR +# ───────────────────────────────────────────── +def _err_envelope(status, reason, error_msg, body=None): + """Build a fake error envelope as PlexClient.get_envelope would return.""" + return { + "ok": False, + "status": status, + "reason": reason, + "body": body, + "elapsed_ms": 100, + "url": "https://test.connect.plex.com/mdm/v1/tenants", + "error": error_msg, + } + + +class TestAuthFailureBranch: + def test_401_maps_to_auth_failed(self, fake_client): + fake_client.set_envelope("tenants", _err_envelope( + 401, "Unauthorized", "HTTP 401 Unauthorized", + body={"code": "REQUEST_NOT_AUTHENTICATED"} + )) + report = tenant_whoami(fake_client, G5_TENANT_ID) + assert report["match"] == "auth_failed" + assert "401" in report["summary"] + assert "PLEX_API_KEY" in report["summary"] + assert "PLEX_API_SECRET" in report["summary"] + + def test_403_maps_to_auth_failed(self, fake_client): + fake_client.set_envelope("tenants", _err_envelope( + 403, "Forbidden", "HTTP 403 Forbidden" + )) + report = tenant_whoami(fake_client, G5_TENANT_ID) + assert report["match"] == "auth_failed" + assert "403" in report["summary"] + + def test_auth_failed_preserves_envelope_metadata(self, fake_client): + fake_client.set_envelope("tenants", _err_envelope( + 401, "Unauthorized", "HTTP 401 Unauthorized" + )) + report = tenant_whoami(fake_client, G5_TENANT_ID) + env = report["list_tenants_envelope"] + assert env["ok"] is False + assert env["status"] == 401 + assert env["error"] == "HTTP 401 Unauthorized" + + def test_auth_failed_does_not_call_get_tenant(self, fake_client): + fake_client.set_envelope("tenants", _err_envelope( + 401, "Unauthorized", "x" + )) + tenant_whoami(fake_client, G5_TENANT_ID) + # Only the list call should have been made, not the by-id call + list_calls = [c for c in fake_client.calls if c[2] == "tenants"] + get_calls = [c for c in fake_client.calls if c[2] == f"tenants/{G5_TENANT_ID}"] + assert 
len(list_calls) == 1 + assert len(get_calls) == 0 + + +class TestRequestFailedBranch: + def test_network_error_maps_to_request_failed(self, fake_client): + fake_client.set_envelope("tenants", _err_envelope( + 0, "ConnectionError", "Connection refused" + )) + report = tenant_whoami(fake_client, G5_TENANT_ID) + assert report["match"] == "request_failed" + assert "could not reach" in report["summary"].lower() + assert "Connection refused" in report["summary"] + + def test_timeout_maps_to_request_failed(self, fake_client): + fake_client.set_envelope("tenants", _err_envelope( + 0, "Timeout", "Read timed out" + )) + report = tenant_whoami(fake_client, G5_TENANT_ID) + assert report["match"] == "request_failed" + + def test_404_maps_to_request_failed(self, fake_client): + fake_client.set_envelope("tenants", _err_envelope( + 404, "Not Found", "HTTP 404 Not Found" + )) + report = tenant_whoami(fake_client, G5_TENANT_ID) + assert report["match"] == "request_failed" + assert "404" in report["summary"] + + def test_500_maps_to_request_failed(self, fake_client): + fake_client.set_envelope("tenants", _err_envelope( + 500, "Internal Server Error", "HTTP 500 Internal Server Error" + )) + report = tenant_whoami(fake_client, G5_TENANT_ID) + assert report["match"] == "request_failed" + assert "500" in report["summary"] + + def test_request_failed_preserves_envelope_metadata(self, fake_client): + fake_client.set_envelope("tenants", _err_envelope( + 500, "Internal Server Error", "HTTP 500" + )) + report = tenant_whoami(fake_client, G5_TENANT_ID) + env = report["list_tenants_envelope"] + assert env["status"] == 500 + assert env["ok"] is False diff --git a/tests/test_plex_mock_diff.py b/tests/test_plex_mock_diff.py new file mode 100644 index 0000000..39d4166 --- /dev/null +++ b/tests/test_plex_mock_diff.py @@ -0,0 +1,100 @@ +"""Tests for the Plex-mock diff CLI.""" +import json +from pathlib import Path + +import pytest + +from tools.plex_mock.diff import diff_run, DiffResult +from tools.plex_mock.store import CaptureStore + + +FIXTURE = Path(__file__).parent / "fixtures" / "plex_mock" / "expected_supply_items.json" + + +@pytest.fixture +def store(tmp_path: Path) -> CaptureStore: + return CaptureStore(tmp_path / "captures.db") + + +@pytest.fixture +def expected() -> dict: + return json.loads(FIXTURE.read_text()) + + +class TestDiffRun: + def test_clean_run_returns_no_issues(self, store: CaptureStore, expected: dict): + store.append( + method="POST", + path="/inventory/v1/inventory-definitions/supply-items", + body={ + "category": "Tools & Inserts", "description": "x", + "group": "Machining - End Mills", "inventoryUnit": "Ea", + "supplyItemNumber": "ABC-1", "type": "SUPPLY", + }, + run_id="r1", + ) + result = diff_run(store=store, run_id="r1", expected=expected) + assert isinstance(result, DiffResult) + assert result.issues == [] + assert result.ok is True + + def test_missing_required_field_flagged(self, store: CaptureStore, expected: dict): + store.append( + method="POST", + path="/inventory/v1/inventory-definitions/supply-items", + body={"supplyItemNumber": "ABC-1"}, # missing everything else + run_id="r1", + ) + result = diff_run(store=store, run_id="r1", expected=expected) + assert result.ok is False + msgs = " ".join(result.issues) + assert "missing" in msgs.lower() + assert "category" in msgs + + def test_forbidden_field_flagged(self, store: CaptureStore, expected: dict): + store.append( + method="POST", + path="/inventory/v1/inventory-definitions/supply-items", + body={ + "category": "Tools & Inserts", 
"description": "x", + "group": "Machining - End Mills", "inventoryUnit": "Ea", + "supplyItemNumber": "ABC-1", "type": "SUPPLY", + "id": "client-should-not-send-this", + }, + run_id="r1", + ) + result = diff_run(store=store, run_id="r1", expected=expected) + assert result.ok is False + assert any("forbidden" in m.lower() and "id" in m for m in result.issues) + + def test_wrong_field_type_flagged(self, store: CaptureStore, expected: dict): + store.append( + method="POST", + path="/inventory/v1/inventory-definitions/supply-items", + body={ + "category": "Tools & Inserts", "description": 42, # should be str + "group": "Machining - End Mills", "inventoryUnit": "Ea", + "supplyItemNumber": "ABC-1", "type": "SUPPLY", + }, + run_id="r1", + ) + result = diff_run(store=store, run_id="r1", expected=expected) + assert result.ok is False + assert any("description" in m and "str" in m for m in result.issues) + + def test_checked_counter_reflects_rows(self, store: CaptureStore, expected: dict): + body = { + "category": "Tools & Inserts", "description": "x", + "group": "Machining - End Mills", "inventoryUnit": "Ea", + "supplyItemNumber": "ABC-1", "type": "SUPPLY", + } + store.append(method="POST", path="/inventory/v1/inventory-definitions/supply-items", body=body, run_id="r1") + store.append(method="POST", path="/inventory/v1/inventory-definitions/supply-items", body=body, run_id="r1") + result = diff_run(store=store, run_id="r1", expected=expected) + assert result.checked == 2 + assert result.ok is True + + def test_empty_run_returns_ok_with_zero_checked(self, store: CaptureStore, expected: dict): + result = diff_run(store=store, run_id="nope", expected=expected) + assert result.ok is True + assert result.checked == 0 diff --git a/tests/test_plex_mock_server.py b/tests/test_plex_mock_server.py new file mode 100644 index 0000000..dbf30d6 --- /dev/null +++ b/tests/test_plex_mock_server.py @@ -0,0 +1,265 @@ +"""Tests for the Plex-mock Flask server.""" +import json +from pathlib import Path + +import pytest + +from tools.plex_mock.server import create_app + + +@pytest.fixture +def snapshots_dir(tmp_path: Path) -> Path: + d = tmp_path / "snapshots" + d.mkdir() + supply = [ + {"id": "11111111-1111-1111-1111-111111111111", "supplyItemNumber": "ABC-1", + "description": "Test tool", "category": "Tools & Inserts", + "group": "Machining - End Mills", "inventoryUnit": "Ea", "type": "SUPPLY"}, + {"id": "22222222-2222-2222-2222-222222222222", "supplyItemNumber": "ABC-2", + "description": "Test tool 2", "category": "Tools & Inserts", + "group": "Machining - Drills", "inventoryUnit": "Ea", "type": "SUPPLY"}, + ] + workcenters = [ + {"workcenterId": "0b6cf62b-2809-4d3d-ab24-369cd0171f62", + "workcenterCode": "879", "name": "Brother Speedio 879", + "workcenterGroup": "MILLS"}, + ] + (d / "supply_items_list.json").write_text(json.dumps(supply)) + (d / "workcenters_list.json").write_text(json.dumps(workcenters)) + return d + + +@pytest.fixture +def client(tmp_path: Path, snapshots_dir: Path): + app = create_app(snapshots_dir=snapshots_dir, db_path=tmp_path / "captures.db", run_id="test-run") + return app.test_client() + + +class TestSupplyItemsGetList: + def test_returns_200(self, client): + rv = client.get("/inventory/v1/inventory-definitions/supply-items") + assert rv.status_code == 200 + + def test_returns_snapshot_body(self, client): + rv = client.get("/inventory/v1/inventory-definitions/supply-items") + body = rv.get_json() + assert isinstance(body, list) + assert len(body) == 2 + assert body[0]["supplyItemNumber"] == 
"ABC-1" + + +class TestSupplyItemsGetById: + def test_returns_200_when_found(self, client): + rv = client.get("/inventory/v1/inventory-definitions/supply-items/11111111-1111-1111-1111-111111111111") + assert rv.status_code == 200 + assert rv.get_json()["supplyItemNumber"] == "ABC-1" + + def test_returns_404_when_unknown(self, client): + rv = client.get("/inventory/v1/inventory-definitions/supply-items/does-not-exist") + assert rv.status_code == 404 + + +class TestWorkcentersGet: + def test_returns_200_list(self, client): + rv = client.get("/production/v1/production-definitions/workcenters") + assert rv.status_code == 200 + assert len(rv.get_json()) == 1 + + def test_returns_200_by_id(self, client): + rv = client.get("/production/v1/production-definitions/workcenters/0b6cf62b-2809-4d3d-ab24-369cd0171f62") + assert rv.status_code == 200 + assert rv.get_json()["workcenterCode"] == "879" + + def test_returns_404_for_unknown_workcenter(self, client): + rv = client.get("/production/v1/production-definitions/workcenters/nope") + assert rv.status_code == 404 + + +class TestHealth: + def test_health_endpoint(self, client): + rv = client.get("/healthz") + assert rv.status_code == 200 + assert rv.get_json() == {"ok": True} + + +class TestMalformedSnapshot: + def test_malformed_json_raises_value_error_with_path(self, tmp_path: Path): + d = tmp_path / "snapshots" + d.mkdir() + (d / "supply_items_list.json").write_text("not json at all") + (d / "workcenters_list.json").write_text("[]") + with pytest.raises(ValueError) as excinfo: + create_app(snapshots_dir=d, db_path=tmp_path / "c.db", run_id="r1") + assert "supply_items_list.json" in str(excinfo.value) + + def test_missing_snapshot_raises_file_not_found_with_actionable_message(self, tmp_path: Path): + d = tmp_path / "snapshots" + d.mkdir() + # Only workcenters present; supply_items_list.json is missing. 
+ (d / "workcenters_list.json").write_text("[]") + with pytest.raises(FileNotFoundError) as excinfo: + create_app(snapshots_dir=d, db_path=tmp_path / "c.db", run_id="r1") + msg = str(excinfo.value) + assert "supply_items_list.json" in msg + assert "capture_snapshots" in msg + + +class TestMalformedWriteBodies: + """Malformed / non-object bodies must return 400 instead of being captured as {}.""" + + def test_post_rejects_non_json_body(self, client): + rv = client.post( + "/inventory/v1/inventory-definitions/supply-items", + data="not json at all", + content_type="application/json", + ) + assert rv.status_code == 400 + assert rv.get_json()["error"] == "invalid JSON body" + + def test_post_rejects_array_body(self, client): + rv = client.post("/inventory/v1/inventory-definitions/supply-items", json=[1, 2, 3]) + assert rv.status_code == 400 + assert "JSON object" in rv.get_json()["error"] + + def test_post_rejects_scalar_body(self, client): + rv = client.post("/inventory/v1/inventory-definitions/supply-items", json="hello") + assert rv.status_code == 400 + + def test_rejected_post_does_not_capture(self, client): + client.post( + "/inventory/v1/inventory-definitions/supply-items", + data="not json", + content_type="application/json", + ) + store = client.application.config["PLEX_MOCK_STORE"] + rows = store.query(run_id=client.application.config["PLEX_MOCK_RUN_ID"]) + assert rows == [] + + def test_put_rejects_malformed_json(self, client): + rv = client.put( + "/inventory/v1/inventory-definitions/supply-items/11111111-1111-1111-1111-111111111111", + data="{not json", + content_type="application/json", + ) + assert rv.status_code == 400 + + def test_put_404_takes_precedence_over_body_parse(self, client): + # Unknown id + malformed body: 404 wins (matches original ordering). 
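+        # (This implies the handler resolves the resource id before it tries
+        # to parse the request body; that ordering is what we pin down here.)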
+        rv = client.put(
+            "/inventory/v1/inventory-definitions/supply-items/not-a-real-id",
+            data="not json",
+            content_type="application/json",
+        )
+        assert rv.status_code == 404
+
+    def test_workcenter_put_rejects_array_body(self, client):
+        rv = client.put(
+            "/production/v1/production-definitions/workcenters/0b6cf62b-2809-4d3d-ab24-369cd0171f62",
+            json=[1, 2],
+        )
+        assert rv.status_code == 400
+
+    def test_workcenter_patch_rejects_malformed_json(self, client):
+        rv = client.patch(
+            "/production/v1/production-definitions/workcenters/0b6cf62b-2809-4d3d-ab24-369cd0171f62",
+            data="bogus",
+            content_type="application/json",
+        )
+        assert rv.status_code == 400
+
+
+class TestSupplyItemsPost:
+    def test_post_returns_201_with_synthetic_id(self, client):
+        payload = {"supplyItemNumber": "NEW-1", "description": "New tool",
+                   "category": "Tools & Inserts", "group": "Machining - End Mills",
+                   "inventoryUnit": "Ea", "type": "SUPPLY"}
+        rv = client.post("/inventory/v1/inventory-definitions/supply-items", json=payload)
+        assert rv.status_code == 201
+        body = rv.get_json()
+        assert "id" in body
+        uuid.UUID(body["id"])  # parses as a valid UUID
+        assert body["supplyItemNumber"] == "NEW-1"
+
+    def test_post_echoes_payload_fields(self, client):
+        payload = {"supplyItemNumber": "NEW-2", "description": "x",
+                   "group": "Machining - Drills", "inventoryUnit": "Ea",
+                   "category": "Tools & Inserts", "type": "SUPPLY"}
+        rv = client.post("/inventory/v1/inventory-definitions/supply-items", json=payload)
+        body = rv.get_json()
+        for k, v in payload.items():
+            assert body[k] == v
+
+    def test_post_persists_to_capture_store(self, client):
+        from tools.plex_mock.store import CaptureStore
+        payload = {"supplyItemNumber": "NEW-3"}
+        client.post("/inventory/v1/inventory-definitions/supply-items", json=payload)
+        store: CaptureStore = client.application.config["PLEX_MOCK_STORE"]
+        rows = store.query(run_id=client.application.config["PLEX_MOCK_RUN_ID"])
+        assert len(rows) == 1
+        assert rows[0]["method"] == "POST"
+        assert rows[0]["path"].endswith("/supply-items")
+        assert rows[0]["body"]["supplyItemNumber"] == "NEW-3"
+
+    def test_post_409_on_duplicate_supply_item_number(self, client):
+        # Snapshot already has "ABC-1" — mock should treat that as a conflict
+        payload = {"supplyItemNumber": "ABC-1", "description": "dup"}
+        rv = client.post("/inventory/v1/inventory-definitions/supply-items", json=payload)
+        assert rv.status_code == 409
+
+    def test_post_409_does_not_capture(self, client):
+        payload = {"supplyItemNumber": "ABC-1", "description": "dup"}
+        rv = client.post("/inventory/v1/inventory-definitions/supply-items", json=payload)
+        assert rv.status_code == 409
+        store = client.application.config["PLEX_MOCK_STORE"]
+        rows = store.query(run_id=client.application.config["PLEX_MOCK_RUN_ID"])
+        assert len(rows) == 0
+
+
+class TestSupplyItemsPut:
+    def test_put_200_and_captured(self, client):
+        payload = {"description": "updated description"}
+        rv = client.put(
+            "/inventory/v1/inventory-definitions/supply-items/11111111-1111-1111-1111-111111111111",
+            json=payload,
+        )
+        assert rv.status_code == 200
+        assert rv.get_json()["description"] == "updated description"
+
+        store = client.application.config["PLEX_MOCK_STORE"]
+        rows = store.query(run_id=client.application.config["PLEX_MOCK_RUN_ID"], method="PUT")
+        assert len(rows) == 1
+
+    def test_put_404_on_unknown_id(self, client):
+        rv = client.put(
+            "/inventory/v1/inventory-definitions/supply-items/not-a-real-id",
+            json={"description": "x"},
+        )
+        assert rv.status_code ==
404 + + +class TestWorkcenterWrites: + def test_put_workcenter_captured(self, client): + # #6 probe — we don't yet know the body shape, just confirm the mock + # records whatever we send it. + payload = {"unknownFieldForProbe": True} + rv = client.put( + "/production/v1/production-definitions/workcenters/0b6cf62b-2809-4d3d-ab24-369cd0171f62", + json=payload, + ) + assert rv.status_code == 200 + store = client.application.config["PLEX_MOCK_STORE"] + rows = store.query(run_id=client.application.config["PLEX_MOCK_RUN_ID"], method="PUT") + assert any(r["body"].get("unknownFieldForProbe") is True for r in rows) + + def test_patch_workcenter_captured(self, client): + rv = client.patch( + "/production/v1/production-definitions/workcenters/0b6cf62b-2809-4d3d-ab24-369cd0171f62", + json={"note": "patched"}, + ) + assert rv.status_code == 200 + store = client.application.config["PLEX_MOCK_STORE"] + rows = store.query(run_id=client.application.config["PLEX_MOCK_RUN_ID"], method="PATCH") + assert len(rows) == 1 diff --git a/tests/test_plex_mock_store.py b/tests/test_plex_mock_store.py new file mode 100644 index 0000000..2bde783 --- /dev/null +++ b/tests/test_plex_mock_store.py @@ -0,0 +1,77 @@ +"""Tests for the Plex-mock SQLite capture store.""" +import json +import sqlite3 +from pathlib import Path + +import pytest + +from tools.plex_mock.store import CaptureStore + + +@pytest.fixture +def store(tmp_path: Path) -> CaptureStore: + return CaptureStore(tmp_path / "captures.db") + + +class TestCaptureStoreInit: + def test_creates_db_file_on_open(self, tmp_path: Path): + db = tmp_path / "captures.db" + assert not db.exists() + CaptureStore(db) + assert db.exists() + + def test_creates_table_schema(self, store: CaptureStore): + with sqlite3.connect(store.path) as con: + cols = {row[1] for row in con.execute("PRAGMA table_info(captures)")} + assert {"id", "ts", "method", "path", "body_json", "run_id"} <= cols + + +class TestCaptureStoreAppend: + def test_append_returns_integer_id(self, store: CaptureStore): + rid = store.append(method="POST", path="/foo", body={"a": 1}, run_id="r1") + assert isinstance(rid, int) + assert rid >= 1 + + def test_append_persists_row(self, store: CaptureStore): + store.append(method="POST", path="/foo", body={"a": 1}, run_id="r1") + rows = store.query(run_id="r1") + assert len(rows) == 1 + assert rows[0]["method"] == "POST" + assert rows[0]["path"] == "/foo" + assert rows[0]["body"] == {"a": 1} + assert rows[0]["run_id"] == "r1" + + def test_append_stores_body_as_json(self, store: CaptureStore): + payload = {"nested": {"k": [1, 2, 3]}} + store.append(method="PUT", path="/x", body=payload, run_id="r1") + with sqlite3.connect(store.path) as con: + raw = con.execute("SELECT body_json FROM captures").fetchone()[0] + assert json.loads(raw) == payload + + def test_append_handles_null_body(self, store: CaptureStore): + store.append(method="PATCH", path="/x", body=None, run_id="r1") + rows = store.query(run_id="r1") + assert rows[0]["body"] is None + + +class TestCaptureStoreQuery: + def test_query_filters_by_run_id(self, store: CaptureStore): + store.append(method="POST", path="/a", body={}, run_id="r1") + store.append(method="POST", path="/b", body={}, run_id="r2") + assert len(store.query(run_id="r1")) == 1 + assert len(store.query(run_id="r2")) == 1 + + def test_query_filters_by_method(self, store: CaptureStore): + store.append(method="POST", path="/a", body={}, run_id="r1") + store.append(method="PUT", path="/a", body={}, run_id="r1") + assert len(store.query(run_id="r1", 
method="POST")) == 1 + assert len(store.query(run_id="r1", method="PUT")) == 1 + + def test_query_orders_by_id_ascending(self, store: CaptureStore): + store.append(method="POST", path="/a", body={"n": 1}, run_id="r1") + store.append(method="POST", path="/b", body={"n": 2}, run_id="r1") + rows = store.query(run_id="r1") + assert [r["body"]["n"] for r in rows] == [1, 2] + + def test_query_empty_when_no_match(self, store: CaptureStore): + assert store.query(run_id="nope") == [] diff --git a/tests/test_populate_supply_items.py b/tests/test_populate_supply_items.py new file mode 100644 index 0000000..2d0bac7 --- /dev/null +++ b/tests/test_populate_supply_items.py @@ -0,0 +1,255 @@ +""" +Tests for populate_supply_items.py -- tools → plex_supply_items staging. + +Focus on: + - build_supply_item_row(): 3 derived columns + fusion_guid, DB defaults omitted + - tool_type_to_group(): type → group mapping, default "Machining" + - populate_supply_items(): upserts eligible rows, skips tools without + product_id, dry-run suppresses writes, Supabase failure marks rows failed + - CLI exit codes: 0 success, 1 partial fail, 2 no tools +""" +from __future__ import annotations + +from unittest.mock import MagicMock, patch + +import pytest + +from populate_supply_items import ( + build_supply_item_row, + tool_type_to_group, + populate_supply_items, + main, + DEFAULT_GROUP, + RowResult, +) + + +# --------------------------------------------------------------- +# tool_type_to_group +# --------------------------------------------------------------- +class TestToolTypeToGroup: + def test_default_for_standard_type(self): + assert tool_type_to_group("flat end mill") == "Machining" + + def test_default_for_none(self): + assert tool_type_to_group(None) == "Machining" + + def test_default_for_empty_string(self): + assert tool_type_to_group("") == "Machining" + + def test_case_insensitive(self): + assert tool_type_to_group("Flat End Mill") == "Machining" + + def test_default_group_constant(self): + assert DEFAULT_GROUP == "Machining" + + +# --------------------------------------------------------------- +# build_supply_item_row +# --------------------------------------------------------------- +class TestBuildSupplyItemRow: + def test_maps_three_derived_columns(self): + tool = { + "fusion_guid": "abc-123", + "description": "1/2 in 3FL end mill", + "product_id": "HVN-12345", + "type": "flat end mill", + } + row = build_supply_item_row(tool) + assert row == { + "fusion_guid": "abc-123", + "description": "1/2 in 3FL end mill", + "item_group": "Machining", + "supply_item_number": "HVN-12345", + } + + def test_omits_defaulted_columns(self): + tool = { + "fusion_guid": "abc-123", + "description": "x", + "product_id": "y", + "type": "drill", + } + row = build_supply_item_row(tool) + # category, inventory_unit, item_type should NOT be in the row + assert "category" not in row + assert "inventory_unit" not in row + assert "item_type" not in row + + def test_missing_description_defaults_to_empty(self): + tool = {"fusion_guid": "g", "product_id": "p", "type": "drill"} + row = build_supply_item_row(tool) + assert row["description"] == "" + + def test_none_description_defaults_to_empty(self): + tool = { + "fusion_guid": "g", + "description": None, + "product_id": "p", + "type": "drill", + } + row = build_supply_item_row(tool) + assert row["description"] == "" + + def test_missing_product_id_defaults_to_empty(self): + tool = {"fusion_guid": "g", "description": "d", "type": "drill"} + row = build_supply_item_row(tool) + assert 
row["supply_item_number"] == "" + + def test_missing_type_uses_default_group(self): + tool = {"fusion_guid": "g", "description": "d", "product_id": "p"} + row = build_supply_item_row(tool) + assert row["item_group"] == "Machining" + + +# --------------------------------------------------------------- +# populate_supply_items +# --------------------------------------------------------------- +def _make_tool(guid: str, product_id: str = "P-123", **kwargs) -> dict: + return { + "fusion_guid": guid, + "description": f"Tool {guid}", + "product_id": product_id, + "type": "flat end mill", + **kwargs, + } + + +class TestPopulateSupplyItems: + def test_upserts_eligible_tools(self): + sb = MagicMock() + sb.select.return_value = [_make_tool("a"), _make_tool("b")] + sb.upsert.return_value = [] + + report = populate_supply_items(sb) + + assert len(report.staged) == 2 + assert len(report.skipped) == 0 + assert len(report.failed) == 0 + sb.upsert.assert_called_once() + call_args = sb.upsert.call_args + assert call_args.args[0] == "plex_supply_items" + rows = call_args.args[1] + assert len(rows) == 2 + assert call_args.kwargs["on_conflict"] == "fusion_guid" + + def test_skips_tools_without_product_id(self): + sb = MagicMock() + sb.select.return_value = [ + _make_tool("a", product_id="HVN-1"), + _make_tool("b", product_id=""), + _make_tool("c", product_id=" "), # whitespace-only + ] + sb.upsert.return_value = [] + + report = populate_supply_items(sb) + + assert len(report.staged) == 1 + assert len(report.skipped) == 2 + # Only one row upserted + rows = sb.upsert.call_args.args[1] + assert len(rows) == 1 + assert rows[0]["fusion_guid"] == "a" + + def test_skips_tools_with_none_product_id(self): + sb = MagicMock() + sb.select.return_value = [_make_tool("a", product_id=None)] + sb.upsert.return_value = [] + + report = populate_supply_items(sb) + + assert len(report.skipped) == 1 + sb.upsert.assert_not_called() + + def test_dry_run_does_not_write(self): + sb = MagicMock() + sb.select.return_value = [_make_tool("a")] + + report = populate_supply_items(sb, dry_run=True) + + sb.upsert.assert_not_called() + assert len(report.staged) == 1 + + def test_no_tools_returns_empty_report(self): + sb = MagicMock() + sb.select.return_value = [] + + report = populate_supply_items(sb) + + assert report.results == [] + sb.upsert.assert_not_called() + + def test_supabase_upsert_failure_marks_rows_failed(self): + sb = MagicMock() + sb.select.return_value = [_make_tool("a"), _make_tool("b")] + sb.upsert.side_effect = RuntimeError("postgrest borked") + + report = populate_supply_items(sb) + + assert len(report.failed) == 2 + assert len(report.staged) == 0 + assert "postgrest borked" in report.failed[0].message + + def test_all_skipped_no_upsert_call(self): + sb = MagicMock() + sb.select.return_value = [ + _make_tool("a", product_id=""), + _make_tool("b", product_id=""), + ] + + report = populate_supply_items(sb) + + assert len(report.skipped) == 2 + sb.upsert.assert_not_called() + + def test_select_requests_correct_columns(self): + sb = MagicMock() + sb.select.return_value = [] + + populate_supply_items(sb) + + sb.select.assert_called_once() + kwargs = sb.select.call_args.kwargs + cols = kwargs["columns"] + for c in ("fusion_guid", "description", "product_id", "type"): + assert c in cols + + +# --------------------------------------------------------------- +# CLI +# --------------------------------------------------------------- +class TestCLI: + @patch("populate_supply_items.SupabaseClient") + 
@patch("populate_supply_items.populate_supply_items") + def test_exit_0_on_success(self, mock_pop, mock_sb): + from populate_supply_items import PopulateReport + rpt = PopulateReport() + rpt.results.append(RowResult("a", "staged")) + rpt.end_time = 1.0 + mock_pop.return_value = rpt + + assert main([]) == 0 + + @patch("populate_supply_items.SupabaseClient") + @patch("populate_supply_items.populate_supply_items") + def test_exit_1_on_partial_failure(self, mock_pop, mock_sb): + from populate_supply_items import PopulateReport + rpt = PopulateReport() + rpt.results.append(RowResult("a", "staged")) + rpt.results.append(RowResult("b", "fail", "boom")) + rpt.end_time = 1.0 + mock_pop.return_value = rpt + + assert main([]) == 1 + + @patch("populate_supply_items.SupabaseClient") + @patch("populate_supply_items.populate_supply_items") + def test_exit_2_on_no_tools(self, mock_pop, mock_sb): + from populate_supply_items import PopulateReport + mock_pop.return_value = PopulateReport(end_time=1.0) + + assert main([]) == 2 + + @patch("populate_supply_items.SupabaseClient", side_effect=RuntimeError("no key")) + def test_exit_2_on_config_error(self, _sb): + assert main([]) == 2 diff --git a/tests/test_run_dev.py b/tests/test_run_dev.py new file mode 100644 index 0000000..b2dfb3d --- /dev/null +++ b/tests/test_run_dev.py @@ -0,0 +1,155 @@ +""" +Tests for run_dev.py — local dev launcher with force-override semantics. + +The whole point of run_dev.py is the OPPOSITE of bootstrap.py: +bootstrap uses setdefault (real shell wins), run_dev uses direct +assignment (file wins). These tests pin down that contract. +""" +import os + +import pytest + +from run_dev import force_override_from_env_local + + +# ───────────────────────────────────────────── +# Missing file is a no-op +# ───────────────────────────────────────────── +class TestMissingFile: + def test_missing_file_returns_zero(self, tmp_path): + missing = tmp_path / "does-not-exist.env" + assert force_override_from_env_local(missing) == 0 + + def test_missing_file_does_not_raise(self, tmp_path): + nowhere = tmp_path / "nope" / "alsonope" / ".env.local" + force_override_from_env_local(nowhere) # no exception + + +# ───────────────────────────────────────────── +# Override behavior — the whole point +# ───────────────────────────────────────────── +class TestOverrideSemantics: + def test_overrides_existing_env_var(self, tmp_path, monkeypatch): + f = tmp_path / ".env" + f.write_text("FOO=from-file\n") + monkeypatch.setenv("FOO", "from-shell") + + changed = force_override_from_env_local(f) + + assert changed == 1 + assert os.environ["FOO"] == "from-file" + + def test_sets_var_when_unset(self, tmp_path, monkeypatch): + f = tmp_path / ".env" + f.write_text("FOO=from-file\n") + monkeypatch.delenv("FOO", raising=False) + + changed = force_override_from_env_local(f) + + assert changed == 1 + assert os.environ["FOO"] == "from-file" + + def test_no_change_count_when_already_correct(self, tmp_path, monkeypatch): + f = tmp_path / ".env" + f.write_text("FOO=already-correct\n") + monkeypatch.setenv("FOO", "already-correct") + + changed = force_override_from_env_local(f) + + # The shell already has the right value — counts as zero changes + assert changed == 0 + assert os.environ["FOO"] == "already-correct" + + def test_partial_override_multiple_vars(self, tmp_path, monkeypatch): + f = tmp_path / ".env" + f.write_text("FOO=new-foo\nBAR=new-bar\nBAZ=new-baz\n") + monkeypatch.setenv("FOO", "old-foo") # will be overridden + monkeypatch.setenv("BAR", "new-bar") # already 
correct + monkeypatch.delenv("BAZ", raising=False) # unset + + changed = force_override_from_env_local(f) + + # FOO: changed, BAR: no-op, BAZ: added → 2 changes + assert changed == 2 + assert os.environ["FOO"] == "new-foo" + assert os.environ["BAR"] == "new-bar" + assert os.environ["BAZ"] == "new-baz" + + +# ───────────────────────────────────────────── +# Parsing — comments, blanks, quotes +# ───────────────────────────────────────────── +class TestParsing: + def test_skips_comments(self, tmp_path, monkeypatch): + f = tmp_path / ".env" + f.write_text("# comment\nFOO=bar\n# another\n") + monkeypatch.delenv("FOO", raising=False) + + changed = force_override_from_env_local(f) + + assert changed == 1 + assert os.environ["FOO"] == "bar" + + def test_skips_blank_lines(self, tmp_path, monkeypatch): + f = tmp_path / ".env" + f.write_text("\n\nFOO=bar\n\n\n") + monkeypatch.delenv("FOO", raising=False) + + changed = force_override_from_env_local(f) + assert changed == 1 + + def test_skips_lines_without_equals(self, tmp_path, monkeypatch): + f = tmp_path / ".env" + f.write_text("not-a-pair\nFOO=bar\n") + monkeypatch.delenv("FOO", raising=False) + + changed = force_override_from_env_local(f) + assert changed == 1 + + def test_strips_double_quotes(self, tmp_path, monkeypatch): + f = tmp_path / ".env" + f.write_text('FOO="bar baz"\n') + monkeypatch.delenv("FOO", raising=False) + + force_override_from_env_local(f) + assert os.environ["FOO"] == "bar baz" + + def test_strips_single_quotes(self, tmp_path, monkeypatch): + f = tmp_path / ".env" + f.write_text("FOO='bar baz'\n") + monkeypatch.delenv("FOO", raising=False) + + force_override_from_env_local(f) + assert os.environ["FOO"] == "bar baz" + + def test_handles_value_with_equals(self, tmp_path, monkeypatch): + f = tmp_path / ".env" + f.write_text("URL=https://example.com/?a=1&b=2\n") + monkeypatch.delenv("URL", raising=False) + + force_override_from_env_local(f) + assert os.environ["URL"] == "https://example.com/?a=1&b=2" + + +# ───────────────────────────────────────────── +# Contract: run_dev opposite of bootstrap +# ───────────────────────────────────────────── +class TestRunDevVsBootstrap: + def test_run_dev_overrides_where_bootstrap_would_not(self, tmp_path, monkeypatch): + """ + Pin down the differing semantics. With the same .env.local content + and pre-existing shell env, bootstrap.setdefault keeps the shell + value while run_dev.force_override replaces it. + """ + f = tmp_path / ".env" + f.write_text("CRED=from-file\n") + monkeypatch.setenv("CRED", "from-shell") + + # bootstrap.load_env_local is the SAFE path: shell wins + from bootstrap import load_env_local + load_env_local(f) + assert os.environ["CRED"] == "from-shell" + + # run_dev.force_override is the DEV path: file wins + force_override_from_env_local(f) + assert os.environ["CRED"] == "from-file" diff --git a/tests/test_supabase_client.py b/tests/test_supabase_client.py new file mode 100644 index 0000000..e92bfb3 --- /dev/null +++ b/tests/test_supabase_client.py @@ -0,0 +1,176 @@ +""" +Tests for supabase_client.py — thin PostgREST wrapper. + +Focuses on the contract: + - Config errors when env vars are missing + - Headers are set correctly on the session + - URL building routes to /rest/v1/ + - delete() refuses unfiltered calls + - HTTP errors surface as SupabaseHTTPError + +All HTTP traffic is patched — no real network calls. 
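+
+Assumed call surface, inferred from the tests below rather than read off the
+module itself:
+
+    client = SupabaseClient()   # needs SUPABASE_URL + SUPABASE_SERVICE_ROLE_KEY
+    client.select("tools")                                   # GET /rest/v1/tools
+    client.upsert("tools", rows, on_conflict="fusion_guid")  # Prefer: merge-duplicates
+    client.delete("tools", filters={"id": "eq.1"})           # refuses filters={}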
+""" +from __future__ import annotations + +import json +from unittest.mock import patch + +import pytest + +from supabase_client import ( + SupabaseClient, + SupabaseConfigError, + SupabaseHTTPError, +) + + +# ───────────────────────────────────────────── +# Config errors +# ───────────────────────────────────────────── +class TestConfigErrors: + def test_missing_url_raises(self, monkeypatch): + monkeypatch.delenv("SUPABASE_URL", raising=False) + monkeypatch.setenv("SUPABASE_SERVICE_ROLE_KEY", "k") + with pytest.raises(SupabaseConfigError, match="SUPABASE_URL"): + SupabaseClient() + + def test_missing_key_raises(self, monkeypatch): + monkeypatch.setenv("SUPABASE_URL", "https://x.supabase.co") + monkeypatch.delenv("SUPABASE_SERVICE_ROLE_KEY", raising=False) + with pytest.raises(SupabaseConfigError, match="SUPABASE_SERVICE_ROLE_KEY"): + SupabaseClient() + + def test_explicit_args_override_env(self, monkeypatch): + monkeypatch.delenv("SUPABASE_URL", raising=False) + monkeypatch.delenv("SUPABASE_SERVICE_ROLE_KEY", raising=False) + client = SupabaseClient( + url="https://explicit.supabase.co", + service_role_key="explicit-key", + ) + assert client.url == "https://explicit.supabase.co" + assert client.key == "explicit-key" + + +# ───────────────────────────────────────────── +# Session headers +# ───────────────────────────────────────────── +class TestHeaders: + def test_both_apikey_and_bearer_set(self, monkeypatch): + monkeypatch.setenv("SUPABASE_URL", "https://x.supabase.co") + monkeypatch.setenv("SUPABASE_SERVICE_ROLE_KEY", "abc123") + client = SupabaseClient() + assert client._session.headers["apikey"] == "abc123" + assert client._session.headers["Authorization"] == "Bearer abc123" + + def test_trailing_slash_stripped(self, monkeypatch): + monkeypatch.setenv("SUPABASE_URL", "https://x.supabase.co/") + monkeypatch.setenv("SUPABASE_SERVICE_ROLE_KEY", "k") + client = SupabaseClient() + assert client.url == "https://x.supabase.co" + + +# ───────────────────────────────────────────── +# URL building +# ───────────────────────────────────────────── +class TestTableUrl: + def test_table_url_format(self, monkeypatch): + monkeypatch.setenv("SUPABASE_URL", "https://x.supabase.co") + monkeypatch.setenv("SUPABASE_SERVICE_ROLE_KEY", "k") + client = SupabaseClient() + assert ( + client._table_url("tools") + == "https://x.supabase.co/rest/v1/tools" + ) + + +# ───────────────────────────────────────────── +# Delete safety guard +# ───────────────────────────────────────────── +class TestDeleteSafety: + def test_delete_without_filters_raises(self, monkeypatch): + monkeypatch.setenv("SUPABASE_URL", "https://x.supabase.co") + monkeypatch.setenv("SUPABASE_SERVICE_ROLE_KEY", "k") + client = SupabaseClient() + with pytest.raises(ValueError, match="at least one filter"): + client.delete("tools", filters={}) + + +# ───────────────────────────────────────────── +# HTTP error surfacing +# ───────────────────────────────────────────── +class TestErrorHandling: + def test_non_2xx_raises_supabase_http_error(self, monkeypatch): + monkeypatch.setenv("SUPABASE_URL", "https://x.supabase.co") + monkeypatch.setenv("SUPABASE_SERVICE_ROLE_KEY", "k") + client = SupabaseClient() + + class FakeResponse: + ok = False + status_code = 400 + url = "https://x.supabase.co/rest/v1/foo" + content = b'{"message": "bad request"}' + + def json(self): + return {"message": "bad request"} + + with patch.object(client._session, "get", return_value=FakeResponse()): + with pytest.raises(SupabaseHTTPError) as exc_info: + client.select("foo") + 
assert exc_info.value.status == 400 + assert exc_info.value.body == {"message": "bad request"} + + def test_2xx_with_empty_body_returns_empty_list(self, monkeypatch): + monkeypatch.setenv("SUPABASE_URL", "https://x.supabase.co") + monkeypatch.setenv("SUPABASE_SERVICE_ROLE_KEY", "k") + client = SupabaseClient() + + class FakeResponse: + ok = True + status_code = 204 + content = b"" + + with patch.object(client._session, "delete", return_value=FakeResponse()): + result = client.delete("foo", filters={"id": "eq.1"}) + assert result == [] + + +# ───────────────────────────────────────────── +# Upsert request shape +# ───────────────────────────────────────────── +class TestUpsertRequestShape: + def test_upsert_sends_merge_duplicates_and_on_conflict(self, monkeypatch): + monkeypatch.setenv("SUPABASE_URL", "https://x.supabase.co") + monkeypatch.setenv("SUPABASE_SERVICE_ROLE_KEY", "k") + client = SupabaseClient() + + captured = {} + + class FakeResponse: + ok = True + status_code = 201 + content = b"[]" + + def json(self): + return [] + + def fake_post(url, data=None, headers=None, params=None, timeout=None): + captured["url"] = url + captured["data"] = data + captured["headers"] = headers + captured["params"] = params + return FakeResponse() + + with patch.object(client._session, "post", side_effect=fake_post): + client.upsert( + "tools", + {"fusion_guid": "abc", "vendor": "V"}, + on_conflict="fusion_guid", + ) + + assert captured["url"].endswith("/rest/v1/tools") + assert "resolution=merge-duplicates" in captured["headers"]["Prefer"] + assert captured["params"] == {"on_conflict": "fusion_guid"} + # Body is JSON-serialized list + assert json.loads(captured["data"]) == [ + {"fusion_guid": "abc", "vendor": "V"} + ] diff --git a/tests/test_sync.py b/tests/test_sync.py new file mode 100644 index 0000000..62c97eb --- /dev/null +++ b/tests/test_sync.py @@ -0,0 +1,387 @@ +""" +Tests for sync.py — nightly sync CLI entrypoint. + +Covers: + - APS cloud sync path (mocked APS client + Supabase) + - Local ADC fallback path (mocked loader) + - Validation gate (libraries that fail validation are rejected) + - --dry-run mode (no Supabase writes) + - --local flag (skips APS, goes straight to local) + - Fallback from APS to local on auth failure + - Exit codes (0 = success, 1 = partial failure, 2 = fatal) + - SyncReport summary helpers + +All I/O is mocked — no real network or filesystem calls. 
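+
+Assumed CLI surface, inferred from the flags exercised below:
+
+    python sync.py              # APS first; falls back to local on auth error
+    python sync.py --local      # skip APS, read the local ADC library files
+    python sync.py --dry-run    # validate only, suppress Supabase writes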
+""" +from __future__ import annotations + +import logging +from unittest.mock import MagicMock, patch + +import pytest + +from sync import ( + main, + sync_from_aps, + sync_from_local, + LibraryResult, + SyncReport, +) +from aps_client import APSAuthError, APSConfigError +from validate_library import ValidationResult + + +# ───────────────────────────────────────────── +# Helpers +# ───────────────────────────────────────────── +def _make_folder_contents(*names: str) -> list[dict]: + """Build a fake APS folder contents response.""" + items = [] + for name in names: + items.append({ + "type": "items", + "id": f"urn:adsk.wipprod:dm.lineage:{name}", + "attributes": {"displayName": f"{name}.json"}, + }) + return items + + +def _make_tip(storage_urn: str = "urn:adsk.objects:os.object:bucket/obj") -> dict: + return { + "relationships": { + "storage": { + "data": {"id": storage_urn}, + }, + }, + } + + +def _passing_validation(library_name: str, tool_count: int = 10) -> ValidationResult: + return ValidationResult( + library_name=library_name, + passed=True, + tool_count=tool_count, + sync_candidate_count=tool_count, + ) + + +def _failing_validation(library_name: str) -> ValidationResult: + from validate_library import ValidationIssue + return ValidationResult( + library_name=library_name, + passed=False, + tool_count=5, + sync_candidate_count=5, + issues=[ValidationIssue( + severity="FAIL", + rule="TEST_RULE", + tool_index=0, + tool_description="test tool", + field="guid", + value=None, + message="missing guid", + )], + ) + + +# ───────────────────────────────────────────── +# SyncReport +# ───────────────────────────────────────────── +class TestSyncReport: + def test_succeeded_failed_skipped(self): + report = SyncReport(source="test") + report.results = [ + LibraryResult("A", "success", tools=10, presets=20), + LibraryResult("B", "fail", message="boom"), + LibraryResult("C", "skipped", message="empty"), + LibraryResult("D", "success", tools=5, presets=8), + ] + assert len(report.succeeded) == 2 + assert len(report.failed) == 1 + assert len(report.skipped) == 1 + assert report.total_tools == 15 + assert report.total_presets == 28 + + def test_elapsed(self): + report = SyncReport(source="test", start_time=100.0, end_time=105.5) + assert report.elapsed == pytest.approx(5.5) + + +# ───────────────────────────────────────────── +# sync_from_aps +# ───────────────────────────────────────────── +class TestSyncFromAps: + @patch("sync.SupabaseClient") + @patch("sync.sync_library") + @patch("sync.validate_library") + @patch("sync.APSClient") + def test_full_sync_success(self, MockAPS, mock_validate, mock_sync, MockSB): + aps = MockAPS.return_value + aps.get_folder_contents.return_value = _make_folder_contents("LIB_A", "LIB_B") + aps.get_item_tip.return_value = _make_tip() + aps.download_tool_library.return_value = [{"guid": "g1", "type": "flat end mill"}] + + mock_validate.return_value = _passing_validation("LIB_A") + mock_sync.return_value = {"tools": 10, "presets": 20} + + report = sync_from_aps(dry_run=False) + + assert len(report.succeeded) == 2 + assert report.total_tools == 20 + assert report.total_presets == 40 + assert mock_sync.call_count == 2 + assert report.source == "aps" + + @patch("sync.APSClient") + @patch("sync.validate_library") + def test_dry_run_no_supabase(self, mock_validate, MockAPS): + aps = MockAPS.return_value + aps.get_folder_contents.return_value = _make_folder_contents("LIB_A") + aps.get_item_tip.return_value = _make_tip() + aps.download_tool_library.return_value = [{"guid": 
"g1", "type": "drill"}] + + mock_validate.return_value = _passing_validation("LIB_A", tool_count=5) + + report = sync_from_aps(dry_run=True) + + assert len(report.succeeded) == 1 + assert report.results[0].message == "dry-run — validated OK, no write" + # sync_library should NOT have been called + # (SupabaseClient is never instantiated either — no mock needed) + + @patch("sync.APSClient") + @patch("sync.validate_library") + def test_validation_failure_blocks_sync(self, mock_validate, MockAPS): + aps = MockAPS.return_value + aps.get_folder_contents.return_value = _make_folder_contents("BAD_LIB") + aps.get_item_tip.return_value = _make_tip() + aps.download_tool_library.return_value = [{"guid": "g1"}] + + mock_validate.return_value = _failing_validation("BAD_LIB") + + report = sync_from_aps(dry_run=False) + + assert len(report.failed) == 1 + assert "Validation failed" in report.results[0].message + + @patch("sync.APSClient") + def test_empty_library_skipped(self, MockAPS): + aps = MockAPS.return_value + aps.get_folder_contents.return_value = _make_folder_contents("EMPTY") + aps.get_item_tip.return_value = _make_tip() + aps.download_tool_library.return_value = [] + + report = sync_from_aps(dry_run=False) + + assert len(report.skipped) == 1 + + @patch("sync.APSClient") + def test_no_storage_urn_fails(self, MockAPS): + aps = MockAPS.return_value + aps.get_folder_contents.return_value = _make_folder_contents("BROKEN") + aps.get_item_tip.return_value = {"relationships": {"storage": {"data": {"id": ""}}}} + + report = sync_from_aps(dry_run=False) + + assert len(report.failed) == 1 + assert "storage URN" in report.results[0].message + + @patch("sync.APSClient") + def test_aps_config_error_propagates(self, MockAPS): + MockAPS.return_value._require_config.side_effect = APSConfigError("no creds") + with pytest.raises(APSConfigError): + sync_from_aps() + + +# ───────────────────────────────────────────── +# sync_from_local +# ───────────────────────────────────────────── +class TestSyncFromLocal: + @patch("sync.hash_file", return_value="abc123") + @patch("sync.SupabaseClient") + @patch("sync.sync_library") + @patch("sync.validate_library") + @patch("sync.load_all_libraries") + def test_full_local_sync( + self, mock_load, mock_validate, mock_sync, MockSB, mock_hash, + ): + mock_cam = MagicMock() + mock_cam.exists.return_value = True + mock_file = MagicMock() + mock_file.exists.return_value = True + mock_cam.__truediv__ = MagicMock(return_value=mock_file) + + mock_load.return_value = { + "LIB_A": [{"guid": "g1", "type": "drill"}], + } + mock_validate.return_value = _passing_validation("LIB_A") + mock_sync.return_value = {"tools": 5, "presets": 10} + + with patch("sync.CAM_TOOLS_DIR", mock_cam): + report = sync_from_local(dry_run=False) + + assert len(report.succeeded) == 1 + assert report.source == "local" + assert mock_sync.call_count == 1 + + @patch("sync.validate_library") + @patch("sync.load_all_libraries") + def test_local_dry_run(self, mock_load, mock_validate): + mock_cam = MagicMock() + mock_cam.exists.return_value = True + + mock_load.return_value = { + "LIB_A": [{"guid": "g1"}], + } + mock_validate.return_value = _passing_validation("LIB_A", tool_count=3) + + with patch("sync.CAM_TOOLS_DIR", mock_cam): + report = sync_from_local(dry_run=True) + + assert len(report.succeeded) == 1 + assert "dry-run" in report.results[0].message + + def test_missing_directory(self): + mock_cam = MagicMock() + mock_cam.exists.return_value = False + + with patch("sync.CAM_TOOLS_DIR", mock_cam): + report = 
sync_from_local(dry_run=False) + + assert len(report.results) == 0 + + @patch("sync.validate_library") + @patch("sync.load_all_libraries") + def test_validation_failure(self, mock_load, mock_validate): + mock_cam = MagicMock() + mock_cam.exists.return_value = True + + mock_load.return_value = {"BAD": [{"guid": "g1"}]} + mock_validate.return_value = _failing_validation("BAD") + + with patch("sync.CAM_TOOLS_DIR", mock_cam): + report = sync_from_local(dry_run=False) + + assert len(report.failed) == 1 + + +# ───────────────────────────────────────────── +# CLI (main) +# ───────────────────────────────────────────── +class TestMain: + @patch("sync.populate_supply_items") + @patch("sync.SupabaseClient") + @patch("sync.sync_from_aps") + def test_exit_0_on_success(self, mock_aps, _sb, _pop): + report = SyncReport(source="aps", start_time=0, end_time=1) + report.results = [LibraryResult("A", "success", tools=5, presets=10)] + mock_aps.return_value = report + + assert main([]) == 0 + + @patch("sync.populate_supply_items") + @patch("sync.SupabaseClient") + @patch("sync.sync_from_aps") + def test_exit_1_on_partial_failure(self, mock_aps, _sb, _pop): + report = SyncReport(source="aps", start_time=0, end_time=1) + report.results = [ + LibraryResult("A", "success", tools=5, presets=10), + LibraryResult("B", "fail", message="boom"), + ] + mock_aps.return_value = report + + assert main([]) == 1 + + @patch("sync.populate_supply_items") + @patch("sync.SupabaseClient") + @patch("sync.sync_from_local") + @patch("sync.sync_from_aps", side_effect=APSAuthError("expired")) + def test_fallback_to_local_on_auth_error(self, mock_aps, mock_local, _sb, _pop): + report = SyncReport(source="local", start_time=0, end_time=1) + report.results = [LibraryResult("A", "success", tools=5, presets=10)] + mock_local.return_value = report + + assert main([]) == 0 + mock_local.assert_called_once() + + @patch("sync.populate_supply_items") + @patch("sync.SupabaseClient") + @patch("sync.sync_from_local") + def test_local_flag_skips_aps(self, mock_local, _sb, _pop): + report = SyncReport(source="local", start_time=0, end_time=1) + report.results = [LibraryResult("A", "success", tools=5, presets=10)] + mock_local.return_value = report + + assert main(["--local"]) == 0 + + @patch("sync.sync_from_local") + @patch("sync.sync_from_aps", side_effect=APSConfigError("no creds")) + def test_exit_2_when_no_libraries(self, mock_aps, mock_local): + report = SyncReport(source="local", start_time=0, end_time=1) + report.results = [] + mock_local.return_value = report + + assert main([]) == 2 + + @patch("sync.sync_from_aps") + def test_dry_run_flag(self, mock_aps): + report = SyncReport(source="aps", start_time=0, end_time=1) + report.results = [LibraryResult("A", "success", tools=5, message="dry-run")] + mock_aps.return_value = report + + assert main(["--dry-run"]) == 0 + mock_aps.assert_called_once_with(dry_run=True) + + +# ───────────────────────────────────────────── +# Post-sync supply-item staging hook (#80) +# ───────────────────────────────────────────── +class TestPostSyncHook: + @patch("sync.populate_supply_items") + @patch("sync.SupabaseClient") + @patch("sync.sync_from_aps") + def test_populate_called_after_successful_sync(self, mock_aps, mock_sb, mock_pop): + from populate_supply_items import PopulateReport, RowResult + report = SyncReport(source="aps", start_time=0, end_time=1) + report.results = [LibraryResult("A", "success", tools=5, presets=10)] + mock_aps.return_value = report + pop_rpt = PopulateReport() + pop_rpt.results = 
[RowResult("g1", "staged")] + pop_rpt.end_time = 1.0 + mock_pop.return_value = pop_rpt + + assert main([]) == 0 + mock_pop.assert_called_once() + + @patch("sync.populate_supply_items") + @patch("sync.SupabaseClient") + @patch("sync.sync_from_aps") + def test_populate_not_called_on_dry_run(self, mock_aps, mock_sb, mock_pop): + report = SyncReport(source="aps", start_time=0, end_time=1) + report.results = [LibraryResult("A", "success", tools=5)] + mock_aps.return_value = report + + assert main(["--dry-run"]) == 0 + mock_pop.assert_not_called() + + @patch("sync.populate_supply_items") + @patch("sync.SupabaseClient") + @patch("sync.sync_from_aps") + def test_populate_not_called_when_no_succeeded(self, mock_aps, mock_sb, mock_pop): + report = SyncReport(source="aps", start_time=0, end_time=1) + report.results = [LibraryResult("A", "fail", message="boom")] + mock_aps.return_value = report + + main([]) + mock_pop.assert_not_called() + + @patch("sync.populate_supply_items", side_effect=RuntimeError("staging broke")) + @patch("sync.SupabaseClient") + @patch("sync.sync_from_aps") + def test_populate_failure_is_nonfatal(self, mock_aps, mock_sb, mock_pop): + report = SyncReport(source="aps", start_time=0, end_time=1) + report.results = [LibraryResult("A", "success", tools=5, presets=10)] + mock_aps.return_value = report + + # Should still return 0 despite staging failure + assert main([]) == 0 diff --git a/tests/test_sync_supabase.py b/tests/test_sync_supabase.py new file mode 100644 index 0000000..d2840de --- /dev/null +++ b/tests/test_sync_supabase.py @@ -0,0 +1,389 @@ +""" +Tests for sync_supabase.py — Fusion → Supabase normalization. + +Covers all eight normalization rules from the Supabase Schema Design +(Notion · 2026-04-08): + + 1. Unit conversion (inches → mm) on dimensional geometry + 2. product_id whitespace cleanup (external only) + 3. Preset GUID curly-brace strip + 4. Raw vendor casing preserved + 5. JSON null passthrough in presets (do not substitute 0) + 6. Sync filter excludes holder / probe + 7. shaft.segments JSONB passthrough, absent = NULL + 8. post_process.comment uses .get, never direct access + +Also covers the batch-level sync_library flow via a fake Supabase +client so no network traffic happens in the test suite. +""" +from __future__ import annotations + +import pytest + +from sync_supabase import ( + INCHES_TO_MM, + build_preset_rows, + build_tool_row, + normalize_preset_guid, + normalize_product_id, + sync_library, + unit_scale, +) + + +# ───────────────────────────────────────────── +# Rule 1 — unit conversion +# ───────────────────────────────────────────── +class TestUnitScale: + def test_inches_value_is_multiplied(self): + # 0.5 in → 12.7 mm + assert unit_scale(0.5, is_inches=True) == pytest.approx(12.7) + + def test_millimeters_value_is_unchanged(self): + assert unit_scale(12.7, is_inches=False) == pytest.approx(12.7) + + def test_integer_value_is_coerced_and_scaled(self): + assert unit_scale(1, is_inches=True) == pytest.approx(25.4) + + def test_none_passes_through(self): + assert unit_scale(None, is_inches=True) is None + assert unit_scale(None, is_inches=False) is None + + def test_bool_is_rejected_as_non_dimensional(self): + # Python bool is a subclass of int — we want it rejected, not scaled. 
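+        # A minimal guard sketch for this behavior (an assumption; the real
+        # unit_scale may be structured differently):
+        #     if value is None or isinstance(value, bool):
+        #         return None
+        #     try:
+        #         return float(value) * (INCHES_TO_MM if is_inches else 1.0)
+        #     except (TypeError, ValueError):
+        #         return None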
+ assert unit_scale(True, is_inches=True) is None + assert unit_scale(False, is_inches=True) is None + + def test_non_numeric_string_returns_none(self): + assert unit_scale("banana", is_inches=True) is None + + +# ───────────────────────────────────────────── +# Rule 2 — product_id cleanup +# ───────────────────────────────────────────── +class TestNormalizeProductId: + def test_strips_leading_and_trailing_whitespace(self): + assert normalize_product_id(" 990910 ") == "990910" + + def test_preserves_internal_space_sandvik(self): + # Sandvik real example — the space between CK04P and 1640 must survive. + raw = "RA216.33-0845-CK04P 1640" + assert normalize_product_id(raw) == raw + + def test_preserves_internal_dots_and_dashes(self): + assert normalize_product_id("RA216.33-0845") == "RA216.33-0845" + + def test_none_returns_none(self): + assert normalize_product_id(None) is None + + def test_all_whitespace_returns_none(self): + assert normalize_product_id(" ") is None + + def test_non_string_is_coerced(self): + assert normalize_product_id(990910) == "990910" + + +# ───────────────────────────────────────────── +# Rule 3 — preset GUID brace strip +# ───────────────────────────────────────────── +class TestNormalizePresetGuid: + def test_strips_matched_braces(self): + assert normalize_preset_guid("{6a2d224-abc-def}") == "6a2d224-abc-def" + + def test_leaves_plain_guid_alone(self): + assert normalize_preset_guid("6a2d224-abc-def") == "6a2d224-abc-def" + + def test_unmatched_left_brace_kept(self): + assert normalize_preset_guid("{6a2d224") == "{6a2d224" + + def test_unmatched_right_brace_kept(self): + assert normalize_preset_guid("6a2d224}") == "6a2d224}" + + def test_none_returns_none(self): + assert normalize_preset_guid(None) is None + + def test_empty_braces_returns_none(self): + assert normalize_preset_guid("{}") is None + + +# ───────────────────────────────────────────── +# Rule 4 — vendor casing preserved +# ───────────────────────────────────────────── +class TestVendorCasing: + def test_uppercase_vendor_preserved(self): + tool = _tool(vendor="HARVEY TOOL") + assert build_tool_row(tool)["vendor"] == "HARVEY TOOL" + + def test_lowercase_vendor_preserved(self): + tool = _tool(vendor="deltamill") + assert build_tool_row(tool)["vendor"] == "deltamill" + + def test_titlecase_vendor_preserved(self): + tool = _tool(vendor="Garr Tool") + assert build_tool_row(tool)["vendor"] == "Garr Tool" + + +# ───────────────────────────────────────────── +# Rule 5 — JSON null passthrough +# ───────────────────────────────────────────── +class TestPresetNullPassthrough: + def test_explicit_null_becomes_sql_null(self): + tool = _tool( + presets=[ + { + "guid": "p1", + "name": "Default", + "f_n": None, + "f_z": None, + "v_c": None, + } + ] + ) + rows = build_preset_rows(tool, tool_id="tool-uuid") + assert len(rows) == 1 + assert rows[0]["f_n"] is None + assert rows[0]["f_z"] is None + assert rows[0]["v_c"] is None + + def test_real_values_preserved(self): + tool = _tool( + presets=[{"guid": "p1", "name": "D", "v_c": 120.5, "n": 12000.0}] + ) + rows = build_preset_rows(tool, tool_id="tool-uuid") + assert rows[0]["v_c"] == 120.5 + assert rows[0]["n"] == 12000.0 + + def test_absent_field_becomes_null(self): + tool = _tool(presets=[{"guid": "p1", "name": "D"}]) + rows = build_preset_rows(tool, tool_id="tool-uuid") + assert rows[0]["f_n"] is None + assert rows[0]["v_c"] is None + + +# ───────────────────────────────────────────── +# Rule 6 — sync filter (holder + probe excluded) +# 
───────────────────────────────────────────── +class TestSyncFilter: + def test_holders_excluded_from_tool_rows(self, fake_supabase): + tools = [ + _tool(guid="t1", type="flat end mill"), + _tool(guid="h1", type="holder"), + _tool(guid="p1", type="probe"), + _tool(guid="t2", type="drill"), + ] + result = sync_library("sample", tools, client=fake_supabase) + assert result["tools"] == 2 + + tool_inserts = fake_supabase.inserts_for("tools") + guids = sorted(r["fusion_guid"] for r in tool_inserts) + assert guids == ["t1", "t2"] + + def test_library_tool_count_reflects_filter(self, fake_supabase): + tools = [ + _tool(guid="t1", type="flat end mill"), + _tool(guid="h1", type="holder"), + ] + sync_library("sample", tools, client=fake_supabase) + lib_row = fake_supabase.inserts_for("libraries")[0] + assert lib_row["tool_count"] == 1 + + +# ───────────────────────────────────────────── +# Rule 7 — shaft segments passthrough +# ───────────────────────────────────────────── +class TestShaftPassthrough: + def test_missing_shaft_is_null(self): + tool = _tool() + tool.pop("shaft", None) + row = build_tool_row(tool) + assert row["shaft_segments"] is None + + def test_shaft_without_segments_is_null(self): + tool = _tool() + tool["shaft"] = {"type": "shaft"} + row = build_tool_row(tool) + assert row["shaft_segments"] is None + + def test_shaft_with_segments_is_passthrough(self): + segments = [{"lower": 0.0, "upper": 10.0, "diameter": 6.0}] + tool = _tool() + tool["shaft"] = {"type": "shaft", "segments": segments} + row = build_tool_row(tool) + assert row["shaft_segments"] == segments + + def test_empty_segments_list_is_preserved_not_nulled(self): + # Helical ships stubs with segments=[] — we store the empty list, + # not NULL, so we can distinguish "stub present" from "no shaft key". + tool = _tool() + tool["shaft"] = {"type": "shaft", "segments": []} + row = build_tool_row(tool) + assert row["shaft_segments"] == [] + + +# ───────────────────────────────────────────── +# Rule 8 — pp comment .get access +# ───────────────────────────────────────────── +class TestPostProcessCommentSafe: + def test_missing_comment_does_not_raise(self): + # Sandvik omits post-process.comment entirely. + tool = _tool() + tool["post-process"] = {"number": 0} + row = build_tool_row(tool) + assert row["pp_comment"] is None + + def test_comment_present(self): + tool = _tool() + tool["post-process"] = {"number": 0, "comment": "(Corner Chamfer 0.2x45°)"} + row = build_tool_row(tool) + assert row["pp_comment"] == "(Corner Chamfer 0.2x45°)" + + def test_missing_post_process_does_not_raise(self): + tool = _tool() + tool.pop("post-process", None) + row = build_tool_row(tool) + assert row["pp_comment"] is None + assert row["pp_number"] is None + + +# ───────────────────────────────────────────── +# Geometry unit conversion end-to-end +# ───────────────────────────────────────────── +class TestBuildToolRowGeometry: + def test_inches_library_converts_length_fields(self): + tool = _tool( + unit="inches", + geometry={"DC": 0.25, "OAL": 2.0, "NOF": 4, "HAND": True, "SIG": 118}, + ) + row = build_tool_row(tool) + assert row["geo_dc"] == pytest.approx(0.25 * INCHES_TO_MM) + assert row["geo_oal"] == pytest.approx(2.0 * INCHES_TO_MM) + # NOF and SIG are dimensionless — must not scale. 
+ assert row["geo_nof"] == 4.0 + assert row["geo_sig"] == 118.0 + assert row["geo_hand"] is True + + def test_millimeters_library_preserves_length_fields(self): + tool = _tool( + unit="millimeters", + geometry={"DC": 6.0, "OAL": 60.0, "NOF": 3, "HAND": True}, + ) + row = build_tool_row(tool) + assert row["geo_dc"] == 6.0 + assert row["geo_oal"] == 60.0 + assert row["geo_nof"] == 3.0 + + def test_missing_geometry_field_is_null(self): + tool = _tool(unit="millimeters", geometry={"DC": 6.0}) + row = build_tool_row(tool) + assert row["geo_dc"] == 6.0 + assert row["geo_oal"] is None + assert row["geo_re"] is None + + +# ───────────────────────────────────────────── +# Idempotent re-sync +# ───────────────────────────────────────────── +class TestIdempotency: + def test_rerun_flushes_presets_before_reinsert(self, fake_supabase): + tools = [ + _tool( + guid="t1", + presets=[ + {"guid": "p1", "name": "Aluminum", "n": 12000}, + {"guid": "p2", "name": "Steel", "n": 8000}, + ], + ) + ] + sync_library("lib1", tools, client=fake_supabase) + sync_library("lib1", tools, client=fake_supabase) + + # Each run issues one delete per tool BEFORE inserting its presets. + deletes = [op for op in fake_supabase.ops if op["kind"] == "delete"] + assert len(deletes) == 2 + assert deletes[0]["table"] == "cutting_presets" + assert deletes[0]["filters"]["tool_id"].startswith("eq.") + + +# ───────────────────────────────────────────── +# Test helpers +# ───────────────────────────────────────────── +def _tool(**overrides) -> dict: + """Build a minimal valid tool dict with sensible defaults.""" + base = { + "guid": overrides.pop("guid", "default-guid"), + "type": overrides.pop("type", "flat end mill"), + "description": overrides.pop("description", "test tool"), + "product-id": overrides.pop("product_id", "TEST-001"), + "vendor": overrides.pop("vendor", "Test Vendor"), + "unit": overrides.pop("unit", "millimeters"), + "BMC": overrides.pop("bmc", "carbide"), + "geometry": overrides.pop("geometry", {"DC": 6.0, "OAL": 60.0, "NOF": 3}), + "post-process": overrides.pop( + "post_process", {"number": 0, "comment": ""} + ), + "start-values": {"presets": overrides.pop("presets", [])}, + } + base.update(overrides) + return base + + +class FakeSupabaseClient: + """ + In-memory stand-in for SupabaseClient used by sync_library tests. + Records every call and returns synthesized ids so the ingest + pipeline can complete end-to-end without any network traffic. + """ + + def __init__(self): + self.ops: list[dict] = [] + self._next_id = 0 + + def _make_id(self, prefix: str) -> str: + self._next_id += 1 + return f"{prefix}-{self._next_id:04d}" + + def inserts_for(self, table: str) -> list[dict]: + """All rows sent to ``table`` across insert + upsert ops.""" + rows: list[dict] = [] + for op in self.ops: + if op["kind"] in ("insert", "upsert") and op["table"] == table: + rows.extend(op["rows"]) + return rows + + # ── SupabaseClient interface ─────────────────────────────────── + def upsert(self, table, rows, *, on_conflict, returning="representation"): + if isinstance(rows, dict): + rows = [rows] + rows = [dict(r) for r in rows] + self.ops.append( + {"kind": "upsert", "table": table, "rows": rows, "on_conflict": on_conflict} + ) + # Synthesize ids mirroring the on_conflict key for deterministic + # lookups (guid → id). 
+ echoed = [] + for r in rows: + new = dict(r) + if "id" not in new: + new["id"] = self._make_id(table.split("_")[-1]) + echoed.append(new) + return echoed + + def insert(self, table, rows, *, returning="representation"): + if isinstance(rows, dict): + rows = [rows] + rows = [dict(r) for r in rows] + self.ops.append({"kind": "insert", "table": table, "rows": rows}) + return [{**r, "id": self._make_id(table.split("_")[-1])} for r in rows] + + def delete(self, table, *, filters): + self.ops.append({"kind": "delete", "table": table, "filters": dict(filters)}) + return [] + + def select(self, table, **kwargs): + self.ops.append({"kind": "select", "table": table, "kwargs": dict(kwargs)}) + return [] + + +@pytest.fixture +def fake_supabase(): + return FakeSupabaseClient() diff --git a/tests/test_sync_tool_inventory.py b/tests/test_sync_tool_inventory.py new file mode 100644 index 0000000..9256628 --- /dev/null +++ b/tests/test_sync_tool_inventory.py @@ -0,0 +1,319 @@ +""" +Tests for sync_tool_inventory.py -- Plex -> Supabase qty sync. + +Focus on: + - compute_qty(): pre-signed quantity sum, empty records, bad quantities + - collect_unknown_types(): detection of new transactionType values + - _unwrap_records(): handles bare-list and {data: [...]} envelopes + - sync_tool_inventory(): writes qty_on_hand/qty_tracked/qty_synced_at, + skips writes on --dry-run, logs failures without aborting the batch + - CLI exit codes: 0 success, 1 partial fail, 2 no linked tools + +All Plex + Supabase I/O is mocked. +""" +from __future__ import annotations + +from unittest.mock import MagicMock, patch + +import pytest + +from sync_tool_inventory import ( + compute_qty, + collect_unknown_types, + _unwrap_records, + sync_tool_inventory, + main, + KNOWN_TRANSACTION_TYPES, + ToolResult, +) + + +# --------------------------------------------------------------- +# compute_qty +# --------------------------------------------------------------- +class TestComputeQty: + def test_pre_signed_positive_and_negative_sum(self): + # Real-world: PO Receipt +50, Checkout -43, PO Receipt +44 -> +51 + records = [ + {"quantity": 50.0, "transactionType": "PO Receipt"}, + {"quantity": -43.0, "transactionType": "Checkout"}, + {"quantity": 44.0, "transactionType": "Check In"}, + ] + qty, tracked = compute_qty(records) + assert qty == pytest.approx(51.0) + assert tracked is True + + def test_empty_records_zero_and_untracked(self): + qty, tracked = compute_qty([]) + assert qty == 0.0 + assert tracked is False + + def test_single_record_tracked_even_if_zero(self): + # A linked tool with one adjustment whose delta nets to zero is + # still "tracked" -- the distinction is presence of history. 
+ records = [{"quantity": 0, "transactionType": "Correction"}] + qty, tracked = compute_qty(records) + assert qty == 0.0 + assert tracked is True + + def test_missing_quantity_is_skipped(self): + records = [ + {"quantity": 10, "transactionType": "PO Receipt"}, + {"transactionType": "Correction"}, # no quantity key + {"quantity": None, "transactionType": "Correction"}, + ] + qty, tracked = compute_qty(records) + assert qty == pytest.approx(10.0) + # tracked counts records, not summable ones -- Plex returned 3 rows + assert tracked is True + + def test_non_numeric_quantity_is_skipped(self): + records = [ + {"quantity": 5, "transactionType": "PO Receipt"}, + {"quantity": "not-a-number", "transactionType": "Correction"}, + ] + qty, _ = compute_qty(records) + assert qty == pytest.approx(5.0) + + def test_string_numeric_quantity_is_summed(self): + # PostgREST-style numeric strings should still work. + records = [{"quantity": "12.5", "transactionType": "PO Receipt"}] + qty, _ = compute_qty(records) + assert qty == pytest.approx(12.5) + + def test_negative_running_balance_preserved(self): + # Plex occasionally returns a net-negative balance (data-quality + # issue). The sync must preserve it faithfully, not clamp to 0. + records = [ + {"quantity": 10, "transactionType": "PO Receipt"}, + {"quantity": -50, "transactionType": "Checkout"}, + ] + qty, _ = compute_qty(records) + assert qty == pytest.approx(-40.0) + + +# --------------------------------------------------------------- +# collect_unknown_types +# --------------------------------------------------------------- +class TestCollectUnknownTypes: + def test_all_known_returns_empty(self): + records = [ + {"transactionType": "PO Receipt"}, + {"transactionType": "Checkout"}, + {"transactionType": "Correction"}, + {"transactionType": "Check In"}, + ] + assert collect_unknown_types(records) == set() + + def test_null_transaction_type_is_not_unknown(self): + # null is an observed data-quality quirk, not a new type. + assert collect_unknown_types([{"transactionType": None}]) == set() + + def test_new_type_is_flagged(self): + records = [ + {"transactionType": "PO Receipt"}, + {"transactionType": "Scrap"}, + {"transactionType": "Physical Inventory"}, + ] + assert collect_unknown_types(records) == {"Scrap", "Physical Inventory"} + + def test_known_set_matches_docs(self): + # Guard against accidental drift from docs/Plex_API_Reference.md Section 3.6. 
+ assert KNOWN_TRANSACTION_TYPES == { + "PO Receipt", "Checkout", "Correction", "Check In", + } + + +# --------------------------------------------------------------- +# _unwrap_records +# --------------------------------------------------------------- +class TestUnwrapRecords: + def test_bare_list(self): + assert _unwrap_records([{"a": 1}]) == [{"a": 1}] + + def test_data_envelope(self): + assert _unwrap_records({"data": [{"a": 1}]}) == [{"a": 1}] + + def test_empty_data_envelope(self): + assert _unwrap_records({"data": []}) == [] + + def test_missing_data_key(self): + assert _unwrap_records({"error": "nope"}) == [] + + def test_none(self): + assert _unwrap_records(None) == [] + + +# --------------------------------------------------------------- +# sync_tool_inventory +# --------------------------------------------------------------- +def _ok_env(body): + return {"ok": True, "status": 200, "body": body, "error": None} + + +def _fail_env(status=500, error="HTTP 500"): + return {"ok": False, "status": status, "body": None, "error": error} + + +class TestSyncToolInventory: + def _linked_tools(self, *guids): + return [ + {"fusion_guid": g, "plex_supply_item_id": f"plex-{g}"} + for g in guids + ] + + def test_writes_qty_for_each_linked_tool(self): + plex = MagicMock() + sb = MagicMock() + sb.select.return_value = self._linked_tools("a", "b") + plex.get_envelope.side_effect = [ + _ok_env([ + {"quantity": 50, "transactionType": "PO Receipt"}, + {"quantity": -10, "transactionType": "Checkout"}, + ]), + _ok_env([]), # linked but no history + ] + + report = sync_tool_inventory(plex, sb) + + assert len(report.succeeded) == 2 + assert len(report.failed) == 0 + # Two update calls, one per tool + assert sb.update.call_count == 2 + first_call = sb.update.call_args_list[0] + assert first_call.args[0] == "tools" + values = first_call.args[1] + assert values["qty_on_hand"] == pytest.approx(40.0) + assert values["qty_tracked"] is True + assert "qty_synced_at" in values + assert first_call.kwargs["filters"] == {"fusion_guid": "eq.a"} + # Second tool: empty history -> tracked False, qty 0 + second = sb.update.call_args_list[1] + assert second.args[1]["qty_on_hand"] == 0.0 + assert second.args[1]["qty_tracked"] is False + + def test_dry_run_does_not_write(self): + plex = MagicMock() + sb = MagicMock() + sb.select.return_value = self._linked_tools("a") + plex.get_envelope.return_value = _ok_env( + [{"quantity": 5, "transactionType": "PO Receipt"}] + ) + + report = sync_tool_inventory(plex, sb, dry_run=True) + + sb.update.assert_not_called() + assert len(report.succeeded) == 1 + assert report.succeeded[0].qty_on_hand == pytest.approx(5.0) + + def test_plex_failure_is_recorded_and_does_not_abort_batch(self): + plex = MagicMock() + sb = MagicMock() + sb.select.return_value = self._linked_tools("a", "b") + plex.get_envelope.side_effect = [ + _fail_env(500, "boom"), + _ok_env([{"quantity": 3, "transactionType": "PO Receipt"}]), + ] + + report = sync_tool_inventory(plex, sb) + + assert len(report.failed) == 1 + assert len(report.succeeded) == 1 + assert report.failed[0].fusion_guid == "a" + # Only one Supabase write (for the successful tool) + assert sb.update.call_count == 1 + + def test_supabase_update_failure_is_recorded(self): + plex = MagicMock() + sb = MagicMock() + sb.select.return_value = self._linked_tools("a") + plex.get_envelope.return_value = _ok_env( + [{"quantity": 1, "transactionType": "PO Receipt"}] + ) + sb.update.side_effect = RuntimeError("postgrest borked") + + report = sync_tool_inventory(plex, 
sb) + + assert len(report.failed) == 1 + assert "postgrest borked" in report.failed[0].message + + def test_no_linked_tools_returns_empty_report(self): + plex = MagicMock() + sb = MagicMock() + sb.select.return_value = [] + + report = sync_tool_inventory(plex, sb) + + assert report.results == [] + plex.get_envelope.assert_not_called() + sb.update.assert_not_called() + + def test_unknown_transaction_type_logged_and_still_summed(self): + plex = MagicMock() + sb = MagicMock() + sb.select.return_value = self._linked_tools("a") + plex.get_envelope.return_value = _ok_env([ + {"quantity": 7, "transactionType": "PO Receipt"}, + {"quantity": -2, "transactionType": "Scrap"}, # new type + ]) + + report = sync_tool_inventory(plex, sb) + + assert report.succeeded[0].qty_on_hand == pytest.approx(5.0) + assert "Scrap" in report.unknown_transaction_types + + def test_supabase_select_uses_not_null_filter(self): + plex = MagicMock() + sb = MagicMock() + sb.select.return_value = [] + + sync_tool_inventory(plex, sb) + + sb.select.assert_called_once() + kwargs = sb.select.call_args.kwargs + assert kwargs["filters"] == {"plex_supply_item_id": "not.is.null"} + assert "fusion_guid" in kwargs["columns"] + assert "plex_supply_item_id" in kwargs["columns"] + + +# --------------------------------------------------------------- +# CLI +# --------------------------------------------------------------- +class TestCLI: + @patch("sync_tool_inventory.SupabaseClient") + @patch("sync_tool_inventory.PlexClient") + @patch("sync_tool_inventory.sync_tool_inventory") + def test_exit_0_on_full_success(self, mock_sync, mock_plex, mock_sb): + from sync_tool_inventory import SyncReport + rpt = SyncReport() + rpt.results.append(ToolResult("a", "p", "success", 5.0, True, 2)) + rpt.end_time = 1.0 + mock_sync.return_value = rpt + + assert main([]) == 0 + + @patch("sync_tool_inventory.SupabaseClient") + @patch("sync_tool_inventory.PlexClient") + @patch("sync_tool_inventory.sync_tool_inventory") + def test_exit_1_on_partial_failure(self, mock_sync, mock_plex, mock_sb): + from sync_tool_inventory import SyncReport + rpt = SyncReport() + rpt.results.append(ToolResult("a", "p", "success")) + rpt.results.append(ToolResult("b", "q", "fail", message="x")) + rpt.end_time = 1.0 + mock_sync.return_value = rpt + + assert main([]) == 1 + + @patch("sync_tool_inventory.SupabaseClient") + @patch("sync_tool_inventory.PlexClient") + @patch("sync_tool_inventory.sync_tool_inventory") + def test_exit_2_on_no_linked_tools(self, mock_sync, mock_plex, mock_sb): + from sync_tool_inventory import SyncReport + mock_sync.return_value = SyncReport(end_time=1.0) + assert main([]) == 2 + + @patch("sync_tool_inventory.SupabaseClient") + @patch("sync_tool_inventory.PlexClient", side_effect=RuntimeError("no key")) + def test_exit_2_on_config_error(self, _plex, _sb): + assert main([]) == 2 diff --git a/tests/test_tool_library_loader.py b/tests/test_tool_library_loader.py new file mode 100644 index 0000000..bfa2bd2 --- /dev/null +++ b/tests/test_tool_library_loader.py @@ -0,0 +1,176 @@ +""" +Tests for tool_library_loader — JSON parsing, schema validation, +stale-file guard, and directory glob. + +All tests use tmp_path so we don't touch the real CAMTools directory. 
+"""
+import json
+import os
+import time
+from pathlib import Path
+
+import pytest
+
+from tool_library_loader import (
+    load_library,
+    load_all_libraries,
+    report_library_contents,
+    _check_file_age,
+    MAX_FILE_AGE_HOURS,
+)
+
+
+# ─────────────────────────────────────────────
+# Helpers
+# ─────────────────────────────────────────────
+SAMPLE_LIBRARY = {
+    "data": [
+        {"guid": "tool-1", "type": "flat end mill", "description": "5/8 SQ"},
+        {"guid": "tool-2", "type": "drill", "description": "1/4 drill"},
+        {"guid": "tool-3", "type": "holder", "description": "BT30"},
+    ]
+}
+
+
+def write_json(path: Path, payload):
+    path.write_text(json.dumps(payload), encoding="utf-8")
+
+
+# ─────────────────────────────────────────────
+# load_library — happy path
+# ─────────────────────────────────────────────
+class TestLoadLibraryHappyPath:
+    def test_loads_valid_library(self, tmp_path):
+        f = tmp_path / "lib.json"
+        write_json(f, SAMPLE_LIBRARY)
+        tools = load_library(f)
+        assert tools is not None
+        assert len(tools) == 3
+        assert tools[0]["guid"] == "tool-1"
+
+    def test_empty_data_array_is_valid(self, tmp_path):
+        f = tmp_path / "lib.json"
+        write_json(f, {"data": []})
+        tools = load_library(f)
+        assert tools == []
+
+
+# ─────────────────────────────────────────────
+# load_library — error handling
+# ─────────────────────────────────────────────
+class TestLoadLibraryErrors:
+    def test_returns_none_for_malformed_json(self, tmp_path):
+        f = tmp_path / "bad.json"
+        f.write_text("{not valid json", encoding="utf-8")
+        assert load_library(f) is None
+
+    def test_returns_none_for_missing_data_key(self, tmp_path):
+        f = tmp_path / "lib.json"
+        write_json(f, {"tools": [{"guid": "x"}]})  # wrong root key
+        assert load_library(f) is None
+
+    def test_returns_none_when_data_is_not_a_list(self, tmp_path):
+        f = tmp_path / "lib.json"
+        write_json(f, {"data": "not a list"})
+        assert load_library(f) is None
+
+    def test_returns_none_for_stale_file(self, tmp_path):
+        f = tmp_path / "stale.json"
+        write_json(f, SAMPLE_LIBRARY)
+        # Backdate the mtime to 100 hours ago — well past the 25h limit.
+ old = time.time() - (100 * 3600) + os.utime(f, (old, old)) + assert load_library(f) is None + + +# ───────────────────────────────────────────── +# _check_file_age +# ───────────────────────────────────────────── +class TestFileAgeCheck: + def test_recent_file_passes(self, tmp_path): + f = tmp_path / "fresh.json" + f.write_text("{}", encoding="utf-8") + assert _check_file_age(f) is True + + def test_stale_file_fails(self, tmp_path): + f = tmp_path / "stale.json" + f.write_text("{}", encoding="utf-8") + old = time.time() - ((MAX_FILE_AGE_HOURS + 5) * 3600) + os.utime(f, (old, old)) + assert _check_file_age(f) is False + + def test_custom_max_age_window(self, tmp_path): + f = tmp_path / "f.json" + f.write_text("{}", encoding="utf-8") + old = time.time() - (3 * 3600) # 3 hours old + os.utime(f, (old, old)) + assert _check_file_age(f, max_age_hours=5) is True + assert _check_file_age(f, max_age_hours=1) is False + + +# ───────────────────────────────────────────── +# load_all_libraries +# ───────────────────────────────────────────── +class TestLoadAllLibraries: + def test_returns_empty_for_missing_directory(self, tmp_path): + missing = tmp_path / "nope" + result = load_all_libraries(missing) + assert result == {} + + def test_loads_multiple_files(self, tmp_path): + write_json(tmp_path / "a.json", SAMPLE_LIBRARY) + write_json(tmp_path / "b.json", {"data": [{"guid": "z", "type": "drill"}]}) + result = load_all_libraries(tmp_path) + assert set(result.keys()) == {"a", "b"} + assert len(result["a"]) == 3 + assert len(result["b"]) == 1 + + def test_returns_empty_when_no_json_files(self, tmp_path): + (tmp_path / "readme.txt").write_text("hi", encoding="utf-8") + result = load_all_libraries(tmp_path) + assert result == {} + + def test_abort_on_stale_aborts_full_run(self, tmp_path): + # Two files, one fresh, one stale → with abort_on_stale=True (default), + # the entire load should return {}. 
+        write_json(tmp_path / "fresh.json", SAMPLE_LIBRARY)
+        stale = tmp_path / "stale.json"
+        write_json(stale, SAMPLE_LIBRARY)
+        old = time.time() - (100 * 3600)
+        os.utime(stale, (old, old))
+
+        result = load_all_libraries(tmp_path, abort_on_stale=True)
+        assert result == {}
+
+    def test_skip_stale_continues_with_fresh(self, tmp_path):
+        write_json(tmp_path / "fresh.json", SAMPLE_LIBRARY)
+        stale = tmp_path / "stale.json"
+        write_json(stale, SAMPLE_LIBRARY)
+        old = time.time() - (100 * 3600)
+        os.utime(stale, (old, old))
+
+        result = load_all_libraries(tmp_path, abort_on_stale=False)
+        assert "fresh" in result
+        assert "stale" not in result
+
+
+# ─────────────────────────────────────────────
+# report_library_contents — smoke test
+# ─────────────────────────────────────────────
+class TestReportLibraryContents:
+    def test_runs_without_error(self, capsys):
+        libs = {"sample": SAMPLE_LIBRARY["data"]}
+        report_library_contents(libs)
+        captured = capsys.readouterr()
+        # Should print library name + per-type counts
+        assert "sample" in captured.out
+        assert "flat end mill" in captured.out
+        assert "drill" in captured.out
+        assert "holder" in captured.out
+
+    def test_handles_empty_library(self, capsys):
+        report_library_contents({})
+        captured = capsys.readouterr()
+        # No exception, no output for an empty dict
+        assert captured.out == ""
diff --git a/tests/test_validate_library.py b/tests/test_validate_library.py
new file mode 100644
index 0000000..fc124a2
--- /dev/null
+++ b/tests/test_validate_library.py
@@ -0,0 +1,576 @@
+"""
+Tests for validate_library.py — covers every rule ID in the spec
+at docs/validate_library_spec.md.
+
+Tests never touch the network. Supplier API checks use FakePlexClient
+from conftest.py.
+"""
+from __future__ import annotations
+
+import pytest
+
+from validate_library import (
+    KNOWN_TOOL_TYPES,
+    NON_SYNC_TYPES,
+    ValidationMode,
+    _closest_supplier_names,
+    _edit_distance,
+    _get_supplier_names,
+    _reset_supplier_cache,
+    format_result,
+    validate_library,
+)
+
+
+# ─────────────────────────────────────────────
+# Helpers — minimal valid tool dicts
+# ─────────────────────────────────────────────
+
+def make_tool(**overrides) -> dict:
+    """A minimal tool that passes every per-tool rule."""
+    base = {
+        "guid": "tool-guid-0001",
+        "type": "flat end mill",
+        "description": "1/4 SQ END",
+        "product-id": "HARVEY-12345",
+        "vendor": "Harvey Tool",
+        "geometry": {
+            "DC": 0.25,
+            "OAL": 2.5,
+            "NOF": 4,
+        },
+        "post-process": {"number": 1},
+    }
+    base.update(overrides)
+    return base
+
+
+def make_holder(**overrides) -> dict:
+    base = {
+        "guid": "holder-guid-0001",
+        "type": "holder",
+        "description": "BT30-SHC.25",
+    }
+    base.update(overrides)
+    return base
+
+
+def make_probe(**overrides) -> dict:
+    base = {
+        "guid": "probe-guid-0001",
+        "type": "probe",
+        "description": "Renishaw OMP40",
+    }
+    base.update(overrides)
+    return base
+
+
+@pytest.fixture(autouse=True)
+def _clear_supplier_cache():
+    """Reset the module-level supplier cache between tests."""
+    _reset_supplier_cache()
+    yield
+    _reset_supplier_cache()
+
+
+# ─────────────────────────────────────────────
+# Library-level rules
+# ─────────────────────────────────────────────
+
+class TestLibraryLevelStructure:
+    def test_passes_on_valid_library(self):
+        result = validate_library(
+            tools=[make_tool()],
+            library_name="test",
+        )
+        assert result.passed is True
+        assert result.tool_count == 1
+        assert result.sync_candidate_count == 1
+        assert result.fails == []
+
+    def test_struct_root_key_fail_on_none(self):
+        result = validate_library(tools=None, library_name="test")
+        assert result.passed is False
+        assert any(i.rule == "STRUCT_ROOT_KEY" for i in result.fails)
+
+    def test_struct_data_list_fail_on_non_list(self):
+        result = validate_library(tools={"foo": "bar"}, library_name="test")
+        assert result.passed is False
+        assert any(i.rule == "STRUCT_DATA_LIST" for i in result.fails)
+
+    def test_struct_empty_fail_on_empty_list(self):
+        result = validate_library(tools=[], library_name="test")
+        assert result.passed is False
+        assert any(i.rule == "STRUCT_EMPTY" for i in result.fails)
+
+    def test_sync_candidates_zero_fail_on_all_holders(self):
+        result = validate_library(
+            tools=[make_holder(), make_probe()],
+            library_name="test",
+        )
+        assert result.passed is False
+        assert any(i.rule == "SYNC_CANDIDATES_ZERO" for i in result.fails)
+
+    def test_library_level_fail_skips_per_tool_checks(self):
+        # An empty library should produce only STRUCT_EMPTY, not a swarm
+        # of per-tool errors.
+        result = validate_library(tools=[], library_name="test")
+        assert len(result.fails) == 1
+        assert result.fails[0].rule == "STRUCT_EMPTY"
+
+
+class TestDuplicateDetection:
+    def test_duplicate_guid_fail(self):
+        a = make_tool(guid="dup-guid")
+        a["product-id"] = "PROD-A"
+        b = make_tool(guid="dup-guid")
+        b["product-id"] = "PROD-B"
+        result = validate_library(tools=[a, b], library_name="test")
+        assert result.passed is False
+        assert any(i.rule == "DUPLICATE_GUID" for i in result.fails)
+
+    def test_duplicate_product_id_fail(self):
+        a = make_tool(guid="guid-a")
+        a["product-id"] = "DUP-PID"
+        b = make_tool(guid="guid-b")
+        b["product-id"] = "DUP-PID"
+        result = validate_library(tools=[a, b], library_name="test")
+        assert result.passed is False
+        assert any(i.rule == "DUPLICATE_PRODUCT_ID" for i in result.fails)
+
+    def test_duplicate_product_id_only_on_sync_candidates(self):
+        # Holders are not sync candidates, so shared "product-id" on holders
+        # should NOT trip DUPLICATE_PRODUCT_ID.
+ a = make_holder(guid="h1") + a["product-id"] = "SAME" + b = make_holder(guid="h2") + b["product-id"] = "SAME" + # Need at least one sync candidate so we don't trip SYNC_CANDIDATES_ZERO + t = make_tool() + result = validate_library(tools=[a, b, t], library_name="test") + assert result.passed is True + assert not any(i.rule == "DUPLICATE_PRODUCT_ID" for i in result.fails) + + def test_cross_library_duplicate_warn(self): + tool = make_tool() + tool["product-id"] = "SHARED-PID" + result = validate_library( + tools=[tool], + library_name="current", + cross_library_product_ids={"SHARED-PID": "other-library"}, + ) + assert result.passed is True # WARN only + assert any( + i.rule == "CROSS_LIBRARY_DUPLICATE" for i in result.warns + ) + + def test_cross_library_duplicate_no_fire_when_dict_none(self): + tool = make_tool() + result = validate_library( + tools=[tool], + library_name="current", + cross_library_product_ids=None, + ) + assert not any( + i.rule == "CROSS_LIBRARY_DUPLICATE" for i in result.warns + ) + + +class TestUnknownType: + def test_unknown_type_warns_but_still_passes(self): + tool = make_tool(type="taper shank wizard") + result = validate_library(tools=[tool], library_name="test") + assert result.passed is True + assert any(i.rule == "UNKNOWN_TYPE_PRESENT" for i in result.warns) + + def test_known_type_does_not_warn(self): + for t in KNOWN_TOOL_TYPES - NON_SYNC_TYPES: + tool = make_tool(type=t) + result = validate_library(tools=[tool], library_name="test") + assert not any( + i.rule == "UNKNOWN_TYPE_PRESENT" for i in result.warns + ) + + +# ───────────────────────────────────────────── +# Per-tool rules — required fields +# ───────────────────────────────────────────── + +class TestRequiredFields: + @pytest.mark.parametrize("field_name", ["guid", "type", "description", "product-id"]) + def test_missing_required_field_fails(self, field_name): + tool = make_tool() + del tool[field_name] + result = validate_library(tools=[tool], library_name="test") + assert result.passed is False + assert any( + i.rule == "REQUIRED_FIELD" and i.field == field_name + for i in result.fails + ) + + @pytest.mark.parametrize("field_name", ["guid", "type", "description", "product-id"]) + def test_empty_string_required_field_fails(self, field_name): + tool = make_tool(**{field_name: ""}) + result = validate_library(tools=[tool], library_name="test") + assert any( + i.rule == "REQUIRED_FIELD" and i.field == field_name + for i in result.fails + ) + + def test_holders_skip_required_field_checks(self): + # Holders have no product-id and should still pass as long as a + # sync candidate exists in the library. 
+ tools = [make_tool(), make_holder()] + result = validate_library(tools=tools, library_name="test") + assert result.passed is True + + +# ───────────────────────────────────────────── +# Per-tool rules — vendor +# ───────────────────────────────────────────── + +class TestVendorRules: + def test_missing_vendor_warns(self): + tool = make_tool() + del tool["vendor"] + result = validate_library(tools=[tool], library_name="test") + assert result.passed is True + assert any(i.rule == "VENDOR_MISSING" for i in result.warns) + + def test_empty_vendor_warns(self): + tool = make_tool(vendor="") + result = validate_library(tools=[tool], library_name="test") + assert any(i.rule == "VENDOR_MISSING" for i in result.warns) + + def test_vendor_not_in_plex_warns(self, fake_client): + fake_client.set_response( + "suppliers", + [{"name": "Harvey Tool"}, {"name": "Sandvik"}], + ) + tool = make_tool(vendor="GARR TOOL") # not in list + result = validate_library( + tools=[tool], + library_name="test", + use_api=True, + client=fake_client, + ) + assert result.passed is True + assert any(i.rule == "VENDOR_NOT_IN_PLEX" for i in result.warns) + + def test_vendor_case_insensitive_match(self, fake_client): + fake_client.set_response("suppliers", [{"name": "Harvey Tool"}]) + tool = make_tool(vendor="HARVEY TOOL") + result = validate_library( + tools=[tool], + library_name="test", + use_api=True, + client=fake_client, + ) + assert not any(i.rule == "VENDOR_NOT_IN_PLEX" for i in result.warns) + + def test_vendor_api_disabled_skips_check(self, fake_client): + fake_client.set_response("suppliers", [{"name": "OnlyThis"}]) + tool = make_tool(vendor="NotInList") + result = validate_library( + tools=[tool], + library_name="test", + use_api=False, + client=fake_client, + ) + assert not any(i.rule == "VENDOR_NOT_IN_PLEX" for i in result.warns) + + def test_supplier_api_failure_does_not_abort(self, fake_client): + fake_client.set_response("suppliers", None) # API returned nothing + tool = make_tool(vendor="Anyone") + result = validate_library( + tools=[tool], + library_name="test", + use_api=True, + client=fake_client, + ) + # Validation still runs and passes; vendor check is silently skipped + assert result.passed is True + + +# ───────────────────────────────────────────── +# Per-tool rules — geometry +# ───────────────────────────────────────────── + +class TestGeometryRules: + def test_missing_geometry_warns(self): + tool = make_tool() + del tool["geometry"] + result = validate_library(tools=[tool], library_name="test") + assert result.passed is True + assert any(i.rule == "GEOMETRY_MISSING" for i in result.warns) + + def test_dc_missing_warns(self): + tool = make_tool() + del tool["geometry"]["DC"] + result = validate_library(tools=[tool], library_name="test") + assert any(i.rule == "GEOMETRY_DC_MISSING" for i in result.warns) + + def test_dc_zero_fails(self): + tool = make_tool() + tool["geometry"]["DC"] = 0 + result = validate_library(tools=[tool], library_name="test") + assert result.passed is False + assert any(i.rule == "GEOMETRY_DC_NONPOSITIVE" for i in result.fails) + + def test_dc_negative_fails(self): + tool = make_tool() + tool["geometry"]["DC"] = -0.125 + result = validate_library(tools=[tool], library_name="test") + assert result.passed is False + assert any(i.rule == "GEOMETRY_DC_NONPOSITIVE" for i in result.fails) + + def test_oal_missing_warns(self): + tool = make_tool() + del tool["geometry"]["OAL"] + result = validate_library(tools=[tool], library_name="test") + assert any(i.rule == "GEOMETRY_OAL_MISSING" 
for i in result.warns) + + def test_oal_shorter_than_dc_warns(self): + tool = make_tool() + tool["geometry"]["DC"] = 1.0 + tool["geometry"]["OAL"] = 0.5 + result = validate_library(tools=[tool], library_name="test") + assert result.passed is True + assert any( + i.rule == "GEOMETRY_OAL_SHORTER_THAN_DC" for i in result.warns + ) + + def test_nof_missing_warns(self): + tool = make_tool() + del tool["geometry"]["NOF"] + result = validate_library(tools=[tool], library_name="test") + assert any(i.rule == "GEOMETRY_NOF_MISSING" for i in result.warns) + + def test_nof_zero_fails(self): + tool = make_tool() + tool["geometry"]["NOF"] = 0 + result = validate_library(tools=[tool], library_name="test") + assert result.passed is False + assert any(i.rule == "GEOMETRY_NOF_NONPOSITIVE" for i in result.fails) + + +# ───────────────────────────────────────────── +# Per-tool rules — post-process +# ───────────────────────────────────────────── + +class TestPostProcessRules: + def test_missing_post_process_warns(self): + tool = make_tool() + del tool["post-process"] + result = validate_library(tools=[tool], library_name="test") + assert result.passed is True + assert any(i.rule == "POSTPROCESS_NUMBER_MISSING" for i in result.warns) + + def test_post_process_number_missing_warns(self): + tool = make_tool() + tool["post-process"] = {} + result = validate_library(tools=[tool], library_name="test") + assert any(i.rule == "POSTPROCESS_NUMBER_MISSING" for i in result.warns) + + def test_post_process_number_zero_warns(self): + tool = make_tool() + tool["post-process"]["number"] = 0 + result = validate_library(tools=[tool], library_name="test") + assert any( + i.rule == "POSTPROCESS_NUMBER_NONPOSITIVE" for i in result.warns + ) + + def test_post_process_number_negative_warns(self): + tool = make_tool() + tool["post-process"]["number"] = -3 + result = validate_library(tools=[tool], library_name="test") + assert any( + i.rule == "POSTPROCESS_NUMBER_NONPOSITIVE" for i in result.warns + ) + + +# ───────────────────────────────────────────── +# Filtering — holders and probes skipped +# ───────────────────────────────────────────── + +class TestSyncCandidateFiltering: + def test_holders_and_probes_not_per_tool_checked(self): + # A bare-bones holder (no geometry, no vendor, no product-id) must + # NOT produce any per-tool issues when a valid sync candidate exists. 
+ tools = [make_tool(), make_holder(), make_probe()] + result = validate_library(tools=tools, library_name="test") + assert result.passed is True + assert result.sync_candidate_count == 1 + + def test_sync_candidate_count_correct(self): + tools = [ + make_tool(), + make_tool(guid="t2"), + make_holder(), + make_probe(), + ] + # Ensure product-ids are unique to avoid DUPLICATE_PRODUCT_ID + tools[1]["product-id"] = "DIFFERENT-PID" + result = validate_library(tools=tools, library_name="test") + assert result.sync_candidate_count == 2 + assert result.tool_count == 4 + + +# ───────────────────────────────────────────── +# ValidationResult + format_result +# ───────────────────────────────────────────── + +class TestResultObject: + def test_summary_pass(self): + result = validate_library(tools=[make_tool()], library_name="LIB1") + assert "PASS" in result.summary() + assert "LIB1" in result.summary() + + def test_summary_fail(self): + result = validate_library(tools=[], library_name="LIB1") + assert "FAIL" in result.summary() + + def test_to_dict_roundtrip(self): + result = validate_library(tools=[make_tool()], library_name="LIB1") + d = result.to_dict() + assert d["library_name"] == "LIB1" + assert d["passed"] is True + assert isinstance(d["issues"], list) + + def test_debug_mode_populates_trace(self): + result = validate_library( + tools=[make_tool()], + library_name="LIB1", + mode=ValidationMode.DEBUG, + ) + assert result.debug_trace is not None + assert len(result.debug_trace) > 0 + + def test_production_mode_trace_is_none(self): + result = validate_library( + tools=[make_tool()], + library_name="LIB1", + mode=ValidationMode.PRODUCTION, + ) + assert result.debug_trace is None + + def test_format_result_pass(self): + result = validate_library(tools=[make_tool()], library_name="LIB1") + output = format_result(result, ValidationMode.PRODUCTION) + assert "PASS" in output + assert "LIB1" in output + + def test_format_result_fail_shows_error(self): + tool = make_tool() + del tool["product-id"] + result = validate_library(tools=[tool], library_name="LIB1") + output = format_result(result, ValidationMode.PRODUCTION) + assert "FAIL" in output + assert "REQUIRED_FIELD" in output + assert "product-id" in output + + def test_format_result_verbose_shows_warns(self): + tool = make_tool() + del tool["vendor"] + result = validate_library(tools=[tool], library_name="LIB1") + prod_out = format_result(result, ValidationMode.PRODUCTION) + verbose_out = format_result(result, ValidationMode.VERBOSE) + assert "VENDOR_MISSING" not in prod_out + assert "VENDOR_MISSING" in verbose_out + + +# ───────────────────────────────────────────── +# Edit distance helper +# ───────────────────────────────────────────── + +class TestEditDistance: + def test_zero_for_identical(self): + assert _edit_distance("abc", "abc") == 0 + + def test_one_for_single_edit(self): + assert _edit_distance("abc", "abd") == 1 + + def test_closest_supplier_names(self): + names = ["Sandvik", "Garr Tool Co.", "OSG"] + closest = _closest_supplier_names("Garr Tool", names, n=2) + assert len(closest) == 2 + # "Garr Tool Co." shares the full "Garr Tool" prefix (distance 4) + # and should be the closest match by a wide margin. + assert closest[0][0] == "Garr Tool Co." 
+ + +# ───────────────────────────────────────────── +# Supplier cache +# ───────────────────────────────────────────── + +class TestSupplierCache: + def test_cache_populated_on_first_call(self, fake_client): + fake_client.set_response("suppliers", [{"name": "A"}, {"name": "B"}]) + names1 = _get_supplier_names(fake_client) + assert set(names1) == {"A", "B"} + + def test_cache_returns_same_on_second_call(self, fake_client): + fake_client.set_response("suppliers", [{"name": "First"}]) + _get_supplier_names(fake_client) + # Change the canned response — cache should still return the first set + fake_client.set_response("suppliers", [{"name": "Changed"}]) + names2 = _get_supplier_names(fake_client) + assert "First" in names2 + assert "Changed" not in names2 + + def test_cache_resets_with_helper(self, fake_client): + fake_client.set_response("suppliers", [{"name": "First"}]) + _get_supplier_names(fake_client) + _reset_supplier_cache() + fake_client.set_response("suppliers", [{"name": "Second"}]) + names2 = _get_supplier_names(fake_client) + assert "Second" in names2 + + +# ───────────────────────────────────────────── +# Integration with tool_library_loader +# ───────────────────────────────────────────── + +class TestLoaderIntegration: + def test_loader_returns_none_on_validation_failure(self, tmp_path): + import json + from tool_library_loader import load_library + + bad = { + "data": [ + {"guid": "g1", "type": "flat end mill", "description": "test"} + # no product-id → REQUIRED_FIELD FAIL + ] + } + f = tmp_path / "bad.json" + f.write_text(json.dumps(bad), encoding="utf-8") + result = load_library(f, validate=True) + assert result is None + + def test_loader_returns_tools_on_validation_pass(self, tmp_path): + import json + from tool_library_loader import load_library + + good = {"data": [make_tool()]} + f = tmp_path / "good.json" + f.write_text(json.dumps(good), encoding="utf-8") + result = load_library(f, validate=True) + assert result is not None + assert len(result) == 1 + + def test_loader_validate_false_is_default(self, tmp_path): + # Ensure validate defaults to False and does not break existing + # test fixtures that use partial tool dicts. + import json + from tool_library_loader import load_library + + partial = { + "data": [ + {"guid": "g1", "type": "drill", "description": "1/4 drill"} + ] + } + f = tmp_path / "partial.json" + f.write_text(json.dumps(partial), encoding="utf-8") + result = load_library(f) # no validate kwarg + assert result is not None + assert len(result) == 1 diff --git a/tool_library_loader.py b/tool_library_loader.py index 192e6a8..5f8da51 100644 --- a/tool_library_loader.py +++ b/tool_library_loader.py @@ -66,20 +66,35 @@ def _check_file_age(path: Path, max_age_hours: int = MAX_FILE_AGE_HOURS) -> bool # ───────────────────────────────────────────── # SINGLE FILE LOADER # ───────────────────────────────────────────── -def load_library(path: Path) -> list[dict] | None: +def load_library(path: Path, validate: bool = False) -> list[dict] | None: """ Load a single Fusion 360 tool library JSON file. Returns the list of tool/holder objects from the root "data" array, - or None on failure (stale, locked, malformed). + or None on failure (stale, locked, malformed, or validation failure). Handles: - File age guard (ADC stall detection) - PermissionError (ADC mid-sync file lock) - JSON decode errors (incomplete sync / corrupt file) + - Schema validation via ``validate_library`` (when ``validate=True``) + + Parameters + ---------- + path + Path to a .json tool library file. 
+ validate + When True, runs ``validate_library.validate_library()`` in PRODUCTION + mode with ``use_api=False``. A failing validation returns None so the + sync layer can abort cleanly. Default is False to preserve the + existing offline diagnostic behaviour; sync callers should pass True. """ if not _check_file_age(path): return None # stale — caller decides whether to abort or skip + if path.stat().st_size == 0: + log.warning("EMPTY FILE — %s is 0 bytes. Skipping.", path.name) + return None + try: with open(path, "r", encoding="utf-8") as f: raw = json.load(f) @@ -92,7 +107,7 @@ def load_library(path: Path) -> list[dict] | None: ) return None - except json.JSONDecodeError as e: + except (json.JSONDecodeError, OSError) as e: log.error( "MALFORMED JSON — %s failed to parse: %s. " "File may be mid-write by ADC.", @@ -109,6 +124,25 @@ def load_library(path: Path) -> list[dict] | None: ) return None + if validate: + # Imported lazily so that importing tool_library_loader does not + # drag in validate_library for every caller that only wants a + # raw JSON load. + from validate_library import validate_library as _validate, ValidationMode + + result = _validate( + tools=tools, + library_name=path.stem, + mode=ValidationMode.PRODUCTION, + use_api=False, + ) + if not result.passed: + log.error("Validation failed for %s — sync aborted", path.name) + log.error(result.summary()) + for issue in result.fails: + log.error(" %s: %s", issue.rule, issue.message) + return None + log.info("Loaded %s — %d entries", path.name, len(tools)) return tools @@ -119,6 +153,7 @@ def load_library(path: Path) -> list[dict] | None: def load_all_libraries( directory: Path = CAM_TOOLS_DIR, abort_on_stale: bool = True, + validate: bool = False, ) -> dict[str, list[dict]]: """ Glob all .json files in the flat CAMTools directory and load each one. @@ -130,6 +165,9 @@ def load_all_libraries( stale. Prevents partial pushes where some libraries are current and others are not. Set False to skip stale files and continue with valid ones. + validate : If True, each library is passed through validate_library + and libraries that fail are treated the same as stale. + Default False; sync callers should pass True. Returns ------- @@ -155,7 +193,7 @@ def load_all_libraries( libraries: dict[str, list[dict]] = {} for path in json_files: - tools = load_library(path) + tools = load_library(path, validate=validate) if tools is None: if abort_on_stale: diff --git a/tools/__init__.py b/tools/__init__.py new file mode 100644 index 0000000..03a585a --- /dev/null +++ b/tools/__init__.py @@ -0,0 +1 @@ +"""Internal tooling for the Datum project (not packaged for distribution).""" diff --git a/tools/plex_mock/README.md b/tools/plex_mock/README.md new file mode 100644 index 0000000..14a9562 --- /dev/null +++ b/tools/plex_mock/README.md @@ -0,0 +1,59 @@ +# Plex-Mimic Mock + +Local HTTP server mirroring the Plex REST surface for write-pipeline +validation. Tracked in [#92](https://github.com/grace-shane/Datum/issues/92); +blocks [#3](https://github.com/grace-shane/Datum/issues/3) and +[#6](https://github.com/grace-shane/Datum/issues/6). 
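+
+Once the mock is up (see Quick start below) you can poke it by hand before
+pointing the sync at it. A minimal smoke test — a sketch assuming the default
+`127.0.0.1:8080` bind; the request-body fields beyond `supplyItemNumber` are
+illustrative:
+
+```bash
+# Liveness — should print {"ok": true}
+curl -s http://127.0.0.1:8080/healthz
+
+# A captured write: returns 201 with a synthetic UUID; re-POSTing a
+# supplyItemNumber that already exists in the snapshot returns 409 instead.
+curl -s -X POST \
+  http://127.0.0.1:8080/inventory/v1/inventory-definitions/supply-items \
+  -H 'Content-Type: application/json' \
+  -d '{"supplyItemNumber": "SMOKE-0001", "description": "smoke test"}'
+```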
+
+## Quick start
+
+```bash
+# Refresh snapshots from real Plex (read-only; safe to re-run)
+python -m tools.plex_mock.capture_snapshots
+
+# Start the mock on localhost:8080
+python -m tools.plex_mock.server --run-id $(date +%Y%m%d-%H%M%S)
+
+# In another shell: point the sync at it
+PLEX_BASE_URL=http://127.0.0.1:8080 \
+PLEX_ALLOW_WRITES=1 \
+  datum-sync
+
+# After the run: diff captures against the expected payload shape
+python -m tools.plex_mock.diff \
+  --run-id <run-id> \
+  --db tools/plex_mock/captures.db \
+  --expected tests/fixtures/plex_mock/expected_supply_items.json
+```
+
+## What it serves
+
+| Endpoint | Behavior |
+|---|---|
+| `GET /healthz` | liveness probe, returns `{"ok": true}` |
+| `GET /inventory/v1/inventory-definitions/supply-items` | serves `snapshots/supply_items_list.json` |
+| `GET /inventory/v1/inventory-definitions/supply-items/{id}` | one record from the snapshot; 404 if unknown |
+| `POST /inventory/v1/inventory-definitions/supply-items` | captures body, returns 201 with synthetic UUID; 409 if `supplyItemNumber` collides with snapshot |
+| `PUT /inventory/v1/inventory-definitions/supply-items/{id}` | captures body, merges over snapshot record, returns 200; 404 if unknown |
+| `GET /production/v1/production-definitions/workcenters` | serves `snapshots/workcenters_list.json` |
+| `GET /production/v1/production-definitions/workcenters/{id}` | one record; 404 if unknown |
+| `PUT/PATCH /production/v1/production-definitions/workcenters/{id}` | captures body, returns merged record (the #6 probe path) |
+
+Every write lands in `captures.db` keyed by `run_id` for later diffing.
+
+## Validation-window protocol
+
+Before we flip `PLEX_ALLOW_WRITES=1` against real `connect.plex.com`:
+
+1. Three consecutive `datum-sync` runs against the mock produce matching row counts (read them off the diff CLI output) and all CLEAN diffs.
+2. `datum-plex-mock-diff` reports CLEAN against `expected_supply_items.json` for all three runs.
+3. The PR that enables real-Plex writes pastes the three diff outputs into its description and calls out any anomaly observed during the runs. The PR description is the rehearsal log — no separate notes file.
+4. Only then: the PR that enables writes to real Plex merges, and only with explicit Shane approval.
+
+The mock is the validation surface. `test.connect.plex.com` (`PLEX_USE_TEST=1`) is not — the Datum Consumer Key only authenticates against production (see `docs/BRIEFING.md`).
+
+## Deploy on `datum-runtime`
+
+See `tools/plex_mock/systemd/datum-plex-mock.service`. Copy into
+`/etc/systemd/system/`, `systemctl daemon-reload && systemctl enable --now datum-plex-mock`.
+Bound to `127.0.0.1:8080` — no external exposure, no TLS needed.
diff --git a/tools/plex_mock/__init__.py b/tools/plex_mock/__init__.py
new file mode 100644
index 0000000..289d0bd
--- /dev/null
+++ b/tools/plex_mock/__init__.py
@@ -0,0 +1,5 @@
+"""
+Local mock HTTP server mirroring the Plex REST surface for write-pipeline
+validation. See tools/plex_mock/README.md and issue #92.
+"""
+__version__ = "0.1.0"
diff --git a/tools/plex_mock/capture_snapshots.py b/tools/plex_mock/capture_snapshots.py
new file mode 100644
index 0000000..41e3221
--- /dev/null
+++ b/tools/plex_mock/capture_snapshots.py
@@ -0,0 +1,51 @@
+"""
+One-off: hit real connect.plex.com and persist GET responses for the two
+endpoints the mock needs to serve. Commit the output files.
+ +Run with credentials loaded the usual way (.env.local + bootstrap.py): + + python -m tools.plex_mock.capture_snapshots + +Refresh when the Plex shape changes. This script only GETs — safe to +run any time without the PLEX_ALLOW_WRITES guard. +""" +from __future__ import annotations + +import json +import sys +from pathlib import Path + +from plex_api import API_KEY, API_SECRET, TENANT_ID, USE_TEST, PlexClient + + +SNAPSHOTS_DIR = Path(__file__).parent / "snapshots" + + +def capture(client: PlexClient, collection: str, version: str, resource: str, outfile: str) -> int: + env = client.get_envelope(collection, version, resource) + if not env["ok"]: + print(f" FAILED {collection}/{version}/{resource}: HTTP {env['status']}", file=sys.stderr) + return 1 + data = env["body"] + out = SNAPSHOTS_DIR / outfile + out.write_text(json.dumps(data, indent=2, sort_keys=True), encoding="utf-8") + count = len(data) if isinstance(data, list) else 1 + print(f" wrote {out.relative_to(Path.cwd())} ({count} records, {out.stat().st_size} bytes)") + return 0 + + +def main() -> int: + if not API_KEY: + print("PLEX_API_KEY is not set; can't capture snapshots.", file=sys.stderr) + return 2 + client = PlexClient(API_KEY, API_SECRET, TENANT_ID, use_test=USE_TEST) + rc = 0 + rc |= capture(client, "inventory", "v1", "inventory-definitions/supply-items", + "supply_items_list.json") + rc |= capture(client, "production", "v1", "production-definitions/workcenters", + "workcenters_list.json") + return rc + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/tools/plex_mock/diff.py b/tools/plex_mock/diff.py new file mode 100644 index 0000000..6d00ead --- /dev/null +++ b/tools/plex_mock/diff.py @@ -0,0 +1,100 @@ +""" +Diff captured Plex-mock POSTs against an expected-payload fixture. + +Checks each supply-items POST for: + - required fields present + - forbidden fields absent (things the client shouldn't send) + - field types match the fixture + +Exit code 0 on clean, 1 on drift. 
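+Exits 2 on a missing --db/--expected path, and 3 when the run matched
+zero supply-items POSTs (usually a wrong run_id).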
+Usage:
+
+    python -m tools.plex_mock.diff --run-id <run-id> --db <db> --expected <fixture>
+"""
+from __future__ import annotations
+
+import argparse
+import json
+import sys
+from dataclasses import dataclass, field
+from pathlib import Path
+
+from tools.plex_mock.store import CaptureStore
+
+
+TYPE_MAP = {"str": str, "int": int, "float": float, "bool": bool, "list": list, "dict": dict}
+
+
+@dataclass
+class DiffResult:
+    issues: list[str] = field(default_factory=list)
+    checked: int = 0
+
+    @property
+    def ok(self) -> bool:
+        return not self.issues
+
+
+def _check_supply_item_post(body: dict, shape: dict, row_id: int) -> list[str]:
+    issues: list[str] = []
+    for f in shape["required_fields"]:
+        if f not in body:
+            issues.append(f"row {row_id}: missing required field '{f}'")
+    for f in shape["forbidden_fields"]:
+        if f in body:
+            issues.append(f"row {row_id}: forbidden field '{f}' present")
+    for f, t in shape["field_types"].items():
+        if f in body:
+            expected_t = TYPE_MAP.get(t)
+            if expected_t and not isinstance(body[f], expected_t):
+                actual = type(body[f]).__name__
+                issues.append(f"row {row_id}: field '{f}' expected {t}, got {actual}")
+    return issues
+
+
+def diff_run(*, store: CaptureStore, run_id: str, expected: dict) -> DiffResult:
+    result = DiffResult()
+    shape = expected.get("supply_items_post_shape")
+    if not shape:
+        result.issues.append("fixture missing 'supply_items_post_shape'")
+        return result
+
+    for row in store.query(run_id=run_id, method="POST"):
+        if not row["path"].endswith("/supply-items"):
+            continue
+        result.checked += 1
+        body = row["body"] or {}
+        result.issues.extend(_check_supply_item_post(body, shape, row["id"]))
+    return result
+
+
+def main() -> int:
+    ap = argparse.ArgumentParser(description="Plex-mock capture diff")
+    ap.add_argument("--run-id", required=True)
+    ap.add_argument("--db", required=True, type=Path)
+    ap.add_argument("--expected", required=True, type=Path)
+    args = ap.parse_args()
+
+    if not args.db.exists():
+        print(f"DB not found: {args.db}", file=sys.stderr)
+        return 2
+    if not args.expected.exists():
+        print(f"Expected fixture not found: {args.expected}", file=sys.stderr)
+        return 2
+
+    store = CaptureStore(args.db)
+    expected = json.loads(args.expected.read_text())
+    result = diff_run(store=store, run_id=args.run_id, expected=expected)
+    if result.ok:
+        if result.checked == 0:
+            print(f"plex-mock diff: CLEAN but ZERO rows checked (run_id={args.run_id}) — is the run_id correct?", file=sys.stderr)
+            return 3
+        print(f"plex-mock diff: CLEAN (run_id={args.run_id}, {result.checked} supply-items POSTs checked)")
+        return 0
+    print(f"plex-mock diff: DRIFT (run_id={args.run_id}, {result.checked} checked, {len(result.issues)} issues)")
+    for issue in result.issues:
+        print(f"  {issue}")
+    return 1
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())
diff --git a/tools/plex_mock/server.py b/tools/plex_mock/server.py
new file mode 100644
index 0000000..5de22d4
--- /dev/null
+++ b/tools/plex_mock/server.py
@@ -0,0 +1,196 @@
+"""
+Flask app mimicking the Plex REST endpoints the sync writes to.
+GETs serve canned snapshots from disk; POST/PUT/PATCH capture request
+bodies to the SQLite store and return Plex-shape responses.
+
+Bound to 127.0.0.1 by the systemd unit — never expose publicly.
+Issue: #92.
+"""
+from __future__ import annotations
+
+import json
+import uuid
+from pathlib import Path
+
+from flask import Flask, abort, jsonify, request
+from werkzeug.exceptions import BadRequest
+
+from tools.plex_mock.store import CaptureStore
+
+
+def _load_snapshot(snapshots_dir: Path, name: str) -> list[dict]:
+    path = snapshots_dir / name
+    if not path.exists():
+        raise FileNotFoundError(
+            f"Snapshot {name} not found in {snapshots_dir}. "
+            f"Run `python -m tools.plex_mock.capture_snapshots` on a "
+            f"credentialed host to generate it."
+        )
+    try:
+        return json.loads(path.read_text(encoding="utf-8"))
+    except json.JSONDecodeError as exc:
+        raise ValueError(f"Malformed JSON in snapshot {path}: {exc}") from exc
+
+
+def _parse_json_object(req):
+    """Strictly parse a request body as a JSON object.
+
+    Returns either the parsed dict, or a Flask response tuple (jsonify, 400)
+    describing why the body was rejected. Malformed JSON and non-object
+    bodies are refused so a broken sync surfaces as a 400 instead of
+    getting silently captured as an empty dict (#96 review follow-up).
+    """
+    try:
+        payload = req.get_json(silent=False)
+    except BadRequest as exc:
+        return jsonify({
+            "error": "invalid JSON body",
+            "detail": exc.description,
+        }), 400
+    if not isinstance(payload, dict):
+        return jsonify({
+            "error": "request body must be a JSON object",
+            "detail": f"got {type(payload).__name__}",
+        }), 400
+    return payload
+
+
+def create_app(
+    *,
+    snapshots_dir: Path,
+    db_path: Path,
+    run_id: str,
+) -> Flask:
+    app = Flask(__name__)
+    app.config["PLEX_MOCK_SNAPSHOTS_DIR"] = snapshots_dir
+    app.config["PLEX_MOCK_STORE"] = CaptureStore(db_path)
+    app.config["PLEX_MOCK_RUN_ID"] = run_id
+
+    # The mock is stateless: these dicts are loaded once at app creation
+    # and never mutated. The POST/PUT/PATCH handlers capture request
+    # bodies to the SQLite store and return Plex-shape responses with
+    # synthetic UUIDs, but they do NOT add/modify entries here. A GET of
+    # a synthetic id from an earlier POST therefore intentionally 404s —
+    # the mock doesn't simulate Plex's inventory state, just its wire shape.
+    supply_items = _load_snapshot(snapshots_dir, "supply_items_list.json")
+    workcenters = _load_snapshot(snapshots_dir, "workcenters_list.json")
+    supply_by_id = {rec["id"]: rec for rec in supply_items}
+    workcenter_by_id = {rec["workcenterId"]: rec for rec in workcenters}
+
+    @app.get("/healthz")
+    def healthz():
+        return jsonify({"ok": True})
+
+    @app.get("/inventory/v1/inventory-definitions/supply-items")
+    def supply_items_list():
+        return jsonify(supply_items)
+
+    @app.get("/inventory/v1/inventory-definitions/supply-items/<item_id>")
+    def supply_items_get(item_id: str):
+        rec = supply_by_id.get(item_id)
+        if rec is None:
+            abort(404)
+        return jsonify(rec)
+
+    @app.get("/production/v1/production-definitions/workcenters")
+    def workcenters_list():
+        return jsonify(workcenters)
+
+    @app.get("/production/v1/production-definitions/workcenters/<wc_id>")
+    def workcenter_get(wc_id: str):
+        rec = workcenter_by_id.get(wc_id)
+        if rec is None:
+            abort(404)
+        return jsonify(rec)
+
+    @app.post("/inventory/v1/inventory-definitions/supply-items")
+    def supply_items_post():
+        parsed = _parse_json_object(request)
+        if not isinstance(parsed, dict):
+            return parsed  # 400 response tuple
+        payload = parsed
+        # Dedup by supplyItemNumber against the snapshot — Plex returns 409.
+        # Guard before capturing so failed requests aren't stored; matches
+        # the 404 ordering in supply_items_put and workcenter_write.
+        sin = payload.get("supplyItemNumber")
+        if sin and any(rec.get("supplyItemNumber") == sin for rec in supply_items):
+            return jsonify({"error": "duplicate supplyItemNumber", "supplyItemNumber": sin}), 409
+        store: CaptureStore = app.config["PLEX_MOCK_STORE"]
+        store.append(
+            method="POST",
+            path=request.path,
+            body=payload,
+            run_id=app.config["PLEX_MOCK_RUN_ID"],
+        )
+        resp = dict(payload)
+        resp["id"] = str(uuid.uuid4())
+        return jsonify(resp), 201
+
+    @app.put("/inventory/v1/inventory-definitions/supply-items/<item_id>")
+    def supply_items_put(item_id: str):
+        if item_id not in supply_by_id:
+            abort(404)
+        parsed = _parse_json_object(request)
+        if not isinstance(parsed, dict):
+            return parsed
+        payload = parsed
+        store: CaptureStore = app.config["PLEX_MOCK_STORE"]
+        store.append(
+            method="PUT",
+            path=request.path,
+            body=payload,
+            run_id=app.config["PLEX_MOCK_RUN_ID"],
+        )
+        merged = {**supply_by_id[item_id], **payload, "id": item_id}
+        return jsonify(merged), 200
+
+    @app.route(
+        "/production/v1/production-definitions/workcenters/<wc_id>",
+        methods=["PUT", "PATCH"],
+    )
+    def workcenter_write(wc_id: str):
+        if wc_id not in workcenter_by_id:
+            abort(404)
+        parsed = _parse_json_object(request)
+        if not isinstance(parsed, dict):
+            return parsed
+        payload = parsed
+        store: CaptureStore = app.config["PLEX_MOCK_STORE"]
+        store.append(
+            method=request.method,
+            path=request.path,
+            body=payload,
+            run_id=app.config["PLEX_MOCK_RUN_ID"],
+        )
+        merged = {**workcenter_by_id[wc_id], **payload, "workcenterId": wc_id}
+        return jsonify(merged), 200
+
+    return app
+
+
+def main() -> int:
+    """Console-script entry (datum-plex-mock-serve)."""
+    import argparse
+
+    ap = argparse.ArgumentParser(description="Plex-mimic mock server")
+    ap.add_argument("--host", default="127.0.0.1")
+    ap.add_argument("--port", type=int, default=8080)
+    ap.add_argument("--snapshots", default=Path(__file__).parent / "snapshots")
+    ap.add_argument("--db", default=Path(__file__).parent / "captures.db")
+    ap.add_argument("--run-id", default=None, help="Override run_id (default: random uuid4)")
+    args = ap.parse_args()
+
+    app = create_app(
+        snapshots_dir=Path(args.snapshots),
+        db_path=Path(args.db),
+        run_id=args.run_id or str(uuid.uuid4()),
+    )
+    print(f"plex-mock serving on http://{args.host}:{args.port} run_id={app.config['PLEX_MOCK_RUN_ID']}")
+    # threaded=True so the sync can issue concurrent POSTs against the mock
+    # without Werkzeug's default single-threaded server serialising them.
+    app.run(host=args.host, port=args.port, debug=False, threaded=True)
+    return 0
+
+
+if __name__ == "__main__":
+    raise SystemExit(main())
diff --git a/tools/plex_mock/snapshots/README.md b/tools/plex_mock/snapshots/README.md
new file mode 100644
index 0000000..a61e7ce
--- /dev/null
+++ b/tools/plex_mock/snapshots/README.md
@@ -0,0 +1,8 @@
+# Canned GET snapshots
+
+JSON responses captured from real `connect.plex.com` so the mock can
+serve realistic GETs without a live-Plex dependency. Refresh via
+`python -m tools.plex_mock.capture_snapshots` when Plex shapes change.
+
+Files here are committed. Ad-hoc mock captures (POSTs the sync sent)
+live in `tools/plex_mock/captures/` which is gitignored.
diff --git a/tools/plex_mock/store.py b/tools/plex_mock/store.py
new file mode 100644
index 0000000..d29c7ab
--- /dev/null
+++ b/tools/plex_mock/store.py
@@ -0,0 +1,88 @@
+"""
+SQLite-backed capture store for the Plex-mimic mock.
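+
+Minimal usage sketch (values are illustrative, not a real capture):
+
+    store = CaptureStore("captures.db")
+    store.append(method="POST", path="/inventory/v1/inventory-definitions/supply-items",
+                 body={"supplyItemNumber": "EM-001"}, run_id="run-1")
+    rows = store.query(run_id="run-1", method="POST")  # oldest first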
+
+Every POST/PUT/PATCH the mock server sees is appended here so the
+diff CLI (#92) can report what the sync *would have* sent to real
+Plex, and three-runs-in-a-row idempotency checks can compare run sets.
+
+Append-only by design — no update/delete path. Gitignored; survives
+mock restarts.
+"""
+from __future__ import annotations
+
+import json
+import sqlite3
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any
+
+
+SCHEMA = """
+CREATE TABLE IF NOT EXISTS captures (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    ts TEXT NOT NULL,
+    method TEXT NOT NULL,
+    path TEXT NOT NULL,
+    body_json TEXT,
+    run_id TEXT NOT NULL
+);
+CREATE INDEX IF NOT EXISTS captures_run_id_idx ON captures(run_id);
+CREATE INDEX IF NOT EXISTS captures_run_method_idx ON captures(run_id, method);
+"""
+
+
+class CaptureStore:
+    """Thin wrapper around a SQLite file used as an append-only capture log."""
+
+    def __init__(self, path: Path | str):
+        self.path = Path(path)
+        self.path.parent.mkdir(parents=True, exist_ok=True)
+        with sqlite3.connect(self.path) as con:
+            con.executescript(SCHEMA)
+
+    def append(
+        self,
+        *,
+        method: str,
+        path: str,
+        body: Any,
+        run_id: str,
+    ) -> int:
+        """Record one captured request. Returns the rowid."""
+        ts = datetime.now(timezone.utc).isoformat()
+        body_json = json.dumps(body) if body is not None else None
+        with sqlite3.connect(self.path) as con:
+            cur = con.execute(
+                "INSERT INTO captures (ts, method, path, body_json, run_id) "
+                "VALUES (?, ?, ?, ?, ?)",
+                (ts, method, path, body_json, run_id),
+            )
+            assert cur.lastrowid is not None  # INSERT always yields a rowid
+            return cur.lastrowid
+
+    def query(
+        self,
+        *,
+        run_id: str,
+        method: str | None = None,
+    ) -> list[dict]:
+        """Return all captures for a run, oldest first. Optional method filter."""
+        sql = "SELECT id, ts, method, path, body_json, run_id FROM captures WHERE run_id = ?"
+        args: list[Any] = [run_id]
+        if method is not None:
+            sql += " AND method = ?"
+            args.append(method)
+        sql += " ORDER BY id ASC"
+        with sqlite3.connect(self.path) as con:
+            rows = con.execute(sql, args).fetchall()
+        return [
+            {
+                "id": r[0],
+                "ts": r[1],
+                "method": r[2],
+                "path": r[3],
+                "body": json.loads(r[4]) if r[4] is not None else None,
+                "run_id": r[5],
+            }
+            for r in rows
+        ]
diff --git a/tools/plex_mock/systemd/README.md b/tools/plex_mock/systemd/README.md
new file mode 100644
index 0000000..c64e53b
--- /dev/null
+++ b/tools/plex_mock/systemd/README.md
@@ -0,0 +1,56 @@
+# Deploy `datum-plex-mock` on `datum-runtime`
+
+The unit expects a `datum` system user, the repo at `/opt/datum`, a
+virtualenv at `/opt/datum/.venv` with `pip install -e .` completed, and
+snapshots captured into `/opt/datum/tools/plex_mock/snapshots/`. None
+of those exist on a fresh `datum-runtime` VM — do the one-time prereq
+block below first.
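+
+Quick check that the prereqs are already in place (the user and all four
+paths come from the unit file and the steps below; nothing new here):
+
+```bash
+id datum && ls -d /opt/datum /opt/datum/.venv \
+    /opt/datum/tools/plex_mock/snapshots /var/lib/datum
+```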
+
+## SSH in via IAP
+
+```bash
+gcloud compute ssh datum-runtime --zone=us-central1-a --tunnel-through-iap \
+    --project=$PROJECT_ID
+```
+
+## One-time prereqs (skip if already set up)
+
+```bash
+# datum system user, owns the repo checkout + capture dir
+sudo useradd --system --home /opt/datum --shell /usr/sbin/nologin datum
+
+sudo mkdir -p /opt/datum /var/lib/datum
+sudo chown datum:datum /opt/datum /var/lib/datum
+
+# Clone + install (console script datum-plex-mock-serve lands in .venv)
+sudo -u datum git clone https://github.com/grace-shane/Datum.git /opt/datum
+sudo -u datum python3 -m venv /opt/datum/.venv
+sudo -u datum /opt/datum/.venv/bin/pip install -e /opt/datum
+
+# Snapshots — needs real Plex creds, so this step runs on the creds-having
+# host (e.g. datum-runtime with secret-manager access, or captured locally
+# and scp'd in). Skip if snapshots are already committed in the repo.
+sudo -u datum PLEX_API_KEY=... PLEX_TENANT_ID=... \
+    /opt/datum/.venv/bin/datum-plex-mock-snapshot
+```
+
+The mock binary has no Plex env var dependencies — it serves local
+snapshots and writes to its own SQLite. The unit therefore does *not*
+load `.env.local`. Only `capture_snapshots` needs Plex credentials.
+
+## Install + start the unit
+
+```bash
+sudo cp /opt/datum/tools/plex_mock/systemd/datum-plex-mock.service \
+    /etc/systemd/system/
+sudo systemctl daemon-reload
+sudo systemctl enable --now datum-plex-mock
+sudo systemctl status datum-plex-mock
+curl -sf http://127.0.0.1:8080/healthz
+```
+
+## Troubleshooting
+
+- Logs: `journalctl -u datum-plex-mock -f`
+- Stop: `sudo systemctl stop datum-plex-mock`
+- Refresh snapshots from the VM: `cd /opt/datum && sudo -u datum /opt/datum/.venv/bin/datum-plex-mock-snapshot`
diff --git a/tools/plex_mock/systemd/datum-plex-mock.service b/tools/plex_mock/systemd/datum-plex-mock.service
new file mode 100644
index 0000000..6144da9
--- /dev/null
+++ b/tools/plex_mock/systemd/datum-plex-mock.service
@@ -0,0 +1,29 @@
+[Unit]
+Description=Datum Plex-Mimic Mock HTTP Server
+After=network.target
+Documentation=https://github.com/grace-shane/Datum/issues/92
+
+[Service]
+Type=simple
+User=datum
+Group=datum
+WorkingDirectory=/opt/datum
+# The mock has no Plex env var dependencies (it serves local snapshots,
+# writes to its own SQLite). Pulling in .env.local would leak real Plex
+# credentials into a process that doesn't need them. Leave unset.
+ExecStart=/opt/datum/.venv/bin/datum-plex-mock-serve \
+    --host 127.0.0.1 \
+    --port 8080 \
+    --snapshots /opt/datum/tools/plex_mock/snapshots \
+    --db /var/lib/datum/plex-mock-captures.db
+Restart=on-failure
+RestartSec=5
+# Hardening — mock has no reason to touch anything outside its data dir
+ReadWritePaths=/var/lib/datum
+PrivateTmp=true
+ProtectSystem=strict
+ProtectHome=true
+NoNewPrivileges=true
+
+[Install]
+WantedBy=multi-user.target
diff --git a/validate_library.py b/validate_library.py
new file mode 100644
index 0000000..88aa2f0
--- /dev/null
+++ b/validate_library.py
@@ -0,0 +1,1072 @@
+"""
+validate_library.py
+Fusion 360 Tool Library Pre-Sync Validator
+Grace Engineering — plex-api project
+===========================================
+Pre-sync validation gate for Fusion 360 tool library JSON files. Runs
+before any data touches Plex or Supabase. Three entry points share one
+validation engine:
+
+  1. CLI — ``python validate_library.py --file <path>``
+  2. Programmatic — ``validate_library(tools=..., library_name=..., ...)``
+     called from ``tool_library_loader.load_library()`` as
+     a pre-sync gate
+  3. Flask — ``GET/POST /api/fusion/validate`` in app.py
+
+A ``FAIL`` aborts the sync. ``WARN`` entries are surfaced in verbose/debug
+output and the Flask UI but do not block the sync.
+
+Full spec: docs/validate_library_spec.md
+"""
+
+from __future__ import annotations
+
+import argparse
+import json
+import logging
+import sys
+from dataclasses import dataclass, field
+from enum import Enum
+from pathlib import Path
+from typing import Any, Literal
+
+log = logging.getLogger(__name__)
+
+
+# ─────────────────────────────────────────────
+# CONSTANTS
+# ─────────────────────────────────────────────
+
+# Known tool types observed in Fusion 360 libraries. Anything outside this
+# set triggers UNKNOWN_TYPE_PRESENT (WARN) — the sync will still include
+# it unless it matches NON_SYNC_TYPES below.
+KNOWN_TOOL_TYPES = {
+    "flat end mill",
+    "bull nose end mill",
+    "drill",
+    "face mill",
+    "form mill",
+    "slot mill",  # Fusion name; ingested as "slitting saw"
+    "slitting saw",
+    "holder",
+    "probe",
+}
+
+# Types excluded from sync. Holders are the geometric collision shapes
+# and probes are measurement devices; neither represents a purchasable
+# cutting consumable in Plex's supply-items schema.
+NON_SYNC_TYPES = {"holder", "probe"}
+
+# ── Geometry bounds ──────────────────────────────────────────────────
+# TODO: confirm real shop-floor bounds with Shane before enabling range
+# WARN rules. Non-positive FAIL rules (DC <= 0, NOF <= 0) are always
+# active regardless of these values.
+DC_MIN: float | None = None   # cutting diameter min, inches
+DC_MAX: float | None = None   # cutting diameter max, inches
+OAL_MIN: float | None = None  # overall length min, inches
+OAL_MAX: float | None = None  # overall length max, inches
+NOF_MIN: int | None = None    # number of flutes min
+NOF_MAX: int | None = None    # number of flutes max
+
+
+# ─────────────────────────────────────────────
+# DATA STRUCTURES
+# ─────────────────────────────────────────────
+
+class ValidationMode(Enum):
+    PRODUCTION = "production"  # PASS/FAIL only
+    VERBOSE = "verbose"        # + WARNs
+    DEBUG = "debug"            # + field trace
+
+
+@dataclass
+class ValidationIssue:
+    severity: Literal["FAIL", "WARN"]
+    rule: str
+    tool_index: int | None
+    tool_description: str | None
+    field: str | None
+    value: Any
+    message: str
+
+    def to_dict(self) -> dict:
+        return {
+            "severity": self.severity,
+            "rule": self.rule,
+            "tool_index": self.tool_index,
+            "tool_description": self.tool_description,
+            "field": self.field,
+            "value": self.value,
+            "message": self.message,
+        }
+
+
+@dataclass
+class ValidationResult:
+    library_name: str
+    passed: bool
+    tool_count: int
+    sync_candidate_count: int
+    issues: list[ValidationIssue] = field(default_factory=list)
+    debug_trace: list[str] | None = None
+
+    @property
+    def fails(self) -> list[ValidationIssue]:
+        return [i for i in self.issues if i.severity == "FAIL"]
+
+    @property
+    def warns(self) -> list[ValidationIssue]:
+        return [i for i in self.issues if i.severity == "WARN"]
+
+    def summary(self) -> str:
+        n_fail = len(self.fails)
+        n_warn = len(self.warns)
+        if self.passed and n_warn == 0:
+            return (
+                f"PASS {self.library_name} — "
+                f"{self.sync_candidate_count} tools valid, ready to sync"
+            )
+        if self.passed:
+            return (
+                f"PASS {self.library_name} — "
+                f"{self.sync_candidate_count} tools valid "
+                f"({n_warn} warning{'s' if n_warn != 1 else ''})"
+            )
+        parts = [f"{n_fail} error{'s' if n_fail != 1 else ''}"]
+        if n_warn:
+            parts.append(f"{n_warn} warning{'s' if n_warn != 1 else ''}")
+        return f"FAIL {self.library_name} — FAILED ({', '.join(parts)})"
+
+    def to_dict(self) -> dict:
+        return {
+            "library_name": self.library_name,
+            "passed": self.passed,
+            "tool_count": self.tool_count,
+            "sync_candidate_count": self.sync_candidate_count,
+            "issues": [i.to_dict() for i in self.issues],
+            "debug_trace": self.debug_trace,
+        }
+
+
+# ─────────────────────────────────────────────
+# SUPPLIER LOOKUP (cached)
+# ─────────────────────────────────────────────
+
+_supplier_cache: list[str] | None = None
+
+
+def _get_supplier_names(client, debug: bool = False) -> list[str]:
+    """
+    Fetch supplier names from ``mdm/v1/suppliers``.
+
+    Cached after first call. Returns empty list on API failure — vendor
+    checks are silently skipped in that case rather than aborting the
+    validation run.
+    """
+    global _supplier_cache
+    if _supplier_cache is not None:
+        return _supplier_cache
+
+    try:
+        raw = client.get("mdm", "v1", "suppliers")
+    except Exception as e:
+        log.warning("Supplier lookup failed: %s — skipping vendor checks", e)
+        _supplier_cache = []
+        return _supplier_cache
+
+    if raw is None:
+        log.warning(
+            "Supplier lookup returned None — skipping vendor checks"
+        )
+        _supplier_cache = []
+        return _supplier_cache
+
+    if isinstance(raw, dict):
+        records = raw.get("data") or raw.get("items") or raw.get("rows") or []
+    else:
+        records = raw or []
+
+    names: list[str] = []
+    for r in records:
+        if not isinstance(r, dict):
+            continue
+        # Try common name field variations
+        name = (
+            r.get("name")
+            or r.get("supplierName")
+            or r.get("supplier_name")
+            or r.get("displayName")
+        )
+        if name:
+            names.append(str(name))
+
+    _supplier_cache = names
+    if debug:
+        log.debug("Loaded %d supplier names from mdm/v1/suppliers", len(names))
+    return names
+
+
+def _reset_supplier_cache() -> None:
+    """Test helper — reset the module-level cache between runs."""
+    global _supplier_cache
+    _supplier_cache = None
+
+
+def _edit_distance(a: str, b: str) -> int:
+    """Levenshtein distance — small/simple implementation for debug output."""
+    if a == b:
+        return 0
+    if not a:
+        return len(b)
+    if not b:
+        return len(a)
+    prev = list(range(len(b) + 1))
+    for i, ca in enumerate(a, 1):
+        curr = [i] + [0] * len(b)
+        for j, cb in enumerate(b, 1):
+            cost = 0 if ca == cb else 1
+            curr[j] = min(
+                curr[j - 1] + 1,     # insert
+                prev[j] + 1,         # delete
+                prev[j - 1] + cost,  # substitute
+            )
+        prev = curr
+    return prev[-1]
+
+
+def _closest_supplier_names(
+    target: str, supplier_names: list[str], n: int = 3
+) -> list[tuple[str, int]]:
+    """Return the n closest supplier names to ``target`` by edit distance."""
+    scored = [(name, _edit_distance(target.lower(), name.lower()))
+              for name in supplier_names]
+    scored.sort(key=lambda x: x[1])
+    return scored[:n]
+
+
+def _match_vendor(vendor: str, supplier_names: list[str]) -> bool:
+    """Case-insensitive exact match against the supplier master list."""
+    if not vendor or not supplier_names:
+        return False
+    target = vendor.strip().lower()
+    return any(target == name.strip().lower() for name in supplier_names)
+
+
+# ─────────────────────────────────────────────
+# VALIDATION ENGINE
+# ─────────────────────────────────────────────
+
+def _tool_desc(tool: dict) -> str | None:
+    """Short description label for issue messages."""
+    desc = tool.get("description")
+    return desc if isinstance(desc, str) and desc else None
+
+
+def _is_sync_candidate(tool: dict) -> bool:
+    t = tool.get("type")
+    if not isinstance(t, str):
+        return True  # unknown/missing type — still a candidate; caught by REQUIRED_FIELD
+    return t.strip().lower() not in NON_SYNC_TYPES
+
+
+def _check_library_rules(
+    data: Any,
+    issues: list[ValidationIssue],
+    cross_library_product_ids: dict[str, str] | None,
+    debug_trace: list[str] | None,
+) -> tuple[list[dict], bool]:
+    """
+    Run library-level rules. Returns (sync_candidates, safe_to_iterate).
+
+    If safe_to_iterate is False, a library-level FAIL blocked iteration
+    and per-tool rules should NOT run.
+    """
+    if debug_trace is not None:
+        debug_trace.append("[DEBUG] Running library-level rules")
+
+    # STRUCT_ROOT_KEY — data must be a list (callers pass ``tools``, which
+    # is already the unwrapped ``data`` array — we check structure here)
+    if data is None:
+        issues.append(ValidationIssue(
+            severity="FAIL",
+            rule="STRUCT_ROOT_KEY",
+            tool_index=None,
+            tool_description=None,
+            field=None,
+            value=None,
+            message='Root "data" key missing — not a valid Fusion tool library',
+        ))
+        return [], False
+
+    if not isinstance(data, list):
+        issues.append(ValidationIssue(
+            severity="FAIL",
+            rule="STRUCT_DATA_LIST",
+            tool_index=None,
+            tool_description=None,
+            field=None,
+            value=type(data).__name__,
+            message='Root "data" is not a list',
+        ))
+        return [], False
+
+    if len(data) == 0:
+        issues.append(ValidationIssue(
+            severity="FAIL",
+            rule="STRUCT_EMPTY",
+            tool_index=None,
+            tool_description=None,
+            field=None,
+            value=0,
+            message="Library contains zero entries",
+        ))
+        return [], False
+
+    # UNKNOWN_TYPE_PRESENT — WARN only, does not block iteration
+    for i, tool in enumerate(data):
+        if not isinstance(tool, dict):
+            continue
+        t = tool.get("type")
+        if isinstance(t, str) and t.strip():
+            if t.strip().lower() not in KNOWN_TOOL_TYPES:
+                issues.append(ValidationIssue(
+                    severity="WARN",
+                    rule="UNKNOWN_TYPE_PRESENT",
+                    tool_index=i,
+                    tool_description=_tool_desc(tool),
+                    field="type",
+                    value=t,
+                    message=(
+                        f'Unknown type "{t}" at index {i} — will be included '
+                        f"in sync unless filter is updated"
+                    ),
+                ))
+
+    # Collect sync candidates (skip holders + probes)
+    sync_candidates = [t for t in data if isinstance(t, dict) and _is_sync_candidate(t)]
+
+    if len(sync_candidates) == 0:
+        issues.append(ValidationIssue(
+            severity="FAIL",
+            rule="SYNC_CANDIDATES_ZERO",
+            tool_index=None,
+            tool_description=None,
+            field=None,
+            value=0,
+            message="No syncable tools after filtering — check type values",
+        ))
+        return sync_candidates, False
+
+    # DUPLICATE_GUID — across all entries (holders + probes included)
+    guid_seen: dict[str, int] = {}
+    for i, tool in enumerate(data):
+        if not isinstance(tool, dict):
+            continue
+        guid = tool.get("guid")
+        if isinstance(guid, str) and guid:
+            if guid in guid_seen:
+                prev = guid_seen[guid]
+                issues.append(ValidationIssue(
+                    severity="FAIL",
+                    rule="DUPLICATE_GUID",
+                    tool_index=i,
+                    tool_description=_tool_desc(tool),
+                    field="guid",
+                    value=guid,
+                    message=f'Duplicate guid "{guid}" at indexes {prev} and {i}',
+                ))
+            else:
+                guid_seen[guid] = i
+
+    # DUPLICATE_PRODUCT_ID — sync candidates only
+    pid_seen: dict[str, int] = {}
+    for i, tool in enumerate(data):
+        if not isinstance(tool, dict) or not _is_sync_candidate(tool):
+            continue
+        pid = tool.get("product-id")
+        if isinstance(pid, str) and pid:
+            if pid in pid_seen:
+                prev = pid_seen[pid]
+                issues.append(ValidationIssue(
+                    severity="FAIL",
+                    rule="DUPLICATE_PRODUCT_ID",
+                    tool_index=i,
+                    tool_description=_tool_desc(tool),
+                    field="product-id",
+                    value=pid,
+                    message=(
+                        f'Duplicate product-id "{pid}" at indexes {prev} '
+                        f"and {i} — upsert will collide"
+                    ),
+                ))
+            else:
+                pid_seen[pid] = i
+
+    # CROSS_LIBRARY_DUPLICATE — WARN, multi-library runs only
+    if cross_library_product_ids is not None:
+        for i, tool in enumerate(data):
+            if not isinstance(tool, dict) or not _is_sync_candidate(tool):
+                continue
+            pid = tool.get("product-id")
+            if isinstance(pid, str) and pid and pid in cross_library_product_ids:
+                other = cross_library_product_ids[pid]
+                issues.append(ValidationIssue(
+                    severity="WARN",
+                    rule="CROSS_LIBRARY_DUPLICATE",
+                    tool_index=i,
+                    tool_description=_tool_desc(tool),
+                    field="product-id",
+                    value=pid,
+                    message=(
+                        f'product-id "{pid}" also exists in "{other}" — '
+                        f"check for cross-library collision"
+                    ),
+                ))
+
+    # If any library-level rule FAILed (DUPLICATE_GUID / DUPLICATE_PRODUCT_ID),
+    # we still allow iteration so per-tool checks can surface additional
+    # errors. Only the hard structural failures above abort iteration.
+    return sync_candidates, True
+
+
+def _check_required_field(
+    tool: dict,
+    index: int,
+    key: str,
+    issues: list[ValidationIssue],
+) -> bool:
+    val = tool.get(key)
+    if not isinstance(val, str) or not val.strip():
+        issues.append(ValidationIssue(
+            severity="FAIL",
+            rule="REQUIRED_FIELD",
+            tool_index=index,
+            tool_description=_tool_desc(tool),
+            field=key,
+            value=val,
+            message=(
+                f"Missing required field '{key}' — "
+                f"this tool cannot be deduped in Plex"
+            ),
+        ))
+        return False
+    return True
+
+
+def _check_vendor_rules(
+    tool: dict,
+    index: int,
+    issues: list[ValidationIssue],
+    supplier_names: list[str] | None,
+    debug_trace: list[str] | None,
+) -> None:
+    vendor = tool.get("vendor")
+
+    if not isinstance(vendor, str) or not vendor.strip():
+        issues.append(ValidationIssue(
+            severity="WARN",
+            rule="VENDOR_MISSING",
+            tool_index=index,
+            tool_description=_tool_desc(tool),
+            field="vendor",
+            value=vendor,
+            message="Tool has no vendor — supplier linkage will fail on sync",
+        ))
+        return
+
+    if supplier_names is None:
+        return  # API check disabled — not our job to warn
+
+    if not supplier_names:
+        return  # API failed or empty — gracefully skip (warning already logged)
+
+    if not _match_vendor(vendor, supplier_names):
+        issues.append(ValidationIssue(
+            severity="WARN",
+            rule="VENDOR_NOT_IN_PLEX",
+            tool_index=index,
+            tool_description=_tool_desc(tool),
+            field="vendor",
+            value=vendor,
+            message=(
+                f"Vendor \"{vendor}\" not found in Plex supplier master — "
+                f"will fail at sync time"
+            ),
+        ))
+        if debug_trace is not None:
+            closest = _closest_supplier_names(vendor, supplier_names)
+            debug_trace.append(
+                f"[DEBUG] Closest matches to \"{vendor}\": "
+                + ", ".join(f'"{n}" ({d})' for n, d in closest)
+            )
+
+
+def _check_geometry_rules(
+    tool: dict,
+    index: int,
+    issues: list[ValidationIssue],
+    debug_trace: list[str] | None,
+) -> None:
+    geom = tool.get("geometry")
+
+    if not isinstance(geom, dict):
+        issues.append(ValidationIssue(
+            severity="WARN",
+            rule="GEOMETRY_MISSING",
+            tool_index=index,
+            tool_description=_tool_desc(tool),
+            field="geometry",
+            value=None,
+            message="Tool has no geometry block",
+        ))
+        return
+
+    # DC — cutting diameter
+    dc = geom.get("DC")
+    if dc is None:
+        issues.append(ValidationIssue(
+            severity="WARN",
+            rule="GEOMETRY_DC_MISSING",
+            tool_index=index,
+            tool_description=_tool_desc(tool),
+            field="geometry.DC",
+            value=None,
+            message="Cutting diameter (DC) missing",
+        ))
+    else:
+        try:
+            dc_val = float(dc)
+        except (TypeError, ValueError):
+            dc_val = None
+        if dc_val is None:
+            issues.append(ValidationIssue(
+                severity="FAIL",
+                rule="GEOMETRY_DC_NONPOSITIVE",
+                tool_index=index,
+                tool_description=_tool_desc(tool),
+                field="geometry.DC",
+                value=dc,
+                message=f"Cutting diameter must be a number (got {dc!r})",
+            ))
+        elif dc_val <= 0:
+            issues.append(ValidationIssue(
+                severity="FAIL",
+                rule="GEOMETRY_DC_NONPOSITIVE",
+                tool_index=index,
+                tool_description=_tool_desc(tool),
+                field="geometry.DC",
+                value=dc_val,
+                message=f"Cutting diameter must be > 0 (got {dc_val})",
+            ))
+        elif DC_MIN is not None and DC_MAX is not None:
+            if not (DC_MIN <= dc_val <= DC_MAX):
+                issues.append(ValidationIssue(
+                    severity="WARN",
+                    rule="GEOMETRY_DC_RANGE",
+                    tool_index=index,
+                    tool_description=_tool_desc(tool),
+                    field="geometry.DC",
+                    value=dc_val,
+                    message=(
+                        f"Cutting diameter {dc_val} outside expected "
+                        f"range [{DC_MIN}, {DC_MAX}]"
+                    ),
+                ))
+        elif debug_trace is not None:
+            debug_trace.append(
+                "[DEBUG] GEOMETRY_DC_RANGE skipped — DC_MIN/DC_MAX not set"
+            )
+
+    # OAL — overall length
+    oal = geom.get("OAL")
+    if oal is None:
+        issues.append(ValidationIssue(
+            severity="WARN",
+            rule="GEOMETRY_OAL_MISSING",
+            tool_index=index,
+            tool_description=_tool_desc(tool),
+            field="geometry.OAL",
+            value=None,
+            message="Overall length (OAL) missing",
+        ))
+    else:
+        try:
+            oal_val = float(oal)
+        except (TypeError, ValueError):
+            oal_val = None
+        if oal_val is not None:
+            # OAL < DC is physically implausible; always active when both present
+            try:
+                dc_val_cmp = float(dc) if dc is not None else None
+            except (TypeError, ValueError):
+                dc_val_cmp = None
+            if dc_val_cmp is not None and dc_val_cmp > 0 and oal_val < dc_val_cmp:
+                issues.append(ValidationIssue(
+                    severity="WARN",
+                    rule="GEOMETRY_OAL_SHORTER_THAN_DC",
+                    tool_index=index,
+                    tool_description=_tool_desc(tool),
+                    field="geometry.OAL",
+                    value=oal_val,
+                    message=(
+                        f"Overall length ({oal_val}) is shorter than "
+                        f"cutting diameter ({dc_val_cmp}) — physically implausible"
+                    ),
+                ))
+            if OAL_MIN is not None and OAL_MAX is not None:
+                if not (OAL_MIN <= oal_val <= OAL_MAX):
+                    issues.append(ValidationIssue(
+                        severity="WARN",
+                        rule="GEOMETRY_OAL_RANGE",
+                        tool_index=index,
+                        tool_description=_tool_desc(tool),
+                        field="geometry.OAL",
+                        value=oal_val,
+                        message=(
+                            f"Overall length {oal_val} outside expected "
+                            f"range [{OAL_MIN}, {OAL_MAX}]"
+                        ),
+                    ))
+            elif debug_trace is not None:
+                debug_trace.append(
+                    "[DEBUG] GEOMETRY_OAL_RANGE skipped — OAL_MIN/OAL_MAX not set"
+                )
+
+    # NOF — number of flutes
+    nof = geom.get("NOF")
+    if nof is None:
+        issues.append(ValidationIssue(
+            severity="WARN",
+            rule="GEOMETRY_NOF_MISSING",
+            tool_index=index,
+            tool_description=_tool_desc(tool),
+            field="geometry.NOF",
+            value=None,
+            message="Number of flutes (NOF) missing",
+        ))
+    else:
+        try:
+            nof_val = float(nof)
+        except (TypeError, ValueError):
+            nof_val = None
+        if nof_val is None:
+            issues.append(ValidationIssue(
+                severity="FAIL",
+                rule="GEOMETRY_NOF_NONPOSITIVE",
+                tool_index=index,
+                tool_description=_tool_desc(tool),
+                field="geometry.NOF",
+                value=nof,
+                message=f"Number of flutes must be a number (got {nof!r})",
+            ))
+        elif nof_val <= 0:
+            issues.append(ValidationIssue(
+                severity="FAIL",
+                rule="GEOMETRY_NOF_NONPOSITIVE",
+                tool_index=index,
+                tool_description=_tool_desc(tool),
+                field="geometry.NOF",
+                value=nof_val,
+                message=f"Number of flutes must be > 0 (got {nof_val})",
+            ))
+        elif NOF_MIN is not None and NOF_MAX is not None:
+            if not (NOF_MIN <= nof_val <= NOF_MAX):
+                issues.append(ValidationIssue(
+                    severity="WARN",
+                    rule="GEOMETRY_NOF_RANGE",
+                    tool_index=index,
+                    tool_description=_tool_desc(tool),
+                    field="geometry.NOF",
+                    value=nof_val,
+                    message=(
+                        f"Number of flutes {nof_val} outside expected "
+                        f"range [{NOF_MIN}, {NOF_MAX}]"
+                    ),
+                ))
+        elif debug_trace is not None:
+            debug_trace.append(
+                "[DEBUG] GEOMETRY_NOF_RANGE skipped — NOF_MIN/NOF_MAX not set"
+            )
+
+
+def _check_postprocess_rules(
+    tool: dict,
+    index: int,
+    issues: list[ValidationIssue],
+) -> None:
+    pp = tool.get("post-process")
+    if not isinstance(pp, dict) or "number" not in pp:
+        issues.append(ValidationIssue(
+            severity="WARN",
+            rule="POSTPROCESS_NUMBER_MISSING",
+            tool_index=index,
+            tool_description=_tool_desc(tool),
+            field="post-process.number",
+            value=None,
+            message="post-process.number missing — no pocket assignment",
+        ))
+        return
+
+    num = pp.get("number")
+    try:
+        num_val = float(num)
+    except (TypeError, ValueError):
+        issues.append(ValidationIssue(
+            severity="WARN",
+            rule="POSTPROCESS_NUMBER_NONPOSITIVE",
+            tool_index=index,
+            tool_description=_tool_desc(tool),
+            field="post-process.number",
+            value=num,
+            message=f"post-process.number must be a number (got {num!r})",
+        ))
+        return
+
+    if num_val <= 0:
+        issues.append(ValidationIssue(
+            severity="WARN",
+            rule="POSTPROCESS_NUMBER_NONPOSITIVE",
+            tool_index=index,
+            tool_description=_tool_desc(tool),
+            field="post-process.number",
+            value=num_val,
+            message=f"post-process.number must be > 0 (got {num_val})",
+        ))
+
+
+def _check_per_tool_rules(
+    sync_candidates: list[dict],
+    all_tools: list[dict],
+    issues: list[ValidationIssue],
+    supplier_names: list[str] | None,
+    debug_trace: list[str] | None,
+) -> None:
+    """Run per-tool rules. Uses the original index from ``all_tools`` for reporting."""
+    for i, tool in enumerate(all_tools):
+        if not isinstance(tool, dict) or not _is_sync_candidate(tool):
+            continue
+
+        if debug_trace is not None:
+            desc = _tool_desc(tool) or "(no description)"
+            debug_trace.append(f'[DEBUG] tool {i} "{desc}"')
+
+        # Required fields
+        _check_required_field(tool, i, "guid", issues)
+        _check_required_field(tool, i, "type", issues)
+        _check_required_field(tool, i, "description", issues)
+        _check_required_field(tool, i, "product-id", issues)
+
+        # Vendor
+        _check_vendor_rules(tool, i, issues, supplier_names, debug_trace)
+
+        # Geometry
+        _check_geometry_rules(tool, i, issues, debug_trace)
+
+        # Post-process
+        _check_postprocess_rules(tool, i, issues)
+
+
+# ─────────────────────────────────────────────
+# PUBLIC API
+# ─────────────────────────────────────────────
+
+def validate_library(
+    tools: Any,
+    library_name: str,
+    mode: ValidationMode = ValidationMode.PRODUCTION,
+    use_api: bool = False,
+    client=None,
+    cross_library_product_ids: dict[str, str] | None = None,
+) -> ValidationResult:
+    """
+    Validate a list of Fusion 360 tool objects against the sync rules.
+
+    Parameters
+    ----------
+    tools
+        List of tool dicts from a Fusion JSON library — the unwrapped
+        ``data`` array. May be any type; type errors are caught and
+        returned as STRUCT_* FAILs.
+    library_name
+        Short name for the library (used in issue messages + summary).
+    mode
+        ValidationMode.PRODUCTION / VERBOSE / DEBUG. Currently affects
+        only the ``debug_trace`` field and debug-only logging; rule
+        evaluation is identical in all modes.
+    use_api
+        When True, fetches the Plex supplier master and runs
+        VENDOR_NOT_IN_PLEX checks. Requires ``client``. Defaults to False
+        to keep the loader fast and offline-safe.
+    client
+        PlexClient instance. Required iff ``use_api=True``.
+    cross_library_product_ids
+        Optional dict of already-seen product-ids to library names. When
+        provided, enables CROSS_LIBRARY_DUPLICATE WARN rule. Used by
+        multi-library batch runs.
+
+    Returns
+    -------
+    ValidationResult
+    """
+    issues: list[ValidationIssue] = []
+    debug_trace: list[str] | None = [] if mode == ValidationMode.DEBUG else None
+
+    # Library-level rules
+    sync_candidates, safe_to_iterate = _check_library_rules(
+        tools, issues, cross_library_product_ids, debug_trace
+    )
+
+    tool_count = len(tools) if isinstance(tools, list) else 0
+    sync_candidate_count = len(sync_candidates)
+
+    # Per-tool rules only if library-level checks didn't hard-abort
+    if safe_to_iterate:
+        supplier_names: list[str] | None = None
+        if use_api:
+            if client is None:
+                log.warning(
+                    "use_api=True but no client provided — "
+                    "skipping vendor API checks"
+                )
+                supplier_names = None
+            else:
+                supplier_names = _get_supplier_names(
+                    client, debug=(mode == ValidationMode.DEBUG)
+                )
+
+        _check_per_tool_rules(
+            sync_candidates,
+            tools if isinstance(tools, list) else [],
+            issues,
+            supplier_names,
+            debug_trace,
+        )
+
+    passed = not any(i.severity == "FAIL" for i in issues)
+
+    return ValidationResult(
+        library_name=library_name,
+        passed=passed,
+        tool_count=tool_count,
+        sync_candidate_count=sync_candidate_count,
+        issues=issues,
+        debug_trace=debug_trace,
+    )
+
+
+# ─────────────────────────────────────────────
+# CLI OUTPUT FORMATTING
+# ─────────────────────────────────────────────
+
+def _format_issue(issue: ValidationIssue) -> str:
+    desc = issue.tool_description or "(no description)"
+    tool_label = (
+        f"tool {issue.tool_index} \"{desc}\""
+        if issue.tool_index is not None
+        else "(library-level)"
+    )
+    lines = [
+        f"  [{issue.severity}] {issue.rule} - {tool_label}",
+    ]
+    if issue.field:
+        lines.append(f"    Field: {issue.field}")
+    if issue.value not in (None, ""):
+        lines.append(f"    Value: {issue.value!r}")
+    lines.append(f"    {issue.message}")
+    return "\n".join(lines)
+
+
+def format_result(
+    result: ValidationResult,
+    mode: ValidationMode = ValidationMode.PRODUCTION,
+) -> str:
+    """Human-readable CLI output for a single ValidationResult."""
+    if result.passed and not result.warns:
+        return (
+            f"[PASS] {result.library_name} - "
+            f"{result.sync_candidate_count} tools valid, ready to sync"
+        )
+
+    header_parts = []
+    n_fail = len(result.fails)
+    n_warn = len(result.warns)
+    if n_fail:
+        header_parts.append(f"{n_fail} error{'s' if n_fail != 1 else ''}")
+    if n_warn and mode != ValidationMode.PRODUCTION:
+        header_parts.append(f"{n_warn} warning{'s' if n_warn != 1 else ''}")
+
+    status = "PASS" if result.passed else "FAIL"
+    header = (
+        f"[{status}] {result.library_name}"
+        + (f" - {'FAILED' if not result.passed else 'passed'} "
+           f"({', '.join(header_parts)})" if header_parts else "")
+    )
+    body_lines: list[str] = [header, ""]
+
+    # Show FAILs always
+    for issue in result.fails:
+        body_lines.append(_format_issue(issue))
+        body_lines.append("")
+
+    # Show WARNs only in verbose/debug
+    if mode != ValidationMode.PRODUCTION:
+        for issue in result.warns:
+            body_lines.append(_format_issue(issue))
+            body_lines.append("")
+
+    # Debug trace
+    if mode == ValidationMode.DEBUG and result.debug_trace:
+        body_lines.append("  Debug trace:")
+        for line in result.debug_trace:
+            body_lines.append(f"    {line}")
+
+    return "\n".join(body_lines).rstrip() + "\n"
+
+
+# ─────────────────────────────────────────────
+# CLI ENTRY POINT
+# ─────────────────────────────────────────────
+
+def _load_json_file(path: Path) -> list[dict] | None:
+    """Load a Fusion JSON file and return the ``data`` array (or raw on error)."""
+    try:
+        with open(path, "r", encoding="utf-8") as f:
+            raw = json.load(f)
+    except (FileNotFoundError, PermissionError, json.JSONDecodeError) as e:
+        print(f"ERROR: Failed to load {path}: {e}", file=sys.stderr)
+        return None
+
+    if not isinstance(raw, dict):
+        # Pass the raw thing through; the validator will FAIL on STRUCT_*
+        return raw  # type: ignore[return-value]
+
+    return raw.get("data")
+
+
+def _cli_main(argv: list[str] | None = None) -> int:
+    # Force stdout to UTF-8 so em-dashes in messages don't blow up the
+    # Windows cp1252 console. Matches the pattern in app.py (PR #22).
+    try:
+        sys.stdout.reconfigure(encoding="utf-8")
+    except Exception:
+        pass
+
+    parser = argparse.ArgumentParser(
+        description="Validate Fusion 360 tool library JSON file(s) for sync"
+    )
+    parser.add_argument(
+        "--file", "-f",
+        help="Path to a single JSON file to validate",
+    )
+    parser.add_argument(
+        "--verbose", "-v",
+        action="store_true",
+        help="Show WARN issues in addition to FAILs",
+    )
+    parser.add_argument(
+        "--debug", "-d",
+        action="store_true",
+        help="Show full field trace and supplier list",
+    )
+    parser.add_argument(
+        "--no-api",
+        action="store_true",
+        help="Skip live Plex supplier lookup (offline mode)",
+    )
+    args = parser.parse_args(argv)
+
+    if args.debug:
+        mode = ValidationMode.DEBUG
+        logging.basicConfig(level=logging.DEBUG)
+    elif args.verbose:
+        mode = ValidationMode.VERBOSE
+        logging.basicConfig(level=logging.INFO)
+    else:
+        mode = ValidationMode.PRODUCTION
+        logging.basicConfig(level=logging.WARNING)
+
+    # File resolution
+    if args.file:
+        path = Path(args.file)
+        if not path.exists():
+            print(f"ERROR: File not found: {path}", file=sys.stderr)
+            return 2
+        files = [path]
+    else:
+        # Default to all libraries in the ADC CAMTools directory
+        try:
+            from tool_library_loader import CAM_TOOLS_DIR
+        except ImportError:
+            print(
+                "ERROR: tool_library_loader not importable and no --file given",
+                file=sys.stderr,
+            )
+            return 2
+        if not CAM_TOOLS_DIR.exists():
+            print(
+                f"ERROR: CAMTools directory not found: {CAM_TOOLS_DIR}",
+                file=sys.stderr,
+            )
+            return 2
+        files = sorted(CAM_TOOLS_DIR.glob("*.json"))
+        if not files:
+            print(f"ERROR: No .json files in {CAM_TOOLS_DIR}", file=sys.stderr)
+            return 2
+
+    # Optional API client for supplier lookup
+    client = None
+    use_api = not args.no_api
+    if use_api:
+        try:
+            from plex_api import PlexClient, API_KEY, API_SECRET, TENANT_ID, USE_TEST
+            if not API_KEY:
+                print(
+                    "WARNING: PLEX_API_KEY not set — disabling API vendor checks",
+                    file=sys.stderr,
+                )
+                use_api = False
+            else:
+                client = PlexClient(
+                    api_key=API_KEY,
+                    api_secret=API_SECRET,
+                    tenant_id=TENANT_ID,
+                    use_test=USE_TEST,
+                )
+        except Exception as e:
+            print(
+                f"WARNING: Plex client setup failed ({e}) — disabling API checks",
+                file=sys.stderr,
+            )
+            use_api = False
+
+    # Run validation across all files with cross-library tracking
+    cross_library: dict[str, str] = {}
+    all_results: list[ValidationResult] = []
+    for path in files:
+        tools = _load_json_file(path)
+        result = validate_library(
+            tools=tools,
+            library_name=path.stem,
+            mode=mode,
+            use_api=use_api,
+            client=client,
+            cross_library_product_ids=dict(cross_library) if cross_library else None,
+        )
+        all_results.append(result)
+        print(format_result(result, mode))
+
+        # Update cross-library tracking with this library's product-ids
+        if isinstance(tools, list):
+            for tool in tools:
+                if not isinstance(tool, dict) or not _is_sync_candidate(tool):
+                    continue
+                pid = tool.get("product-id")
+                if isinstance(pid, str) and pid and pid not in cross_library:
+                    cross_library[pid] = path.stem
+
+    any_failed = any(not r.passed for r in all_results)
+    return 1 if any_failed else 0
+
+
+if __name__ == "__main__":
+    sys.exit(_cli_main())
diff --git a/web/.env.example b/web/.env.example
new file mode 100644
index 0000000..bc336e2
--- /dev/null
+++ b/web/.env.example
@@ -0,0 +1,2 @@
+VITE_SUPABASE_URL=https://crimblieyypiyarssoel.supabase.co
+VITE_SUPABASE_ANON_KEY=your-anon-key-here
diff --git a/web/.gitignore b/web/.gitignore
new file mode 100644
index 0000000..a547bf3
--- /dev/null
+++ b/web/.gitignore
@@ -0,0 +1,24 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+lerna-debug.log*
+
+node_modules
+dist
+dist-ssr
+*.local
+
+# Editor directories and files
+.vscode/*
+!.vscode/extensions.json
+.idea
+.DS_Store
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff --git a/web/components.json b/web/components.json
new file mode 100644
index 0000000..15addee
--- /dev/null
+++ b/web/components.json
@@ -0,0 +1,25 @@
+{
+  "$schema": "https://ui.shadcn.com/schema.json",
+  "style": "base-nova",
+  "rsc": false,
+  "tsx": true,
+  "tailwind": {
+    "config": "",
+    "css": "src/index.css",
+    "baseColor": "neutral",
+    "cssVariables": true,
+    "prefix": ""
+  },
+  "iconLibrary": "lucide",
+  "rtl": false,
+  "aliases": {
+    "components": "@/components",
+    "utils": "@/lib/utils",
+    "ui": "@/components/ui",
+    "lib": "@/lib",
+    "hooks": "@/hooks"
+  },
+  "menuColor": "default",
+  "menuAccent": "subtle",
+  "registries": {}
+}
diff --git a/web/eslint.config.js b/web/eslint.config.js
new file mode 100644
index 0000000..5e6b472
--- /dev/null
+++ b/web/eslint.config.js
@@ -0,0 +1,23 @@
+import js from '@eslint/js'
+import globals from 'globals'
+import reactHooks from 'eslint-plugin-react-hooks'
+import reactRefresh from 'eslint-plugin-react-refresh'
+import tseslint from 'typescript-eslint'
+import { defineConfig, globalIgnores } from 'eslint/config'
+
+export default defineConfig([
+  globalIgnores(['dist']),
+  {
+    files: ['**/*.{ts,tsx}'],
+    extends: [
+      js.configs.recommended,
+      tseslint.configs.recommended,
+      reactHooks.configs.flat.recommended,
+      reactRefresh.configs.vite,
+    ],
+    languageOptions: {
+      ecmaVersion: 2020,
+      globals: globals.browser,
+    },
+  },
+])
diff --git a/web/index.html b/web/index.html
new file mode 100644
index 0000000..c13a2c6
--- /dev/null
+++ b/web/index.html
@@ -0,0 +1,13 @@
+<!doctype html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>Datum</title>
+  </head>
+  <body>
+    <div id="root"></div>
+    <script type="module" src="/src/main.tsx"></script>
+  </body>
+</html>
+ + + diff --git a/web/package-lock.json b/web/package-lock.json new file mode 100644 index 0000000..9cd4489 --- /dev/null +++ b/web/package-lock.json @@ -0,0 +1,7101 @@ +{ + "name": "web", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "web", + "version": "0.0.0", + "dependencies": { + "@base-ui/react": "^1.3.0", + "@fontsource-variable/geist": "^5.2.8", + "@supabase/supabase-js": "^2.103.0", + "@tailwindcss/vite": "^4.2.2", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "lucide-react": "^1.8.0", + "react": "^19.2.4", + "react-dom": "^19.2.4", + "react-router-dom": "^7.14.0", + "shadcn": "^4.2.0", + "tailwind-merge": "^3.5.0", + "tailwindcss": "^4.2.2", + "tw-animate-css": "^1.4.0" + }, + "devDependencies": { + "@eslint/js": "^9.39.4", + "@types/node": "^24.12.2", + "@types/react": "^19.2.14", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react": "^6.0.1", + "eslint": "^9.39.4", + "eslint-plugin-react-hooks": "^7.0.1", + "eslint-plugin-react-refresh": "^0.5.2", + "globals": "^17.4.0", + "typescript": "~6.0.2", + "typescript-eslint": "^8.58.0", + "vite": "^8.0.4" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.27.3", + "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.27.3.tgz", + 
"integrity": "sha512-fXSwMQqitTGeHLBC08Eq5yXz2m37E4pJX1qAU1+2cNedz/ifv/bVXft90VeSav5nFO61EcNgwr0aJxbyPaWBPg==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.27.3" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.28.6.tgz", + "integrity": "sha512-dTOdvsjnG3xNT9Y0AUg1wAl38y+4Rl4sf9caSQZOXdNqVn+H+HbbJ4IyyHaIqNR6SW9oJpA/RuRjsjCw2IdIow==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.3", + "@babel/helper-member-expression-to-functions": "^7.28.5", + "@babel/helper-optimise-call-expression": "^7.27.1", + "@babel/helper-replace-supers": "^7.28.6", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/traverse": "^7.28.6", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.28.5.tgz", + "integrity": "sha512-cwM7SBRZcPCLgl8a7cY0soT1SptSzAlMH39vwiRpOQkJlh53r5hdHwLSCZpQdVLT39sZt+CRpNwYG4Y2v77atg==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.5", + "@babel/types": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.27.1", + "resolved": 
"https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.27.1.tgz", + "integrity": "sha512-URMGH08NzYFhubNSGJrpUEphGKQwMQYBySzat5cAByY1/YgIRkULnIy3tAMeszlL/so2HbeilYloUmSpd7GdVw==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.28.6.tgz", + "integrity": "sha512-mq8e+laIk94/yFec3DxSjCRD2Z0TAjhVbEJY3UQrlwVo15Lmt7C2wAUbK4bjnTs4APkwsYLTahXRraQXhb1WCg==", + "license": "MIT", + "dependencies": { + "@babel/helper-member-expression-to-functions": "^7.28.5", + "@babel/helper-optimise-call-expression": "^7.27.1", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.27.1.tgz", + "integrity": "sha512-Tub4ZKEXqbPjXgWLl2+3JpQAYBJ8+ikpQ2Ocj/q/r0LwE3UhENh7EUabyHjz2kCEsrRY83ew2DQdHluuiDQFzg==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.29.2.tgz", + "integrity": "sha512-HoGuUs4sCZNezVEKdVcwqmZN8GoHirLUcLaYVNBK2J0DadGtdcqgr3BCbvH8+XUo4NGjNl3VOtSjEKNzqfFgKw==", + "license": "MIT", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.2.tgz", + "integrity": "sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA==", + "license": 
"MIT", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-syntax-jsx": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.28.6.tgz", + "integrity": "sha512-wgEmr06G6sIpqr8YDwA2dSRTE3bJ+V0IfpzfSY3Lfgd7YWOaAdlykvJi13ZKBt8cZHfgH1IXN+CL656W3uUa4w==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-typescript": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.28.6.tgz", + "integrity": "sha512-+nDNmQye7nlnuuHDboPbGm00Vqg3oO8niRRL27/4LYHUsHYh0zJ1xWOz0uRwNFmM1Avzk8wZbc6rdiYhomzv/A==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.28.6.tgz", + "integrity": "sha512-jppVbf8IV9iWWwWTQIxJMAJCWBuuKx71475wHwYytrRGQ2CWiDvYlADQno3tcYpS/T2UUWFQp3nVtYfK/YBQrA==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typescript": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.28.6.tgz", + "integrity": "sha512-0YWL2RFxOqEm9Efk5PvreamxPME8OyY0wM5wh5lHjF+VtVhdneCWGzZeSqzOfiobVqQaNCd2z0tQvnI9DaPWPw==", + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.27.3", + "@babel/helper-create-class-features-plugin": "^7.28.6", + "@babel/helper-plugin-utils": "^7.28.6", + "@babel/helper-skip-transparent-expression-wrappers": "^7.27.1", + "@babel/plugin-syntax-typescript": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-typescript": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.28.5.tgz", + "integrity": "sha512-+bQy5WOI2V6LJZpPVxY+yp66XdZ2yifu0Mc1aP5CQKgjn4QM5IN2i5fAZ4xKop47pr8rpVhiAeu+nDQa12C8+g==", + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1", + "@babel/helper-validator-option": "^7.27.1", + "@babel/plugin-syntax-jsx": "^7.27.1", + "@babel/plugin-transform-modules-commonjs": "^7.27.1", + "@babel/plugin-transform-typescript": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.29.2.tgz", + "integrity": "sha512-JiDShH45zKHWyGe4ZNVRrCjBz8Nh9TMmZG1kh4QTK8hCBTWBi8Da+i7s1fJw7/lYpM4ccepSNfqzZ/QvABBi5g==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": 
"sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@base-ui/react": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@base-ui/react/-/react-1.3.0.tgz", + "integrity": "sha512-FwpKqZbPz14AITp1CVgf4AjhKPe1OeeVKSBMdgD10zbFlj3QSWelmtCMLi2+/PFZZcIm3l87G7rwtCZJwHyXWA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.28.6", + "@base-ui/utils": "0.2.6", + "@floating-ui/react-dom": "^2.1.8", + "@floating-ui/utils": "^0.2.11", + "tabbable": "^6.4.0", + "use-sync-external-store": "^1.6.0" + }, + "engines": { + "node": ">=14.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/mui-org" + }, + "peerDependencies": { + "@types/react": "^17 || ^18 || ^19", + "react": "^17 || ^18 || ^19", + "react-dom": "^17 || ^18 || ^19" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@base-ui/utils": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/@base-ui/utils/-/utils-0.2.6.tgz", + "integrity": "sha512-yQ+qeuqohwhsNpoYDqqXaLllYAkPCP4vYdDrVo8FQXaAPfHWm1pG/Vm+jmGTA5JFS0BAIjookyapuJFY8F9PIw==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.28.6", + "@floating-ui/utils": "^0.2.11", + "reselect": "^5.1.1", + "use-sync-external-store": "^1.6.0" + }, + "peerDependencies": { + "@types/react": "^17 || ^18 || ^19", + "react": "^17 || ^18 || ^19", + "react-dom": "^17 || ^18 || ^19" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@dotenvx/dotenvx": { + "version": "1.61.0", + "resolved": "https://registry.npmjs.org/@dotenvx/dotenvx/-/dotenvx-1.61.0.tgz", + "integrity": "sha512-utL3cpZoFzflyqUkjYbxYujI6STBTmO5LFn4bbin/NZnRWN6wQ7eErhr3/Vpa5h/jicPFC6kTa42r940mQftJQ==", + "license": "BSD-3-Clause", + "dependencies": { + "commander": "^11.1.0", + "dotenv": "^17.2.1", + "eciesjs": "^0.4.10", + "execa": "^5.1.1", + "fdir": "^6.2.0", + "ignore": "^5.3.0", + "object-treeify": "1.1.33", + "picomatch": "^4.0.2", + "which": "^4.0.0", + "yocto-spinner": "^1.1.0" + }, + "bin": { + "dotenvx": "src/cli/dotenvx.js" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/@dotenvx/dotenvx/node_modules/commander": { + "version": "11.1.0", + "resolved": 
"https://registry.npmjs.org/commander/-/commander-11.1.0.tgz", + "integrity": "sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==", + "license": "MIT", + "engines": { + "node": ">=16" + } + }, + "node_modules/@dotenvx/dotenvx/node_modules/execa": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/@dotenvx/dotenvx/node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@dotenvx/dotenvx/node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/@dotenvx/dotenvx/node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@dotenvx/dotenvx/node_modules/isexe": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-3.1.5.tgz", + "integrity": "sha512-6B3tLtFqtQS4ekarvLVMZ+X+VlvQekbe4taUkf/rhVO3d/h0M2rfARm/pXLcPEsjjMsFgrFgSrhQIxcSVrBz8w==", + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/@dotenvx/dotenvx/node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "license": "MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@dotenvx/dotenvx/node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@dotenvx/dotenvx/node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "license": "ISC" + }, + 
"node_modules/@dotenvx/dotenvx/node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/@dotenvx/dotenvx/node_modules/which": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/which/-/which-4.0.0.tgz", + "integrity": "sha512-GlaYyEb07DPxYCKhKzplCWBJtvxZcZMrL+4UkrTSJHHPyZU4mYYTv3qaOe77H7EODLSSopAUFAc6W8U4yqvscg==", + "license": "ISC", + "dependencies": { + "isexe": "^3.1.1" + }, + "bin": { + "node-which": "bin/which.js" + }, + "engines": { + "node": "^16.13.0 || >=18.0.0" + } + }, + "node_modules/@ecies/ciphers": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/@ecies/ciphers/-/ciphers-0.2.6.tgz", + "integrity": "sha512-patgsRPKGkhhoBjETV4XxD0En4ui5fbX0hzayqI3M8tvNMGUoUvmyYAIWwlxBc1KX5cturfqByYdj5bYGRpN9g==", + "license": "MIT", + "engines": { + "bun": ">=1", + "deno": ">=2.7.10", + "node": ">=16" + }, + "peerDependencies": { + "@noble/ciphers": "^1.0.0" + } + }, + "node_modules/@emnapi/core": { + "version": "1.9.2", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.9.2.tgz", + "integrity": "sha512-UC+ZhH3XtczQYfOlu3lNEkdW/p4dsJ1r/bP7H8+rhao3TTTMO1ATq/4DdIi23XuGoFY+Cz0JmCbdVl0hz9jZcA==", + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/wasi-threads": "1.2.1", + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/runtime": { + "version": "1.9.2", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.9.2.tgz", + "integrity": "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw==", + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@emnapi/wasi-threads": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.2.1.tgz", + "integrity": "sha512-uTII7OYF+/Mes/MrcIOYp5yOtSMLBWSIoLPpcgwipoiKbli6k322tcoFsxoIIxPDqW01SQGAgko4EzZi2BNv2w==", + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": 
"sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/config-array": { + "version": "0.21.2", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.2.tgz", + "integrity": "sha512-nJl2KGTlrf9GjLimgIru+V/mzgSK0ABCDQRvxw5BjURL7WfH5uoWmizbH7QB6MmnMBd8cIC9uceWnezL1VZWWw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/object-schema": "^2.1.7", + "debug": "^4.3.1", + "minimatch": "^3.1.5" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/config-helpers": { + "version": "0.4.2", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.4.2.tgz", + "integrity": "sha512-gBrxN88gOIf3R7ja5K9slwNayVcZgK6SOUORm2uBzTeIEfeVaIhOpCtTox3P6R7o2jLFwLFTLnC7kU/RGcYEgw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/core": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.17.0.tgz", + "integrity": "sha512-yL/sLrpmtDaFEiUj1osRP4TI2MDz1AddJL+jZ7KSqvBuliN4xqYY54IfdN8qD8Toa6g1iloph1fxQNkjOxrrpQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@types/json-schema": "^7.0.15" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "3.3.5", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-3.3.5.tgz", + "integrity": "sha512-4IlJx0X0qftVsN5E+/vGujTRIFtwuLbNsVUe7TO6zYPDR1O6nFwvwhIKEKSrl6dZchmYBITazxKoUYOjdtjlRg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.14.0", + "debug": "^4.3.2", + "espree": "^10.0.1", + "globals": "^14.0.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.1", + "minimatch": "^3.1.5", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/globals": { + "version": "14.0.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", + "integrity": "sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@eslint/js": { + "version": "9.39.4", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.39.4.tgz", + "integrity": "sha512-nE7DEIchvtiFTwBw4Lfbu59PG+kCofhjsKaCWzxTpt4lfRjRMqG6uMBzKXuEcyXhOHoUp9riAm7/aWYGhXZ9cw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + } + }, + "node_modules/@eslint/object-schema": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/@eslint/object-schema/-/object-schema-2.1.7.tgz", + "integrity": "sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@eslint/plugin-kit": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.4.1.tgz", + 
"integrity": "sha512-43/qtrDUokr7LJqoF2c3+RInu/t4zfrpYdoSDfYyhg52rwLV6TnOvdG4fXm7IkSB3wErkcmJS9iEhjVtOSEjjA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@eslint/core": "^0.17.0", + "levn": "^0.4.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + } + }, + "node_modules/@floating-ui/core": { + "version": "1.7.5", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.7.5.tgz", + "integrity": "sha512-1Ih4WTWyw0+lKyFMcBHGbb5U5FtuHJuujoyyr5zTaWS5EYMeT6Jb2AuDeftsCsEuchO+mM2ij5+q9crhydzLhQ==", + "license": "MIT", + "dependencies": { + "@floating-ui/utils": "^0.2.11" + } + }, + "node_modules/@floating-ui/dom": { + "version": "1.7.6", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.7.6.tgz", + "integrity": "sha512-9gZSAI5XM36880PPMm//9dfiEngYoC6Am2izES1FF406YFsjvyBMmeJ2g4SAju3xWwtuynNRFL2s9hgxpLI5SQ==", + "license": "MIT", + "dependencies": { + "@floating-ui/core": "^1.7.5", + "@floating-ui/utils": "^0.2.11" + } + }, + "node_modules/@floating-ui/react-dom": { + "version": "2.1.8", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.1.8.tgz", + "integrity": "sha512-cC52bHwM/n/CxS87FH0yWdngEZrjdtLW/qVruo68qg+prK7ZQ4YGdut2GyDVpoGeAYe/h899rVeOVm6Oi40k2A==", + "license": "MIT", + "dependencies": { + "@floating-ui/dom": "^1.7.6" + }, + "peerDependencies": { + "react": ">=16.8.0", + "react-dom": ">=16.8.0" + } + }, + "node_modules/@floating-ui/utils": { + "version": "0.2.11", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.11.tgz", + "integrity": "sha512-RiB/yIh78pcIxl6lLMG0CgBXAZ2Y0eVHqMPYugu+9U0AeT6YBeiJpf7lbdJNIugFP5SIjwNRgo4DhR1Qxi26Gg==", + "license": "MIT" + }, + "node_modules/@fontsource-variable/geist": { + "version": "5.2.8", + "resolved": "https://registry.npmjs.org/@fontsource-variable/geist/-/geist-5.2.8.tgz", + "integrity": "sha512-cJ6m9e+8MQ5dCYJsLylfZrgBh6KkG4bOLckB35Tr9J/EqdkEM6QllH5PxqP1dhTvFup+HtMRPuz9xOjxXJggxw==", + "license": "OFL-1.1", + "funding": { + "url": "https://github.com/sponsors/ayuhito" + } + }, + "node_modules/@hono/node-server": { + "version": "1.19.13", + "resolved": "https://registry.npmjs.org/@hono/node-server/-/node-server-1.19.13.tgz", + "integrity": "sha512-TsQLe4i2gvoTtrHje625ngThGBySOgSK3Xo2XRYOdqGN1teR8+I7vchQC46uLJi8OF62YTYA3AhSpumtkhsaKQ==", + "license": "MIT", + "engines": { + "node": ">=18.14.1" + }, + "peerDependencies": { + "hono": "^4" + } + }, + "node_modules/@humanfs/core": { + "version": "0.19.1", + "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", + "integrity": "sha512-5DyQ4+1JEUzejeK1JGICcideyfUbGixgS9jNgex5nqkW+cY7WZhxBigmieN5Qnw9ZosSNVC9KQKyb+GUaGyKUA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanfs/node": { + "version": "0.16.7", + "resolved": "https://registry.npmjs.org/@humanfs/node/-/node-0.16.7.tgz", + "integrity": "sha512-/zUx+yOsIrG4Y43Eh2peDeKCxlRt/gET6aHfaKpuq267qXdYDFViVHfMaLyygZOnl0kGWxFIgsBy8QFuTLUXEQ==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanfs/core": "^0.19.1", + "@humanwhocodes/retry": "^0.4.0" + }, + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + 
"node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/retry": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/retry/-/retry-0.4.3.tgz", + "integrity": "sha512-bV0Tgo9K4hfPCek+aMAn81RppFKv2ySDQeMoSZuvTASywNTnVJCArCZE2FWqpvIatKu7VMRLWlR1EazvVhDyhQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18.18" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@inquirer/ansi": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/@inquirer/ansi/-/ansi-1.0.2.tgz", + "integrity": "sha512-S8qNSZiYzFd0wAcyG5AXCvUHC5Sr7xpZ9wZ2py9XR88jUz8wooStVx5M6dRzczbBWjic9NP7+rY0Xi7qqK/aMQ==", + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/@inquirer/confirm": { + "version": "5.1.21", + "resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-5.1.21.tgz", + "integrity": "sha512-KR8edRkIsUayMXV+o3Gv+q4jlhENF9nMYUZs9PA2HzrXeHI8M5uDag70U7RJn9yyiMZSbtF5/UexBtAVtZGSbQ==", + "license": "MIT", + "dependencies": { + "@inquirer/core": "^10.3.2", + "@inquirer/type": "^3.0.10" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/core": { + "version": "10.3.2", + "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.3.2.tgz", + "integrity": "sha512-43RTuEbfP8MbKzedNqBrlhhNKVwoK//vUFNW3Q3vZ88BLcrs4kYpGg+B2mm5p2K/HfygoCxuKwJJiv8PbGmE0A==", + "license": "MIT", + "dependencies": { + "@inquirer/ansi": "^1.0.2", + "@inquirer/figures": "^1.0.15", + "@inquirer/type": "^3.0.10", + "cli-width": "^4.1.0", + "mute-stream": "^2.0.0", + "signal-exit": "^4.1.0", + "wrap-ansi": "^6.2.0", + "yoctocolors-cjs": "^2.1.3" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@inquirer/figures": { + "version": "1.0.15", + "resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.15.tgz", + "integrity": "sha512-t2IEY+unGHOzAaVM5Xx6DEWKeXlDDcNPeDyUpsRc6CUhBfU3VQOEl+Vssh7VNp1dR8MdUJBWhuObjXCsVpjN5g==", + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/@inquirer/type": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@inquirer/type/-/type-3.0.10.tgz", + "integrity": "sha512-BvziSRxfz5Ov8ch0z/n3oijRSEcEsHnhggm4xFZe93DHcUCTlutlq9Ox4SVENAfcRD22UQq7T/atg9Wr3k09eA==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@types/node": ">=18" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + } + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "license": "MIT", + "dependencies": { + 
"@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@modelcontextprotocol/sdk": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.29.0.tgz", + "integrity": "sha512-zo37mZA9hJWpULgkRpowewez1y6ML5GsXJPY8FI0tBBCd77HEvza4jDqRKOXgHNn867PVGCyTdzqpz0izu5ZjQ==", + "license": "MIT", + "dependencies": { + "@hono/node-server": "^1.19.9", + "ajv": "^8.17.1", + "ajv-formats": "^3.0.1", + "content-type": "^1.0.5", + "cors": "^2.8.5", + "cross-spawn": "^7.0.5", + "eventsource": "^3.0.2", + "eventsource-parser": "^3.0.0", + "express": "^5.2.1", + "express-rate-limit": "^8.2.1", + "hono": "^4.11.4", + "jose": "^6.1.3", + "json-schema-typed": "^8.0.2", + "pkce-challenge": "^5.0.0", + "raw-body": "^3.0.0", + "zod": "^3.25 || ^4.0", + "zod-to-json-schema": "^3.25.1" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@cfworker/json-schema": "^4.1.1", + "zod": "^3.25 || ^4.0" + }, + "peerDependenciesMeta": { + "@cfworker/json-schema": { + "optional": true + }, + "zod": { + "optional": false + } + } + }, + "node_modules/@modelcontextprotocol/sdk/node_modules/ajv": { + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz", + "integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/@modelcontextprotocol/sdk/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "license": "MIT" + }, + "node_modules/@mswjs/interceptors": { + "version": "0.41.3", + "resolved": "https://registry.npmjs.org/@mswjs/interceptors/-/interceptors-0.41.3.tgz", + "integrity": "sha512-cXu86tF4VQVfwz8W1SPbhoRyHJkti6mjH/XJIxp40jhO4j2k1m4KYrEykxqWPkFF3vrK4rgQppBh//AwyGSXPA==", + "license": "MIT", + "dependencies": { + "@open-draft/deferred-promise": "^2.2.0", + "@open-draft/logger": "^0.3.0", + "@open-draft/until": "^2.0.0", + "is-node-process": "^1.2.0", + "outvariant": "^1.4.3", + "strict-event-emitter": "^0.5.1" + }, 
+ "engines": { + "node": ">=18" + } + }, + "node_modules/@napi-rs/wasm-runtime": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@napi-rs/wasm-runtime/-/wasm-runtime-1.1.3.tgz", + "integrity": "sha512-xK9sGVbJWYb08+mTJt3/YV24WxvxpXcXtP6B172paPZ+Ts69Re9dAr7lKwJoeIx8OoeuimEiRZ7umkiUVClmmQ==", + "license": "MIT", + "optional": true, + "dependencies": { + "@tybys/wasm-util": "^0.10.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Brooooooklyn" + }, + "peerDependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1" + } + }, + "node_modules/@noble/ciphers": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@noble/ciphers/-/ciphers-1.3.0.tgz", + "integrity": "sha512-2I0gnIVPtfnMw9ee9h1dJG7tp81+8Ob3OJb3Mv37rx5L40/b0i7djjCVvGOVqc9AEIQyvyu1i6ypKdFw8R8gQw==", + "license": "MIT", + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@noble/curves": { + "version": "1.9.7", + "resolved": "https://registry.npmjs.org/@noble/curves/-/curves-1.9.7.tgz", + "integrity": "sha512-gbKGcRUYIjA3/zCCNaWDciTMFI0dCkvou3TL8Zmy5Nc7sJ47a0jtOeZoTaMxkuqRo9cRhjOdZJXegxYE5FN/xw==", + "license": "MIT", + "dependencies": { + "@noble/hashes": "1.8.0" + }, + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@noble/hashes": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz", + "integrity": "sha512-jCs9ldd7NwzpgXDIf6P3+NrHh9/sD6CQdxHyjQI+h/6rDNo88ypBxxz45UDuZHz9r3tNz7N/VInSVoVdtXEI4A==", + "license": "MIT", + "engines": { + "node": "^14.21.3 || >=16" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@open-draft/deferred-promise": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/@open-draft/deferred-promise/-/deferred-promise-2.2.0.tgz", + "integrity": "sha512-CecwLWx3rhxVQF6V4bAgPS5t+So2sTbPgAzafKkVizyi7tlwpcFpdFqq+wqF2OwNBmqFuu6tOyouTuxgpMfzmA==", + "license": "MIT" + }, + "node_modules/@open-draft/logger": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@open-draft/logger/-/logger-0.3.0.tgz", + "integrity": "sha512-X2g45fzhxH238HKO4xbSr7+wBS8Fvw6ixhTDuvLd5mqh6bJJCFAPwU9mPDxbcrRtfxv4u5IHCEH77BmxvXmmxQ==", + "license": "MIT", + "dependencies": { + "is-node-process": "^1.2.0", + 
"outvariant": "^1.4.0" + } + }, + "node_modules/@open-draft/until": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@open-draft/until/-/until-2.1.0.tgz", + "integrity": "sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==", + "license": "MIT" + }, + "node_modules/@oxc-project/types": { + "version": "0.124.0", + "resolved": "https://registry.npmjs.org/@oxc-project/types/-/types-0.124.0.tgz", + "integrity": "sha512-VBFWMTBvHxS11Z5Lvlr3IWgrwhMTXV+Md+EQF0Xf60+wAdsGFTBx7X7K/hP4pi8N7dcm1RvcHwDxZ16Qx8keUg==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/Boshen" + } + }, + "node_modules/@rolldown/binding-android-arm64": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-android-arm64/-/binding-android-arm64-1.0.0-rc.15.tgz", + "integrity": "sha512-YYe6aWruPZDtHNpwu7+qAHEMbQ/yRl6atqb/AhznLTnD3UY99Q1jE7ihLSahNWkF4EqRPVC4SiR4O0UkLK02tA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-darwin-arm64": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-darwin-arm64/-/binding-darwin-arm64-1.0.0-rc.15.tgz", + "integrity": "sha512-oArR/ig8wNTPYsXL+Mzhs0oxhxfuHRfG7Ikw7jXsw8mYOtk71W0OkF2VEVh699pdmzjPQsTjlD1JIOoHkLP1Fg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-darwin-x64": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-darwin-x64/-/binding-darwin-x64-1.0.0-rc.15.tgz", + "integrity": "sha512-YzeVqOqjPYvUbJSWJ4EDL8ahbmsIXQpgL3JVipmN+MX0XnXMeWomLN3Fb+nwCmP/jfyqte5I3XRSm7OfQrbyxw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-freebsd-x64": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-freebsd-x64/-/binding-freebsd-x64-1.0.0-rc.15.tgz", + "integrity": "sha512-9Erhx956jeQ0nNTyif1+QWAXDRD38ZNjr//bSHrt6wDwB+QkAfl2q6Mn1k6OBPerznjRmbM10lgRb1Pli4xZPw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-arm-gnueabihf": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm-gnueabihf/-/binding-linux-arm-gnueabihf-1.0.0-rc.15.tgz", + "integrity": "sha512-cVwk0w8QbZJGTnP/AHQBs5yNwmpgGYStL88t4UIaqcvYJWBfS0s3oqVLZPwsPU6M0zlW4GqjP0Zq5MnAGwFeGA==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-arm64-gnu": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-1.0.0-rc.15.tgz", + "integrity": "sha512-eBZ/u8iAK9SoHGanqe/jrPnY0JvBN6iXbVOsbO38mbz+ZJsaobExAm1Iu+rxa4S1l2FjG0qEZn4Rc6X8n+9M+w==", + "cpu": [ + "arm64" + ], + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-arm64-musl": { + "version": "1.0.0-rc.15", + "resolved": 
"https://registry.npmjs.org/@rolldown/binding-linux-arm64-musl/-/binding-linux-arm64-musl-1.0.0-rc.15.tgz", + "integrity": "sha512-ZvRYMGrAklV9PEkgt4LQM6MjQX2P58HPAuecwYObY2DhS2t35R0I810bKi0wmaYORt6m/2Sm+Z+nFgb0WhXNcQ==", + "cpu": [ + "arm64" + ], + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-ppc64-gnu": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-ppc64-gnu/-/binding-linux-ppc64-gnu-1.0.0-rc.15.tgz", + "integrity": "sha512-VDpgGBzgfg5hLg+uBpCLoFG5kVvEyafmfxGUV0UHLcL5irxAK7PKNeC2MwClgk6ZAiNhmo9FLhRYgvMmedLtnQ==", + "cpu": [ + "ppc64" + ], + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-s390x-gnu": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-s390x-gnu/-/binding-linux-s390x-gnu-1.0.0-rc.15.tgz", + "integrity": "sha512-y1uXY3qQWCzcPgRJATPSOUP4tCemh4uBdY7e3EZbVwCJTY3gLJWnQABgeUetvED+bt1FQ01OeZwvhLS2bpNrAQ==", + "cpu": [ + "s390x" + ], + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-x64-gnu": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-x64-gnu/-/binding-linux-x64-gnu-1.0.0-rc.15.tgz", + "integrity": "sha512-023bTPBod7J3Y/4fzAN6QtpkSABR0rigtrwaP+qSEabUh5zf6ELr9Nc7GujaROuPY3uwdSIXWrvhn1KxOvurWA==", + "cpu": [ + "x64" + ], + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-linux-x64-musl": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-linux-x64-musl/-/binding-linux-x64-musl-1.0.0-rc.15.tgz", + "integrity": "sha512-witB2O0/hU4CgfOOKUoeFgQ4GktPi1eEbAhaLAIpgD6+ZnhcPkUtPsoKKHRzmOoWPZue46IThdSgdo4XneOLYw==", + "cpu": [ + "x64" + ], + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-openharmony-arm64": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-openharmony-arm64/-/binding-openharmony-arm64-1.0.0-rc.15.tgz", + "integrity": "sha512-UCL68NJ0Ud5zRipXZE9dF5PmirzJE4E4BCIOOssEnM7wLDsxjc6Qb0sGDxTNRTP53I6MZpygyCpY8Aa8sPfKPg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-wasm32-wasi": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-wasm32-wasi/-/binding-wasm32-wasi-1.0.0-rc.15.tgz", + "integrity": "sha512-ApLruZq/ig+nhaE7OJm4lDjayUnOHVUa77zGeqnqZ9pn0ovdVbbNPerVibLXDmWeUZXjIYIT8V3xkT58Rm9u5Q==", + "cpu": [ + "wasm32" + ], + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "1.9.2", + "@emnapi/runtime": "1.9.2", + "@napi-rs/wasm-runtime": "^1.1.3" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@rolldown/binding-win32-arm64-msvc": { + "version": "1.0.0-rc.15", + "resolved": 
"https://registry.npmjs.org/@rolldown/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-1.0.0-rc.15.tgz", + "integrity": "sha512-KmoUoU7HnN+Si5YWJigfTws1jz1bKBYDQKdbLspz0UaqjjFkddHsqorgiW1mxcAj88lYUE6NC/zJNwT+SloqtA==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/binding-win32-x64-msvc": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/binding-win32-x64-msvc/-/binding-win32-x64-msvc-1.0.0-rc.15.tgz", + "integrity": "sha512-3P2A8L+x75qavWLe/Dll3EYBJLQmtkJN8rfh+U/eR3MqMgL/h98PhYI+JFfXuDPgPeCB7iZAKiqii5vqOvnA0g==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^20.19.0 || >=22.12.0" + } + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-rc.7", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-rc.7.tgz", + "integrity": "sha512-qujRfC8sFVInYSPPMLQByRh7zhwkGFS4+tyMQ83srV1qrxL4g8E2tyxVVyxd0+8QeBM1mIk9KbWxkegRr76XzA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@sec-ant/readable-stream": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/@sec-ant/readable-stream/-/readable-stream-0.4.1.tgz", + "integrity": "sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==", + "license": "MIT" + }, + "node_modules/@sindresorhus/merge-streams": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/merge-streams/-/merge-streams-4.0.0.tgz", + "integrity": "sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@supabase/auth-js": { + "version": "2.103.0", + "resolved": "https://registry.npmjs.org/@supabase/auth-js/-/auth-js-2.103.0.tgz", + "integrity": "sha512-6zAanO6c+6gpHOlt5Lb9TlBBkJdZiUWkWCJKAxzkywBDcwaHlLJKXnjQGX6GyVCyKRR1e7sTq4re/yRTH6U/9A==", + "license": "MIT", + "dependencies": { + "tslib": "2.8.1" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@supabase/functions-js": { + "version": "2.103.0", + "resolved": "https://registry.npmjs.org/@supabase/functions-js/-/functions-js-2.103.0.tgz", + "integrity": "sha512-YrneV2NjskUkkmkZ2Jt2n3elBgbWzV4Y1M9MM370z2Zd5ZPFqFbY8KIoPwuNjtAGE9YrpKBxnbZqeF07BiN9Og==", + "license": "MIT", + "dependencies": { + "tslib": "2.8.1" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@supabase/phoenix": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/@supabase/phoenix/-/phoenix-0.4.0.tgz", + "integrity": "sha512-RHSx8bHS02xwfHdAbX5Lpbo6PXbgyf7lTaXTlwtFDPwOIw64NnVRwFAXGojHhjtVYI+PEPNSWwkL90f4agN3bw==", + "license": "MIT" + }, + "node_modules/@supabase/postgrest-js": { + "version": "2.103.0", + "resolved": "https://registry.npmjs.org/@supabase/postgrest-js/-/postgrest-js-2.103.0.tgz", + "integrity": "sha512-rC3sRxYdPZymkp2CZR1MiNQgbOleD01bGsW8VxEKRR5nMkLZ1NgAS1QTQf78Wh30czFyk505ZYr9Od8/mWT2TA==", + "license": "MIT", + "dependencies": { + "tslib": "2.8.1" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@supabase/realtime-js": { + "version": "2.103.0", + "resolved": "https://registry.npmjs.org/@supabase/realtime-js/-/realtime-js-2.103.0.tgz", + "integrity": "sha512-gcPtXzZ6izyyBVf2of7K3dEt8CScPJn8VcSlQq6oWL9QoE1kqfQl0oFrOMHd5qrcADewxI7OxxosLB8W4XqtIQ==", 
+ "license": "MIT", + "dependencies": { + "@supabase/phoenix": "^0.4.0", + "@types/ws": "^8.18.1", + "tslib": "2.8.1", + "ws": "^8.18.2" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@supabase/storage-js": { + "version": "2.103.0", + "resolved": "https://registry.npmjs.org/@supabase/storage-js/-/storage-js-2.103.0.tgz", + "integrity": "sha512-DHmlvdAXwtOmZNbkIZi4lkobPR3XjIzoOgzoz5duMf6G+sDeY015YrzMJCnqdccuYr7X5x4yYuSwF//RoN2dvQ==", + "license": "MIT", + "dependencies": { + "iceberg-js": "^0.8.1", + "tslib": "2.8.1" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@supabase/supabase-js": { + "version": "2.103.0", + "resolved": "https://registry.npmjs.org/@supabase/supabase-js/-/supabase-js-2.103.0.tgz", + "integrity": "sha512-j/6q5+LtXbR/YOLSLhy7Na74RD1cV2v+KwIIuuqMEjk1JpLEEyu0ynwDHpGoxMncDQl+R5FogaVqZm+85lZvtw==", + "license": "MIT", + "dependencies": { + "@supabase/auth-js": "2.103.0", + "@supabase/functions-js": "2.103.0", + "@supabase/postgrest-js": "2.103.0", + "@supabase/realtime-js": "2.103.0", + "@supabase/storage-js": "2.103.0" + }, + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/@tailwindcss/node": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.2.2.tgz", + "integrity": "sha512-pXS+wJ2gZpVXqFaUEjojq7jzMpTGf8rU6ipJz5ovJV6PUGmlJ+jvIwGrzdHdQ80Sg+wmQxUFuoW1UAAwHNEdFA==", + "license": "MIT", + "dependencies": { + "@jridgewell/remapping": "^2.3.5", + "enhanced-resolve": "^5.19.0", + "jiti": "^2.6.1", + "lightningcss": "1.32.0", + "magic-string": "^0.30.21", + "source-map-js": "^1.2.1", + "tailwindcss": "4.2.2" + } + }, + "node_modules/@tailwindcss/oxide": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.2.2.tgz", + "integrity": "sha512-qEUA07+E5kehxYp9BVMpq9E8vnJuBHfJEC0vPC5e7iL/hw7HR61aDKoVoKzrG+QKp56vhNZe4qwkRmMC0zDLvg==", + "license": "MIT", + "engines": { + "node": ">= 20" + }, + "optionalDependencies": { + "@tailwindcss/oxide-android-arm64": "4.2.2", + "@tailwindcss/oxide-darwin-arm64": "4.2.2", + "@tailwindcss/oxide-darwin-x64": "4.2.2", + "@tailwindcss/oxide-freebsd-x64": "4.2.2", + "@tailwindcss/oxide-linux-arm-gnueabihf": "4.2.2", + "@tailwindcss/oxide-linux-arm64-gnu": "4.2.2", + "@tailwindcss/oxide-linux-arm64-musl": "4.2.2", + "@tailwindcss/oxide-linux-x64-gnu": "4.2.2", + "@tailwindcss/oxide-linux-x64-musl": "4.2.2", + "@tailwindcss/oxide-wasm32-wasi": "4.2.2", + "@tailwindcss/oxide-win32-arm64-msvc": "4.2.2", + "@tailwindcss/oxide-win32-x64-msvc": "4.2.2" + } + }, + "node_modules/@tailwindcss/oxide-android-arm64": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.2.2.tgz", + "integrity": "sha512-dXGR1n+P3B6748jZO/SvHZq7qBOqqzQ+yFrXpoOWWALWndF9MoSKAT3Q0fYgAzYzGhxNYOoysRvYlpixRBBoDg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 20" + } + }, + "node_modules/@tailwindcss/oxide-darwin-arm64": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.2.2.tgz", + "integrity": "sha512-iq9Qjr6knfMpZHj55/37ouZeykwbDqF21gPFtfnhCCKGDcPI/21FKC9XdMO/XyBM7qKORx6UIhGgg6jLl7BZlg==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 20" + } + }, + "node_modules/@tailwindcss/oxide-darwin-x64": { + "version": "4.2.2", + "resolved": 
"https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.2.2.tgz", + "integrity": "sha512-BlR+2c3nzc8f2G639LpL89YY4bdcIdUmiOOkv2GQv4/4M0vJlpXEa0JXNHhCHU7VWOKWT/CjqHdTP8aUuDJkuw==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 20" + } + }, + "node_modules/@tailwindcss/oxide-freebsd-x64": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.2.2.tgz", + "integrity": "sha512-YUqUgrGMSu2CDO82hzlQ5qSb5xmx3RUrke/QgnoEx7KvmRJHQuZHZmZTLSuuHwFf0DJPybFMXMYf+WJdxHy/nQ==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 20" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.2.2.tgz", + "integrity": "sha512-FPdhvsW6g06T9BWT0qTwiVZYE2WIFo2dY5aCSpjG/S/u1tby+wXoslXS0kl3/KXnULlLr1E3NPRRw0g7t2kgaQ==", + "cpu": [ + "arm" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 20" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-gnu": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.2.2.tgz", + "integrity": "sha512-4og1V+ftEPXGttOO7eCmW7VICmzzJWgMx+QXAJRAhjrSjumCwWqMfkDrNu1LXEQzNAwz28NCUpucgQPrR4S2yw==", + "cpu": [ + "arm64" + ], + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 20" + } + }, + "node_modules/@tailwindcss/oxide-linux-arm64-musl": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.2.2.tgz", + "integrity": "sha512-oCfG/mS+/+XRlwNjnsNLVwnMWYH7tn/kYPsNPh+JSOMlnt93mYNCKHYzylRhI51X+TbR+ufNhhKKzm6QkqX8ag==", + "cpu": [ + "arm64" + ], + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 20" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-gnu": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.2.2.tgz", + "integrity": "sha512-rTAGAkDgqbXHNp/xW0iugLVmX62wOp2PoE39BTCGKjv3Iocf6AFbRP/wZT/kuCxC9QBh9Pu8XPkv/zCZB2mcMg==", + "cpu": [ + "x64" + ], + "libc": [ + "glibc" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 20" + } + }, + "node_modules/@tailwindcss/oxide-linux-x64-musl": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.2.2.tgz", + "integrity": "sha512-XW3t3qwbIwiSyRCggeO2zxe3KWaEbM0/kW9e8+0XpBgyKU4ATYzcVSMKteZJ1iukJ3HgHBjbg9P5YPRCVUxlnQ==", + "cpu": [ + "x64" + ], + "libc": [ + "musl" + ], + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 20" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.2.2.tgz", + "integrity": "sha512-eKSztKsmEsn1O5lJ4ZAfyn41NfG7vzCg496YiGtMDV86jz1q/irhms5O0VrY6ZwTUkFy/EKG3RfWgxSI3VbZ8Q==", + "bundleDependencies": [ + "@napi-rs/wasm-runtime", + "@emnapi/core", + "@emnapi/runtime", + "@tybys/wasm-util", + "@emnapi/wasi-threads", + "tslib" + ], + "cpu": [ + "wasm32" + ], + "license": "MIT", + 
"optional": true, + "dependencies": { + "@emnapi/core": "^1.8.1", + "@emnapi/runtime": "^1.8.1", + "@emnapi/wasi-threads": "^1.1.0", + "@napi-rs/wasm-runtime": "^1.1.1", + "@tybys/wasm-util": "^0.10.1", + "tslib": "^2.8.1" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.2.2.tgz", + "integrity": "sha512-qPmaQM4iKu5mxpsrWZMOZRgZv1tOZpUm+zdhhQP0VhJfyGGO3aUKdbh3gDZc/dPLQwW4eSqWGrrcWNBZWUWaXQ==", + "cpu": [ + "arm64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 20" + } + }, + "node_modules/@tailwindcss/oxide-win32-x64-msvc": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.2.2.tgz", + "integrity": "sha512-1T/37VvI7WyH66b+vqHj/cLwnCxt7Qt3WFu5Q8hk65aOvlwAhs7rAp1VkulBJw/N4tMirXjVnylTR72uI0HGcA==", + "cpu": [ + "x64" + ], + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 20" + } + }, + "node_modules/@tailwindcss/vite": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/@tailwindcss/vite/-/vite-4.2.2.tgz", + "integrity": "sha512-mEiF5HO1QqCLXoNEfXVA1Tzo+cYsrqV7w9Juj2wdUFyW07JRenqMG225MvPwr3ZD9N1bFQj46X7r33iHxLUW0w==", + "license": "MIT", + "dependencies": { + "@tailwindcss/node": "4.2.2", + "@tailwindcss/oxide": "4.2.2", + "tailwindcss": "4.2.2" + }, + "peerDependencies": { + "vite": "^5.2.0 || ^6 || ^7 || ^8" + } + }, + "node_modules/@ts-morph/common": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/@ts-morph/common/-/common-0.27.0.tgz", + "integrity": "sha512-Wf29UqxWDpc+i61k3oIOzcUfQt79PIT9y/MWfAGlrkjg6lBC1hwDECLXPVJAhWjiGbfBCxZd65F/LIZF3+jeJQ==", + "license": "MIT", + "dependencies": { + "fast-glob": "^3.3.3", + "minimatch": "^10.0.1", + "path-browserify": "^1.0.1" + } + }, + "node_modules/@ts-morph/common/node_modules/balanced-match": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", + "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==", + "license": "MIT", + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/@ts-morph/common/node_modules/brace-expansion": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz", + "integrity": "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==", + "license": "MIT", + "dependencies": { + "balanced-match": "^4.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/@ts-morph/common/node_modules/minimatch": { + "version": "10.2.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.5.tgz", + "integrity": "sha512-MULkVLfKGYDFYejP07QOurDLLQpcjk7Fw+7jXS2R2czRQzR56yHRveU5NDJEOviH+hETZKSkIk5c+T23GjFUMg==", + "license": "BlueOak-1.0.0", + "dependencies": { + "brace-expansion": "^5.0.5" + }, + "engines": { + "node": "18 || 20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@tybys/wasm-util": { + "version": "0.10.1", + "resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.1.tgz", + "integrity": "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==", + "license": "MIT", + "optional": true, + 
"dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "24.12.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.12.2.tgz", + "integrity": "sha512-A1sre26ke7HDIuY/M23nd9gfB+nrmhtYyMINbjI1zHJxYteKR6qSMX56FsmjMcDb3SMcjJg5BiRRgOCC/yBD0g==", + "license": "MIT", + "dependencies": { + "undici-types": "~7.16.0" + } + }, + "node_modules/@types/react": { + "version": "19.2.14", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz", + "integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "19.2.3", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz", + "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@types/react": "^19.2.0" + } + }, + "node_modules/@types/statuses": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@types/statuses/-/statuses-2.0.6.tgz", + "integrity": "sha512-xMAgYwceFhRA2zY+XbEA7mxYbA093wdiW8Vu6gZPGWy9cmOyU9XesH1tNcEWsKFd5Vzrqx5T3D38PWx1FIIXkA==", + "license": "MIT" + }, + "node_modules/@types/validate-npm-package-name": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/validate-npm-package-name/-/validate-npm-package-name-4.0.2.tgz", + "integrity": "sha512-lrpDziQipxCEeK5kWxvljWYhUvOiB2A9izZd9B2AFarYAkqZshb4lPbRs7zKEic6eGtH8V/2qJW+dPp9OtF6bw==", + "license": "MIT" + }, + "node_modules/@types/ws": { + "version": "8.18.1", + "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", + "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==", + "license": "MIT", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "8.58.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.58.1.tgz", + "integrity": "sha512-eSkwoemjo76bdXl2MYqtxg51HNwUSkWfODUOQ3PaTLZGh9uIWWFZIjyjaJnex7wXDu+TRx+ATsnSxdN9YWfRTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.12.2", + "@typescript-eslint/scope-manager": "8.58.1", + "@typescript-eslint/type-utils": "8.58.1", + "@typescript-eslint/utils": "8.58.1", + "@typescript-eslint/visitor-keys": "8.58.1", + "ignore": "^7.0.5", + "natural-compare": "^1.4.0", + "ts-api-utils": "^2.5.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^8.58.1", + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.1.0" + } + }, + 
"node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "8.58.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.58.1.tgz", + "integrity": "sha512-gGkiNMPqerb2cJSVcruigx9eHBlLG14fSdPdqMoOcBfh+vvn4iCq2C8MzUB89PrxOXk0y3GZ1yIWb9aOzL93bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/scope-manager": "8.58.1", + "@typescript-eslint/types": "8.58.1", + "@typescript-eslint/typescript-estree": "8.58.1", + "@typescript-eslint/visitor-keys": "8.58.1", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.1.0" + } + }, + "node_modules/@typescript-eslint/project-service": { + "version": "8.58.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.58.1.tgz", + "integrity": "sha512-gfQ8fk6cxhtptek+/8ZIqw8YrRW5048Gug8Ts5IYcMLCw18iUgrZAEY/D7s4hkI0FxEfGakKuPK/XUMPzPxi5g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/tsconfig-utils": "^8.58.1", + "@typescript-eslint/types": "^8.58.1", + "debug": "^4.4.3" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.1.0" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "8.58.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.58.1.tgz", + "integrity": "sha512-TPYUEqJK6avLcEjumWsIuTpuYODTTDAtoMdt8ZZa93uWMTX13Nb8L5leSje1NluammvU+oI3QRr5lLXPgihX3w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.58.1", + "@typescript-eslint/visitor-keys": "8.58.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/tsconfig-utils": { + "version": "8.58.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.58.1.tgz", + "integrity": "sha512-JAr2hOIct2Q+qk3G+8YFfqkqi7sC86uNryT+2i5HzMa2MPjw4qNFvtjnw1IiA1rP7QhNKVe21mSSLaSjwA1Olw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.1.0" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "8.58.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.58.1.tgz", + "integrity": "sha512-HUFxvTJVroT+0rXVJC7eD5zol6ID+Sn5npVPWoFuHGg9Ncq5Q4EYstqR+UOqaNRFXi5TYkpXXkLhoCHe3G0+7w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.58.1", + "@typescript-eslint/typescript-estree": "8.58.1", + "@typescript-eslint/utils": "8.58.1", + "debug": "^4.4.3", + 
"ts-api-utils": "^2.5.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.1.0" + } + }, + "node_modules/@typescript-eslint/types": { + "version": "8.58.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.58.1.tgz", + "integrity": "sha512-io/dV5Aw5ezwzfPBBWLoT+5QfVtP8O7q4Kftjn5azJ88bYyp/ZMCsyW1lpKK46EXJcaYMZ1JtYj+s/7TdzmQMw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "8.58.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.58.1.tgz", + "integrity": "sha512-w4w7WR7GHOjqqPnvAYbazq+Y5oS68b9CzasGtnd6jIeOIeKUzYzupGTB2T4LTPSv4d+WPeccbxuneTFHYgAAWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/project-service": "8.58.1", + "@typescript-eslint/tsconfig-utils": "8.58.1", + "@typescript-eslint/types": "8.58.1", + "@typescript-eslint/visitor-keys": "8.58.1", + "debug": "^4.4.3", + "minimatch": "^10.2.2", + "semver": "^7.7.3", + "tinyglobby": "^0.2.15", + "ts-api-utils": "^2.5.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "typescript": ">=4.8.4 <6.1.0" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/balanced-match": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", + "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { + "version": "5.0.5", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.5.tgz", + "integrity": "sha512-VZznLgtwhn+Mact9tfiwx64fA9erHH/MCXEUfB/0bX/6Fz6ny5EGTXYltMocqg4xFAQZtnO3DHWWXi8RiuN7cQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^4.0.2" + }, + "engines": { + "node": "18 || 20 || >=22" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { + "version": "10.2.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.5.tgz", + "integrity": "sha512-MULkVLfKGYDFYejP07QOurDLLQpcjk7Fw+7jXS2R2czRQzR56yHRveU5NDJEOviH+hETZKSkIk5c+T23GjFUMg==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "brace-expansion": "^5.0.5" + }, + "engines": { + "node": "18 || 20 || >=22" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "8.58.1", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.58.1.tgz", + "integrity": "sha512-Ln8R0tmWC7pTtLOzgJzYTXSCjJ9rDNHAqTaVONF4FEi2qwce8mD9iSOxOpLFFvWp/wBFlew0mjM1L1ihYWfBdQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.9.1", + "@typescript-eslint/scope-manager": "8.58.1", + "@typescript-eslint/types": "8.58.1", + "@typescript-eslint/typescript-estree": "8.58.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.1.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "8.58.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.58.1.tgz", + "integrity": "sha512-y+vH7QE8ycjoa0bWciFg7OpFcipUuem1ujhrdLtq1gByKwfbC7bPeKsiny9e0urg93DqwGcHey+bGRKCnF1nZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "8.58.1", + "eslint-visitor-keys": "^5.0.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-5.0.1.tgz", + "integrity": "sha512-tD40eHxA35h0PEIZNeIjkHoDR4YjjJp34biM0mDvplBe//mB+IHCqHDGV7pxF+7MklTvighcCPPZC7ynWyjdTA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@vitejs/plugin-react": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-6.0.1.tgz", + "integrity": "sha512-l9X/E3cDb+xY3SWzlG1MOGt2usfEHGMNIaegaUGFsLkb3RCn/k8/TOXBcab+OndDI4TBtktT8/9BwwW8Vi9KUQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@rolldown/pluginutils": "1.0.0-rc.7" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "peerDependencies": { + "@rolldown/plugin-babel": "^0.1.7 || ^0.2.0", + "babel-plugin-react-compiler": "^1.0.0", + "vite": "^8.0.0" + }, + "peerDependenciesMeta": { + "@rolldown/plugin-babel": { + "optional": true + }, + "babel-plugin-react-compiler": { + "optional": true + } + } + }, + "node_modules/accepts": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz", + "integrity": "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==", + "license": "MIT", + "dependencies": { + "mime-types": "^3.0.0", + "negotiator": "^1.0.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/acorn": { + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz", + "integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || 
^7.0.0 || ^8.0.0" + } + }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/ajv": { + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz", + "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-3.0.1.tgz", + "integrity": "sha512-8iUql50EUR+uUcdRQ3HDqa6EVyo3docL8g5WJ3FNcWmu62IbkGUue/pEyLBW8VGKKucTPgqeks4fIU1DA4yowQ==", + "license": "MIT", + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-formats/node_modules/ajv": { + "version": "8.18.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.18.0.tgz", + "integrity": "sha512-PlXPeEWMXMZ7sPYOHqmDyCJzcfNrUr3fGNKtezX14ykXOEIvyK81d+qydx89KY5O71FKMPaQ2vBfBFI5NHR63A==", + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats/node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "license": "MIT" + }, + "node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "license": "Python-2.0" + }, + "node_modules/ast-types": { + "version": "0.16.1", + "resolved": "https://registry.npmjs.org/ast-types/-/ast-types-0.16.1.tgz", + "integrity": "sha512-6t10qk83GOG8p0vKmaCr8eiilZwO171AvbROMtvvNiwrTly62t+7XkA8RdIIVbpMhCASAsxgAzdRSwh6nw/5Dg==", + "license": "MIT", + "dependencies": { + "tslib": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, 
+ "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.10.17", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.17.tgz", + "integrity": "sha512-HdrkN8eVG2CXxeifv/VdJ4A4RSra1DTW8dc/hdxzhGHN8QePs6gKaWM9pHPcpCoxYZJuOZ8drHmbdpLHjCYjLA==", + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/body-parser": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.2.tgz", + "integrity": "sha512-oP5VkATKlNwcgvxi0vM0p/D3n2C3EReYVX+DNYs5TjZFn/oQt2j+4sVJtSMr18pdRr8wjTcBl6LoV+FUwzPmNA==", + "license": "MIT", + "dependencies": { + "bytes": "^3.1.2", + "content-type": "^1.0.5", + "debug": "^4.4.3", + "http-errors": "^2.0.0", + "iconv-lite": "^0.7.0", + "on-finished": "^2.4.1", + "qs": "^6.14.1", + "raw-body": "^3.0.1", + "type-is": "^2.0.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.13.tgz", + "integrity": "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.2.tgz", + "integrity": "sha512-48xSriZYYg+8qXna9kwqjIVzuQxi+KYWp2+5nCYnYKPTr0LvD89Jqk2Or5ogxz0NUMfIjhh2lIUX/LyX9B4oIg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.10.12", + "caniuse-lite": "^1.0.30001782", + "electron-to-chromium": "^1.5.328", + "node-releases": "^2.0.36", + "update-browserslist-db": "^1.2.3" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/bundle-name": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-4.1.0.tgz", + "integrity": "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==", + "license": "MIT", + "dependencies": { + "run-applescript": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/bytes": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", + "integrity": 
"sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001787", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001787.tgz", + "integrity": "sha512-mNcrMN9KeI68u7muanUpEejSLghOKlVhRqS/Za2IeyGllJ9I9otGpR9g3nsw7n4W378TE/LyIteA0+/FOZm4Kg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/class-variance-authority": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz", + "integrity": "sha512-Ka+9Trutv7G8M6WT6SeiRWz792K5qEqIGEGzXKhAE6xOWAY6pPH8U+9IY3oCMv6kqTmLsv7Xh/2w2RigkePMsg==", + "license": "Apache-2.0", + "dependencies": { + "clsx": "^2.1.1" + }, + "funding": { + "url": "https://polar.sh/cva" + } + }, + "node_modules/cli-cursor": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz", + "integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==", + "license": "MIT", + "dependencies": { + "restore-cursor": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + 
"url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-width": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz", + "integrity": "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==", + "license": "ISC", + "engines": { + "node": ">= 12" + } + }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/cliui/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/cliui/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/code-block-writer": { + "version": "13.0.3", + "resolved": "https://registry.npmjs.org/code-block-writer/-/code-block-writer-13.0.3.tgz", + "integrity": "sha512-Oofo0pq3IKnsFtuHqSF7TqBfr71aeyZDVJ0HpmqB7FBM2qEigL0iPONSCZSO9pE9dZTAxANe5XHG9Uy0YMv8cg==", + "license": "MIT" + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", + 
"dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT" + }, + "node_modules/commander": { + "version": "14.0.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-14.0.3.tgz", + "integrity": "sha512-H+y0Jo/T1RZ9qPP4Eh1pkcQcLRglraJaSLoyOtHxu6AapkjWVCy2Sit1QQ4x3Dng8qDlSsZEet7g5Pq06MvTgw==", + "license": "MIT", + "engines": { + "node": ">=20" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/content-disposition": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.1.0.tgz", + "integrity": "sha512-5jRCH9Z/+DRP7rkvY83B+yGIGX96OYdJmzngqnw2SBSxqCFPd0w2km3s5iawpGX8krnwSGmF0FW5Nhr0Hfai3g==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/content-type": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "license": "MIT" + }, + "node_modules/cookie": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.2.2.tgz", + "integrity": "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==", + "license": "MIT", + "engines": { + "node": ">=6.6.0" + } + }, + "node_modules/cors": { + "version": "2.8.6", + "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.6.tgz", + "integrity": "sha512-tJtZBBHA6vjIAaF6EnIaq6laBBP9aq/Y3ouVJjEfoHbRBcHBAHYcMh/w8LDrk2PvIMMq8gmopa5D4V8RmbrxGw==", + "license": "MIT", + "dependencies": { + "object-assign": "^4", + "vary": "^1" + }, + "engines": { + "node": ">= 0.10" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/cosmiconfig": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-9.0.1.tgz", + "integrity": "sha512-hr4ihw+DBqcvrsEDioRO31Z17x71pUYoNe/4h6Z0wB72p7MU7/9gH8Q3s12NFhHPfYBBOV3qyfUxmr/Yn3shnQ==", + "license": "MIT", + "dependencies": { + "env-paths": "^2.2.1", + "import-fresh": "^3.3.0", + "js-yaml": "^4.1.0", + "parse-json": "^5.2.0" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/d-fischer" + 
}, + "peerDependencies": { + "typescript": ">=4.9.5" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/data-uri-to-buffer": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", + "integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/dedent": { + "version": "1.7.2", + "resolved": "https://registry.npmjs.org/dedent/-/dedent-1.7.2.tgz", + "integrity": "sha512-WzMx3mW98SN+zn3hgemf4OzdmyNhhhKz5Ay0pUfQiMQ3e1g+xmTJWp/pKdwKVXhdSkAEGIIzqeuWrL3mV/AXbA==", + "license": "MIT", + "peerDependencies": { + "babel-plugin-macros": "^3.1.0" + }, + "peerDependenciesMeta": { + "babel-plugin-macros": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/default-browser": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-5.5.0.tgz", + "integrity": "sha512-H9LMLr5zwIbSxrmvikGuI/5KGhZ8E2zH3stkMgM5LpOWDutGM2JZaj460Udnf1a+946zc7YBgrqEWwbk7zHvGw==", + "license": "MIT", + "dependencies": { + "bundle-name": "^4.1.0", + "default-browser-id": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/default-browser-id": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-5.0.1.tgz", + "integrity": 
"sha512-x1VCxdX4t+8wVfd1so/9w+vQ4vx7lKd2Qp5tDRutErwmR85OgmfX7RlLRMWafRMY7hbEiXIbudNrjOAPa/hL8Q==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/define-lazy-prop": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz", + "integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", + "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/detect-libc": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", + "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + "license": "Apache-2.0", + "engines": { + "node": ">=8" + } + }, + "node_modules/diff": { + "version": "8.0.4", + "resolved": "https://registry.npmjs.org/diff/-/diff-8.0.4.tgz", + "integrity": "sha512-DPi0FmjiSU5EvQV0++GFDOJ9ASQUVFh5kD+OzOnYdi7n3Wpm9hWWGfB/O2blfHcMVTL5WkQXSnRiK9makhrcnw==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/dotenv": { + "version": "17.4.1", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-17.4.1.tgz", + "integrity": "sha512-k8DaKGP6r1G30Lx8V4+pCsLzKr8vLmV2paqEj1Y55GdAgJuIqpRp5FfajGF8KtwMxCz9qJc6wUIJnm053d/WCw==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://dotenvx.com" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/eciesjs": { + "version": "0.4.18", + "resolved": "https://registry.npmjs.org/eciesjs/-/eciesjs-0.4.18.tgz", + "integrity": "sha512-wG99Zcfcys9fZux7Cft8BAX/YrOJLJSZ3jyYPfhZHqN2E+Ffx+QXBDsv3gubEgPtV6dTzJMSQUwk1H98/t/0wQ==", + "license": "MIT", + "dependencies": { + "@ecies/ciphers": "^0.2.5", + "@noble/ciphers": "^1.3.0", + "@noble/curves": "^1.9.7", + "@noble/hashes": "^1.8.0" + }, + "engines": { + "bun": ">=1", + "deno": ">=2", + "node": ">=16" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", + "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", + "license": "MIT" + }, + "node_modules/electron-to-chromium": { + "version": "1.5.334", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.334.tgz", + "integrity": "sha512-mgjZAz7Jyx1SRCwEpy9wefDS7GvNPazLthHg8eQMJ76wBdGQQDW33TCrUTvQ4wzpmOrv2zrFoD3oNufMdyMpog==", + "license": "ISC" + }, + "node_modules/emoji-regex": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", + "integrity": 
"sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==", + "license": "MIT" + }, + "node_modules/encodeurl": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", + "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.20.1", + "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.20.1.tgz", + "integrity": "sha512-Qohcme7V1inbAfvjItgw0EaxVX5q2rdVEZHRBrEQdRZTssLDGsL8Lwrznl8oQ/6kuTJONLaDcGjkNP247XEhcA==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.3.0" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/env-paths": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz", + "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", + "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", + "license": "MIT" + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "9.39.4", + "resolved": 
"https://registry.npmjs.org/eslint/-/eslint-9.39.4.tgz", + "integrity": "sha512-XoMjdBOwe/esVgEvLmNsD3IRHkm7fbKIUGvrleloJXUZgDHig2IPWNniv+GwjyJXzuNqVjlr5+4yVUZjycJwfQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.8.0", + "@eslint-community/regexpp": "^4.12.1", + "@eslint/config-array": "^0.21.2", + "@eslint/config-helpers": "^0.4.2", + "@eslint/core": "^0.17.0", + "@eslint/eslintrc": "^3.3.5", + "@eslint/js": "9.39.4", + "@eslint/plugin-kit": "^0.4.1", + "@humanfs/node": "^0.16.6", + "@humanwhocodes/module-importer": "^1.0.1", + "@humanwhocodes/retry": "^0.4.2", + "@types/estree": "^1.0.6", + "ajv": "^6.14.0", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.6", + "debug": "^4.3.2", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^8.4.0", + "eslint-visitor-keys": "^4.2.1", + "espree": "^10.4.0", + "esquery": "^1.5.0", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^8.0.0", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.5", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://eslint.org/donate" + }, + "peerDependencies": { + "jiti": "*" + }, + "peerDependenciesMeta": { + "jiti": { + "optional": true + } + } + }, + "node_modules/eslint-plugin-react-hooks": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-7.0.1.tgz", + "integrity": "sha512-O0d0m04evaNzEPoSW+59Mezf8Qt0InfgGIBJnpC0h3NH/WjUAR7BIKUfysC6todmtiZ/A0oUVS8Gce0WhBrHsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/core": "^7.24.4", + "@babel/parser": "^7.24.4", + "hermes-parser": "^0.25.1", + "zod": "^3.25.0 || ^4.0.0", + "zod-validation-error": "^3.5.0 || ^4.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" + } + }, + "node_modules/eslint-plugin-react-refresh": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.5.2.tgz", + "integrity": "sha512-hmgTH57GfzoTFjVN0yBwTggnsVUF2tcqi7RJZHqi9lIezSs4eFyAMktA68YD4r5kNw1mxyY4dmkyoFDb3FIqrA==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "eslint": "^9 || ^10" + } + }, + "node_modules/eslint-scope": { + "version": "8.4.0", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", + "integrity": "sha512-sNXOfKCn74rt8RICKMvJS7XKV/Xk9kA7DyJr8mJik3S7Cwgy3qlkkmyS2uQB3jiJg6VNdZd/pDBJu0nvG2NlTg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + 
"node_modules/espree": { + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", + "integrity": "sha512-j6PAQ2uUr79PZhBjP5C5fhl8e39FmRnOjsD5lGnWrFU8i2G776tBK7+nP8KuQUTTyAZUwfQqXAgrVH5MbH9CYQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.15.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^4.2.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/etag": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", + "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/eventsource": { + "version": "3.0.7", + "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-3.0.7.tgz", + "integrity": "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==", + "license": "MIT", + "dependencies": { + "eventsource-parser": "^3.0.1" + }, + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/eventsource-parser": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-3.0.6.tgz", + "integrity": "sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==", + "license": "MIT", + "engines": { + "node": ">=18.0.0" + } + }, + "node_modules/execa": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/execa/-/execa-9.6.1.tgz", + "integrity": "sha512-9Be3ZoN4LmYR90tUoVu2te2BsbzHfhJyfEiAVfz7N5/zv+jduIfLrV2xdQXOHbaD6KgpGdO9PRPM1Y4Q9QkPkA==", + "license": 
"MIT", + "dependencies": { + "@sindresorhus/merge-streams": "^4.0.0", + "cross-spawn": "^7.0.6", + "figures": "^6.1.0", + "get-stream": "^9.0.0", + "human-signals": "^8.0.1", + "is-plain-obj": "^4.1.0", + "is-stream": "^4.0.1", + "npm-run-path": "^6.0.0", + "pretty-ms": "^9.2.0", + "signal-exit": "^4.1.0", + "strip-final-newline": "^4.0.0", + "yoctocolors": "^2.1.1" + }, + "engines": { + "node": "^18.19.0 || >=20.5.0" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/express": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz", + "integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==", + "license": "MIT", + "dependencies": { + "accepts": "^2.0.0", + "body-parser": "^2.2.1", + "content-disposition": "^1.0.0", + "content-type": "^1.0.5", + "cookie": "^0.7.1", + "cookie-signature": "^1.2.1", + "debug": "^4.4.0", + "depd": "^2.0.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "finalhandler": "^2.1.0", + "fresh": "^2.0.0", + "http-errors": "^2.0.0", + "merge-descriptors": "^2.0.0", + "mime-types": "^3.0.0", + "on-finished": "^2.4.1", + "once": "^1.4.0", + "parseurl": "^1.3.3", + "proxy-addr": "^2.0.7", + "qs": "^6.14.0", + "range-parser": "^1.2.1", + "router": "^2.2.0", + "send": "^1.1.0", + "serve-static": "^2.2.0", + "statuses": "^2.0.1", + "type-is": "^2.0.1", + "vary": "^1.1.2" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/express-rate-limit": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-8.3.2.tgz", + "integrity": "sha512-77VmFeJkO0/rvimEDuUC5H30oqUC4EyOhyGccfqoLebB0oiEYfM7nwPrsDsBL1gsTpwfzX8SFy2MT3TDyRq+bg==", + "license": "MIT", + "dependencies": { + "ip-address": "10.1.0" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/express-rate-limit" + }, + "peerDependencies": { + "express": ">= 4.11" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": 
"sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-uri": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/fast-uri/-/fast-uri-3.1.0.tgz", + "integrity": "sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/fetch-blob": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz", + "integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "paypal", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "dependencies": { + "node-domexception": "^1.0.0", + "web-streams-polyfill": "^3.0.3" + }, + "engines": { + "node": "^12.20 || >= 14.13" + } + }, + "node_modules/figures": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-6.1.0.tgz", + "integrity": "sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg==", + "license": "MIT", + "dependencies": { + "is-unicode-supported": "^2.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/file-entry-cache": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-8.0.0.tgz", + "integrity": "sha512-XXTUwCvisa5oacNGRP9SfNtYBNAMi+RPwBFmblZEF7N7swHYQS6/Zfk7SRwx4D5j3CH211YNRco1DEMNVfZCnQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^4.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/finalhandler": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.1.tgz", + "integrity": 
"sha512-S8KoZgRZN+a5rNwqTxlZZePjT/4cnm0ROV70LedRHZ0p8u9fRID0hJUZQpkKLzro8LfmC8sx23bY6tVNxv8pQA==", + "license": "MIT", + "dependencies": { + "debug": "^4.4.0", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "on-finished": "^2.4.1", + "parseurl": "^1.3.3", + "statuses": "^2.0.1" + }, + "engines": { + "node": ">= 18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-4.0.1.tgz", + "integrity": "sha512-f7ccFPK3SXFHpx15UIGyRJ/FJQctuKZ0zVuN3frBo4HnK3cay9VEW0R6yPYFHC0AgqhukPzKjq22t5DmAyqGyw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.4" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/flatted": { + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.2.tgz", + "integrity": "sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==", + "dev": true, + "license": "ISC" + }, + "node_modules/formdata-polyfill": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", + "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", + "license": "MIT", + "dependencies": { + "fetch-blob": "^3.1.2" + }, + "engines": { + "node": ">=12.20.0" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", + "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/fresh/-/fresh-2.0.0.tgz", + "integrity": "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/fs-extra": { + "version": "11.3.4", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.3.4.tgz", + "integrity": "sha512-CTXd6rk/M3/ULNQj8FBqBWHYBVYybQ3VPBw0xGKFe3tuH7ytT6ACnvzpIQ3UZtB8yvUKC2cXn1a+x+5EVQLovA==", + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=14.14" + } + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": 
"sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/fuzzysort": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/fuzzysort/-/fuzzysort-3.1.0.tgz", + "integrity": "sha512-sR9BNCjBg6LNgwvxlBd0sBABvQitkLzoVY9MYYROQVX/FvfJ4Mai9LsGhDgd8qYdds0bY77VzYd5iuB+v5rwQQ==", + "license": "MIT" + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-east-asian-width": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.5.0.tgz", + "integrity": "sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-own-enumerable-keys": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/get-own-enumerable-keys/-/get-own-enumerable-keys-1.0.0.tgz", + "integrity": "sha512-PKsK2FSrQCyxcGHsGrLDcK0lx+0Ke+6e8KFFozA9/fIQLhQzPaRvJFdcz7+Axg3jUH/Mq+NI4xa5u/UT2tQskA==", + "license": "MIT", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-9.0.1.tgz", + "integrity": "sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA==", + "license": "MIT", + "dependencies": { + "@sec-ant/readable-stream": "^0.4.1", + "is-stream": "^4.0.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": 
"https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/globals": { + "version": "17.4.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-17.4.0.tgz", + "integrity": "sha512-hjrNztw/VajQwOLsMNT1cbJiH2muO3OROCHnbehc8eY5JyD2gqz4AcMHPqgaOR59DjgUjYAYLeH699g/eWi2jw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "license": "ISC" + }, + "node_modules/graphql": { + "version": "16.13.2", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.13.2.tgz", + "integrity": "sha512-5bJ+nf/UCpAjHM8i06fl7eLyVC9iuNAjm9qzkiu2ZGhM0VscSvS6WDPfAwkdkBuoXGM9FJSbKl6wylMwP9Ktig==", + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/headers-polyfill": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/headers-polyfill/-/headers-polyfill-4.0.3.tgz", + "integrity": "sha512-IScLbePpkvO846sIwOtOTDjutRMWdXdJmXdMvk6gCBHxFO8d+QKOQedyZSxFTTFYRSmlgSTDtXqqq4pcenBXLQ==", + "license": "MIT" + }, + "node_modules/hermes-estree": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/hermes-estree/-/hermes-estree-0.25.1.tgz", + "integrity": "sha512-0wUoCcLp+5Ev5pDW2OriHC2MJCbwLwuRx+gAqMTOkGKJJiBCLjtrvy4PWUGn6MIVefecRpzoOZ/UV6iGdOr+Cw==", + "dev": true, + "license": "MIT" + }, + "node_modules/hermes-parser": { + "version": "0.25.1", + "resolved": "https://registry.npmjs.org/hermes-parser/-/hermes-parser-0.25.1.tgz", + "integrity": "sha512-6pEjquH3rqaI6cYAXYPcz9MS4rY6R4ngRgrgfDshRptUZIc3lw0MCIJIGDj9++mfySOuPTHB4nrSW99BCvOPIA==", + "dev": 
true, + "license": "MIT", + "dependencies": { + "hermes-estree": "0.25.1" + } + }, + "node_modules/hono": { + "version": "4.12.12", + "resolved": "https://registry.npmjs.org/hono/-/hono-4.12.12.tgz", + "integrity": "sha512-p1JfQMKaceuCbpJKAPKVqyqviZdS0eUxH9v82oWo1kb9xjQ5wA6iP3FNVAPDFlz5/p7d45lO+BpSk1tuSZMF4Q==", + "license": "MIT", + "engines": { + "node": ">=16.9.0" + } + }, + "node_modules/http-errors": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", + "license": "MIT", + "dependencies": { + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" + }, + "engines": { + "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/human-signals": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-8.0.1.tgz", + "integrity": "sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ==", + "license": "Apache-2.0", + "engines": { + "node": ">=18.18.0" + } + }, + "node_modules/iceberg-js": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/iceberg-js/-/iceberg-js-0.8.1.tgz", + "integrity": "sha512-1dhVQZXhcHje7798IVM+xoo/1ZdVfzOMIc8/rgVSijRK38EDqOJoGula9N/8ZI5RD8QTxNQtK/Gozpr+qUqRRA==", + "license": "MIT", + "engines": { + "node": ">=20.0.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.7.2.tgz", + "integrity": "sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw==", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inherits": { + 
"version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/ip-address": { + "version": "10.1.0", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-10.1.0.tgz", + "integrity": "sha512-XXADHxXmvT9+CRxhXg56LJovE+bmWnEWB78LB83VZTprKTmaC5QfruXocxzTZ2Kl0DNwKuBdlIhjL8LeY8Sf8Q==", + "license": "MIT", + "engines": { + "node": ">= 12" + } + }, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "license": "MIT" + }, + "node_modules/is-docker": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz", + "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==", + "license": "MIT", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-in-ssh": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-in-ssh/-/is-in-ssh-1.0.0.tgz", + "integrity": "sha512-jYa6Q9rH90kR1vKB6NM7qqd1mge3Fx4Dhw5TVlK1MUBqhEOuCagrEHMevNuCcbECmXZ0ThXkRm+Ymr51HwEPAw==", + "license": "MIT", + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-inside-container": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz", + "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==", + "license": "MIT", + "dependencies": { + "is-docker": "^3.0.0" + }, + "bin": { + "is-inside-container": "cli.js" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-interactive": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/is-interactive/-/is-interactive-2.0.0.tgz", + "integrity": "sha512-qP1vozQRI+BMOPcjFzrjXuQvdak2pHNUMZoeG2eRbiSqyvbEf/wQtEOTOX1guk6E3t36RkaqiSt8A/6YElNxLQ==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-node-process": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-node-process/-/is-node-process-1.2.0.tgz", + "integrity": "sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw==", + "license": "MIT" + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-obj": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-3.0.0.tgz", + "integrity": "sha512-IlsXEHOjtKhpN8r/tRFj2nDyTmHvcfNeu/nrRIcXE17ROeatXchkojffa1SpdqW4cr/Fj6QkEf/Gn4zf6KKvEQ==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-promise": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz", + "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==", + "license": "MIT" + }, + "node_modules/is-regexp": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-3.1.0.tgz", + "integrity": "sha512-rbku49cWloU5bSMI+zaRaXdQHXnthP6DZ/vLnfdSKyL4zUzuWnomtOEiZZOd+ioQ+avFo/qau3KPTc7Fjy1uPA==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-stream": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-4.0.1.tgz", + "integrity": "sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-unicode-supported": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-2.1.0.tgz", + "integrity": "sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-wsl": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.1.tgz", + "integrity": "sha512-e6rvdUCiQCAuumZslxRJWR/Doq4VpPR82kqclvcS0efgt430SlGIk05vdCN58+VrzgtIcfNODjozVielycD4Sw==", + "license": "MIT", + "dependencies": { + "is-inside-container": "^1.0.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/isexe": { + "version": 
"2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "license": "ISC" + }, + "node_modules/jiti": { + "version": "2.6.1", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz", + "integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==", + "license": "MIT", + "bin": { + "jiti": "lib/jiti-cli.mjs" + } + }, + "node_modules/jose": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/jose/-/jose-6.2.2.tgz", + "integrity": "sha512-d7kPDd34KO/YnzaDOlikGpOurfF0ByC2sEV4cANCtdqLlTfBlw2p14O/5d/zv40gJPbIQxfES3nSx1/oYNyuZQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/panva" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-typed": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/json-schema-typed/-/json-schema-typed-8.0.2.tgz", + "integrity": "sha512-fQhoXdcvc3V28x7C7BMs4P5+kNlgUURe2jmUT1T//oBRMDrqy1QPelJimwZGo7Hg9VPV3EQV5Bnq4hbFy2vetA==", + "license": "BSD-2-Clause" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": 
"sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonfile": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", + "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", + "license": "MIT", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/kleur": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", + "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lightningcss": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.32.0.tgz", + "integrity": "sha512-NXYBzinNrblfraPGyrbPoD19C1h9lfI/1mzgWYvXUTe414Gz/X1FD2XBZSZM7rRTrMA8JL3OtAaGifrIKhQ5yQ==", + "license": "MPL-2.0", + "dependencies": { + "detect-libc": "^2.0.3" + }, + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + }, + "optionalDependencies": { + "lightningcss-android-arm64": "1.32.0", + "lightningcss-darwin-arm64": "1.32.0", + "lightningcss-darwin-x64": "1.32.0", + "lightningcss-freebsd-x64": "1.32.0", + "lightningcss-linux-arm-gnueabihf": "1.32.0", + "lightningcss-linux-arm64-gnu": "1.32.0", + "lightningcss-linux-arm64-musl": "1.32.0", + "lightningcss-linux-x64-gnu": "1.32.0", + "lightningcss-linux-x64-musl": "1.32.0", + "lightningcss-win32-arm64-msvc": "1.32.0", + "lightningcss-win32-x64-msvc": "1.32.0" + } + }, + "node_modules/lightningcss-android-arm64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.32.0.tgz", + "integrity": "sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-arm64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.32.0.tgz", + "integrity": "sha512-RzeG9Ju5bag2Bv1/lwlVJvBE3q6TtXskdZLLCyfg5pt+HLz9BqlICO7LZM7VHNTTn/5PRhHFBSjk5lc4cmscPQ==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": 
"https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-darwin-x64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.32.0.tgz", + "integrity": "sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-freebsd-x64": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.32.0.tgz", + "integrity": "sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm-gnueabihf": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.32.0.tgz", + "integrity": "sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw==", + "cpu": [ + "arm" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-gnu": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.32.0.tgz", + "integrity": "sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ==", + "cpu": [ + "arm64" + ], + "libc": [ + "glibc" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-arm64-musl": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.32.0.tgz", + "integrity": "sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg==", + "cpu": [ + "arm64" + ], + "libc": [ + "musl" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-gnu": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.32.0.tgz", + "integrity": "sha512-V7Qr52IhZmdKPVr+Vtw8o+WLsQJYCTd8loIfpDaMRWGUZfBOYEJeyJIkqGIDMZPwPx24pUMfwSxxI8phr/MbOA==", + "cpu": [ + "x64" + ], + "libc": [ + "glibc" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-linux-x64-musl": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.32.0.tgz", + "integrity": 
"sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg==", + "cpu": [ + "x64" + ], + "libc": [ + "musl" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-arm64-msvc": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.32.0.tgz", + "integrity": "sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw==", + "cpu": [ + "arm64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lightningcss-win32-x64-msvc": { + "version": "1.32.0", + "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.32.0.tgz", + "integrity": "sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q==", + "cpu": [ + "x64" + ], + "license": "MPL-2.0", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">= 12.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/parcel" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/log-symbols": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-6.0.0.tgz", + "integrity": "sha512-i24m8rpwhmPIS4zscNzK6MSEhk0DUWa/8iYQWxhffV8jkI4Phvs3F+quL5xvS0gdQR0FyTCMMH33Y78dDTzzIw==", + "license": "MIT", + "dependencies": { + "chalk": "^5.3.0", + "is-unicode-supported": "^1.3.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/log-symbols/node_modules/chalk": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz", + "integrity": "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==", + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/log-symbols/node_modules/is-unicode-supported": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-1.3.0.tgz", + "integrity": 
"sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/lucide-react": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-1.8.0.tgz", + "integrity": "sha512-WuvlsjngSk7TnTBJ1hsCy3ql9V9VOdcPkd3PKcSmM34vJD8KG6molxz7m7zbYFgICwsanQWmJ13JlYs4Zp7Arw==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/media-typer": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-1.1.0.tgz", + "integrity": "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/merge-descriptors": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-2.0.0.tgz", + "integrity": "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "license": "MIT" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/micromatch/node_modules/picomatch": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz", + "integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==", + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + 
"funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/mime-db": { + "version": "1.54.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", + "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.2.tgz", + "integrity": "sha512-Lbgzdk0h4juoQ9fCKXW4by0UJqj+nOOrI9MJ1sSj4nI8aI2eo1qmvQEie4VD1glsS250n15LsWsYtCugiStS5A==", + "license": "MIT", + "dependencies": { + "mime-db": "^1.54.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/mimic-function": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/mimic-function/-/mimic-function-5.0.1.tgz", + "integrity": "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/minimatch": { + "version": "3.1.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz", + "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/msw": { + "version": "2.13.2", + "resolved": "https://registry.npmjs.org/msw/-/msw-2.13.2.tgz", + "integrity": "sha512-go2H1TIERKkC48pXiwec5l6sbNqYuvqOk3/vHGo1Zd+pq/H63oFawDQerH+WQdUw/flJFHDG7F+QdWMwhntA/A==", + "hasInstallScript": true, + "license": "MIT", + "dependencies": { + "@inquirer/confirm": "^5.0.0", + "@mswjs/interceptors": "^0.41.2", + "@open-draft/deferred-promise": "^2.2.0", + "@types/statuses": "^2.0.6", + "cookie": "^1.0.2", + "graphql": "^16.12.0", + "headers-polyfill": "^4.0.2", + "is-node-process": "^1.2.0", + "outvariant": "^1.4.3", + "path-to-regexp": "^6.3.0", + "picocolors": "^1.1.1", + "rettime": "^0.10.1", + "statuses": "^2.0.2", + "strict-event-emitter": "^0.5.1", + "tough-cookie": "^6.0.0", + "type-fest": "^5.2.0", + "until-async": "^3.0.2", + "yargs": "^17.7.2" + }, + "bin": { + "msw": "cli/index.js" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/mswjs" + }, + "peerDependencies": { + "typescript": ">= 4.8.x" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + 
"node_modules/msw/node_modules/cookie": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz", + "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/mute-stream": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-2.0.0.tgz", + "integrity": "sha512-WWdIxpyjEn+FhQJQQv9aQAYlHoNVdzIzUySNV1gHUPDSdZJ3yZn7pAAbQcV7B56Mvu881q9FZV+0Vx2xC44VWA==", + "license": "ISC", + "engines": { + "node": "^18.17.0 || >=20.5.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/negotiator": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", + "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + "deprecated": "Use your platform's native DOMException instead", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "license": "MIT", + "engines": { + "node": ">=10.5.0" + } + }, + "node_modules/node-fetch": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", + "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", + "license": "MIT", + "dependencies": { + "data-uri-to-buffer": "^4.0.0", + "fetch-blob": "^3.1.4", + "formdata-polyfill": "^4.0.10" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/node-fetch" + } + }, + "node_modules/node-releases": { + "version": "2.0.37", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.37.tgz", + "integrity": "sha512-1h5gKZCF+pO/o3Iqt5Jp7wc9rH3eJJ0+nh/CIoiRwjRxde/hAHyLPXYN4V3CqKAbiZPSeJFSWHmJsbkicta0Eg==", + "license": "MIT" + }, + "node_modules/npm-run-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-6.0.0.tgz", + "integrity": "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==", + "license": "MIT", + "dependencies": { + "path-key": "^4.0.0", + "unicorn-magic": "^0.3.0" + }, + 
"engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path/node_modules/path-key": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.4", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", + "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-treeify": { + "version": "1.1.33", + "resolved": "https://registry.npmjs.org/object-treeify/-/object-treeify-1.1.33.tgz", + "integrity": "sha512-EFVjAYfzWqWsBMRHPMAXLCDIJnpMhdWAqR7xG6M6a2cs6PMFpl/+Z20w9zDW4vkxOFfddegBKq9Rehd0bxWE7A==", + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/on-finished": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", + "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz", + "integrity": "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==", + "license": "MIT", + "dependencies": { + "mimic-function": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/open": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/open/-/open-11.0.0.tgz", + "integrity": "sha512-smsWv2LzFjP03xmvFoJ331ss6h+jixfA4UUV/Bsiyuu4YJPfN+FIQGOIiv4w9/+MoHkfkJ22UIaQWRVFRfH6Vw==", + "license": "MIT", + "dependencies": { + "default-browser": "^5.4.0", + "define-lazy-prop": "^3.0.0", + "is-in-ssh": "^1.0.0", + "is-inside-container": "^1.0.0", + "powershell-utils": "^0.1.0", + "wsl-utils": "^0.3.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + 
"prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/ora": { + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/ora/-/ora-8.2.0.tgz", + "integrity": "sha512-weP+BZ8MVNnlCm8c0Qdc1WSWq4Qn7I+9CJGm7Qali6g44e/PUzbjNqJX5NJ9ljlNMosfJvg1fKEGILklK9cwnw==", + "license": "MIT", + "dependencies": { + "chalk": "^5.3.0", + "cli-cursor": "^5.0.0", + "cli-spinners": "^2.9.2", + "is-interactive": "^2.0.0", + "is-unicode-supported": "^2.0.0", + "log-symbols": "^6.0.0", + "stdin-discarder": "^0.2.2", + "string-width": "^7.2.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora/node_modules/chalk": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz", + "integrity": "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==", + "license": "MIT", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/outvariant": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/outvariant/-/outvariant-1.4.3.tgz", + "integrity": "sha512-+Sl2UErvtsoajRDKCE5/dBz4DIvHXQQnAxtQTF04OJxY0+DyZXSo5P5Bb7XYWOh81syohlYL24hbDwxedPUJCA==", + "license": "MIT" + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse-ms": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-ms/-/parse-ms-4.0.0.tgz", + "integrity": "sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", + "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-browserify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", + "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==", + "license": "MIT" + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-to-regexp": { + "version": "6.3.0", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz", + "integrity": "sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ==", + "license": "MIT" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pkce-challenge": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/pkce-challenge/-/pkce-challenge-5.0.1.tgz", + "integrity": "sha512-wQ0b/W4Fr01qtpHlqSqspcj3EhBvimsdh0KlHhH8HRZnMsEa0ea2fTULOXOS9ccQr3om+GcGRk4e+isrZWV8qQ==", + "license": "MIT", + "engines": { + "node": ">=16.20.0" + } + }, + "node_modules/postcss": { + "version": "8.5.9", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.9.tgz", + "integrity": "sha512-7a70Nsot+EMX9fFU3064K/kdHWZqGVY+BADLyXc8Dfv+mTLLVl6JzJpPaCZ2kQL9gIJvKXSLMHhqdRRjwQeFtw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-7.1.1.tgz", + "integrity": "sha512-orRsuYpJVw8LdAwqqLykBj9ecS5/cRHlI5+nvTo8LcCKmzDmqVORXtOIYEEQuL9D4BxtA1lm5isAqzQZCoQ6Eg==", + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": 
"^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/powershell-utils": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/powershell-utils/-/powershell-utils-0.1.0.tgz", + "integrity": "sha512-dM0jVuXJPsDN6DvRpea484tCUaMiXWjuCn++HGTqUWzGDjv5tZkEZldAJ/UMlqRYGFrD/etByo4/xOuC/snX2A==", + "license": "MIT", + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/pretty-ms": { + "version": "9.3.0", + "resolved": "https://registry.npmjs.org/pretty-ms/-/pretty-ms-9.3.0.tgz", + "integrity": "sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ==", + "license": "MIT", + "dependencies": { + "parse-ms": "^4.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "license": "MIT", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prompts/node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", + "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", + "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/qs": { + "version": "6.15.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.15.1.tgz", + "integrity": "sha512-6YHEFRL9mfgcAvql/XhwTvf5jKcOiiupt2FiJxHkiX1z4j7WL8J/jRHYLluORvc1XxB5rV20KoeK00gVJamspg==", + "license": "BSD-3-Clause", + "dependencies": { + "side-channel": "^1.1.0" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/range-parser": { + "version": 
"1.2.1", + "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", + "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.2.tgz", + "integrity": "sha512-K5zQjDllxWkf7Z5xJdV0/B0WTNqx6vxG70zJE4N0kBs4LovmEYWJzQGxC9bS9RAKu3bgM40lrd5zoLJ12MQ5BA==", + "license": "MIT", + "dependencies": { + "bytes": "~3.1.2", + "http-errors": "~2.0.1", + "iconv-lite": "~0.7.0", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/react": { + "version": "19.2.5", + "resolved": "https://registry.npmjs.org/react/-/react-19.2.5.tgz", + "integrity": "sha512-llUJLzz1zTUBrskt2pwZgLq59AemifIftw4aB7JxOqf1HY2FDaGDxgwpAPVzHU1kdWabH7FauP4i1oEeer2WCA==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "19.2.5", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.5.tgz", + "integrity": "sha512-J5bAZz+DXMMwW/wV3xzKke59Af6CHY7G4uYLN1OvBcKEsWOs4pQExj86BBKamxl/Ik5bx9whOrvBlSDfWzgSag==", + "license": "MIT", + "dependencies": { + "scheduler": "^0.27.0" + }, + "peerDependencies": { + "react": "^19.2.5" + } + }, + "node_modules/react-router": { + "version": "7.14.0", + "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.14.0.tgz", + "integrity": "sha512-m/xR9N4LQLmAS0ZhkY2nkPA1N7gQ5TUVa5n8TgANuDTARbn1gt+zLPXEm7W0XDTbrQ2AJSJKhoa6yx1D8BcpxQ==", + "license": "MIT", + "dependencies": { + "cookie": "^1.0.1", + "set-cookie-parser": "^2.6.0" + }, + "engines": { + "node": ">=20.0.0" + }, + "peerDependencies": { + "react": ">=18", + "react-dom": ">=18" + }, + "peerDependenciesMeta": { + "react-dom": { + "optional": true + } + } + }, + "node_modules/react-router-dom": { + "version": "7.14.0", + "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-7.14.0.tgz", + "integrity": "sha512-2G3ajSVSZMEtmTjIklRWlNvo8wICEpLihfD/0YMDxbWK2UyP5EGfnoIn9AIQGnF3G/FX0MRbHXdFcD+rL1ZreQ==", + "license": "MIT", + "dependencies": { + "react-router": "7.14.0" + }, + "engines": { + "node": ">=20.0.0" + }, + "peerDependencies": { + "react": ">=18", + "react-dom": ">=18" + } + }, + "node_modules/react-router/node_modules/cookie": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz", + "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/recast": { + "version": "0.23.11", + "resolved": "https://registry.npmjs.org/recast/-/recast-0.23.11.tgz", + "integrity": "sha512-YTUo+Flmw4ZXiWfQKGcwwc11KnoRAYgzAE2E7mXKCjSviTKShtxBsN6YUUBB2gtaBzKzeKunxhUwNHQuRryhWA==", + "license": "MIT", + "dependencies": { + "ast-types": "^0.16.1", + "esprima": "~4.0.0", + "source-map": "~0.6.1", + "tiny-invariant": "^1.3.3", + "tslib": "^2.0.1" + }, + "engines": { + "node": ">= 4" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/reselect": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/reselect/-/reselect-5.1.1.tgz", + "integrity": "sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==", + "license": "MIT" + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/restore-cursor": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz", + "integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==", + "license": "MIT", + "dependencies": { + "onetime": "^7.0.0", + "signal-exit": "^4.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/rettime": { + "version": "0.10.1", + "resolved": "https://registry.npmjs.org/rettime/-/rettime-0.10.1.tgz", + "integrity": "sha512-uyDrIlUEH37cinabq0AX4QbgV4HbFZ/gqoiunWQ1UqBtRvTTytwhNYjE++pO/MjPTZL5KQCf2bEoJ/BJNVQ5Kw==", + "license": "MIT" + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rolldown": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/rolldown/-/rolldown-1.0.0-rc.15.tgz", + "integrity": "sha512-Ff31guA5zT6WjnGp0SXw76X6hzGRk/OQq2hE+1lcDe+lJdHSgnSX6nK3erbONHyCbpSj9a9E+uX/OvytZoWp2g==", + "license": "MIT", + "dependencies": { + "@oxc-project/types": "=0.124.0", + "@rolldown/pluginutils": "1.0.0-rc.15" + }, + "bin": { + "rolldown": "bin/cli.mjs" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "optionalDependencies": { + "@rolldown/binding-android-arm64": "1.0.0-rc.15", + "@rolldown/binding-darwin-arm64": "1.0.0-rc.15", + "@rolldown/binding-darwin-x64": "1.0.0-rc.15", + "@rolldown/binding-freebsd-x64": "1.0.0-rc.15", + "@rolldown/binding-linux-arm-gnueabihf": "1.0.0-rc.15", + "@rolldown/binding-linux-arm64-gnu": "1.0.0-rc.15", + "@rolldown/binding-linux-arm64-musl": "1.0.0-rc.15", + "@rolldown/binding-linux-ppc64-gnu": "1.0.0-rc.15", + "@rolldown/binding-linux-s390x-gnu": "1.0.0-rc.15", + "@rolldown/binding-linux-x64-gnu": "1.0.0-rc.15", + "@rolldown/binding-linux-x64-musl": "1.0.0-rc.15", + "@rolldown/binding-openharmony-arm64": "1.0.0-rc.15", + "@rolldown/binding-wasm32-wasi": "1.0.0-rc.15", + "@rolldown/binding-win32-arm64-msvc": "1.0.0-rc.15", + "@rolldown/binding-win32-x64-msvc": "1.0.0-rc.15" + } + }, + "node_modules/rolldown/node_modules/@rolldown/pluginutils": { + "version": "1.0.0-rc.15", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-rc.15.tgz", + "integrity": 
"sha512-UromN0peaE53IaBRe9W7CjrZgXl90fqGpK+mIZbA3qSTeYqg3pqpROBdIPvOG3F5ereDHNwoHBI2e50n1BDr1g==", + "license": "MIT" + }, + "node_modules/router": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/router/-/router-2.2.0.tgz", + "integrity": "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==", + "license": "MIT", + "dependencies": { + "debug": "^4.4.0", + "depd": "^2.0.0", + "is-promise": "^4.0.0", + "parseurl": "^1.3.3", + "path-to-regexp": "^8.0.0" + }, + "engines": { + "node": ">= 18" + } + }, + "node_modules/router/node_modules/path-to-regexp": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.4.2.tgz", + "integrity": "sha512-qRcuIdP69NPm4qbACK+aDogI5CBDMi1jKe0ry5rSQJz8JVLsC7jV8XpiJjGRLLol3N+R5ihGYcrPLTno6pAdBA==", + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/run-applescript": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-7.1.0.tgz", + "integrity": "sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "license": "MIT" + }, + "node_modules/scheduler": { + "version": "0.27.0", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz", + "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/send": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/send/-/send-1.2.1.tgz", + "integrity": "sha512-1gnZf7DFcoIcajTjTwjwuDjzuz4PPcY2StKPlsGAQ1+YH20IRVrBaXSWmdjowTJ6u8Rc01PoYOGHXfP1mYcZNQ==", + "license": "MIT", + "dependencies": { + "debug": "^4.4.3", + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "etag": "^1.8.1", + "fresh": "^2.0.0", + "http-errors": "^2.0.1", + "mime-types": "^3.0.2", + "ms": "^2.1.3", + "on-finished": "^2.4.1", + "range-parser": "^1.2.1", + "statuses": "^2.0.2" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/serve-static": { + "version": "2.2.1", + "resolved": 
"https://registry.npmjs.org/serve-static/-/serve-static-2.2.1.tgz", + "integrity": "sha512-xRXBn0pPqQTVQiC8wyQrKs2MOlX24zQ0POGaj0kultvoOCstBQM5yvOhAVSUwOMjQtTvsPWoNCHfPGwaaQJhTw==", + "license": "MIT", + "dependencies": { + "encodeurl": "^2.0.0", + "escape-html": "^1.0.3", + "parseurl": "^1.3.3", + "send": "^1.2.0" + }, + "engines": { + "node": ">= 18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/set-cookie-parser": { + "version": "2.7.2", + "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz", + "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==", + "license": "MIT" + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", + "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", + "license": "ISC" + }, + "node_modules/shadcn": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/shadcn/-/shadcn-4.2.0.tgz", + "integrity": "sha512-ZDuV340itidaUd4Gi1BxQX+Y7Ush6BHp6URZBM2RyxUUBZ6yFtOWIr4nVY+Ro+YRSpo82v7JrsmtcU5xoBCMJQ==", + "license": "MIT", + "dependencies": { + "@babel/core": "^7.28.0", + "@babel/parser": "^7.28.0", + "@babel/plugin-transform-typescript": "^7.28.0", + "@babel/preset-typescript": "^7.27.1", + "@dotenvx/dotenvx": "^1.48.4", + "@modelcontextprotocol/sdk": "^1.26.0", + "@types/validate-npm-package-name": "^4.0.2", + "browserslist": "^4.26.2", + "commander": "^14.0.0", + "cosmiconfig": "^9.0.0", + "dedent": "^1.6.0", + "deepmerge": "^4.3.1", + "diff": "^8.0.2", + "execa": "^9.6.0", + "fast-glob": "^3.3.3", + "fs-extra": "^11.3.1", + "fuzzysort": "^3.1.0", + "https-proxy-agent": "^7.0.6", + "kleur": "^4.1.5", + "msw": "^2.10.4", + "node-fetch": "^3.3.2", + "open": "^11.0.0", + "ora": "^8.2.0", + "postcss": "^8.5.6", + "postcss-selector-parser": "^7.1.0", + "prompts": "^2.4.2", + "recast": "^0.23.11", + "stringify-object": "^5.0.0", + "tailwind-merge": "^3.0.1", + "ts-morph": "^26.0.0", + "tsconfig-paths": "^4.2.0", + "validate-npm-package-name": "^7.0.1", + "zod": "^3.24.1", + "zod-to-json-schema": "^3.24.6" + }, + "bin": { + "shadcn": "dist/index.js" + } + }, + "node_modules/shadcn/node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/side-channel": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", + "integrity": 
"sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.3", + "side-channel-list": "^1.0.0", + "side-channel-map": "^1.0.1", + "side-channel-weakmap": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-list": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.1.tgz", + "integrity": "sha512-mjn/0bi/oUURjc5Xl7IaWi/OJJJumuoJFQJfDDyO46+hBWsfaVM65TBHq2eoZBhzl9EchxOijpkbRC8SVBQU0w==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "object-inspect": "^1.13.4" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-map": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", + "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/side-channel-weakmap": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", + "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.5", + "object-inspect": "^1.13.3", + "side-channel-map": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", + "license": "MIT" + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/statuses": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", + "license": "MIT", + "engines": { + 
"node": ">= 0.8" + } + }, + "node_modules/stdin-discarder": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/stdin-discarder/-/stdin-discarder-0.2.2.tgz", + "integrity": "sha512-UhDfHmA92YAlNnCfhmq0VeNL5bDbiZGg7sZ2IvPsXubGkiNa9EC+tUTsjBRsYUAz87btI6/1wf4XoVvQ3uRnmQ==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strict-event-emitter": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/strict-event-emitter/-/strict-event-emitter-0.5.1.tgz", + "integrity": "sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ==", + "license": "MIT" + }, + "node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/stringify-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-5.0.0.tgz", + "integrity": "sha512-zaJYxz2FtcMb4f+g60KsRNFOpVMUyuJgA51Zi5Z1DOTC3S59+OQiVOzE9GZt0x72uBGWKsQIuBKeF9iusmKFsg==", + "license": "BSD-2-Clause", + "dependencies": { + "get-own-enumerable-keys": "^1.0.0", + "is-obj": "^3.0.0", + "is-regexp": "^3.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/yeoman/stringify-object?sponsor=1" + } + }, + "node_modules/strip-ansi": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.2.0.tgz", + "integrity": "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.2.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-final-newline": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-4.0.0.tgz", + "integrity": "sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": 
"sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/tabbable": { + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/tabbable/-/tabbable-6.4.0.tgz", + "integrity": "sha512-05PUHKSNE8ou2dwIxTngl4EzcnsCDZGJ/iCLtDflR/SHB/ny14rXc+qU5P4mG9JkusiV7EivzY9Mhm55AzAvCg==", + "license": "MIT" + }, + "node_modules/tagged-tag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/tagged-tag/-/tagged-tag-1.0.0.tgz", + "integrity": "sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng==", + "license": "MIT", + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/tailwind-merge": { + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.5.0.tgz", + "integrity": "sha512-I8K9wewnVDkL1NTGoqWmVEIlUcB9gFriAEkXkfCjX5ib8ezGxtR3xD7iZIxrfArjEsH7F1CHD4RFUtxefdqV/A==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/dcastil" + } + }, + "node_modules/tailwindcss": { + "version": "4.2.2", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.2.2.tgz", + "integrity": "sha512-KWBIxs1Xb6NoLdMVqhbhgwZf2PGBpPEiwOqgI4pFIYbNTfBXiKYyWoTsXgBQ9WFg/OlhnvHaY+AEpW7wSmFo2Q==", + "license": "MIT" + }, + "node_modules/tapable": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.2.tgz", + "integrity": "sha512-1MOpMXuhGzGL5TTCZFItxCc0AARf1EZFQkGqMm7ERKj8+Hgr5oLvJOVFcC+lRmR8hCe2S3jC4T5D7Vg/d7/fhA==", + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.16", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.16.tgz", + "integrity": "sha512-pn99VhoACYR8nFHhxqix+uvsbXineAasWm5ojXoN8xEwK5Kd3/TrhNn1wByuD52UxWRLy8pu+kRMniEi6Eq9Zg==", + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.4" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tldts": { + "version": "7.0.28", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.28.tgz", + "integrity": "sha512-+Zg3vWhRUv8B1maGSTFdev9mjoo8Etn2Ayfs4cnjlD3CsGkxXX4QyW3j2WJ0wdjYcYmy7Lx2RDsZMhgCWafKIw==", + "license": "MIT", + "dependencies": { + "tldts-core": "^7.0.28" + }, + "bin": { + "tldts": "bin/cli.js" + } + }, + "node_modules/tldts-core": { + "version": "7.0.28", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.28.tgz", + "integrity": "sha512-7W5Efjhsc3chVdFhqtaU0KtK32J37Zcr9RKtID54nG+tIpcY79CQK/veYPODxtD/LJ4Lue66jvrQzIX2Z2/pUQ==", + "license": "MIT" + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "license": "MIT", + "dependencies": { + 
"is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", + "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/tough-cookie": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-6.0.1.tgz", + "integrity": "sha512-LktZQb3IeoUWB9lqR5EWTHgW/VTITCXg4D21M+lvybRVdylLrRMnqaIONLVb5mav8vM19m44HIcGq4qASeu2Qw==", + "license": "BSD-3-Clause", + "dependencies": { + "tldts": "^7.0.5" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/ts-api-utils": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.5.0.tgz", + "integrity": "sha512-OJ/ibxhPlqrMM0UiNHJ/0CKQkoKF243/AEmplt3qpRgkW8VG7IfOS41h7V8TjITqdByHzrjcS/2si+y4lIh8NA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, + "node_modules/ts-morph": { + "version": "26.0.0", + "resolved": "https://registry.npmjs.org/ts-morph/-/ts-morph-26.0.0.tgz", + "integrity": "sha512-ztMO++owQnz8c/gIENcM9XfCEzgoGphTv+nKpYNM1bgsdOVC/jRZuEBf6N+mLLDNg68Kl+GgUZfOySaRiG1/Ug==", + "license": "MIT", + "dependencies": { + "@ts-morph/common": "~0.27.0", + "code-block-writer": "^13.0.3" + } + }, + "node_modules/tsconfig-paths": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-4.2.0.tgz", + "integrity": "sha512-NoZ4roiN7LnbKn9QqE1amc9DJfzvZXxF4xDavcOWt1BPkdx+m+0gJuPM+S0vCe7zTJMYUP0R8pO2XMr+Y8oLIg==", + "license": "MIT", + "dependencies": { + "json5": "^2.2.2", + "minimist": "^1.2.6", + "strip-bom": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "license": "0BSD" + }, + "node_modules/tw-animate-css": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/tw-animate-css/-/tw-animate-css-1.4.0.tgz", + "integrity": "sha512-7bziOlRqH0hJx80h/3mbicLW7o8qLsH5+RaLR2t+OHM3D0JlWGODQKQ4cxbK7WlvmUxpcj6Kgu6EKqjrGFe3QQ==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/Wombosvideo" + } + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-5.5.0.tgz", + "integrity": "sha512-PlBfpQwiUvGViBNX84Yxwjsdhd1TUlXr6zjX7eoirtCPIr08NAmxwa+fcYBTeRQxHo9YC9wwF3m9i700sHma8g==", + "license": "(MIT OR CC0-1.0)", + "dependencies": { + "tagged-tag": "^1.0.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/type-is": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz", + "integrity": 
"sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==", + "license": "MIT", + "dependencies": { + "content-type": "^1.0.5", + "media-typer": "^1.1.0", + "mime-types": "^3.0.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/typescript": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-6.0.2.tgz", + "integrity": "sha512-bGdAIrZ0wiGDo5l8c++HWtbaNCWTS4UTv7RaTH/ThVIgjkveJt83m74bBHMJkuCbslY8ixgLBVZJIOiQlQTjfQ==", + "devOptional": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/typescript-eslint": { + "version": "8.58.1", + "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.58.1.tgz", + "integrity": "sha512-gf6/oHChByg9HJvhMO1iBexJh12AqqTfnuxscMDOVqfJW3htsdRJI/GfPpHTTcyeB8cSTUY2JcZmVgoyPqcrDg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/eslint-plugin": "8.58.1", + "@typescript-eslint/parser": "8.58.1", + "@typescript-eslint/typescript-estree": "8.58.1", + "@typescript-eslint/utils": "8.58.1" + }, + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.57.0 || ^9.0.0 || ^10.0.0", + "typescript": ">=4.8.4 <6.1.0" + } + }, + "node_modules/undici-types": { + "version": "7.16.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz", + "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", + "license": "MIT" + }, + "node_modules/unicorn-magic": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz", + "integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/universalify": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", + "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", + "license": "MIT", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", + "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/until-async": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/until-async/-/until-async-3.0.2.tgz", + "integrity": "sha512-IiSk4HlzAMqTUseHHe3VhIGyuFmN90zMTpD3Z3y8jeQbzLIq500MVM7Jq2vUAnTKAFPJrqwkzr6PoTcPhGcOiw==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/kettanaito" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, 
+ { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/use-sync-external-store": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz", + "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==", + "license": "MIT", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/validate-npm-package-name": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/validate-npm-package-name/-/validate-npm-package-name-7.0.2.tgz", + "integrity": "sha512-hVDIBwsRruT73PbK7uP5ebUt+ezEtCmzZz3F59BSr2F6OVFnJ/6h8liuvdLrQ88Xmnk6/+xGGuq+pG9WwTuy3A==", + "license": "ISC", + "engines": { + "node": "^20.17.0 || >=22.9.0" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", + "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/vite": { + "version": "8.0.8", + "resolved": "https://registry.npmjs.org/vite/-/vite-8.0.8.tgz", + "integrity": "sha512-dbU7/iLVa8KZALJyLOBOQ88nOXtNG8vxKuOT4I2mD+Ya70KPceF4IAmDsmU0h1Qsn5bPrvsY9HJstCRh3hG6Uw==", + "license": "MIT", + "dependencies": { + "lightningcss": "^1.32.0", + "picomatch": "^4.0.4", + "postcss": "^8.5.8", + "rolldown": "1.0.0-rc.15", + "tinyglobby": "^0.2.15" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^20.19.0 || >=22.12.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^20.19.0 || >=22.12.0", + "@vitejs/devtools": "^0.1.0", + "esbuild": "^0.27.0 || ^0.28.0", + "jiti": ">=1.21.0", + "less": "^4.0.0", + "sass": "^1.70.0", + "sass-embedded": "^1.70.0", + "stylus": ">=0.54.8", + "sugarss": "^5.0.0", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "@vitejs/devtools": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/web-streams-polyfill": { + "version": "3.3.3", + "resolved": 
"https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", + "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/wrap-ansi/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "license": "ISC" + }, + "node_modules/ws": { + "version": "8.20.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.20.0.tgz", + "integrity": "sha512-sAt8BhgNbzCtgGbt2OxmpuryO63ZoDk/sqaB/znQm94T4fCEsy/yV+7CdC1kJhOU9lboAEU7R3kquuycDoibVA==", + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + 
"optional": true + } + } + }, + "node_modules/wsl-utils": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/wsl-utils/-/wsl-utils-0.3.1.tgz", + "integrity": "sha512-g/eziiSUNBSsdDJtCLB8bdYEUMj4jR7AGeUo96p/3dTafgjHhpF4RiCFPiRILwjQoDXx5MqkBr4fwWtR3Ky4Wg==", + "license": "MIT", + "dependencies": { + "is-wsl": "^3.1.0", + "powershell-utils": "^0.1.0" + }, + "engines": { + "node": ">=20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "license": "ISC" + }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "license": "MIT", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/yargs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + 
"license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yocto-spinner": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/yocto-spinner/-/yocto-spinner-1.1.0.tgz", + "integrity": "sha512-/BY0AUXnS7IKO354uLLA2eRcWiqDifEbd6unXCsOxkFDAkhgUL3PH9X2bFoaU0YchnDXsF+iKleeTLJGckbXfA==", + "license": "MIT", + "dependencies": { + "yoctocolors": "^2.1.1" + }, + "engines": { + "node": ">=18.19" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yoctocolors": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/yoctocolors/-/yoctocolors-2.1.2.tgz", + "integrity": "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yoctocolors-cjs": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/yoctocolors-cjs/-/yoctocolors-cjs-2.1.3.tgz", + "integrity": "sha512-U/PBtDf35ff0D8X8D0jfdzHYEPFxAI7jJlxZXwCSez5M3190m+QobIfh+sWDWSHMCWWJN2AWamkegn6vr6YBTw==", + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zod": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz", + "integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-to-json-schema": { + "version": "3.25.2", + "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.25.2.tgz", + "integrity": "sha512-O/PgfnpT1xKSDeQYSCfRI5Gy3hPf91mKVDuYLUHZJMiDFptvP41MSnWofm8dnCm0256ZNfZIM7DSzuSMAFnjHA==", + "license": "ISC", + "peerDependencies": { + "zod": "^3.25.28 || ^4" + } + }, + "node_modules/zod-validation-error": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/zod-validation-error/-/zod-validation-error-4.0.2.tgz", + "integrity": "sha512-Q6/nZLe6jxuU80qb/4uJ4t5v2VEZ44lzQjPDhYJNztRQ4wyWc6VF3D3Kb/fAuPetZQnhS3hnajCf9CsWesghLQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18.0.0" + }, + "peerDependencies": { + "zod": "^3.25.0 || ^4.0.0" + } + } + } +} diff --git a/web/package.json b/web/package.json new file mode 100644 index 0000000..c8d3026 --- /dev/null +++ b/web/package.json @@ -0,0 +1,42 @@ +{ + "name": "web", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc -b && vite build", + "lint": "eslint .", + "preview": "vite preview" + }, + "dependencies": { + "@base-ui/react": "^1.3.0", + "@fontsource-variable/geist": "^5.2.8", + "@supabase/supabase-js": "^2.103.0", + "@tailwindcss/vite": "^4.2.2", + "class-variance-authority": "^0.7.1", + "clsx": "^2.1.1", + "lucide-react": "^1.8.0", + "react": "^19.2.4", + "react-dom": "^19.2.4", + "react-router-dom": "^7.14.0", + "shadcn": "^4.2.0", + "tailwind-merge": "^3.5.0", + "tailwindcss": "^4.2.2", + "tw-animate-css": "^1.4.0" + }, + "devDependencies": { + "@eslint/js": "^9.39.4", + "@types/node": "^24.12.2", + "@types/react": "^19.2.14", + "@types/react-dom": "^19.2.3", + "@vitejs/plugin-react": "^6.0.1", + "eslint": "^9.39.4", + "eslint-plugin-react-hooks": "^7.0.1", + "eslint-plugin-react-refresh": "^0.5.2", + "globals": "^17.4.0", + "typescript": 
"~6.0.2", + "typescript-eslint": "^8.58.0", + "vite": "^8.0.4" + } +} diff --git a/web/public/favicon.svg b/web/public/favicon.svg new file mode 100644 index 0000000..ce4e815 --- /dev/null +++ b/web/public/favicon.svg @@ -0,0 +1 @@ +🎯 diff --git a/web/public/icons.svg b/web/public/icons.svg new file mode 100644 index 0000000..e952219 --- /dev/null +++ b/web/public/icons.svg @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/web/src/App.tsx b/web/src/App.tsx new file mode 100644 index 0000000..e9ef88b --- /dev/null +++ b/web/src/App.tsx @@ -0,0 +1,25 @@ +import { BrowserRouter, Routes, Route } from "react-router-dom"; +import { Layout } from "@/components/Layout"; +import { ToolsPage } from "@/pages/ToolsPage"; +import { ToolDetailPage } from "@/pages/ToolDetailPage"; +import { LibrariesPage } from "@/pages/LibrariesPage"; +import { RecentPage } from "@/pages/RecentPage"; +import { ScriptsPage } from "@/pages/ScriptsPage"; +import { BuildLibraryPage } from "@/pages/BuildLibraryPage"; + +export default function App() { + return ( + + + }> + } /> + } /> + } /> + } /> + } /> + } /> + + + + ); +} diff --git a/web/src/components/Layout.tsx b/web/src/components/Layout.tsx new file mode 100644 index 0000000..4b0506f --- /dev/null +++ b/web/src/components/Layout.tsx @@ -0,0 +1,54 @@ +import { Link, Outlet, useLocation } from "react-router-dom"; +import { cn } from "@/lib/utils"; + +const navItems = [ + { to: "/", label: "Tools" }, + { to: "/libraries", label: "Libraries" }, + { to: "/scripts", label: "Scripts" }, + { to: "/build-library", label: "Build Library" }, +]; + +export function Layout() { + const location = useLocation(); + + return ( +
+
+
+ + Datum + + + + Grace Engineering + +
+
+
+ +
+
+ ); +} diff --git a/web/src/components/ui/badge.tsx b/web/src/components/ui/badge.tsx new file mode 100644 index 0000000..b20959d --- /dev/null +++ b/web/src/components/ui/badge.tsx @@ -0,0 +1,52 @@ +import { mergeProps } from "@base-ui/react/merge-props" +import { useRender } from "@base-ui/react/use-render" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const badgeVariants = cva( + "group/badge inline-flex h-5 w-fit shrink-0 items-center justify-center gap-1 overflow-hidden rounded-4xl border border-transparent px-2 py-0.5 text-xs font-medium whitespace-nowrap transition-all focus-visible:border-ring focus-visible:ring-[3px] focus-visible:ring-ring/50 has-data-[icon=inline-end]:pr-1.5 has-data-[icon=inline-start]:pl-1.5 aria-invalid:border-destructive aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 [&>svg]:pointer-events-none [&>svg]:size-3!", + { + variants: { + variant: { + default: "bg-primary text-primary-foreground [a]:hover:bg-primary/80", + secondary: + "bg-secondary text-secondary-foreground [a]:hover:bg-secondary/80", + destructive: + "bg-destructive/10 text-destructive focus-visible:ring-destructive/20 dark:bg-destructive/20 dark:focus-visible:ring-destructive/40 [a]:hover:bg-destructive/20", + outline: + "border-border text-foreground [a]:hover:bg-muted [a]:hover:text-muted-foreground", + ghost: + "hover:bg-muted hover:text-muted-foreground dark:hover:bg-muted/50", + link: "text-primary underline-offset-4 hover:underline", + }, + }, + defaultVariants: { + variant: "default", + }, + } +) + +function Badge({ + className, + variant = "default", + render, + ...props +}: useRender.ComponentProps<"span"> & VariantProps) { + return useRender({ + defaultTagName: "span", + props: mergeProps<"span">( + { + className: cn(badgeVariants({ variant }), className), + }, + props + ), + render, + state: { + slot: "badge", + variant, + }, + }) +} + +export { Badge, badgeVariants } diff --git a/web/src/components/ui/button.tsx b/web/src/components/ui/button.tsx new file mode 100644 index 0000000..09df753 --- /dev/null +++ b/web/src/components/ui/button.tsx @@ -0,0 +1,58 @@ +import { Button as ButtonPrimitive } from "@base-ui/react/button" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +const buttonVariants = cva( + "group/button inline-flex shrink-0 items-center justify-center rounded-lg border border-transparent bg-clip-padding text-sm font-medium whitespace-nowrap transition-all outline-none select-none focus-visible:border-ring focus-visible:ring-3 focus-visible:ring-ring/50 active:not-aria-[haspopup]:translate-y-px disabled:pointer-events-none disabled:opacity-50 aria-invalid:border-destructive aria-invalid:ring-3 aria-invalid:ring-destructive/20 dark:aria-invalid:border-destructive/50 dark:aria-invalid:ring-destructive/40 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4", + { + variants: { + variant: { + default: "bg-primary text-primary-foreground [a]:hover:bg-primary/80", + outline: + "border-border bg-background hover:bg-muted hover:text-foreground aria-expanded:bg-muted aria-expanded:text-foreground dark:border-input dark:bg-input/30 dark:hover:bg-input/50", + secondary: + "bg-secondary text-secondary-foreground hover:bg-secondary/80 aria-expanded:bg-secondary aria-expanded:text-secondary-foreground", + ghost: + "hover:bg-muted hover:text-foreground aria-expanded:bg-muted aria-expanded:text-foreground 
dark:hover:bg-muted/50", + destructive: + "bg-destructive/10 text-destructive hover:bg-destructive/20 focus-visible:border-destructive/40 focus-visible:ring-destructive/20 dark:bg-destructive/20 dark:hover:bg-destructive/30 dark:focus-visible:ring-destructive/40", + link: "text-primary underline-offset-4 hover:underline", + }, + size: { + default: + "h-8 gap-1.5 px-2.5 has-data-[icon=inline-end]:pr-2 has-data-[icon=inline-start]:pl-2", + xs: "h-6 gap-1 rounded-[min(var(--radius-md),10px)] px-2 text-xs in-data-[slot=button-group]:rounded-lg has-data-[icon=inline-end]:pr-1.5 has-data-[icon=inline-start]:pl-1.5 [&_svg:not([class*='size-'])]:size-3", + sm: "h-7 gap-1 rounded-[min(var(--radius-md),12px)] px-2.5 text-[0.8rem] in-data-[slot=button-group]:rounded-lg has-data-[icon=inline-end]:pr-1.5 has-data-[icon=inline-start]:pl-1.5 [&_svg:not([class*='size-'])]:size-3.5", + lg: "h-9 gap-1.5 px-2.5 has-data-[icon=inline-end]:pr-2 has-data-[icon=inline-start]:pl-2", + icon: "size-8", + "icon-xs": + "size-6 rounded-[min(var(--radius-md),10px)] in-data-[slot=button-group]:rounded-lg [&_svg:not([class*='size-'])]:size-3", + "icon-sm": + "size-7 rounded-[min(var(--radius-md),12px)] in-data-[slot=button-group]:rounded-lg", + "icon-lg": "size-9", + }, + }, + defaultVariants: { + variant: "default", + size: "default", + }, + } +) + +function Button({ + className, + variant = "default", + size = "default", + ...props +}: ButtonPrimitive.Props & VariantProps) { + return ( + + ) +} + +export { Button, buttonVariants } diff --git a/web/src/components/ui/card.tsx b/web/src/components/ui/card.tsx new file mode 100644 index 0000000..40cac5f --- /dev/null +++ b/web/src/components/ui/card.tsx @@ -0,0 +1,103 @@ +import * as React from "react" + +import { cn } from "@/lib/utils" + +function Card({ + className, + size = "default", + ...props +}: React.ComponentProps<"div"> & { size?: "default" | "sm" }) { + return ( +
img:first-child]:pt-0 data-[size=sm]:gap-3 data-[size=sm]:py-3 data-[size=sm]:has-data-[slot=card-footer]:pb-0 *:[img:first-child]:rounded-t-xl *:[img:last-child]:rounded-b-xl", + className + )} + {...props} + /> + ) +} + +function CardHeader({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function CardTitle({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function CardDescription({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function CardAction({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function CardContent({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +function CardFooter({ className, ...props }: React.ComponentProps<"div">) { + return ( +
+ ) +} + +export { + Card, + CardHeader, + CardFooter, + CardTitle, + CardAction, + CardDescription, + CardContent, +} diff --git a/web/src/components/ui/input.tsx b/web/src/components/ui/input.tsx new file mode 100644 index 0000000..7d21bab --- /dev/null +++ b/web/src/components/ui/input.tsx @@ -0,0 +1,20 @@ +import * as React from "react" +import { Input as InputPrimitive } from "@base-ui/react/input" + +import { cn } from "@/lib/utils" + +function Input({ className, type, ...props }: React.ComponentProps<"input">) { + return ( + + ) +} + +export { Input } diff --git a/web/src/components/ui/separator.tsx b/web/src/components/ui/separator.tsx new file mode 100644 index 0000000..6e1369e --- /dev/null +++ b/web/src/components/ui/separator.tsx @@ -0,0 +1,25 @@ +"use client" + +import { Separator as SeparatorPrimitive } from "@base-ui/react/separator" + +import { cn } from "@/lib/utils" + +function Separator({ + className, + orientation = "horizontal", + ...props +}: SeparatorPrimitive.Props) { + return ( + + ) +} + +export { Separator } diff --git a/web/src/components/ui/table.tsx b/web/src/components/ui/table.tsx new file mode 100644 index 0000000..ac9585e --- /dev/null +++ b/web/src/components/ui/table.tsx @@ -0,0 +1,114 @@ +import * as React from "react" + +import { cn } from "@/lib/utils" + +function Table({ className, ...props }: React.ComponentProps<"table">) { + return ( +
+
+ + ) +} + +function TableHeader({ className, ...props }: React.ComponentProps<"thead">) { + return ( + + ) +} + +function TableBody({ className, ...props }: React.ComponentProps<"tbody">) { + return ( + + ) +} + +function TableFooter({ className, ...props }: React.ComponentProps<"tfoot">) { + return ( + tr]:last:border-b-0", + className + )} + {...props} + /> + ) +} + +function TableRow({ className, ...props }: React.ComponentProps<"tr">) { + return ( + + ) +} + +function TableHead({ className, ...props }: React.ComponentProps<"th">) { + return ( +
+ ) +} + +function TableCell({ className, ...props }: React.ComponentProps<"td">) { + return ( + + ) +} + +function TableCaption({ + className, + ...props +}: React.ComponentProps<"caption">) { + return ( +
+ ) +} + +export { + Table, + TableHeader, + TableBody, + TableFooter, + TableHead, + TableRow, + TableCell, + TableCaption, +} diff --git a/web/src/components/ui/tabs.tsx b/web/src/components/ui/tabs.tsx new file mode 100644 index 0000000..2adaeb6 --- /dev/null +++ b/web/src/components/ui/tabs.tsx @@ -0,0 +1,80 @@ +import { Tabs as TabsPrimitive } from "@base-ui/react/tabs" +import { cva, type VariantProps } from "class-variance-authority" + +import { cn } from "@/lib/utils" + +function Tabs({ + className, + orientation = "horizontal", + ...props +}: TabsPrimitive.Root.Props) { + return ( + + ) +} + +const tabsListVariants = cva( + "group/tabs-list inline-flex w-fit items-center justify-center rounded-lg p-[3px] text-muted-foreground group-data-horizontal/tabs:h-8 group-data-vertical/tabs:h-fit group-data-vertical/tabs:flex-col data-[variant=line]:rounded-none", + { + variants: { + variant: { + default: "bg-muted", + line: "gap-1 bg-transparent", + }, + }, + defaultVariants: { + variant: "default", + }, + } +) + +function TabsList({ + className, + variant = "default", + ...props +}: TabsPrimitive.List.Props & VariantProps) { + return ( + + ) +} + +function TabsTrigger({ className, ...props }: TabsPrimitive.Tab.Props) { + return ( + + ) +} + +function TabsContent({ className, ...props }: TabsPrimitive.Panel.Props) { + return ( + + ) +} + +export { Tabs, TabsList, TabsTrigger, TabsContent, tabsListVariants } diff --git a/web/src/index.css b/web/src/index.css new file mode 100644 index 0000000..fb3c7e9 --- /dev/null +++ b/web/src/index.css @@ -0,0 +1,130 @@ +@import "tailwindcss"; +@import "tw-animate-css"; +@import "shadcn/tailwind.css"; +@import "@fontsource-variable/geist"; + +@custom-variant dark (&:is(.dark *)); + +@theme inline { + --font-heading: var(--font-sans); + --font-sans: 'Geist Variable', sans-serif; + --color-sidebar-ring: var(--sidebar-ring); + --color-sidebar-border: var(--sidebar-border); + --color-sidebar-accent-foreground: var(--sidebar-accent-foreground); + --color-sidebar-accent: var(--sidebar-accent); + --color-sidebar-primary-foreground: var(--sidebar-primary-foreground); + --color-sidebar-primary: var(--sidebar-primary); + --color-sidebar-foreground: var(--sidebar-foreground); + --color-sidebar: var(--sidebar); + --color-chart-5: var(--chart-5); + --color-chart-4: var(--chart-4); + --color-chart-3: var(--chart-3); + --color-chart-2: var(--chart-2); + --color-chart-1: var(--chart-1); + --color-ring: var(--ring); + --color-input: var(--input); + --color-border: var(--border); + --color-destructive: var(--destructive); + --color-accent-foreground: var(--accent-foreground); + --color-accent: var(--accent); + --color-muted-foreground: var(--muted-foreground); + --color-muted: var(--muted); + --color-secondary-foreground: var(--secondary-foreground); + --color-secondary: var(--secondary); + --color-primary-foreground: var(--primary-foreground); + --color-primary: var(--primary); + --color-popover-foreground: var(--popover-foreground); + --color-popover: var(--popover); + --color-card-foreground: var(--card-foreground); + --color-card: var(--card); + --color-foreground: var(--foreground); + --color-background: var(--background); + --radius-sm: calc(var(--radius) * 0.6); + --radius-md: calc(var(--radius) * 0.8); + --radius-lg: var(--radius); + --radius-xl: calc(var(--radius) * 1.4); + --radius-2xl: calc(var(--radius) * 1.8); + --radius-3xl: calc(var(--radius) * 2.2); + --radius-4xl: calc(var(--radius) * 2.6); +} + +:root { + --background: oklch(1 0 0); + --foreground: 
oklch(0.145 0 0); + --card: oklch(1 0 0); + --card-foreground: oklch(0.145 0 0); + --popover: oklch(1 0 0); + --popover-foreground: oklch(0.145 0 0); + --primary: oklch(0.205 0 0); + --primary-foreground: oklch(0.985 0 0); + --secondary: oklch(0.97 0 0); + --secondary-foreground: oklch(0.205 0 0); + --muted: oklch(0.97 0 0); + --muted-foreground: oklch(0.556 0 0); + --accent: oklch(0.97 0 0); + --accent-foreground: oklch(0.205 0 0); + --destructive: oklch(0.577 0.245 27.325); + --border: oklch(0.922 0 0); + --input: oklch(0.922 0 0); + --ring: oklch(0.708 0 0); + --chart-1: oklch(0.87 0 0); + --chart-2: oklch(0.556 0 0); + --chart-3: oklch(0.439 0 0); + --chart-4: oklch(0.371 0 0); + --chart-5: oklch(0.269 0 0); + --radius: 0.625rem; + --sidebar: oklch(0.985 0 0); + --sidebar-foreground: oklch(0.145 0 0); + --sidebar-primary: oklch(0.205 0 0); + --sidebar-primary-foreground: oklch(0.985 0 0); + --sidebar-accent: oklch(0.97 0 0); + --sidebar-accent-foreground: oklch(0.205 0 0); + --sidebar-border: oklch(0.922 0 0); + --sidebar-ring: oklch(0.708 0 0); +} + +.dark { + --background: oklch(0.145 0 0); + --foreground: oklch(0.985 0 0); + --card: oklch(0.205 0 0); + --card-foreground: oklch(0.985 0 0); + --popover: oklch(0.205 0 0); + --popover-foreground: oklch(0.985 0 0); + --primary: oklch(0.922 0 0); + --primary-foreground: oklch(0.205 0 0); + --secondary: oklch(0.269 0 0); + --secondary-foreground: oklch(0.985 0 0); + --muted: oklch(0.269 0 0); + --muted-foreground: oklch(0.708 0 0); + --accent: oklch(0.269 0 0); + --accent-foreground: oklch(0.985 0 0); + --destructive: oklch(0.704 0.191 22.216); + --border: oklch(1 0 0 / 10%); + --input: oklch(1 0 0 / 15%); + --ring: oklch(0.556 0 0); + --chart-1: oklch(0.87 0 0); + --chart-2: oklch(0.556 0 0); + --chart-3: oklch(0.439 0 0); + --chart-4: oklch(0.371 0 0); + --chart-5: oklch(0.269 0 0); + --sidebar: oklch(0.205 0 0); + --sidebar-foreground: oklch(0.985 0 0); + --sidebar-primary: oklch(0.488 0.243 264.376); + --sidebar-primary-foreground: oklch(0.985 0 0); + --sidebar-accent: oklch(0.269 0 0); + --sidebar-accent-foreground: oklch(0.985 0 0); + --sidebar-border: oklch(1 0 0 / 10%); + --sidebar-ring: oklch(0.556 0 0); +} + +@layer base { + * { + @apply border-border outline-ring/50; + } + body { + @apply bg-background text-foreground; + } + html { + @apply font-sans; + } +} \ No newline at end of file diff --git a/web/src/lib/supabase.ts b/web/src/lib/supabase.ts new file mode 100644 index 0000000..41d7b28 --- /dev/null +++ b/web/src/lib/supabase.ts @@ -0,0 +1,12 @@ +import { createClient } from "@supabase/supabase-js"; + +const supabaseUrl = import.meta.env.VITE_SUPABASE_URL as string; +const supabaseAnonKey = import.meta.env.VITE_SUPABASE_ANON_KEY as string; + +if (!supabaseUrl || !supabaseAnonKey) { + throw new Error( + "Missing VITE_SUPABASE_URL or VITE_SUPABASE_ANON_KEY in environment" + ); +} + +export const supabase = createClient(supabaseUrl, supabaseAnonKey); diff --git a/web/src/lib/types.ts b/web/src/lib/types.ts new file mode 100644 index 0000000..9b51966 --- /dev/null +++ b/web/src/lib/types.ts @@ -0,0 +1,102 @@ +export interface Library { + id: string; + library_name: string; + vendor: string | null; + file_path: string | null; + file_hash: string | null; + tool_count: number; + unit_original: string | null; + source_modified_at: string | null; + ingested_at: string; + created_at: string; +} + +export interface Tool { + id: string; + fusion_guid: string; + library_id: string | null; + + // Identity + vendor: string; + product_id: 
string;
+  description: string;
+  type: string;
+  bmc: string | null;
+  grade: string | null;
+  unit_original: string | null;
+  product_link: string | null;
+
+  // Geometry (mm)
+  geo_dc: number | null;
+  geo_nof: number | null;
+  geo_oal: number | null;
+  geo_lcf: number | null;
+  geo_lb: number | null;
+  geo_sfdm: number | null;
+  geo_sig: number | null;
+  geo_re: number | null;
+
+  // Post-process
+  pp_number: number | null;
+  pp_comment: string | null;
+
+  // Plex sync
+  plex_supply_item_id: string | null;
+  plex_synced_at: string | null;
+
+  // Inventory qty (populated by datum-sync-inventory)
+  qty_on_hand: number | null;
+  qty_tracked: boolean | null;
+  qty_synced_at: string | null;
+
+  // Timestamps
+  created_at: string;
+  updated_at: string;
+
+  // Joined
+  libraries?: Pick<Library, "library_name" | "vendor" | "source_modified_at"> | null;
+}
+
+export interface PlexSupplyItem {
+  fusion_guid: string;
+  category: string;
+  description: string | null;
+  item_group: string | null;
+  inventory_unit: string;
+  supply_item_number: string | null;
+  item_type: string;
+  plex_id: string | null;
+  posted_to_plex_at: string | null;
+}
+
+export interface ReferenceRow {
+  id: string;
+  catalog_name: string;
+  vendor: string;
+  product_id: string;
+  description: string;
+  type: string;
+  geo_dc: number | null;
+  geo_nof: number | null;
+  geo_oal: number | null;
+  geo_lcf: number | null;
+  geo_sig: number | null;
+  unit_original: string | null;
+}
+
+export interface CuttingPreset {
+  id: string;
+  tool_id: string;
+  preset_guid: string | null;
+  name: string | null;
+  description: string | null;
+  material_category: string | null;
+  material_query: string | null;
+  v_c: number | null;
+  v_f: number | null;
+  f_z: number | null;
+  f_n: number | null;
+  n: number | null;
+  tool_coolant: string | null;
+  created_at: string;
+}
diff --git a/web/src/lib/utils.ts b/web/src/lib/utils.ts
new file mode 100644
index 0000000..087c624
--- /dev/null
+++ b/web/src/lib/utils.ts
@@ -0,0 +1,31 @@
+import { clsx, type ClassValue } from "clsx"
+import { twMerge } from "tailwind-merge"
+
+export function cn(...inputs: ClassValue[]) {
+  return twMerge(clsx(inputs))
+}
+
+/**
+ * Returns a short human-readable "time ago" string for an ISO timestamp.
+ * e.g. "just now", "5 mins ago", "3 hours ago", "2 days ago", "3 weeks ago".
+ * Returns "—" for null/undefined/invalid input.
+ */
+export function relativeTime(iso: string | null | undefined, now = Date.now()): string {
+  if (!iso) return "—";
+  const t = new Date(iso).getTime();
+  if (isNaN(t)) return "—";
+  const diff = Math.max(0, now - t);
+  const mins = Math.floor(diff / 60_000);
+  if (mins < 1) return "just now";
+  if (mins < 60) return `${mins} min${mins === 1 ? "" : "s"} ago`;
+  const hrs = Math.floor(mins / 60);
+  if (hrs < 24) return `${hrs} hour${hrs === 1 ? "" : "s"} ago`;
+  const days = Math.floor(hrs / 24);
+  if (days < 7) return `${days} day${days === 1 ? "" : "s"} ago`;
+  const weeks = Math.floor(days / 7);
+  if (weeks < 5) return `${weeks} week${weeks === 1 ? "" : "s"} ago`;
+  const months = Math.floor(days / 30);
+  if (months < 12) return `${months} month${months === 1 ? "" : "s"} ago`;
+  const years = Math.floor(days / 365);
+  return `${years} year${years === 1 ? "" : "s"} ago`;
+}
diff --git a/web/src/main.tsx b/web/src/main.tsx
new file mode 100644
index 0000000..12fa35b
--- /dev/null
+++ b/web/src/main.tsx
@@ -0,0 +1,10 @@
+import { StrictMode } from "react";
+import { createRoot } from "react-dom/client";
+import "./index.css";
+import App from "./App";
+
+createRoot(document.getElementById("root")!).render(
+  <StrictMode>
+    <App />
+  </StrictMode>
+);
diff --git a/web/src/pages/BuildLibraryPage.tsx b/web/src/pages/BuildLibraryPage.tsx
new file mode 100644
index 0000000..158cb90
--- /dev/null
+++ b/web/src/pages/BuildLibraryPage.tsx
@@ -0,0 +1,423 @@
+import { useEffect, useRef, useState } from "react";
+import { supabase } from "@/lib/supabase";
+import type { ReferenceRow } from "@/lib/types";
+import { Input } from "@/components/ui/input";
+import { Badge } from "@/components/ui/badge";
+import { Button } from "@/components/ui/button";
+import {
+  Table,
+  TableBody,
+  TableCell,
+  TableHead,
+  TableHeader,
+  TableRow,
+} from "@/components/ui/table";
+
+const MM_PER_INCH = 25.4;
+const PAGE_SIZE = 50;
+
+// ── Fusion 360 JSON export helpers ──────────────────────
+function refToFusionTool(ref: ReferenceRow): Record<string, unknown> {
+  const isInches = ref.unit_original?.toLowerCase() === "inches";
+  const scale = isInches ? 1 / MM_PER_INCH : 1; // DB stores mm; convert back if original was inches
+  const unit = isInches ? "inches" : "millimeters";
+
+  return {
+    guid: crypto.randomUUID(),
+    type: ref.type,
+    unit,
+    vendor: ref.vendor,
+    "product-id": ref.product_id,
+    description: ref.description,
+    BMC: "carbide",
+    GRADE: "",
+    geometry: {
+      ...(ref.geo_dc != null && { DC: ref.geo_dc * scale }),
+      ...(ref.geo_nof != null && { NOF: ref.geo_nof }),
+      ...(ref.geo_oal != null && { OAL: ref.geo_oal * scale }),
+      ...(ref.geo_lcf != null && { LCF: ref.geo_lcf * scale }),
+      ...(ref.geo_sig != null && { SIG: ref.geo_sig }),
+      CSP: false,
+      HAND: true,
+    },
+    "post-process": {
+      number: 0,
+      turret: 0,
+      "diameter-offset": 0,
+      "length-offset": 0,
+      live: true,
+      "break-control": false,
+      "manual-tool-change": false,
+      comment: "",
+    },
+    "start-values": { presets: [] },
+    expressions: {},
+  };
+}
+
+function downloadJson(data: unknown, filename: string) {
+  const blob = new Blob([JSON.stringify(data, null, 2)], { type: "application/json" });
+  const url = URL.createObjectURL(blob);
+  const a = document.createElement("a");
+  a.href = url;
+  a.download = filename;
+  document.body.appendChild(a);
+  a.click();
+  document.body.removeChild(a);
+  URL.revokeObjectURL(url);
+}
+
+// ── Multi-select dropdown (reused pattern) ──────────────
+function FilterDropdown({
+  label: dropLabel,
+  options,
+  selected,
+  onToggle,
+  onClear,
+}: {
+  label: string;
+  options: string[];
+  selected: Set<string>;
+  onToggle: (val: string) => void;
+  onClear: () => void;
+}) {
+  const [open, setOpen] = useState(false);
+  const ref = useRef<HTMLDivElement>(null);
+
+  useEffect(() => {
+    function handleClick(e: MouseEvent) {
+      if (ref.current && !ref.current.contains(e.target as Node)) setOpen(false);
+    }
+    document.addEventListener("mousedown", handleClick);
+    return () => document.removeEventListener("mousedown", handleClick);
+  }, []);
+
+  const display =
+    selected.size === 0
+      ? dropLabel
+      : selected.size === 1
+        ? [...selected][0]
+        : `${selected.size} selected`;
+
+  return (
+    
+ + {open && ( +
+ {selected.size > 0 && ( + + )} + {options.map((opt) => ( + + ))} +
+      )}
+    
+  );
+}
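+
+// Usage sketch (illustrative; mirrors the vendor filter that BuildLibraryPage
+// below wires up. Prop names come from the signature above; the label text and
+// state values are assumptions):
+//
+//   <FilterDropdown
+//     label="Vendor"
+//     options={vendors}
+//     selected={vendorFilter}
+//     onToggle={(v) => toggleFilter(vendorFilter, v, setVendorFilter)}
+//     onClear={() => setVendorFilter(new Set())}
+//   />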
+
+// ── Main page ───────────────────────────────────────────
+export function BuildLibraryPage() {
+  const [results, setResults] = useState<ReferenceRow[]>([]);
+  const [cart, setCart] = useState<Map<string, ReferenceRow>>(new Map());
+  const [search, setSearch] = useState("");
+  const [vendorFilter, setVendorFilter] = useState<Set<string>>(new Set());
+  const [typeFilter, setTypeFilter] = useState<Set<string>>(new Set());
+  const [loading, setLoading] = useState(false);
+  const [totalCount, setTotalCount] = useState<number | null>(null);
+  const [page, setPage] = useState(0);
+  const [vendors, setVendors] = useState<string[]>([]);
+  const [types, setTypes] = useState<string[]>([]);
+  const [libraryName, setLibraryName] = useState("My Library");
+
+  // Fetch distinct vendors and types on mount
+  useEffect(() => {
+    async function fetchMeta() {
+      const [vRes, tRes] = await Promise.all([
+        supabase.from("reference_catalog").select("vendor").limit(1000),
+        supabase.from("reference_catalog").select("type").limit(1000),
+      ]);
+      if (vRes.data) {
+        const unique = [...new Set(vRes.data.map((r: { vendor: string }) => r.vendor))].sort();
+        setVendors(unique);
+      }
+      if (tRes.data) {
+        const unique = [...new Set(tRes.data.map((r: { type: string }) => r.type))].sort();
+        setTypes(unique);
+      }
+    }
+    fetchMeta();
+  }, []);
+
+  // Search the catalog
+  useEffect(() => {
+    const timer = setTimeout(() => {
+      fetchResults(0);
+    }, 300); // debounce
+    return () => clearTimeout(timer);
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, [search, vendorFilter, typeFilter]);
+
+  async function fetchResults(pageNum: number) {
+    setLoading(true);
+    setPage(pageNum);
+
+    let query = supabase
+      .from("reference_catalog")
+      .select("*", { count: "exact" })
+      .order("vendor")
+      .order("product_id")
+      .range(pageNum * PAGE_SIZE, (pageNum + 1) * PAGE_SIZE - 1);
+
+    if (search.trim()) {
+      // Use ilike for text search on description and product_id
+      const q = `%${search.trim()}%`;
+      query = query.or(`description.ilike.${q},product_id.ilike.${q}`);
+    }
+
+    if (vendorFilter.size > 0) {
+      query = query.in("vendor", [...vendorFilter]);
+    }
+    if (typeFilter.size > 0) {
+      query = query.in("type", [...typeFilter]);
+    }
+
+    const { data, count, error } = await query;
+    if (error) {
+      console.error("Reference catalog query failed:", error);
+    } else {
+      setResults(data ?? []);
+      setTotalCount(count);
+    }
+    setLoading(false);
+  }
+
+  function toggleCart(row: ReferenceRow) {
+    setCart((prev) => {
+      const next = new Map(prev);
+      if (next.has(row.id)) {
+        next.delete(row.id);
+      } else {
+        next.set(row.id, row);
+      }
+      return next;
+    });
+  }
+
+  function addAllVisible() {
+    setCart((prev) => {
+      const next = new Map(prev);
+      for (const r of results) next.set(r.id, r);
+      return next;
+    });
+  }
+
+  function handleExport() {
+    const tools = [...cart.values()].map(refToFusionTool);
+    downloadJson({ data: tools, version: 2 }, `${libraryName.replace(/\s+/g, "_")}.json`);
+  }
+
+  function toggleFilter(set: Set<string>, val: string, setter: (s: Set<string>) => void) {
+    const next = new Set(set);
+    if (next.has(val)) next.delete(val);
+    else next.add(val);
+    setter(next);
+  }
+
+  function fmtMm(val: number | null): string {
+    if (val == null) return "\u2014";
+    return val.toFixed(2);
+  }
+
+  const totalPages = totalCount != null ? Math.ceil(totalCount / PAGE_SIZE) : 0;
+
+  return (
+    
+
+

+ Build a Library{" "} + + {totalCount != null && `(${totalCount.toLocaleString()} tools)`} + +

+
+ setLibraryName(e.target.value)} + className="w-48" + /> + +
+
+ + {cart.size > 0 && ( +
+ {cart.size} tool{cart.size !== 1 ? "s" : ""} in library + +
+ )} + +
+ setSearch(e.target.value)} + className="max-w-sm" + /> + toggleFilter(vendorFilter, v, setVendorFilter)} + onClear={() => setVendorFilter(new Set())} + /> + toggleFilter(typeFilter, v, setTypeFilter)} + onClear={() => setTypeFilter(new Set())} + /> + {results.length > 0 && ( + + )} +
+ +
+ + + + + Description + Part # + Vendor + Type + Dia (mm) + OAL (mm) + Flutes + + + + {loading ? ( + + + Searching... + + + ) : results.length === 0 ? ( + + + {search || vendorFilter.size || typeFilter.size + ? "No tools match your search." + : "Search the reference catalog to find tools."} + + + ) : ( + results.map((row) => { + const inCart = cart.has(row.id); + return ( + toggleCart(row)} + > + + toggleCart(row)} + className="rounded" + /> + + + + {row.description} + + + {row.product_id} + {row.vendor} + + {row.type} + + {fmtMm(row.geo_dc)} + {fmtMm(row.geo_oal)} + {row.geo_nof ?? "\u2014"} + + ); + }) + )} + +
+
+ + {totalPages > 1 && ( +
+ + Page {page + 1} of {totalPages} + +
+ + +
+
+ )} +
+  );
+}
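+
+// Shape of the file handleExport() downloads, for one tool. Values here are
+// illustrative (a hypothetical 6 mm 3-flute cutter from "ExampleCo"); the
+// field set and the top-level { data, version } wrapper come from
+// refToFusionTool/downloadJson above (post-process abridged):
+//
+//   {
+//     "data": [
+//       {
+//         "guid": "<random UUID>",
+//         "type": "flat end mill",
+//         "unit": "millimeters",
+//         "vendor": "ExampleCo",
+//         "product-id": "EC-0630",
+//         "description": "6mm 3FL square end mill",
+//         "BMC": "carbide",
+//         "GRADE": "",
+//         "geometry": { "DC": 6, "NOF": 3, "OAL": 57, "CSP": false, "HAND": true },
+//         "post-process": { "number": 0, "turret": 0, "live": true, "comment": "" },
+//         "start-values": { "presets": [] },
+//         "expressions": {}
+//       }
+//     ],
+//     "version": 2
+//   }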
Loading libraries...
; + } + + const lastSyncIso = libraries.reduce((acc, lib) => { + if (!lib.ingested_at) return acc; + if (!acc || lib.ingested_at > acc) return lib.ingested_at; + return acc; + }, null); + + return ( +
+
+

+ Libraries{" "} + + ({libraries.length}) + +

+ {lastSyncIso && ( +
+ Last sync:{" "} + + {relativeTime(lastSyncIso)} + +
+ )} +
+ + {libraries.length === 0 ? ( + + + No libraries ingested yet. Run a sync to populate. + + + ) : ( +
+ {libraries.map((lib) => ( + + + + {lib.library_name} + + + {lib.vendor && ( +
+ Vendor + {lib.vendor} +
+ )} +
+ Tools + {lib.tool_count} +
+
+ Last synced + + {relativeTime(lib.ingested_at)} + +
+
+
+ + ))} +
+ )} +
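One subtlety above: `lastSyncIso` picks the newest `ingested_at` with plain string comparison. That is sound only because ISO-8601 timestamps in a uniform timezone and precision sort lexicographically in chronological order. Extracted as a sketch:

```ts
// Standalone version of the lastSyncIso reduce. Plain > comparison is safe
// here only because Supabase returns ISO-8601 timestamps in one uniform
// format; mixed timezones or precisions would break the ordering.
interface HasIngestedAt {
  ingested_at: string | null;
}

export function lastSync(libs: HasIngestedAt[]): string | null {
  return libs.reduce<string | null>((acc, lib) => {
    if (!lib.ingested_at) return acc;
    return !acc || lib.ingested_at > acc ? lib.ingested_at : acc;
  }, null);
}
```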
+ ); +} diff --git a/web/src/pages/RecentPage.tsx b/web/src/pages/RecentPage.tsx new file mode 100644 index 0000000..e72993a --- /dev/null +++ b/web/src/pages/RecentPage.tsx @@ -0,0 +1,142 @@ +import { useEffect, useState } from "react"; +import { Link } from "react-router-dom"; +import { supabase } from "@/lib/supabase"; +import type { Tool } from "@/lib/types"; +import { Badge } from "@/components/ui/badge"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; + +const MM_PER_INCH = 25.4; +const STORAGE_KEY_IMPERIAL = "datum-imperial"; + +function readImperialPref(): boolean { + try { + return localStorage.getItem(STORAGE_KEY_IMPERIAL) !== "false"; + } catch { + return true; + } +} + +export function RecentPage() { + const [tools, setTools] = useState([]); + const [loading, setLoading] = useState(true); + const imperial = readImperialPref(); + + useEffect(() => { + async function fetchRecent() { + const { data, error } = await supabase + .from("tools") + .select("*, libraries(library_name, vendor, source_modified_at)") + .order("updated_at", { ascending: false }); + + if (error) { + console.error("Failed to fetch tools:", error); + setLoading(false); + return; + } + + const oneDayAgo = new Date(Date.now() - 24 * 60 * 60 * 1000).toISOString(); + const recent = (data ?? []).filter( + (t) => t.libraries?.source_modified_at && t.libraries.source_modified_at > oneDayAgo + ); + setTools(recent); + setLoading(false); + } + fetchRecent(); + }, []); + + function fmt(val: number | null): string { + if (val == null) return "\u2014"; + const v = imperial ? val / MM_PER_INCH : val; + return imperial ? v.toFixed(4) : v.toFixed(2); + } + + const dimUnit = imperial ? "in" : "mm"; + + if (loading) { + return
Loading...
; + } + + return ( +
+
+ + ← All tools + +
+ +

+ Recently Modified{" "} + + ({tools.length}) + +

+

+ Tools whose Fusion Hub library was modified in the last 24 hours. +

+ + {tools.length === 0 ? ( + + + No tools modified in the last 24 hours. + + + ) : ( +
+ {tools.map((tool) => ( + + + + + {tool.description || tool.product_id || "\u2014"} + + + +
+ {tool.type} + {tool.plex_supply_item_id ? ( + Synced + ) : ( + Local + )} +
+ {tool.vendor && ( +
+ Vendor + {tool.vendor} +
+ )} + {tool.product_id && ( +
+ Part # + {tool.product_id} +
+ )} + {tool.geo_dc != null && ( +
+ Diameter + {fmt(tool.geo_dc)} {dimUnit} +
+ )} + {tool.libraries?.library_name && ( +
+ Library + {tool.libraries.library_name} +
+ )} + {tool.libraries?.source_modified_at && ( +
+ Modified in Fusion + + {new Date(tool.libraries.source_modified_at).toLocaleString()} + +
+ )} +
+
+ + ))} +
+ )} +
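RecentPage fetches every tool and applies the 24-hour window in the browser, which is fine at current row counts. If the table grows, the same window can be pushed down to PostgREST by filtering on the embedded `libraries` row. A hedged sketch, untested against this schema:

```ts
// Sketch only: pushes the 24h window into the query instead of filtering
// client-side. `libraries!inner(...)` makes the embed restrictive, so tools
// whose library falls outside the window are excluded by PostgREST itself.
import type { SupabaseClient } from "@supabase/supabase-js";

export async function fetchRecentTools(supabase: SupabaseClient) {
  const oneDayAgo = new Date(Date.now() - 24 * 60 * 60 * 1000).toISOString();
  const { data, error } = await supabase
    .from("tools")
    .select("*, libraries!inner(library_name, vendor, source_modified_at)")
    .gt("libraries.source_modified_at", oneDayAgo) // filter on the embedded table
    .order("updated_at", { ascending: false });
  if (error) throw error;
  return data ?? [];
}
```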
+ ); +} diff --git a/web/src/pages/ScriptsPage.tsx b/web/src/pages/ScriptsPage.tsx new file mode 100644 index 0000000..f7dc1d7 --- /dev/null +++ b/web/src/pages/ScriptsPage.tsx @@ -0,0 +1,493 @@ +import { useEffect, useState } from "react"; +import { Link } from "react-router-dom"; +import { supabase } from "@/lib/supabase"; +import type { Tool } from "@/lib/types"; +import { Badge } from "@/components/ui/badge"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; + +const MM_PER_INCH = 25.4; + +interface RefMatch { + product_id: string; + vendor: string; + geo_oal: number | null; + description: string | null; + exact_oal: boolean; +} + +interface ToolFix extends Tool { + generatedDescription: string; + refMatches: RefMatch[]; + selectedRef: RefMatch | null; + accepted: boolean; + useVendor: string; + useProductId: string; +} + +// ─── Description generation ──────────────────────────────── + +function generateDescription(tool: Tool): string { + const isInches = tool.unit_original?.toLowerCase() === "inches"; + const unit = isInches ? '"' : "mm"; + const parts: string[] = []; + + if (tool.geo_dc != null) { + const val = isInches ? tool.geo_dc / MM_PER_INCH : tool.geo_dc; + parts.push(`${formatDim(val, isInches)}${unit}`); + } + if (tool.type) parts.push(tool.type); + if (tool.geo_nof != null) parts.push(`${tool.geo_nof}FL`); + if (tool.geo_oal != null) { + const val = isInches ? tool.geo_oal / MM_PER_INCH : tool.geo_oal; + parts.push(`${formatDim(val, isInches)}${unit} OAL`); + } + + return parts.join(" ").toUpperCase(); +} + +function formatDim(val: number, isInches: boolean): string { + if (isInches) { + const frac = toFraction(val); + if (frac) return frac; + return val.toFixed(4); + } + return val.toFixed(2); +} + +function toFraction(val: number): string | null { + const fractions: [number, string][] = [ + [1/64,"1/64"],[1/32,"1/32"],[3/64,"3/64"],[1/16,"1/16"], + [5/64,"5/64"],[3/32,"3/32"],[7/64,"7/64"],[1/8,"1/8"], + [9/64,"9/64"],[5/32,"5/32"],[11/64,"11/64"],[3/16,"3/16"], + [13/64,"13/64"],[7/32,"7/32"],[15/64,"15/64"],[1/4,"1/4"], + [17/64,"17/64"],[9/32,"9/32"],[19/64,"19/64"],[5/16,"5/16"], + [21/64,"21/64"],[11/32,"11/32"],[23/64,"23/64"],[3/8,"3/8"], + [25/64,"25/64"],[13/32,"13/32"],[27/64,"27/64"],[7/16,"7/16"], + [29/64,"29/64"],[15/32,"15/32"],[31/64,"31/64"],[1/2,"1/2"], + [33/64,"33/64"],[17/32,"17/32"],[35/64,"35/64"],[9/16,"9/16"], + [37/64,"37/64"],[19/32,"19/32"],[39/64,"39/64"],[5/8,"5/8"], + [41/64,"41/64"],[21/32,"21/32"],[43/64,"43/64"],[11/16,"11/16"], + [45/64,"45/64"],[23/32,"23/32"],[47/64,"47/64"],[3/4,"3/4"], + [49/64,"49/64"],[25/32,"25/32"],[51/64,"51/64"],[13/16,"13/16"], + [53/64,"53/64"],[27/32,"27/32"],[55/64,"55/64"],[7/8,"7/8"], + [57/64,"57/64"],[29/32,"29/32"],[59/64,"59/64"],[15/16,"15/16"], + [61/64,"61/64"],[31/32,"31/32"],[63/64,"63/64"], + [1,"1"],[1.5,"1-1/2"],[2,"2"],[2.5,"2-1/2"],[3,"3"],[4,"4"], + ]; + for (const [num, str] of fractions) { + if (Math.abs(val - num) < 0.0005) return str; + } + const whole = Math.floor(val); + if (whole >= 1 && val - whole > 0.001) { + const remainder = val - whole; + for (const [num, str] of fractions) { + if (Math.abs(remainder - num) < 0.0005) return `${whole}-${str}`; + } + } + return null; +} + +// ─── Reference catalog lookup ────────────────────────────── + +async function lookupRefs( + tools: Tool[] +): Promise> { + const result = new Map(); + + // Build unique geometry queries + const geometries = new Map(); + for (const t of tools) { + if (t.geo_dc == null || 
+        t.geo_nof == null) continue;
+# +# Libraries: ${[...byLibrary].join(", ")} +# Tools to update: ${accepted.length} +# ───────────────────────────────────────────────────────────── + +import adsk.core +import adsk.cam +import traceback + +UPDATES = { +${entries} +} + +FIELD_MAP = { + "description": "description", + "vendor": "vendor", + "product_id": "product-id", +} + +def run(context): + app = adsk.core.Application.get() + ui = app.userInterface + updated = 0 + + try: + camMgr = adsk.cam.CAMManager.get() + libMgr = camMgr.libraryManager + toolLibs = libMgr.toolLibraries + + for url in toolLibs.urls: + lib = toolLibs.toolLibraryAtURL(url) + lib_changed = False + + for i in range(lib.count): + tool = lib.item(i) + guid_param = tool.parameters.itemByName("guid") + if not guid_param: + continue + guid = guid_param.value.stringValue + if guid not in UPDATES: + continue + + fields = UPDATES[guid] + for key, fusion_name in FIELD_MAP.items(): + val = fields.get(key, "") + if not val: + continue + param = tool.parameters.itemByName(fusion_name) + if param: + param.value.stringValue = val + lib_changed = True + + updated += 1 + + if lib_changed: + toolLibs.replaceToolLibrary(url, lib) + + ui.messageBox( + f"Done! Updated {updated} of {len(UPDATES)} tools.\\n" + f"Let Fusion sync to the cloud, then run a Datum nightly sync.", + "Datum — Tool Update" + ) + + except Exception: + ui.messageBox("Error:\\n" + traceback.format_exc(), "Datum Script Error") +`; +} + +// ─── Component ───────────────────────────────────────────── + +export function ScriptsPage() { + const [tools, setTools] = useState([]); + const [loading, setLoading] = useState(true); + const [copied, setCopied] = useState(false); + + useEffect(() => { + async function fetchData() { + const { data, error } = await supabase + .from("tools") + .select("*, libraries(library_name, vendor, source_modified_at)") + .or("description.is.null,description.eq.") + .order("type") + .order("geo_dc"); + + if (error) { + console.error("Failed to fetch tools:", error); + setLoading(false); + return; + } + + const rawTools = data ?? []; + const refs = await lookupRefs(rawTools); + + const fixes: ToolFix[] = rawTools.map((t) => { + const matches = refs.get(t.id) ?? []; + const best = matches.find((m) => m.exact_oal) ?? matches[0] ?? null; + return { + ...t, + generatedDescription: generateDescription(t), + refMatches: matches, + selectedRef: best, + accepted: true, + useVendor: best?.vendor || "MSC", + useProductId: best?.product_id || "", + }; + }); + + setTools(fixes); + setLoading(false); + } + fetchData(); + }, []); + + function updateTool(id: string, patch: Partial) { + setTools((prev) => prev.map((t) => t.id === id ? { ...t, ...patch } : t)); + } + + function setAllAccepted(accepted: boolean) { + setTools((prev) => prev.map((t) => ({ ...t, accepted }))); + } + + function selectRef(toolId: string, ref: RefMatch | null) { + updateTool(toolId, { + selectedRef: ref, + useVendor: ref?.vendor || "MSC", + useProductId: ref?.product_id || "", + }); + } + + const acceptedCount = tools.filter((t) => t.accepted).length; + const script = buildFusionScript(tools); + + async function copyScript() { + try { + await navigator.clipboard.writeText(script); + setCopied(true); + setTimeout(() => setCopied(false), 2000); + } catch {} + } + + if (loading) { + return
Loading...
; + } + + return ( +
+
+ + ← All tools + +
+ +
+

+ Fix Missing Tool Data{" "} + + ({tools.length} tools, {acceptedCount} accepted) + +

+

+ Description generated from geometry. Vendor and part # suggested from the + reference catalog (82k tools). Review each card, then copy the Fusion script. +

+
+ + {tools.length === 0 ? ( + + + All tools have descriptions. Nothing to fix. + + + ) : ( + <> +
+ + +
+ +
+ {tools.map((tool) => ( + + +
+ {/* Accept checkbox */} + + + {/* Main info */} +
+
+ + {tool.generatedDescription} + + {tool.type} + + {tool.libraries?.library_name} + +
+ + {/* Vendor + Part # fields */} +
+ + +
+ + {/* Reference catalog matches */} + {tool.refMatches.length > 0 && ( +
+ + Reference catalog matches + +
+ {tool.refMatches.map((ref, i) => ( + + ))} + +
+ {tool.refMatches.some((r) => r.exact_oal) && ( + * exact OAL match + )} +
+ )} + {tool.refMatches.length === 0 && ( + + No reference catalog match — defaulting to MSC + + )} +
+
+
+
+ ))} +
+ + {acceptedCount > 0 && ( + + +
+ + Fusion 360 Script ({acceptedCount} tools) + + +
+

+ Utilities → Scripts and Add-Ins → Create a new script → paste → Run +

+
+ +
+                <pre>
+                  {script}
+                </pre>
+
+
+ )} + + )} +
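The trickiest helper on this page is `toFraction`, which snaps decimal inches to shop-style fractions within a ±0.0005 tolerance and returns `null` (so `formatDim` falls back to `.toFixed(4)`) when nothing snaps. Spot-checks of the expected behavior, assuming the helper were exported for testing (in the diff above it is module-private):

```ts
// Assumed export for testing; toFraction is not exported as written.
import { toFraction } from "@/pages/ScriptsPage";

console.assert(toFraction(0.375) === "3/8");     // exact table hit
console.assert(toFraction(1.5) === "1-1/2");     // whole-number entries exist too
console.assert(toFraction(1.0625) === "1-1/16"); // whole + fractional-remainder path
console.assert(toFraction(0.3757) === null);     // off by > 0.0005, falls back to .toFixed(4)
```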
+ ); +} diff --git a/web/src/pages/ToolDetailPage.tsx b/web/src/pages/ToolDetailPage.tsx new file mode 100644 index 0000000..dcc1750 --- /dev/null +++ b/web/src/pages/ToolDetailPage.tsx @@ -0,0 +1,414 @@ +import { useEffect, useState } from "react"; +import { useParams, Link } from "react-router-dom"; +import { supabase } from "@/lib/supabase"; +import type { Tool, CuttingPreset, PlexSupplyItem } from "@/lib/types"; +import { relativeTime } from "@/lib/utils"; +import { Badge } from "@/components/ui/badge"; +import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card"; +import { Separator } from "@/components/ui/separator"; +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from "@/components/ui/table"; + +const MM_PER_INCH = 25.4; +const STORAGE_KEY_IMPERIAL = "datum-imperial"; + +function readImperialPref(): boolean { + try { + return localStorage.getItem(STORAGE_KEY_IMPERIAL) !== "false"; + } catch { + return true; + } +} + +function UnitToggle({ imperial, onToggle }: { imperial: boolean; onToggle: () => void }) { + return ( + + ); +} + +function GeoRow({ + label, + value, + unit, + imperial, +}: { + label: string; + value: number | null; + unit?: string; + imperial?: boolean; +}) { + if (value == null) return null; + + let displayVal = value; + let displayUnit = unit; + if (unit === "mm" && imperial) { + displayVal = value / MM_PER_INCH; + displayUnit = "in"; + } + + return ( +
+ {label} + + {typeof displayVal === "number" ? displayVal.toFixed(imperial && unit === "mm" ? 4 : 3) : displayVal} + {displayUnit && {displayUnit}} + +
+ ); +} + +export function ToolDetailPage() { + const { id } = useParams<{ id: string }>(); + const [tool, setTool] = useState(null); + const [presets, setPresets] = useState([]); + const [staging, setStaging] = useState(null); + const [loading, setLoading] = useState(true); + const [imperial, setImperial] = useState(readImperialPref); + + useEffect(() => { + async function fetch() { + const [toolRes, presetsRes] = await Promise.all([ + supabase + .from("tools") + .select("*, libraries(library_name, vendor)") + .eq("id", id!) + .single(), + supabase + .from("cutting_presets") + .select("*") + .eq("tool_id", id!) + .order("name"), + ]); + + if (toolRes.data) { + setTool(toolRes.data); + // Fetch staging row (separate query — plex_supply_items keys on fusion_guid, not id) + const { data: stagingData } = await supabase + .from("plex_supply_items") + .select("*") + .eq("fusion_guid", toolRes.data.fusion_guid) + .maybeSingle(); + if (stagingData) setStaging(stagingData); + } + if (presetsRes.data) setPresets(presetsRes.data); + setLoading(false); + } + fetch(); + }, [id]); + + if (loading) { + return
Loading...
; + } + + if (!tool) { + return ( +
+

Tool not found.

+ Back to tools +
+ ); + } + + return ( +
+
+ + ← All tools + +
+ +
+
+

+ {tool.description} +

+

+ {tool.vendor} · {tool.product_id} +

+
+
+ { + const next = !imperial; + setImperial(next); + try { localStorage.setItem(STORAGE_KEY_IMPERIAL, String(next)); } catch {} + }} /> + {tool.type} + {tool.plex_supply_item_id ? ( + Synced to Plex + ) : ( + Local only + )} +
+
+ + + + On hand + + + {!tool.plex_supply_item_id ? ( +

+ Not linked to Plex — will populate once writeback sync runs. +

+ ) : !tool.qty_tracked ? ( +

+ Linked to Plex but no adjustment history. +

+ ) : ( +
+ + {tool.qty_on_hand ?? 0} + + pcs + {tool.qty_synced_at && ( + + Synced {relativeTime(tool.qty_synced_at)} + + )} +
+ )} +
+
+ + {staging && ( + + + + Plex Staging Payload + {staging.plex_id ? ( + + Posted {staging.posted_to_plex_at ? new Date(staging.posted_to_plex_at).toLocaleDateString() : ""} + + ) : ( + Not posted + )} + + + +
+ Category + {staging.category} +
+
+ Group + {staging.item_group ?? "\u2014"} +
+
+ Description + {staging.description ?? "\u2014"} +
+
+ Supply Item # + {staging.supply_item_number ?? "\u2014"} +
+
+ Inventory Unit + {staging.inventory_unit} +
+
+ Type + {staging.item_type} +
+ {staging.plex_id && ( + <> + +
+ Plex UUID + {staging.plex_id} +
+ + )} +
+
+ )} + +
+ + + Geometry + + + + + + + + + + + {tool.geo_dc == null && tool.geo_oal == null && ( +

No geometry data available.

+ )} +
+
+ + + + Identity + + +
+ Vendor + {tool.vendor} +
+
+ Part number + {tool.product_id} +
+ {tool.bmc && ( +
+ Material (BMC) + {tool.bmc} +
+ )} + {tool.grade && ( +
+ Grade + {tool.grade} +
+ )} + {tool.product_link && ( +
+ Product link + + View + +
+ )} + {tool.libraries && ( +
+ Library + {tool.libraries.library_name} +
+ )} + +
+ Fusion GUID + {tool.fusion_guid} +
+ {tool.plex_supply_item_id && ( +
+ Plex ID + {tool.plex_supply_item_id} +
+ )} +
+ Last updated + {new Date(tool.updated_at).toLocaleDateString()} +
+
+
+
+ + {tool.pp_number != null && ( + + + Post-Processor + + + + {tool.pp_comment && ( +
+ Comment + {tool.pp_comment} +
+ )} +
+
+ )} + +
+

+ Cutting Presets{" "} + + ({presets.length}) + +

+ {presets.length === 0 ? ( +

+ No cutting presets for this tool. +

+ ) : ( +
+ + + + Name + Material + Vc (m/min) + fz ({imperial ? "in" : "mm"}) + RPM + Vf ({imperial ? "in/min" : "mm/min"}) + Coolant + + + + {presets.map((p) => ( + + {p.name ?? "—"} + {p.material_category ?? "—"} + + {p.v_c?.toFixed(1) ?? "—"} + + + {p.f_z == null + ? "—" + : imperial + ? (p.f_z / MM_PER_INCH).toFixed(5) + : p.f_z.toFixed(4)} + + + {p.n?.toFixed(0) ?? "—"} + + + {p.v_f == null + ? "—" + : imperial + ? (p.v_f / MM_PER_INCH).toFixed(2) + : p.v_f.toFixed(1)} + + + {p.tool_coolant ? ( + {p.tool_coolant} + ) : ( + "—" + )} + + + ))} + +
+
+ )} +
+
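As the inline comment in the fetch above notes, the staging row is keyed on `fusion_guid` rather than the tool's row id, so the page issues a second query. A condensed sketch of the two-step lookup and why it uses `.maybeSingle()`, with the same client assumptions as the earlier sketches:

```ts
// Two-step lookup: the tool by primary key, the staging row by fusion_guid.
// .single() rejects unless exactly one row matches; .maybeSingle() resolves
// with data: null when the tool has not been staged for Plex yet.
import type { SupabaseClient } from "@supabase/supabase-js";

export async function fetchToolWithStaging(supabase: SupabaseClient, id: string) {
  const { data: tool, error } = await supabase
    .from("tools")
    .select("*, libraries(library_name, vendor)")
    .eq("id", id)
    .single();
  if (error) throw error;

  const { data: staging } = await supabase
    .from("plex_supply_items")
    .select("*")
    .eq("fusion_guid", tool.fusion_guid)
    .maybeSingle();

  return { tool, staging }; // staging stays null until the writeback sync posts it
}
```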
+ ); +} diff --git a/web/src/pages/ToolsPage.tsx b/web/src/pages/ToolsPage.tsx new file mode 100644 index 0000000..899457f --- /dev/null +++ b/web/src/pages/ToolsPage.tsx @@ -0,0 +1,541 @@ +import { useEffect, useRef, useState } from "react"; +import { Link, useSearchParams } from "react-router-dom"; +import { supabase } from "@/lib/supabase"; +import type { Tool } from "@/lib/types"; +import { relativeTime } from "@/lib/utils"; +import { Input } from "@/components/ui/input"; +import { Badge } from "@/components/ui/badge"; +import { + Table, + TableBody, + TableCell, + TableHead, + TableHeader, + TableRow, +} from "@/components/ui/table"; + +const MM_PER_INCH = 25.4; +const STORAGE_KEY_IMPERIAL = "datum-imperial"; + +function readImperialPref(): boolean { + try { + return localStorage.getItem(STORAGE_KEY_IMPERIAL) !== "false"; + } catch { + return true; + } +} + +const STORAGE_KEY_INV_FILTER = "datum-inv-filter"; + +type InvStatus = "in_stock" | "out_of_stock" | "not_tracked" | "not_linked"; +const ALL_INV_STATUSES: InvStatus[] = ["in_stock", "out_of_stock", "not_tracked", "not_linked"]; +const INV_LABELS: Record = { + in_stock: "In stock", + out_of_stock: "Out of stock", + not_tracked: "Not tracked", + not_linked: "Not linked", +}; + +function readInvFilter(): Set { + try { + const raw = localStorage.getItem(STORAGE_KEY_INV_FILTER); + if (raw) { + const arr = JSON.parse(raw) as string[]; + const valid = arr.filter((s): s is InvStatus => ALL_INV_STATUSES.includes(s as InvStatus)); + if (valid.length > 0) return new Set(valid); + } + } catch {} + return new Set(); +} + +function getInvStatus(tool: Tool): InvStatus { + if (!tool.plex_supply_item_id) return "not_linked"; + if (!tool.qty_tracked) return "not_tracked"; + return (tool.qty_on_hand ?? 0) > 0 ? "in_stock" : "out_of_stock"; +} + +type SortField = + | "description" + | "product_id" + | "vendor" + | "type" + | "geo_dc" + | "geo_oal" + | "geo_nof" + | "geo_re" + | "qty_on_hand" + | "plex"; +type SortDir = "asc" | "desc"; + +function compare(a: Tool, b: Tool, field: SortField): number { + switch (field) { + case "description": + return (a.description || "").localeCompare(b.description || ""); + case "product_id": + return (a.product_id || "").localeCompare(b.product_id || ""); + case "vendor": + return (a.vendor || "").localeCompare(b.vendor || ""); + case "type": + return (a.type || "").localeCompare(b.type || ""); + case "geo_dc": + case "geo_oal": + case "geo_nof": + case "geo_re": + case "qty_on_hand": { + // NULL sorts last regardless of direction (handled by using -Infinity + // for asc — caller flips sign for desc, so -Inf stays at the end) + const av = a[field] ?? -Infinity; + const bv = b[field] ?? -Infinity; + return av - bv; + } + case "plex": { + const ap = a.plex_supply_item_id ? 1 : 0; + const bp = b.plex_supply_item_id ? 
+        1 : 0;
+ + {open && ( +
+ {selected.size > 0 && ( + + )} + {ALL_INV_STATUSES.map((status) => ( + + ))} +
+ )} +
+ ); +} + +export function ToolsPage() { + const [tools, setTools] = useState([]); + const [search, setSearch] = useState(""); + const [typeFilters, setTypeFilters] = useState>(new Set()); + const [imperial, setImperial] = useState(readImperialPref); + const [loading, setLoading] = useState(true); + const [searchParams] = useSearchParams(); + const [sortField, setSortField] = useState("description"); + const [sortDir, setSortDir] = useState("asc"); + const [recentCount, setRecentCount] = useState(null); + const [invFilters, setInvFilters] = useState>(readInvFilter); + + useEffect(() => { + async function fetchTools() { + const { data, error } = await supabase + .from("tools") + .select("*, libraries(library_name, vendor, source_modified_at)") + .order("vendor") + .order("product_id"); + + if (error) { + console.error("Failed to fetch tools:", error); + } else { + setTools(data ?? []); + + // Count tools whose library was modified in the last 24h (per Fusion Hub) + const oneDayAgo = new Date(Date.now() - 24 * 60 * 60 * 1000).toISOString(); + const recent = (data ?? []).filter( + (t) => t.libraries?.source_modified_at && t.libraries.source_modified_at > oneDayAgo + ); + setRecentCount(recent.length); + } + setLoading(false); + } + fetchTools(); + }, []); + + function toggleImperial() { + const next = !imperial; + setImperial(next); + try { + localStorage.setItem(STORAGE_KEY_IMPERIAL, String(next)); + } catch {} + } + + const libraryParam = searchParams.get("library"); + const toolTypes = [...new Set(tools.map((t) => t.type))].sort(); + + const filtered = tools.filter((t) => { + if (typeFilters.size > 0 && !typeFilters.has(t.type)) return false; + if (invFilters.size > 0 && !invFilters.has(getInvStatus(t))) return false; + if (libraryParam && t.libraries?.library_name !== libraryParam) return false; + if (!search) return true; + const q = search.toLowerCase(); + return ( + t.description.toLowerCase().includes(q) || + t.product_id.toLowerCase().includes(q) || + t.vendor.toLowerCase().includes(q) + ); + }); + + const sorted = [...filtered].sort((a, b) => { + const c = compare(a, b, sortField); + return sortDir === "asc" ? c : -c; + }); + + function handleSort(field: SortField) { + if (sortField === field) { + setSortDir(sortDir === "asc" ? "desc" : "asc"); + } else { + setSortField(field); + setSortDir("asc"); + } + } + + function toggleTypeFilter(type: string) { + setTypeFilters((prev) => { + const next = new Set(prev); + if (next.has(type)) next.delete(type); + else next.add(type); + return next; + }); + } + + function toggleInvFilter(status: InvStatus) { + setInvFilters((prev) => { + const next = new Set(prev); + if (next.has(status)) next.delete(status); + else next.add(status); + try { localStorage.setItem(STORAGE_KEY_INV_FILTER, JSON.stringify([...next])); } catch {} + return next; + }); + } + + function fmt(val: number | null): string { + if (val == null) return "\u2014"; + const v = imperial ? val / MM_PER_INCH : val; + return imperial ? v.toFixed(4) : v.toFixed(2); + } + + const dimUnit = imperial ? "in" : "mm"; + + function SortHeader({ field, children, className }: { field: SortField; children: React.ReactNode; className?: string }) { + const active = sortField === field; + const arrow = active ? (sortDir === "asc" ? " \u25B2" : " \u25BC") : ""; + return ( + handleSort(field)} + > + {children}{arrow} + + ); + } + + if (loading) { + return
Loading tools...
; + } + + return ( +
+ {recentCount != null && recentCount > 0 && ( + +
+ + {recentCount} tool{recentCount !== 1 ? "s" : ""} modified in Fusion Hub in the last 24 hours + + +
+ + )} + +
+

+ Tools{" "} + + ({sorted.length}) + +

+ +
+ +
+ setSearch(e.target.value)} + className="max-w-sm" + /> + setTypeFilters(new Set())} + /> + { setInvFilters(new Set()); try { localStorage.removeItem(STORAGE_KEY_INV_FILTER); } catch {} }} + /> + {typeFilters.size > 0 && ( +
+ {[...typeFilters].map((t) => ( + toggleTypeFilter(t)} + > + {t} × + + ))} +
+ )} + {libraryParam && ( +
+ {libraryParam} + + clear + +
+ )} +
+ +
+ + + + Description + Part # + Vendor + Type + Dia ({dimUnit}) + OAL ({dimUnit}) + Flutes + Corner R ({dimUnit}) + On hand + Plex + + + + {sorted.length === 0 ? ( + + + {tools.length === 0 ? "No tools in database. Run a sync to populate." : "No tools match your search."} + + + ) : ( + sorted.map((tool) => ( + + + + {tool.description || "\u2014"} + + + + {tool.product_id} + + {tool.vendor} + + {tool.type} + + + {fmt(tool.geo_dc)} + + + {fmt(tool.geo_oal)} + + + {tool.geo_nof ?? "\u2014"} + + + {fmt(tool.geo_re)} + + + {!tool.plex_supply_item_id ? ( + + ) : !tool.qty_tracked ? ( + Not tracked + ) : ( + + {tool.qty_on_hand ?? 0} pcs + + )} + + + {tool.plex_supply_item_id ? ( + Synced + ) : ( + Local + )} + + + )) + )} + +
+
+
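One caveat worth flagging in `compare()` above: mapping NULL to `-Infinity` puts missing values first in ascending order, and the sign flip then puts them last in descending order, so the "NULL sorts last regardless of direction" comment only holds for the descending case. If nulls-always-last is the intent, the null check has to happen outside the direction flip. A sketch:

```ts
// Comparator that pins null/undefined last in BOTH directions by deciding
// null placement outside the asc/desc sign flip.
function compareNullsLast(
  a: number | null,
  b: number | null,
  dir: "asc" | "desc"
): number {
  if (a == null && b == null) return 0;
  if (a == null) return 1;  // nulls after everything, regardless of dir
  if (b == null) return -1;
  return dir === "asc" ? a - b : b - a;
}

// [3, null, 1].sort((x, y) => compareNullsLast(x, y, "desc")) → [3, 1, null]
// [3, null, 1].sort((x, y) => compareNullsLast(x, y, "asc"))  → [1, 3, null]
```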
+ ); +} diff --git a/web/tsconfig.app.json b/web/tsconfig.app.json new file mode 100644 index 0000000..0ee124c --- /dev/null +++ b/web/tsconfig.app.json @@ -0,0 +1,30 @@ +{ + "compilerOptions": { + "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo", + "target": "es2023", + "lib": ["ES2023", "DOM", "DOM.Iterable"], + "module": "esnext", + "types": ["vite/client"], + "skipLibCheck": true, + + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "verbatimModuleSyntax": true, + "moduleDetection": "force", + "noEmit": true, + "jsx": "react-jsx", + + /* Linting */ + "noUnusedLocals": true, + "noUnusedParameters": true, + "erasableSyntaxOnly": true, + "noFallthroughCasesInSwitch": true, + "baseUrl": ".", + "paths": { + "@/*": ["./src/*"] + }, + "ignoreDeprecations": "6.0" + }, + "include": ["src"] +} diff --git a/web/tsconfig.json b/web/tsconfig.json new file mode 100644 index 0000000..1ffef60 --- /dev/null +++ b/web/tsconfig.json @@ -0,0 +1,7 @@ +{ + "files": [], + "references": [ + { "path": "./tsconfig.app.json" }, + { "path": "./tsconfig.node.json" } + ] +} diff --git a/web/tsconfig.node.json b/web/tsconfig.node.json new file mode 100644 index 0000000..d3c52ea --- /dev/null +++ b/web/tsconfig.node.json @@ -0,0 +1,24 @@ +{ + "compilerOptions": { + "tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo", + "target": "es2023", + "lib": ["ES2023"], + "module": "esnext", + "types": ["node"], + "skipLibCheck": true, + + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "verbatimModuleSyntax": true, + "moduleDetection": "force", + "noEmit": true, + + /* Linting */ + "noUnusedLocals": true, + "noUnusedParameters": true, + "erasableSyntaxOnly": true, + "noFallthroughCasesInSwitch": true + }, + "include": ["vite.config.ts"] +} diff --git a/web/vite.config.ts b/web/vite.config.ts new file mode 100644 index 0000000..0cca23e --- /dev/null +++ b/web/vite.config.ts @@ -0,0 +1,14 @@ +import { defineConfig } from 'vite' +import react from '@vitejs/plugin-react' +import tailwindcss from '@tailwindcss/vite' +import path from 'path' + +// https://vite.dev/config/ +export default defineConfig({ + plugins: [react(), tailwindcss()], + resolve: { + alias: { + '@': path.resolve(__dirname, './src'), + }, + }, +}) diff --git a/wrangler.jsonc b/wrangler.jsonc new file mode 100644 index 0000000..f7698a8 --- /dev/null +++ b/wrangler.jsonc @@ -0,0 +1,9 @@ +{ + "$schema": "node_modules/wrangler/config-schema.json", + "name": "datum", + "compatibility_date": "2026-04-15", + "assets": { + "directory": "./web/dist", + "not_found_handling": "single-page-application" + } +}
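A closing note on the build config: the `@` alias is deliberately declared twice, in `vite.config.ts` (`resolve.alias`, used at bundle time) and in `tsconfig.app.json` (`baseUrl` + `paths`, used at type-check time), and the two must agree. The `wrangler.jsonc` `"single-page-application"` fallback then serves `index.html` for deep links such as `/tools/:id`, letting React Router resolve the route client-side. With both alias declarations in place, imports like these work anywhere under `web/src`:

```ts
// Both resolvers must agree: Vite maps "@/..." via resolve.alias at bundle
// time, tsc maps it via "paths" at type-check time. If only one is set,
// either the build or the type-check breaks.
import { supabase } from "@/lib/supabase"; // resolves to web/src/lib/supabase.ts
import type { Tool } from "@/lib/types";   // resolves to web/src/lib/types.ts
```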