diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index bf21090..6415165 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,18 +1,17 @@ { - "name": "Trading Runtime Dev", + "name": "TradingChassis Core Runtime Dev", "build": { "dockerfile": "Dockerfile", "context": ".." }, - "postCreateCommand": "./scripts/post-create.sh", + "workspaceFolder": "/workspaces/core-runtime", + "remoteUser": "root", + "runArgs": ["--security-opt=label=disable"], + "containerEnv": { + "SHELL": "/bin/bash" + }, + + "postCreateCommand": "./scripts/post-create.sh" - "customizations": { - "vscode": { - "extensions": [ - "ms-python.python", - "ms-python.debugpy" - ] - } - } -} \ No newline at end of file +} diff --git a/.dockerignore b/.dockerignore index 2a8ced1..c995f3b 100644 --- a/.dockerignore +++ b/.dockerignore @@ -52,6 +52,9 @@ site/ # ============================== # Test cache / reports # ============================== +.ruff_cache/ +.mypy_cache/ +.import_linter_cache/ pytest_cache/ .tox/ .nox/ @@ -59,6 +62,11 @@ pytest_cache/ .coverage* coverage.xml +# ============================== +# Local runtime outputs +# ============================== +.runtime/ + # ============================== # Random OS files # ============================== diff --git a/.env.example b/.env.example index 6fc9aa8..65e653c 100644 --- a/.env.example +++ b/.env.example @@ -1 +1 @@ -TRADING_FRAMEWORK_COMMIT=934d332c21bef56fa76c19f477143d8d438238c2 +TRADINGCHASSIS_CORE_COMMIT=934d332c21bef5... diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index 7428e03..4b5f5ed 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -9,7 +9,7 @@ Clear and concise description of the issue. 
## Environment -- Trading Framework version: +- Core (`tradingchassis-core`) version: - Python version: - Execution mode (local / cloud): - Strategy used: diff --git a/argo/run-backtest.yaml b/.github/argo-launchers/run-backtest.yaml similarity index 69% rename from argo/run-backtest.yaml rename to .github/argo-launchers/run-backtest.yaml index faa4121..ac695dd 100644 --- a/argo/run-backtest.yaml +++ b/.github/argo-launchers/run-backtest.yaml @@ -8,10 +8,10 @@ spec: arguments: parameters: - name: image_repo - value: "ghcr.io/trading-engineering/trading-runtime" + value: "ghcr.io/tradingchassis/core-runtime" - name: image_tag value: "${IMAGE_TAG}" - name: experiment_config - value: "/usr/local/lib/python3.11/site-packages/trading_runtime/argo/argo.json" + value: "/usr/local/lib/python3.11/site-packages/core_runtime/argo/argo.json" - name: scratch_root value: "/mnt/scratch" diff --git a/argo/run-build.yaml b/.github/argo-launchers/run-build.yaml similarity index 87% rename from argo/run-build.yaml rename to .github/argo-launchers/run-build.yaml index 558c9c4..d87312d 100644 --- a/argo/run-build.yaml +++ b/.github/argo-launchers/run-build.yaml @@ -9,5 +9,5 @@ spec: parameters: - name: git_branch value: "${GIT_BRANCH}" - - name: trading_runtime_commit + - name: core_runtime_commit value: "${RUNTIME_COMMIT}" diff --git a/.github/workflows/argo-build-and-backtest.yaml b/.github/workflows/argo-build-and-backtest.yaml index 79eb6a8..57c6bae 100644 --- a/.github/workflows/argo-build-and-backtest.yaml +++ b/.github/workflows/argo-build-and-backtest.yaml @@ -68,8 +68,8 @@ jobs: - name: Apply Argo WorkflowTemplates run: | sudo microk8s kubectl -n "${{ needs.resolve-context.outputs.namespace }}" apply \ - -f argo/workflowtemplate-build-push-ghcr.yaml \ - -f argo/workflowtemplate-backtest-fanout.yaml + -f argo/templates/workflowtemplate-build-push-ghcr.yaml \ + -f argo/templates/workflowtemplate-backtest-fanout.yaml build-and-backtest: runs-on: self-hosted @@ -88,7 +88,7 @@ jobs: export GIT_BRANCH="${{ needs.resolve-context.outputs.branch }}" BUILD_NAME=$( - envsubst < argo/run-build.yaml | \ + envsubst < .github/argo-launchers/run-build.yaml | \ sudo microk8s kubectl -n "${{ needs.resolve-context.outputs.namespace }}" create -f - -o jsonpath='{.metadata.name}' ) @@ -137,7 +137,7 @@ jobs: run: | export IMAGE_TAG="${GITHUB_SHA}" - envsubst < argo/run-backtest.yaml | \ + envsubst < .github/argo-launchers/run-backtest.yaml | \ sudo microk8s kubectl -n "${{ needs.resolve-context.outputs.namespace }}" create -f - echo "Submitted backtest workflow in namespace: ${{ needs.resolve-context.outputs.namespace }}" diff --git a/.gitignore b/.gitignore index ec65e06..bf02f8f 100644 --- a/.gitignore +++ b/.gitignore @@ -101,6 +101,7 @@ site/ local_settings.py db.sqlite3 db.sqlite3-journal +.runtime/ # ============================== # Devcontainer related diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ccab5b..9f5d799 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ This project adheres to Semantic Versioning. ## [0.1.0] โ€“ 2026-02-17 -Initial public release of the trading-runtime execution and orchestration layer. +Initial public release of the Core Runtime execution and orchestration layer. ### Added @@ -20,7 +20,7 @@ Initial public release of the trading-runtime execution and orchestration layer. 
#### Dependency Management -- Commit-pinned `trading-framework` integration +- Commit-pinned `tradingchassis-core` integration - Reproducible dependency compilation via pip-tools - Environment bootstrap scripts diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 678e6a1..cfa8e25 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ Thank you for your interest in contributing! -This repository is a runtime example for the [trading-framework](https://github.com/trading-engineering/trading-framework) framework using +This repository is a Core Runtime example for [Core (`tradingchassis-core`)](https://github.com/TradingChassis/core) using [Kubernetes](https://kubernetes.io) (via e.g. [MicroK8s](https://microk8s.io)) and [Argo Workflows](https://argoproj.github.io/workflows). Contributions should preserve clarity, explicitness and reproducibility. diff --git a/Dockerfile b/Dockerfile index f761b8e..5674585 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,13 +3,13 @@ # ================================================== FROM python:3.11.14-slim-trixie AS build -ARG TRADING_RUNTIME_COMMIT +# ARG TRADING_RUNTIME_COMMIT -ENV TRADING_RUNTIME_COMMIT=${TRADING_RUNTIME_COMMIT} +# ENV TRADING_RUNTIME_COMMIT=${TRADING_RUNTIME_COMMIT} ENV PATH="/install/bin:/install-dev/bin:${PATH}" ENV PYTHONPATH="/install/lib/python3.11/site-packages" -WORKDIR /workspaces/trading-runtime +WORKDIR /workspaces/core-runtime # System dependencies for building Python packages & running tests RUN apt-get update && \ @@ -29,7 +29,7 @@ RUN pip install --upgrade pip \ # Copy project files COPY pyproject.toml . COPY scripts/check.sh . -COPY trading_runtime/ trading_runtime/ +COPY core_runtime/ core_runtime/ COPY tests/ tests/ # Install the package itself diff --git a/README.md b/README.md index f2705b7..fd066af 100644 --- a/README.md +++ b/README.md @@ -1,300 +1,352 @@ -# Trading Runtime +# TradingChassis โ€” Core Runtime -![CI](https://github.com/trading-engineering/trading-runtime/actions/workflows/tests.yaml/badge.svg) +![CI](https://github.com/TradingChassis/core-runtime/actions/workflows/tests.yaml/badge.svg) ![Python](https://img.shields.io/badge/python-3.11+-blue) ![License](https://img.shields.io/badge/license-MIT-green) -Runtime execution layer and orchestration environment for the -[trading-framework](https://github.com/trading-engineering/trading-framework). +Execution and orchestration environment around Core. -This repository provides: - -- Local execution examples -- Reproducible runtime environments -- Dependency pinning -- [Kubernetes](https://kubernetes.io)-native orchestration via [Argo Workflows](https://argoproj.github.io/workflows) -- CI-integrated build pipelines +Core Runtime consumes Core (`tradingchassis_core`) and provides local/cluster entrypoints, +configuration, adapter integration, runtime packaging, and reproducible execution workflows. --- -## ๐Ÿง  What is this? +## Overview + +Core Runtime is the runtime layer for executing Core semantics in concrete environments. -`trading-runtime` is the execution and orchestration layer built on top of `trading-framework`. 
+- local hftbacktest-backed backtest execution
+- runtime entrypoints for orchestration flows
+- reproducible dependency/runtime packaging
+- CI and infrastructure wiring for deployment workflows

-While `trading-framework` implements the deterministic trading framework,
-this repository focuses on:
+---

-- how strategies are executed
-- how environments are reproduced
-- how workloads are orchestrated
-- how results are produced and validated
+## What Core Runtime is

-It intentionally contains no domain framework logic.
+Core Runtime provides:
+
+- executable runtime entrypoints (`core_runtime/...`)
+- runtime configs and environment wiring
+- adapter-facing integration layers around Core
+- orchestration integration (Argo/Kubernetes)
+- runtime validation and smoke/test workflows

---

-## ๐Ÿงฉ Relationship to trading-framework
+## What Core Runtime is not

-```
-trading-framework โ†’ core framework, backtesting engine, domain logic
-trading-runtime โ†’ executing entrypoints, runtime configs, orchestration
-```
+Core Runtime is not the semantic source of truth for Core concepts.

-The framework is consumed as a pinned Git dependency to guarantee
-deterministic runtime environments.
+It consumes Core and should not redefine canonical terms such as Event, Event Stream, Processing
+Order, State, or Risk Engine.

---

-## ๐Ÿ“ Repository Structure
+## Current local hftbacktest usability status

-```
-.github/workflows/ CI pipelines (tests, Argo template deploy)
-argo/ Argo workflow templates
-scripts/ environment & build helper scripts
-trading_runtime/ Python runtime entrypoints
-tests/ deterministic test data & validation
+The current local smoke test is runnable from the `core-runtime` repository root:
+
+```bash
+python -m core_runtime.local.backtest --config core_runtime/local/local.json
```

-### Key runtime modules
+Default output location:

-```
-trading_runtime/local/ Local execution mode
-trading_runtime/argo/ Argo workflow entrypoints
-trading_runtime/strategies/ Example strategies
+```text
+.runtime/local/results/
```

----
+This confirms current local usability and does not claim full canonical Event Stream completion.

-## ๐Ÿ“Œ Dependency Pinning & Reproducibility
+---

-The `trading-framework` dependency is pinned by commit SHA.
+## Quick start

-Create a `.env` file:
+From the `core-runtime` repository root:

```bash
-TRADING_FRAMEWORK_COMMIT=
+python -m pip install -e .
+python -m core_runtime.local.backtest --config core_runtime/local/local.json
```

-Generate reproducible environments:
+If `tradingchassis_core` is not already resolvable in your environment, install `core` as a
+sibling editable package in a monorepo workspace:

```bash
-./scripts/compile-requirements.sh
+python -m pip install -e ../core
```

-This produces:
-
-- `requirements.txt`
-- `requirements-dev.txt`
+---

-These files are used by:
+## Entrypoint matrix

-- Dev Containers
-- Docker images
+| Mode | Entrypoint | Command shape | Notes |
+| --- | --- | --- | --- |
+| Local backtest | `core_runtime/local/backtest.py` | `python -m core_runtime.local.backtest --config core_runtime/local/local.json` | Main local runner. |
+| Argo plan/run orchestration | `core_runtime/backtest/runtime/entrypoint.py` | `python -m core_runtime.backtest.runtime.entrypoint --config core_runtime/argo/argo.json --plan` | Planner and sweep-context emitter for Argo flow. |
+| Sweep worker | `core_runtime/backtest/runtime/run_sweep.py` | `python -m core_runtime.backtest.runtime.run_sweep --context <sweep-context-path>` | Executes one sweep context. |
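For quick experiments, the local entrypoint from the matrix above can also be driven from inside Python rather than the shell. This is a convenience sketch only, equivalent to the documented CLI invocation; it is not an additional supported API surface.

```python
import runpy
import sys

# Mirrors: python -m core_runtime.local.backtest --config core_runtime/local/local.json
# Run from the core-runtime repository root so cwd-relative paths resolve.
sys.argv = [
    "core_runtime.local.backtest",
    "--config",
    "core_runtime/local/local.json",
]
runpy.run_module("core_runtime.local.backtest", run_name="__main__")
```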
---

-## โ–ถ๏ธ Local Execution
+## Adapter capability model

-Run a deterministic local backtest:
+| Capability area | Status | Notes |
+| --- | --- | --- |
+| Canonical runtime paths | Active | `MarketEvent`, `OrderSubmittedEvent`, `ControlTimeEvent` |
-```bash
-python trading_runtime/local/backtest.py \
- --config trading_runtime/local/local.json
-```
+| Compatibility paths | Active | Post-submission order/fill progression via snapshots, `OrderStateEvent`, and `DerivedFillEvent` |
+| Deferred capabilities | Deferred | Runtime `FillEvent` ingress, `ExecutionFeedbackRecordSource`, replay/storage/Event Stream persistence, `ProcessingContext` |

-This uses synthetic deterministic test data located in:
+---

-```
-tests/data/parts/
-```
+## Current hftbacktest capability map

-Results are written to:
+- Local hftbacktest flow is usable for current transitional runtime paths.
+- Compatibility mechanisms remain in place for post-submission progression.
+- Deferred capabilities are intentionally not presented as shipped runtime behavior.

-```
-tests/data/results/
-```
+---
+
+## Canonical runtime paths
+
+- `MarketEvent`
+- `OrderSubmittedEvent`
+- `ControlTimeEvent`

---

-## โš™๏ธ Infrastructure Requirements
+## Compatibility paths
+
+- snapshot-based post-submission progression
+- `OrderStateEvent`
+- `DerivedFillEvent`

-The Argo-based workflows require:
+---

-- A self-hosted GitHub Actions runner
-- microk8s Kubernetes distribution (with sudo access)
-- Argo Workflows installed in the cluster
-- GitHub Container Registry access (GHCR_TOKEN secret)
+## Deferred capabilities

-GitHub-hosted runners are only used for unit tests.
-All Kubernetes orchestration runs on self-hosted infrastructure.
+- runtime `FillEvent` ingress
+- `ExecutionFeedbackRecordSource`
+- replay/storage/Event Stream persistence
+- `ProcessingContext`

---

-## โ˜ธ Kubernetes & Argo Workflows
+## Package and import names

-This runtime is designed for Kubernetes-native execution using Argo Workflows.
+- Human-facing concept name: Core Runtime
+- Distribution/project name: `tradingchassis-core-runtime`
+- Python import package: `core_runtime`
+- Core distribution/project name: `tradingchassis-core`
+- Core Python import package: `tradingchassis_core`

-Two core workflow templates define the execution pipeline:
+---

-```
-argo/workflowtemplate-build-push-ghcr.yaml
-argo/workflowtemplate-backtest.yaml
-```
+## Repository structure

-### ๐Ÿณ Runtime Image Build & Push
+```text
+.github/workflows/ CI and deployment workflows
+.github/argo-launchers/ Argo Workflow submit wrappers used by GitHub Actions
+argo/templates/ Argo WorkflowTemplates shown in Argo UI
+core_runtime/ Runtime entrypoints and execution modules
+docs/ Runtime implementation notes
+scripts/ Build/validation helper scripts
+tests/ Runtime tests and deterministic fixtures
+```

-`workflowtemplate-build-push-ghcr.yaml` builds the trading-runtime Docker image and pushes it to
-GitHub Container Registry (GHCR).
+---

-This image contains:
+## Configuration

-- Python dependencies and entrypoints
-- trading-framework and trading-runtime commit SHA
-- strategies and configs
+Primary local config:

-It acts as an immutable and deterministic runtime environment for all backtests.
+- `core_runtime/local/local.json`
+- OCI config template (for local object storage auth setups): `core_runtime/local/oci.config.example`

-### โ–ถ๏ธ Backtest Orchestration
+Note: local JSON configs use cwd-relative paths for `tests/data/...` inputs and `.runtime/...`
+outputs. The supported default workflow is to run commands from the `core-runtime` repo root.
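To make the cwd-relative convention concrete, the sketch below resolves config-referenced paths against the working directory; the `resolve_config_path` helper is illustrative only and not part of `core_runtime`.

```python
import json
from pathlib import Path


def resolve_config_path(path_str: str) -> Path:
    # tests/data/... inputs and .runtime/... outputs in local JSON configs
    # resolve against the current working directory, so this is expected to
    # run from the core-runtime repository root.
    return (Path.cwd() / path_str).resolve()


cfg = json.loads(Path("core_runtime/local/local.json").read_text(encoding="utf-8"))
print(resolve_config_path(".runtime/local/results/"))
```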
---

## Development setup

-`workflowtemplate-backtest.yaml` orchestrates backtest workloads using Argo.
+### Standalone `core-runtime` root

-It:
+```bash
+python -m pip install -e .
+python -m pytest -q tests
+./scripts/check.sh
+```

-- pulls the runtime image from GHCR
-- executes runtime entrypoints inside Kubernetes pods
-- distributes workloads across the cluster
-- saves deterministic result artifacts
+### Monorepo workspace root (with `core/` and `core-runtime/`)

-All backtests always run inside the runtime image.
+```bash
+python -m pip install -e core
+python -m pip install -e core-runtime
+python -m pytest -q core-runtime/tests
+python -m pytest -q core/tests
```

-### ๐Ÿ”„ End-to-End Flow
+
+---
+
+## Test commands
+
+From `core-runtime` root:
+
+```bash
+python -m pytest -q tests
+./scripts/check.sh
```
-Docker build โ†’ Push to GHCR โ†’ Argo pulls image โ†’ Backtests execute in cluster

-This guarantees:
+From monorepo root:

-- identical runtime environments locally and in Kubernetes
-- reproducible research runs
+```bash
+python -m pytest -q core-runtime/tests
+python -m pytest -q core/tests
+```

---

-## ๐Ÿ” GHCR Registry Access
+## Relationship to Core

-To allow Kubernetes to pull runtime images from GitHub Container Registry (GHCR),
-the deployment workflow creates a `docker-registry` secret inside the target Kubernetes namespace.
+Core provides deterministic semantics and domain contracts.

-The secret is created by the GitHub Actions workflow located at:
+Core Runtime provides execution environments and orchestration around those semantics.

-```
-.github/workflows/deploy_argo_template.yaml
+---
+
+## Dependency pinning and reproducibility
+
+The Core dependency can be pinned by commit SHA through environment configuration:
+
+```bash
+TRADINGCHASSIS_CORE_COMMIT=<commit-sha>
```

-It runs the equivalent of:
+To compile reproducible requirements:

```bash
-sudo microk8s kubectl -n $K8S_NAMESPACE create secret docker-registry ghcr-secret \
- --docker-server=ghcr.io \
- --docker-username=git \
- --docker-password=$GHCR_TOKEN \
- --dry-run=client -o yaml | sudo microk8s kubectl apply -f -
+./scripts/compile-requirements.sh
```

-### Required Repository Secret
+Artifacts:

-The workflow requires a GitHub repository secret named:
+- `requirements.txt`
+- `requirements-dev.txt`

-```
-GHCR_TOKEN
-```
+---

-This token must be a GitHub Personal Access Token with:
+## Infrastructure notes

-* `read:packages`
+Argo WorkflowTemplates (visible in Argo UI) are defined in:

-Add it under:
+- `argo/templates/workflowtemplate-build-push-ghcr.yaml`
+- `argo/templates/workflowtemplate-backtest-fanout.yaml`

-```
-Repository โ†’ Settings โ†’ Secrets and variables โ†’ Actions
-```
+GitHub-only Argo submit wrappers are in:

-Without this secret, the workflow cannot authenticate against GHCR, and Kubernetes will fail to pull the runtime image.
+- `.github/argo-launchers/run-build.yaml`
---
+- `.github/argo-launchers/run-backtest.yaml`

-## ๐Ÿ›  Scripts
+Automation that applies templates and starts workflows is in:

-| Script | Purpose |
-| ------------------------- | ----------------------------------------------- |
-| `compile-requirements.sh` | Pins trading-framework and resolves dependencies |
-| `post-create.sh` | Dev container bootstrap |
-| `check.sh` | Local validation helpers |
+- `.github/workflows/argo-build-and-backtest.yaml`

---
+### Argo UI usage

-## ๐Ÿงช Test Data
+Use this model to avoid confusion:

-Synthetic datasets are provided in:
+- `argo/templates/*`: reusable `WorkflowTemplate` definitions that appear in the Argo UI.
+- `.github/argo-launchers/*`: one-off `Workflow` manifests used by GitHub Actions with `envsubst`.

-```
-tests/data/parts/
-```
+Namespace intent:

-Result artifacts:
+- `dev`: branch and development runs.
+- `prod`: main branch and production-like runs.

-```
-tests/data/results/
-```
+#### Build image from Argo UI (`build-push-ghcr`)

-Helper generation scripts:
+Template: `build-push-ghcr`

-```
-tests/data/scripts/
-```
+Recommended parameters:

-These guarantee reproducible runtime validation.
+- `git_repo`: keep default `https://github.com/TradingChassis/core-runtime.git`.
+- `image_repo`: keep default `ghcr.io/tradingchassis/core-runtime`.
+- `git_branch`: set the branch name for tagging (default `main`).
+- `core_runtime_commit`: set to a real commit SHA (required).

---
+Guardrails:

-## ๐Ÿงช CI & Automation
+- `core_runtime_commit` must be a 7-40 character hex SHA.
+- `git_repo` must be an HTTPS URL ending in `.git`.

-GitHub Actions workflows:
+Tagging behavior:

-- `tests.yaml` โ€” runtime validation
-- `deploy_argo_template.yaml` โ€” Argo template deployment
+- always pushes `:<commit-sha>`
+- always pushes `:<branch>`
+- also pushes `:latest` when `git_branch=main`

-Supports both GitHub-hosted and self-hosted runners respectively.
+#### Run backtest from Argo UI (`backtest-fanout`)

---
+Template: `backtest-fanout`

-## ๐ŸŽฏ Design Principles
+Recommended parameters:
+
+- `image_repo`: keep default.
+- `image_tag`: set to the exact commit SHA built by `build-push-ghcr` for reproducibility.
+- `experiment_config`: keep default unless intentionally testing a different in-image config.
+- `scratch_root`: keep default `/mnt/scratch`.

-- Determinism over convenience
-- Reproducible environments
-- Explicit execution entrypoints
-- Infrastructure separated from domain logic
-- Cloud-native orchestration
+Guardrails:

+- prefer commit SHA tags for `prod` runs.
+- use mutable tags such as `latest` only for quick smoke checks.

+### Backtest storage vs MLflow tracking

+Core Runtime and MLflow serve different purposes in cluster runs:

+- Backtest output artifacts are written by Core Runtime directly to OCI Object Storage.
+- MLflow is used for tracking metadata only (params, metrics, tags), not for artifact files.

+Backtest artifact storage path:

+- bucket: `data`
+- prefix: `backtests/<experiment-id>/...`
+- auth mode: OCI Instance Principals (IAM policy controlled)

+Code anchors:

+- backtest result download/upload pipeline: `core_runtime/backtest/runtime/run_sweep.py`
+- OCI Object Storage adapter + auth behavior: `core_runtime/backtest/io/s3_adapter.py`
+- MLflow tracking logger (no artifact logging): `core_runtime/backtest/runtime/mlflow_segment_logger.py`

+Tracking-only policy:

+- MLflow run metadata remains in the backend store.
+- MLflow artifact storage is intentionally unsupported in this setup.
+- If a client starts calling artifact APIs (for example `mlflow.log_artifact(...)`), treat failures as expected until artifact storage is intentionally added.
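As a concrete illustration of this split, a worker process would log run metadata roughly as follows. The tracking URI, experiment name, and values here are illustrative assumptions, not the actual code in `mlflow_segment_logger.py`.

```python
import mlflow

mlflow.set_tracking_uri("http://mlflow.internal:5000")  # assumed endpoint
mlflow.set_experiment("backtests")

with mlflow.start_run(run_name="sweep-segment"):
    # Params, metrics, and tags go to the MLflow backend store...
    mlflow.log_params({"instrument": "BTC_USDC-PERPETUAL", "spread": 5.0})
    mlflow.log_metrics({"num_trades": 123.0})
    mlflow.set_tags({"image_tag": "<commit-sha>"})
    # ...while result files go directly to OCI Object Storage
    # (bucket `data`, prefix `backtests/<experiment-id>/...`),
    # never through mlflow.log_artifact(...).
```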
---

-## ๐Ÿ“Œ Scope
+## Scripts

-This repository includes:
+| Script | Purpose |
+| --- | --- |
+| `compile-requirements.sh` | Resolves dependencies and pins Core revision inputs |
+| `post-create.sh` | Dev container bootstrap |
+| `check.sh` | Local validation helpers |

-- runtime execution logic
-- environment orchestration
-- CI pipelines
-- container workflows
+---

-It does not include:
+## Documentation index

-- trading framework internals
-- specific strategy research logic
+- Runtime adapter design: `docs/venue-adapter-abstraction-design-v1.md`
+- Shared terminology source of truth: `docs/docs/00-guides/terminology.md`
+- Core library scope: `core/README.md`

---

-## ๐Ÿท๏ธ Versioning
+## License and versioning

-This project follows the MIT license and semantic versioning.
-Initial public release: `v0.1.0`
+MIT licensed. Versioning follows semantic versioning.
diff --git a/argo/workflowtemplate-backtest-fanout.yaml b/argo/templates/workflowtemplate-backtest-fanout.yaml
similarity index 95%
rename from argo/workflowtemplate-backtest-fanout.yaml
rename to argo/templates/workflowtemplate-backtest-fanout.yaml
index f2a140d..a6b16d7 100644
--- a/argo/workflowtemplate-backtest-fanout.yaml
+++ b/argo/templates/workflowtemplate-backtest-fanout.yaml
@@ -16,7 +16,7 @@ spec:
     parameters:
       - name: image_repo
         description: "Container image repo"
-        value: ghcr.io/trading-engineering/trading-runtime
+        value: ghcr.io/tradingchassis/core-runtime

       - name: image_tag
         description: "Container image tag to run (recommended: commit SHA)"
@@ -24,7 +24,7 @@

       - name: experiment_config
         description: "Path to experiment JSON inside the container"
-        value: /usr/local/lib/python3.11/site-packages/trading_runtime/argo/argo.json
+        value: /usr/local/lib/python3.11/site-packages/core_runtime/argo/argo.json

       - name: scratch_root
         description: "Scratch root inside the container"
@@ -104,7 +104,7 @@

       command: [python, -m]
       args:
-        - trading_framework.backtest.runtime.entrypoint
+        - core_runtime.backtest.runtime.entrypoint
         - --config
         - "{{workflow.parameters.experiment_config}}"
         - --run
@@ -156,7 +156,7 @@
       command: [python, -m]
       args:
-        - trading_framework.backtest.runtime.run_sweep
+        - core_runtime.backtest.runtime.run_sweep
         - --context
         - "{{inputs.parameters.sweep-path}}"
         - --scratch-root
@@ -204,7 +204,7 @@
       command: [python, -m]
       args:
-        - trading_framework.backtest.runtime.segment_finalize_entrypoint
+        - core_runtime.backtest.runtime.segment_finalize_entrypoint
         - "--experiment-id"
         - "{{inputs.parameters.experiment-id}}"
@@ -268,7 +268,7 @@
       command: [python, -m]
       args:
-        - trading_framework.backtest.runtime.experiment_finalize_entrypoint
+        - core_runtime.backtest.runtime.experiment_finalize_entrypoint
         - "--experiment-id"
         - "{{inputs.parameters.experiment-id}}"
diff --git a/argo/workflowtemplate-build-push-ghcr.yaml b/argo/templates/workflowtemplate-build-push-ghcr.yaml
similarity index 64%
rename from argo/workflowtemplate-build-push-ghcr.yaml
rename to argo/templates/workflowtemplate-build-push-ghcr.yaml
index a28e1dd..8c613b4 100644
--- a/argo/workflowtemplate-build-push-ghcr.yaml
+++ b/argo/templates/workflowtemplate-build-push-ghcr.yaml
@@ -6,15 +6,19 @@ spec:
   entrypoint: build
   arguments:
     parameters:
+      - name: git_repo
+        description: "Git repo used as Kaniko build context"
+        value: https://github.com/TradingChassis/core-runtime.git
+
      - name: 
image_repo - value: ghcr.io/trading-engineering/trading-runtime + value: ghcr.io/tradingchassis/core-runtime - name: git_branch description: "Branch name (for tagging only)" value: main - - name: trading_runtime_commit - description: "Exact commit SHA of trading-runtime" + - name: core_runtime_commit + description: "Exact commit SHA of core-runtime" value: "" templates: @@ -26,15 +30,28 @@ spec: set -euo pipefail IMAGE_REPO="{{workflow.parameters.image_repo}}" + GIT_REPO="{{workflow.parameters.git_repo}}" GIT_BRANCH="{{workflow.parameters.git_branch}}" - RUNTIME_COMMIT="{{workflow.parameters.trading_runtime_commit}}" + RUNTIME_COMMIT="{{workflow.parameters.core_runtime_commit}}" if [ -z "$RUNTIME_COMMIT" ]; then - echo "> trading_runtime_commit must be set" + echo "> core_runtime_commit must be set" exit 1 fi - GIT_REPO="github.com/${IMAGE_REPO#ghcr.io/}" + if ! echo "$RUNTIME_COMMIT" | grep -Eq '^[0-9a-fA-F]{7,40}$'; then + echo "> core_runtime_commit must be a 7-40 character hex SHA" + exit 1 + fi + + case "$GIT_REPO" in + https://*".git") + ;; + *) + echo "> git_repo must be an HTTPS URL ending with .git" + exit 1 + ;; + esac SAFE_BRANCH=$(echo "$GIT_BRANCH" | tr '/' '-' | tr '[:upper:]' '[:lower:]') @@ -49,8 +66,10 @@ spec: DESTINATIONS="$DESTINATIONS --destination=$TAG" done + export GIT_PULL_METHOD=https + /kaniko/executor \ - --context=git://$GIT_REPO.git#$RUNTIME_COMMIT \ + --context="git://${GIT_REPO#https://}#$RUNTIME_COMMIT" \ --dockerfile=Dockerfile \ --target=runtime \ $DESTINATIONS \ diff --git a/trading_runtime/__init__.py b/core_runtime/__init__.py similarity index 100% rename from trading_runtime/__init__.py rename to core_runtime/__init__.py diff --git a/trading_runtime/argo/argo.json b/core_runtime/argo/argo.json similarity index 87% rename from trading_runtime/argo/argo.json rename to core_runtime/argo/argo.json index a59f091..98c3030 100644 --- a/trading_runtime/argo/argo.json +++ b/core_runtime/argo/argo.json @@ -74,13 +74,26 @@ }, "strategy": { - "class_path": "trading_runtime.strategies.debug_strategy:DebugStrategyV1", + "class_path": "core_runtime.strategies.debug_strategy:DebugStrategyV1", "spread": 5.0, "order_qty": 0.1, "use_price_tick_levels": 3, "post_only": true }, + "core": { + "version": "v1", + "market": { + "instruments": { + "BTC_USDC-PERPETUAL": { + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1 + } + } + } + }, + "experiment": { "start_ts_ns": 1636035200000000000, "end_ts_ns": 1836121600000000000, diff --git a/trading_runtime/local/__init__.py b/core_runtime/backtest/adapters/__init__.py similarity index 100% rename from trading_runtime/local/__init__.py rename to core_runtime/backtest/adapters/__init__.py diff --git a/core_runtime/backtest/adapters/execution.py b/core_runtime/backtest/adapters/execution.py new file mode 100644 index 0000000..fadd7d6 --- /dev/null +++ b/core_runtime/backtest/adapters/execution.py @@ -0,0 +1,163 @@ +"""Execution adapter for hftbacktest backtests.""" + +from __future__ import annotations + +import hashlib +from dataclasses import dataclass +from typing import TYPE_CHECKING, Protocol + +if TYPE_CHECKING: + from hftbacktest import ROIVectorMarketDepthBacktest + from tradingchassis_core.core.domain.types import OrderIntent + +from tradingchassis_core.core.domain.reject_reasons import RejectReason + + +class ExecutionAdapter(Protocol): + """Venue-facing execution boundary. + + Strategy, state, and risk layers must not depend on venue-specific + APIs. Only this adapter is allowed to call into the venue engine. 
+ """ + + def apply_intents( + self, intents: list[OrderIntent] + ) -> list[tuple[OrderIntent, str]]: + """Send a batch of intents to the venue. + + Returns: + List of (intent, reason) pairs for venue-side failures. + """ + + +def _to_i64_order_id(external_id: str) -> int: + """Convert an external string order ID into a signed 64-bit integer.""" + sanitized = external_id.strip() + if sanitized.isdigit(): + value = int(sanitized) + else: + digest = hashlib.blake2b( + sanitized.encode("utf-8"), digest_size=8 + ).digest() + value = int.from_bytes(digest, byteorder="big", signed=False) + return value & ((1 << 63) - 1) + + +@dataclass(frozen=True) +class HftBacktestExecutionAdapter(ExecutionAdapter): + """Execution adapter for hftbacktest.""" + + hbt: ROIVectorMarketDepthBacktest + asset_no: int + + def apply_intents( + self, intents: list[OrderIntent] + ) -> list[tuple[OrderIntent, str]]: + """Apply a batch of order intents to the backtest venue.""" + # pylint: disable=too-many-locals,too-many-branches + + # hftbacktest enums (kept local to the adapter) + gtc = 0 + gtx = 1 # post-only + fok = 2 + ioc = 3 + + limit = 0 + market = 1 + + tif_map = { + "GTC": gtc, + "IOC": ioc, + "FOK": fok, + "POST_ONLY": gtx, + } + order_type_map = {"limit": limit, "market": market} + + execution_errors: list[tuple[OrderIntent, str]] = [] + + for intent in intents: + if intent.intent_type == "new": + order_id = _to_i64_order_id(intent.client_order_id) + tif = tif_map[intent.time_in_force] + order_type = order_type_map[intent.order_type] + quantity = intent.intended_qty.value + price = ( + intent.intended_price.value + if intent.intended_price is not None + else 0.0 + ) + + try: + if intent.side == "buy": + result_code = self.hbt.submit_buy_order( + self.asset_no, + order_id, + price, + quantity, + tif, + order_type, + False, + ) + else: + result_code = self.hbt.submit_sell_order( + self.asset_no, + order_id, + price, + quantity, + tif, + order_type, + False, + ) + except Exception: # pylint: disable=broad-exception-caught + execution_errors.append( + (intent, RejectReason.EXCHANGE_ERROR) + ) + continue + + if result_code != 0: + execution_errors.append( + (intent, RejectReason.EXCHANGE_REJECT) + ) + + elif intent.intent_type == "cancel": + order_id = _to_i64_order_id(intent.client_order_id) + try: + result_code = self.hbt.cancel( + self.asset_no, order_id, False + ) + except Exception: # pylint: disable=broad-exception-caught + execution_errors.append( + (intent, RejectReason.EXCHANGE_ERROR) + ) + continue + + if result_code != 0: + execution_errors.append( + (intent, RejectReason.EXCHANGE_REJECT) + ) + + elif intent.intent_type == "replace": + order_id = _to_i64_order_id(intent.client_order_id) + new_price = intent.intended_price.value + new_quantity = intent.intended_qty.value + + try: + result_code = self.hbt.modify( + self.asset_no, + order_id, + new_price, + new_quantity, + False, + ) + except Exception: # pylint: disable=broad-exception-caught + execution_errors.append( + (intent, RejectReason.EXCHANGE_ERROR) + ) + continue + + if result_code != 0: + execution_errors.append( + (intent, RejectReason.EXCHANGE_REJECT) + ) + + return execution_errors diff --git a/core_runtime/backtest/adapters/protocols.py b/core_runtime/backtest/adapters/protocols.py new file mode 100644 index 0000000..9a8dee3 --- /dev/null +++ b/core_runtime/backtest/adapters/protocols.py @@ -0,0 +1,97 @@ +"""Typing-only adapter capability protocols for backtest venue sources. 
+ +This module introduces low-risk capability seams as ``typing.Protocol`` classes. +It is intentionally implementation-light: + +- no runtime behavior or orchestration changes; +- no runtime ``isinstance`` checks; +- no required explicit inheritance for concrete adapters; +- structural compatibility is sufficient. + +Current ``HftBacktestVenueAdapter`` already structurally conforms to all +protocols defined here. + +Notes on capability scope: + +- ``OrderSubmissionGateway`` is included as an outbound command-submission + typing seam only. +- ``ExecutionFeedbackRecordSource`` remains excluded in this slice because + execution-feedback capability is deferred and gated by existing + runtime/source contracts. +""" + +from __future__ import annotations + +from typing import Any, Protocol + +from tradingchassis_core.core.domain.types import OrderIntent + + +class VenueEventWaiter(Protocol): + """Wakeup capability for runtime loop progression. + + This is a typing seam only. It does not alter wait semantics, call order, + timeout computation, or rc-branch interpretation. + """ + + def wait_next(self, *, timeout_ns: int, include_order_resp: bool) -> int: + """Block until next wakeup and return venue-defined rc code.""" + + +class VenueClock(Protocol): + """Timestamp-read capability for runtime adoption.""" + + def current_timestamp_ns(self) -> int: + """Return current venue-local timestamp in nanoseconds.""" + + +class MarketInputSource(Protocol): + """Market snapshot read capability for canonical market mapping.""" + + def read_market_snapshot(self) -> Any: + """Return venue-specific market snapshot object.""" + + +class OrderSnapshotSource(Protocol): + """Order snapshot capability for compatibility materialization paths. + + The current compatibility boundary consumes a combined tuple from one call. + A future split may separate this source surface. + """ + + def read_orders_snapshot(self) -> tuple[Any, Any]: + """Return (state_values, orders) from current snapshot boundary.""" + + +class AccountSnapshotSource(Protocol): + """Account snapshot capability (currently shared tuple-return surface). + + This intentionally shares ``read_orders_snapshot`` with + ``OrderSnapshotSource`` in the current runtime shape. + """ + + def read_orders_snapshot(self) -> tuple[Any, Any]: + """Return (state_values, orders) from current snapshot boundary.""" + + +class OrderSubmissionGateway(Protocol): + """Outbound order command submission capability. + + This protocol is strictly about dispatching outbound order commands and + reporting dispatch failures. It is not an execution-feedback source. + + Successful outbound submission may allow runtime to produce + ``OrderSubmittedEvent`` for ``new`` intents under existing runner semantics. + Failure rows represent command rejection/dispatch errors only, where + reason values are string constants from the ``RejectReason`` namespace. + + This protocol does not imply canonical execution-feedback authority, + ``FillEvent`` ingress, or post-submission lifecycle migration. + ``ExecutionFeedbackRecordSource`` remains a separate deferred capability. 
+    """

+    def apply_intents(
+        self, intents: list[OrderIntent]
+    ) -> list[tuple[OrderIntent, str]]:
+        """Submit intents and return per-intent dispatch failures."""
+
diff --git a/core_runtime/backtest/adapters/venue.py b/core_runtime/backtest/adapters/venue.py
new file mode 100644
index 0000000..33e1997
--- /dev/null
+++ b/core_runtime/backtest/adapters/venue.py
@@ -0,0 +1,49 @@
+"""Venue adapter implementation for hftbacktest backtests."""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+    from hftbacktest import ROIVectorMarketDepthBacktest
+
+from tradingchassis_core.core.ports.venue_adapter import VenueAdapter
+
+
+@dataclass(frozen=True)
+class HftBacktestVenueAdapter(VenueAdapter):
+    """VenueAdapter implementation for hftbacktest.
+
+    This adapter is the only place where the strategy loop is allowed to depend
+    on hftbacktest APIs.
+    """
+
+    hbt: ROIVectorMarketDepthBacktest
+    asset_no: int
+
+    def wait_next(self, *, timeout_ns: int, include_order_resp: bool) -> int:
+        """Wait for the next venue event and return its type."""
+        # hftbacktest backends are frequently Numba jitclass objects.
+        # Those methods often do not support keyword arguments.
+        return self.hbt.wait_next_feed(include_order_resp, timeout_ns)
+
+    def current_timestamp_ns(self) -> int:
+        """Return the current venue timestamp in nanoseconds."""
+        return self.hbt.current_timestamp
+
+    def read_market_snapshot(self) -> Any:
+        """Return the current market depth snapshot."""
+        return self.hbt.depth(self.asset_no)
+
+    def read_orders_snapshot(self) -> tuple[Any, Any]:
+        """Return the current orders and state snapshot."""
+        return (
+            self.hbt.state_values(self.asset_no),
+            self.hbt.orders(self.asset_no),
+        )
+
+    def record(self, recorder: Any) -> None:
+        """Record the current backtest state using the given recorder."""
+        # hftbacktest recorder is a thin wrapper exposing .recorder.record(hbt).
+        recorder.recorder.record(self.hbt)
diff --git a/trading_runtime/strategies/__init__.py b/core_runtime/backtest/engine/__init__.py
similarity index 100%
rename from trading_runtime/strategies/__init__.py
rename to core_runtime/backtest/engine/__init__.py
diff --git a/core_runtime/backtest/engine/engine_base.py b/core_runtime/backtest/engine/engine_base.py
new file mode 100644
index 0000000..babed91
--- /dev/null
+++ b/core_runtime/backtest/engine/engine_base.py
@@ -0,0 +1,41 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Any
+
+
+@dataclass
+class BacktestConfig:
+    """Generic backtest configuration.
+
+    Engine configs should subclass this
+    and add engine-specific fields.
+    """
+    id: str
+    description: str
+
+
+@dataclass
+class BacktestResult:
+    """Lightweight container for backtest outputs.
+
+    For now we only track the stats file path.
+    Can be extended with PnL curves, summary metrics, etc.
+    """
+    id: str
+    stats_file: str | None = None
+    extra_metadata: dict[str, Any] | None = None
+
+
+class BacktestEngine:
+    """Abstract base class for all backtest engines."""
+
+    def __init__(self, config: BacktestConfig) -> None:
+        self.config = config
+
+    def run(self) -> BacktestResult:
+        """Run the backtest and return a result object.
+
+        Subclass engines must implement this method.
+        """
+        raise NotImplementedError("run() must be implemented by subclasses")
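To make the subclassing contract of `engine_base.py` concrete, here is a minimal sketch of a custom engine; the `NoopBacktestEngine` name and behavior are illustrative only and not part of this repository.

```python
from core_runtime.backtest.engine.engine_base import (
    BacktestConfig,
    BacktestEngine,
    BacktestResult,
)


class NoopBacktestEngine(BacktestEngine):
    """Toy engine that runs nothing and returns an empty result."""

    def run(self) -> BacktestResult:
        # A real engine (see HftBacktestEngine below) builds a venue,
        # drives the strategy loop, and persists stats before returning.
        return BacktestResult(id=self.config.id, stats_file=None)


result = NoopBacktestEngine(
    BacktestConfig(id="demo", description="no-op smoke engine")
).run()
assert result.stats_file is None
```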
+ """ + raise NotImplementedError("run() must be implemented by subclasses") diff --git a/core_runtime/backtest/engine/event_stream_cursor.py b/core_runtime/backtest/engine/event_stream_cursor.py new file mode 100644 index 0000000..a7f3b8e --- /dev/null +++ b/core_runtime/backtest/engine/event_stream_cursor.py @@ -0,0 +1,29 @@ +"""Runtime-owned canonical processing position cursor.""" + +from __future__ import annotations + +from tradingchassis_core.core.domain.processing_order import ProcessingPosition + + +class EventStreamCursor: + """Ordering-only helper for canonical ProcessingPosition allocation.""" + + def __init__(self, *, start_index: int = 0) -> None: + if start_index < 0: + raise ValueError("start_index must be >= 0") + self._next_index = start_index + + @property + def next_index(self) -> int: + return self._next_index + + def attempt_position(self) -> ProcessingPosition: + return ProcessingPosition(index=self._next_index) + + def commit_success(self, position: ProcessingPosition) -> None: + if position.index != self._next_index: + raise ValueError( + "Committed position does not match expected next index: " + f"expected={self._next_index} actual={position.index}" + ) + self._next_index += 1 diff --git a/core_runtime/backtest/engine/hft_engine.py b/core_runtime/backtest/engine/hft_engine.py new file mode 100644 index 0000000..62cfd67 --- /dev/null +++ b/core_runtime/backtest/engine/hft_engine.py @@ -0,0 +1,187 @@ +"""HFT backtest engine implementation based on hftbacktest.""" + +from __future__ import annotations + +import importlib +from dataclasses import dataclass +from typing import TYPE_CHECKING + +from hftbacktest import ( + BacktestAsset, + Recorder, + ROIVectorMarketDepthBacktest, +) + +if TYPE_CHECKING: + from tradingchassis_core.core.domain.configuration import CoreConfiguration + from tradingchassis_core.core.risk.risk_config import RiskConfig + +from tradingchassis_core.strategies.base import Strategy +from tradingchassis_core.strategies.strategy_config import StrategyConfig + +from core_runtime.backtest.adapters.execution import HftBacktestExecutionAdapter +from core_runtime.backtest.adapters.venue import HftBacktestVenueAdapter +from core_runtime.backtest.engine.engine_base import ( + BacktestConfig, + BacktestEngine, + BacktestResult, +) +from core_runtime.backtest.engine.strategy_runner import HftStrategyRunner + + +# pylint: disable=too-many-instance-attributes +@dataclass +class HftEngineConfig: + """Configuration for the HFT backtest engine.""" + + # Data wiring + initial_snapshot: str | None + data_files: list[str] + + # Contract / microstructure parameters + instrument: str + tick_size: float + lot_size: float + contract_size: float + + # Simple fee model: maker / taker in rate on trading value + maker_fee_rate: float + taker_fee_rate: float + + # Latency model (constant latency) + entry_latency_ns: int + response_latency_ns: int + + # Queue model / venue model toggles + use_risk_adverse_queue_model: bool + partial_fill_venue: bool + + # Strategy loop timing + max_steps: int + + last_trades_capacity: int + max_price_tick_levels: int + + roi_lb: int + roi_ub: int + + # Output + stats_npz_path: str + event_bus_path: str + + +@dataclass +class HftBacktestConfig(BacktestConfig): + """Backtest configuration for the HFT engine.""" + + engine_cfg: HftEngineConfig + strategy_cfg: StrategyConfig + risk_cfg: RiskConfig + # Boundary-prepared config for canonical core processing adoption. 
+ core_cfg: CoreConfiguration + + +def _build_backtester(engine_cfg: HftEngineConfig) -> ROIVectorMarketDepthBacktest: + """Create an ROIVectorMarketDepthBacktest from the engine configuration.""" + asset = BacktestAsset() + + # For now we assume file paths. Later this can be replaced with an S3 resolver. + asset = asset.data(engine_cfg.data_files) + + if engine_cfg.initial_snapshot is not None: + asset = asset.initial_snapshot(engine_cfg.initial_snapshot) + + asset = ( + asset + .linear_asset(engine_cfg.contract_size) + .constant_latency(engine_cfg.entry_latency_ns, engine_cfg.response_latency_ns) + .tick_size(engine_cfg.tick_size) + .lot_size(engine_cfg.lot_size) + .trading_value_fee_model(engine_cfg.maker_fee_rate, engine_cfg.taker_fee_rate) + .last_trades_capacity(engine_cfg.last_trades_capacity) + .roi_lb(engine_cfg.roi_lb) + .roi_ub(engine_cfg.roi_ub) + ) + + if engine_cfg.use_risk_adverse_queue_model: + asset = asset.risk_adverse_queue_model() + + if engine_cfg.partial_fill_venue: + asset = asset.partial_fill_exchange() + else: + asset = asset.no_partial_fill_exchange() + + return ROIVectorMarketDepthBacktest([asset]) + + +class HftBacktestEngine(BacktestEngine): + """Backtest engine that uses hftbacktest internally.""" + + def __init__(self, config: HftBacktestConfig) -> None: + # pylint: disable=useless-super-delegation + super().__init__(config) + + def _load_strategy_class(self, class_path: str) -> type[Strategy]: + """Dynamically load a Strategy class from a module path.""" + module_path, class_name = class_path.split(":") + module = importlib.import_module(module_path) + cls = getattr(module, class_name) + + if not issubclass(cls, Strategy): + raise TypeError( + f"Loaded class {class_name} is not a subclass of Strategy." + ) + + return cls + + def _build_strategy(self, strategy_cfg: StrategyConfig) -> Strategy: + """Instantiate the strategy specified in the configuration.""" + cls = self._load_strategy_class(strategy_cfg.class_path) + return cls(**strategy_cfg.to_engine_params()) + + def run(self) -> BacktestResult: + """Run the backtest and return the aggregated result.""" + cfg: HftBacktestConfig = self.config + engine_cfg: HftEngineConfig = cfg.engine_cfg + strategy_cfg: StrategyConfig = cfg.strategy_cfg + risk_cfg: RiskConfig = cfg.risk_cfg + + # 1) Build hftbacktest backtester from engine config + hbt = _build_backtester(engine_cfg) + + # 2) Prepare recorder (single asset, record every step) + recorder = Recorder(1, engine_cfg.max_steps) + + # 3) Build strategy and runner + strategy = self._build_strategy(strategy_cfg) + runner = HftStrategyRunner( + engine_cfg=engine_cfg, + strategy=strategy, + risk_cfg=risk_cfg, + core_cfg=cfg.core_cfg, + ) + + # 4) Backtest-only venue and execution adapters + asset_no = 0 + venue = HftBacktestVenueAdapter(hbt=hbt, asset_no=asset_no) + execution = HftBacktestExecutionAdapter(hbt=hbt, asset_no=asset_no) + + # 5) Run strategy loop (venue-agnostic) + runner.run(venue, execution, recorder) + + # 6) Close backtester and persist statistics + _ = hbt.close() + recorder.to_npz(engine_cfg.stats_npz_path) + + return BacktestResult( + id=cfg.id, + stats_file=engine_cfg.stats_npz_path, + extra_metadata={ + "engine": "hftbacktest", + "instrument": engine_cfg.instrument, + "strategy_name": strategy_cfg.class_path, + "strategy_params": strategy_cfg.params, + "risk_scope": risk_cfg.scope, + "risk_params": risk_cfg.params, + }, + ) diff --git a/core_runtime/backtest/engine/strategy_runner.py b/core_runtime/backtest/engine/strategy_runner.py new file 
mode 100644 index 0000000..cd02717 --- /dev/null +++ b/core_runtime/backtest/engine/strategy_runner.py @@ -0,0 +1,403 @@ +"""Strategy execution loop for HFT backtests.""" + +from __future__ import annotations + +import logging +from collections import deque +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from tradingchassis_core.core.domain.configuration import CoreConfiguration +from tradingchassis_core.core.domain.processing import process_event_entry +from tradingchassis_core.core.domain.processing_order import ( + EventStreamEntry, +) +from tradingchassis_core.core.domain.state import StrategyState +from tradingchassis_core.core.domain.types import ( + BookLevel, + BookPayload, + ControlTimeEvent, + MarketEvent, + NewOrderIntent, + OrderIntent, + OrderSubmittedEvent, + Price, + Quantity, +) +from tradingchassis_core.core.events.event_bus import EventBus +from tradingchassis_core.core.events.sinks.sink_logging import LoggingEventSink +from tradingchassis_core.core.ports.venue_adapter import VenueAdapter +from tradingchassis_core.core.risk.risk_config import RiskConfig +from tradingchassis_core.core.risk.risk_engine import RejectedIntent, RiskEngine + +from core_runtime.backtest.adapters.protocols import OrderSubmissionGateway +from core_runtime.backtest.engine.event_stream_cursor import EventStreamCursor +from core_runtime.core.events.sinks.file_recorder import FileRecorderSink + +if TYPE_CHECKING: + from tradingchassis_core.strategies.base import Strategy + + from core_runtime.backtest.engine.hft_engine import HftEngineConfig + + +MAX_TIMEOUT_NS = 1 << 62 # Effectively "wait forever" without a heartbeat + + +class HftStrategyRunner: + """Strategy runner for HFT backtests. + + Invariant: + - One wait_next() wakeup corresponds to one fully committed timestamp block. + - Strategy is evaluated at most once per wakeup on a stable state. 
+ """ + # pylint: disable=too-many-instance-attributes + + def __init__( + self, + *, + engine_cfg: HftEngineConfig, + strategy: Strategy, + risk_cfg: RiskConfig, + core_cfg: CoreConfiguration, + ) -> None: + self.engine_cfg = engine_cfg + self.strategy = strategy + self._core_cfg = core_cfg + + event_bus = self._build_event_bus( + path=Path(engine_cfg.event_bus_path), + ) + + self.strategy_state = StrategyState( + event_bus=event_bus, + ) + + self.risk = RiskEngine( + risk_cfg=risk_cfg, + event_bus=event_bus, + ) + + self._next_send_ts_ns_local: int | None = None + self._event_stream_cursor = EventStreamCursor() + self._last_injected_control_deadline_ns: int | None = None + + def _process_canonical_event(self, event: object) -> None: + position = self._event_stream_cursor.attempt_position() + entry = EventStreamEntry( + position=position, + event=event, + ) + process_event_entry( + self.strategy_state, + entry, + configuration=self._core_cfg, + ) + self._event_stream_cursor.commit_success(position) + + def _build_event_bus( + self, + *, + path: Path, + ) -> EventBus: + logger = logging.getLogger("bus") + + sinks = [ + LoggingEventSink(logger), + FileRecorderSink(path), + ] + + return EventBus(sinks=sinks) + + def _close_event_bus(self) -> None: + self.strategy_state._event_bus.close() + self.risk._event_bus.close() + + def _compute_timeout_ns(self, now_local_ns: int) -> int: + """Compute wait timeout in nanoseconds.""" + if self._next_send_ts_ns_local is None: + return MAX_TIMEOUT_NS + delta = self._next_send_ts_ns_local - now_local_ns + return 0 if delta <= 0 else delta + + def _sort_intents_for_gate(self, intents: list[OrderIntent]) -> list[OrderIntent]: + """Sort intents to ensure cancels are evaluated first.""" + + def intent_priority(intent: OrderIntent) -> int: + if intent.intent_type == "cancel": + return 0 + if intent.intent_type == "replace": + return 1 + if intent.intent_type == "new": + return 2 + return 9 + + return sorted(intents, key=lambda it: (intent_priority(it), it.ts_ns_local)) + + def _process_canonical_market_event(self, market_event: MarketEvent) -> None: + self._process_canonical_event(market_event) + + def _process_canonical_order_submitted_event( + self, + intent: NewOrderIntent, + *, + ts_ns_local_dispatch: int, + ) -> None: + order_submitted_event = OrderSubmittedEvent( + ts_ns_local_dispatch=ts_ns_local_dispatch, + instrument=intent.instrument, + client_order_id=intent.client_order_id, + side=intent.side, + order_type=intent.order_type, + intended_price=intent.intended_price, + intended_qty=intent.intended_qty, + time_in_force=intent.time_in_force, + intent_correlation_id=intent.intents_correlation_id, + dispatch_attempt_id=None, + runtime_correlation=None, + ) + self._process_canonical_event(order_submitted_event) + + def _process_canonical_control_time_event( + self, + *, + sim_now_ns: int, + scheduled_deadline_ns: int, + ) -> None: + control_time_event = ControlTimeEvent( + ts_ns_local_control=sim_now_ns, + reason="scheduled_control_recheck", + due_ts_ns_local=scheduled_deadline_ns, + realized_ts_ns_local=sim_now_ns, + obligation_reason="rate_limit", + obligation_due_ts_ns_local=scheduled_deadline_ns, + runtime_correlation=None, + ) + self._process_canonical_event(control_time_event) + + def run( + self, + venue: VenueAdapter, + execution: OrderSubmissionGateway, + recorder: Any, + ) -> None: + """Run the backtest loop.""" + # pylint: disable=too-many-locals,too-many-branches,too-many-statements + + instrument = self.engine_cfg.instrument + # Initialize 
hftbacktest engine + # Fetch very first event block to set local timestamp + venue.wait_next(timeout_ns=MAX_TIMEOUT_NS, include_order_resp=False) + observed_local_ns = venue.current_timestamp_ns() + self.strategy_state.update_timestamp(observed_local_ns) + sim_now_ns = self.strategy_state.sim_ts_ns_local + + while True: + timeout_ns = self._compute_timeout_ns(self.strategy_state.sim_ts_ns_local) + rc = venue.wait_next(timeout_ns=timeout_ns, include_order_resp=True) + + if rc == 1: + self._close_event_bus() + break + + observed_local_ns = venue.current_timestamp_ns() + self.strategy_state.update_timestamp(observed_local_ns) + sim_now_ns = self.strategy_state.sim_ts_ns_local + + raw_intents: list[OrderIntent] = [] + + # ----------------------------------------------------------------- + # Market update + # ----------------------------------------------------------------- + if rc == 2: + depth = venue.read_market_snapshot() + + bids: list[BookLevel] = [] + asks: list[BookLevel] = [] + + max_levels = max(0, int(self.engine_cfg.max_price_tick_levels)) + if max_levels > 0: + roi_lb_tick = depth.roi_lb_tick + tick_size = depth.tick_size + + # ----------------------- + # ASK side (fixed ticks) + # ----------------------- + for offset in range(max_levels): + price_tick = depth.best_ask_tick + offset + i = price_tick - roi_lb_tick + + qty = 0.0 + if 0 <= i < len(depth.ask_depth): + qty = depth.ask_depth[i] + + asks.append( + BookLevel( + price=Price( + currency="UNKNOWN", + value=price_tick * tick_size, + ), + quantity=Quantity( + value=qty, + unit="contracts", + ), + ) + ) + + # ----------------------- + # BID side (fixed ticks) + # ----------------------- + for offset in range(max_levels): + price_tick = depth.best_bid_tick - offset + i = price_tick - roi_lb_tick + + qty = 0.0 + if 0 <= i < len(depth.bid_depth): + qty = depth.bid_depth[i] + + bids.append( + BookLevel( + price=Price( + currency="UNKNOWN", + value=price_tick * tick_size, + ), + quantity=Quantity( + value=qty, + unit="contracts", + ), + ) + ) + + market_event = MarketEvent( + ts_ns_exch=sim_now_ns, + ts_ns_local=sim_now_ns, + instrument=instrument, + event_type="book", + book=BookPayload( + book_type="snapshot", + bids=bids, + asks=asks, + depth=min(len(bids), len(asks)), + ), + ) + + self._process_canonical_market_event(market_event) + + constraints = self.risk.build_constraints(sim_now_ns) + raw_intents.extend( + self.strategy.on_feed( + self.strategy_state, + market_event, + self.engine_cfg, + constraints, + ) + ) + + # ----------------------------------------------------------------- + # Order / account update + # ----------------------------------------------------------------- + if rc == 3: + state_values, orders = venue.read_orders_snapshot() + + self.strategy_state.update_account( + instrument=instrument, + position=state_values.position, + balance=state_values.balance, + fee=state_values.fee, + trading_volume=state_values.trading_volume, + trading_value=state_values.trading_value, + num_trades=state_values.num_trades, + ) + self.strategy_state.ingest_order_snapshots( + instrument, + orders.values(), + ) + + constraints = self.risk.build_constraints(sim_now_ns) + raw_intents.extend( + self.strategy.on_order_update( + self.strategy_state, + self.engine_cfg, + constraints, + ) + ) + + # ----------------------------------------------------------------- + # Queue flush + # ----------------------------------------------------------------- + scheduled_deadline_ns: int | None = None + if ( + self._next_send_ts_ns_local is not None + 
and sim_now_ns >= self._next_send_ts_ns_local + ): + scheduled_deadline_ns = self._next_send_ts_ns_local + raw_intents.extend( + self.strategy_state.pop_queued_intents(instrument) + ) + if ( + scheduled_deadline_ns + != self._last_injected_control_deadline_ns + ): + self._process_canonical_control_time_event( + sim_now_ns=sim_now_ns, + scheduled_deadline_ns=scheduled_deadline_ns, + ) + self._last_injected_control_deadline_ns = scheduled_deadline_ns + + # ----------------------------------------------------------------- + # Gate + execution + # ----------------------------------------------------------------- + if raw_intents: + combined = self._sort_intents_for_gate(raw_intents) + + decision = self.risk.decide_intents( + raw_intents=combined, + state=self.strategy_state, + now_ts_ns_local=sim_now_ns, + ) + + execution_errors: list[tuple[OrderIntent, str]] = [] + if decision.accepted_now: + execution_errors = execution.apply_intents( + decision.accepted_now + ) + + failed_keys = { + (it.instrument, it.client_order_id) + for it, _ in execution_errors + } + + for it in decision.accepted_now: + if (it.instrument, it.client_order_id) in failed_keys: + continue + if it.intent_type == "new": + self._process_canonical_order_submitted_event( + it, + ts_ns_local_dispatch=sim_now_ns, + ) + self.strategy_state.mark_intent_sent( + it.instrument, + it.client_order_id, + it.intent_type, + ) + + if execution_errors: + for it, reason in execution_errors: + decision.execution_rejected.append( + RejectedIntent(it, reason) + ) + + self.strategy.on_risk_decision(decision) + self._next_send_ts_ns_local = decision.next_send_ts_ns_local + + # If there are queued intents but the gate did not provide a next_send_ts_ns_local, + # wake up at the next second boundary to ensure progress. + if self._next_send_ts_ns_local is None: + queue = self.strategy_state.queued_intents.setdefault( + instrument, + deque(), + ) + if queue: + sec = sim_now_ns // 1_000_000_000 + self._next_send_ts_ns_local = (sec + 1) * 1_000_000_000 + + venue.record(recorder) diff --git a/core_runtime/backtest/io/__init__.py b/core_runtime/backtest/io/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core_runtime/backtest/io/s3_adapter.py b/core_runtime/backtest/io/s3_adapter.py new file mode 100644 index 0000000..4177eaa --- /dev/null +++ b/core_runtime/backtest/io/s3_adapter.py @@ -0,0 +1,290 @@ +from __future__ import annotations + +import io +from pathlib import Path + +from oci.auth.signers import InstancePrincipalsSecurityTokenSigner +from oci.config import from_file +from oci.object_storage import ObjectStorageClient +from oci.signer import Signer + + +class OCIObjectStorageS3Shim: + """ + Lightweight adapter that exposes a small, S3-like interface on top of + Oracle Cloud Infrastructure (OCI) Object Storage. + + The goal of this class is *API shape compatibility*, not feature parity: + it mimics a minimal subset of the boto3 S3 client that is sufficient for + simple readers/writers and data pipelines. + + Authentication modes: + - "instance_principal": + Uses the OCI Instance Principal of the current Compute instance. + Suitable only when running on OCI infrastructure. + - "api_key": + Uses a user-scoped OCI API key (private PEM key + config file). + Suitable for local development, CI, and non-OCI environments. 
+ + Implemented operations: + - put_object: upload an object (write) + - list_objects: list objects under a bucket/prefix (read) + - get_object: download an object (read) + + Design notes: + - Method signatures and return shapes are intentionally boto3-like. + - This adapter talks directly to OCI Object Storage APIs, NOT to the + S3-compatibility HTTP endpoint. + - Authorization is fully governed by OCI IAM policies. + """ + def __init__( + self, + *, + region: str | None = None, + auth_mode: str = "instance_principal", + oci_config_file: str | None = None, + oci_profile: str = "DEFAULT", + ) -> None: + """ + Create a new Object Storage client wrapper. + + Parameters: + region: + OCI region identifier (e.g. "eu-frankfurt-1"). + If provided, it overrides the region in the OCI config file. + + auth_mode: + Authentication strategy to use: + - "instance_principal": use the instances identity (OCI-only) + - "api_key": use a user API key defined in an OCI config file + + oci_config_file: + Path to an OCI CLI-style config file (required for api_key auth). + Typically "~/.oci/config". + + oci_profile: + Profile name inside the OCI config file to load credentials from. + """ + if auth_mode == "instance_principal": + signer = InstancePrincipalsSecurityTokenSigner() + config = {} + + elif auth_mode == "api_key": + if oci_config_file is None: + raise ValueError("oci_config_file is required for api_key auth") + + config = from_file( + file_location=oci_config_file, + profile_name=oci_profile, + ) + signer = Signer( + tenancy=config["tenancy"], + user=config["user"], + fingerprint=config["fingerprint"], + private_key_file_location=config["key_file"], + pass_phrase=config.get("pass_phrase"), + ) + + else: + raise ValueError(f"Unknown auth_mode: {auth_mode}") + + client_kwargs = {} + if region: + client_kwargs["region"] = region + + self.client = ObjectStorageClient( + config=config, + signer=signer, + **client_kwargs, + ) + + self.namespace = self.client.get_namespace().data + + def put_object(self, bucket: str, key: str, body, content_type: str = "application/octet-stream"): + """ + Upload an object to an OCI Object Storage bucket. + + Parameters mirror boto3 semantics: + Bucket: bucket name + Key: object name (path-like) + Body: bytes or file-like object + ContentType: optional MIME type + + Returns a minimal boto3-like dict containing the object's ETag + (if provided by OCI). + """ + resp = self.client.put_object( + namespace_name=self.namespace, + bucket_name=bucket, + object_name=key, + put_object_body=body, + content_type=content_type, + ) + etag = None + try: + etag = resp.headers.get("etag") + except Exception: + pass + return {"ETag": etag} + + def list_objects( + self, + bucket: str, + prefix: str | None = None, + continuation_token: str | None = None, + max_keys: int = 1000, + ) -> dict[str, object]: + """ + List objects in a bucket, optionally filtered by prefix. 
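+
+        Pagination sketch (assumes a `shim` constructed as in the class
+        docstring and an existing bucket):
+
+            token = None
+            while True:
+                page = shim.list_objects(
+                    "data",
+                    prefix="canonical/",
+                    continuation_token=token,
+                )
+                for obj in page["Contents"]:
+                    print(obj["Key"], obj["Size"])
+                if not page["IsTruncated"]:
+                    break
+                token = page["NextContinuationToken"]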
+ + This method approximates boto3's list_objects behavior: + - 'Prefix' filters object names + - pagination is exposed via ContinuationToken / NextContinuationToken + + Internally, this maps to OCI's 'list_objects' API, using: + - 'prefix' for filtering + - 'start' for pagination + + Returns: + A dict with keys: + - Contents: list of {"Key", "Size"} + - IsTruncated: whether more results are available + - NextContinuationToken: token for the next page (or None) + """ + kwargs = { + "namespace_name": self.namespace, + "bucket_name": bucket, + "limit": max_keys, + } + if prefix: + kwargs["prefix"] = prefix + if continuation_token: + kwargs["start"] = continuation_token + + resp = self.client.list_objects(**kwargs) + objects = [] + for o in resp.data.objects or []: + objects.append({"Key": o.name, "Size": getattr(o, "size", None)}) + + next_token = getattr(resp.data, "next_start_with", None) + return { + "Contents": objects, + "IsTruncated": bool(next_token), + "NextContinuationToken": next_token, + } + + def get_object(self, bucket: str, key: str) -> dict[str, object]: + """ + Download an object from OCI Object Storage. + + Returns a boto3-like response where: + - 'Body' is a file-like object (io.BytesIO) + - 'ContentLength' and 'ContentType' are best-effort metadata + + The OCI Python SDK exposes response bodies in different shapes + depending on transport and SDK version; this method normalizes + them into a single bytes buffer. + """ + resp = self.client.get_object( + namespace_name=self.namespace, + bucket_name=bucket, + object_name=key, + ) + + data_bytes = None + d = resp.data + + # Case 1: direct .read() + if hasattr(d, "read") and callable(getattr(d, "read")): + data_bytes = d.read() + + # Case 2: .content (bytes already) + elif hasattr(d, "content"): + data_bytes = d.content + + # Case 3: raw.read() + elif hasattr(d, "raw") and hasattr(d.raw, "read") and callable(getattr(d.raw, "read")): + data_bytes = d.raw.read() + + # Case 4: stream chunks (fallback) + elif hasattr(d, "raw") and hasattr(d.raw, "stream") and callable(getattr(d.raw, "stream")): + chunks = [] + for chunk in d.raw.stream(1024 * 1024, decode_content=False): + chunks.append(chunk) + data_bytes = b"".join(chunks) + + else: + raise TypeError("Unsupported OCI get_object response type; no readable data attribute found.") + + # Content-Length/Type (best effort) + content_length = None + try: + content_length = int(resp.headers.get("content-length", "0")) + except Exception: + pass + if not content_length and data_bytes is not None: + content_length = len(data_bytes) + + content_type = "" + try: + content_type = resp.headers.get("content-type", "") + except Exception: + pass + + return { + "Body": io.BytesIO(data_bytes if data_bytes is not None else b""), + "ContentLength": content_length or 0, + "ContentType": content_type, + } + + def download_to_file( + self, + bucket: str, + key: str, + destination: str | Path, + *, + chunk_size_bytes: int = 8 * 1024 * 1024, + ) -> None: + """ + Stream an object from OCI Object Storage directly to a local file. + + This method performs a chunked download over HTTPS and writes each + chunk incrementally to disk. The entire object is never loaded into + memory at once, ensuring constant and predictable RAM usage. + + Parameters: + bucket: + Name of the OCI Object Storage bucket. + key: + Object name (path-like key) within the bucket. + destination: + Local filesystem path where the object will be written. + Parent directories must already exist. 
+ chunk_size_bytes: + Size of each streamed chunk in bytes. Defaults to 8 MiB. + + Raises: + RuntimeError: + If the OCI response does not expose a streamable body. + """ + destination_path = Path(destination) + + response = self.client.get_object( + namespace_name=self.namespace, + bucket_name=bucket, + object_name=key, + ) + + data = response.data + + if not hasattr(data, "raw") or not hasattr(data.raw, "stream"): + raise RuntimeError( + "OCI get_object response does not expose a streamable body." + ) + + with destination_path.open("wb") as file_handle: + for chunk in data.raw.stream( + chunk_size_bytes, + decode_content=False, + ): + file_handle.write(chunk) diff --git a/core_runtime/backtest/orchestrator/__init__.py b/core_runtime/backtest/orchestrator/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core_runtime/backtest/orchestrator/manifest.py b/core_runtime/backtest/orchestrator/manifest.py new file mode 100644 index 0000000..9aa8a8a --- /dev/null +++ b/core_runtime/backtest/orchestrator/manifest.py @@ -0,0 +1,46 @@ +""" +Dataset manifest definitions. + +This module defines metadata structures and protocols used to describe +datasets and their underlying data files. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Protocol + + +@dataclass(frozen=True, slots=True) +class DataFileMeta: + """ + Immutable metadata describing a single data file. + """ + + file_id: str + object_key: str + start_ts_ns: int + end_ts_ns: int + size_bytes: int + symbol: str + venue: str + datatype: str + + +class DatasetManifest(Protocol): + """ + Protocol describing a dataset manifest interface. + """ + + def iter_files( + self, + *, + start_ts_ns: int, + end_ts_ns: int, + symbol: str, + venue: str, + datatype: str, + ) -> list[DataFileMeta]: + """ + Iterate over data files matching the given constraints. + """ diff --git a/core_runtime/backtest/orchestrator/planner.py b/core_runtime/backtest/orchestrator/planner.py new file mode 100644 index 0000000..53288eb --- /dev/null +++ b/core_runtime/backtest/orchestrator/planner.py @@ -0,0 +1,151 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from core_runtime.backtest.orchestrator.manifest import DataFileMeta, DatasetManifest + +from core_runtime.backtest.orchestrator.planner_models import ( + ExperimentPlan, + SegmentPlan, +) +from core_runtime.backtest.orchestrator.segmenter import segment_files +from core_runtime.backtest.orchestrator.sweeps import ( + expand_parameter_grid, + expand_ranges, +) + + +def plan_experiment( + *, + experiment_id: str, + start_ts_ns: int, + end_ts_ns: int, + symbol: str, + venue: str, + datatype: str, + sweep_spec: dict[str, Any], + manifest: DatasetManifest, + max_segment_bytes: int, +) -> ExperimentPlan: + """ + Build a deterministic execution plan for an experiment. + + This function performs *planning only*. + It does not access S3 directly, does not allocate scratch space, + and does not execute any backtests. + + Responsibilities: + - resolve relevant data files via the manifest + - segment data according to scratch size limits + - expand parameter sweeps + - produce a pure ExperimentPlan + + Parameters + ---------- + experiment_id: + Stable identifier for the experiment. + + start_ts_ns / end_ts_ns: + Experiment time range (unix timestamp, nanoseconds). + + symbol: + Instrument included in the experiment. + + sweep_spec: + User-facing sweep specification. 
May contain: + - explicit values + - iterables + - RangeSpec instances + + manifest: + Dataset manifest used to resolve physical data files. + + max_segment_bytes: + Maximum total size (bytes) allowed per segment. + + Returns + ------- + ExperimentPlan + Fully expanded execution plan. + """ + + if start_ts_ns >= end_ts_ns: + raise ValueError("start_ts_ns must be < end_ts_ns") + + if max_segment_bytes <= 0: + raise ValueError("max_segment_bytes must be > 0") + + # ------------------------------------------------------------------ + # 1. Resolve all relevant data files + # ------------------------------------------------------------------ + + files: list[DataFileMeta] = manifest.iter_files( + start_ts_ns=start_ts_ns, + end_ts_ns=end_ts_ns, + symbol=symbol, + venue=venue, + datatype=datatype, + ) + + if not files: + raise RuntimeError("No data files found for given experiment range") + + # ------------------------------------------------------------------ + # 2. Segment files according to scratch constraints + # ------------------------------------------------------------------ + + file_segments: list[list[DataFileMeta]] = segment_files( + files=files, + max_bytes=max_segment_bytes, + ) + + if not file_segments: + raise RuntimeError("Segmenter produced no segments") + + # ------------------------------------------------------------------ + # 3. Expand parameter sweeps + # ------------------------------------------------------------------ + + normalized_grid = expand_ranges(sweep_spec) + sweep_plans = expand_parameter_grid(normalized_grid) + + # ------------------------------------------------------------------ + # 4. Build SegmentPlans + # ------------------------------------------------------------------ + + segments: list[SegmentPlan] = [] + + for index, segment in enumerate(file_segments): + segment_id = f"segment_{index:04d}" + + segment_start = min(f.start_ts_ns for f in segment) + segment_end = max(f.end_ts_ns for f in segment) + + estimated_bytes = sum(f.size_bytes for f in segment) + + if estimated_bytes > max_segment_bytes: + raise RuntimeError( + f"Segment {segment_id} exceeds max_segment_bytes " + f"({estimated_bytes} > {max_segment_bytes})" + ) + + segments.append( + SegmentPlan( + segment_id=segment_id, + start_ts_ns=segment_start, + end_ts_ns=segment_end, + estimated_bytes=estimated_bytes, + files=[f.object_key for f in segment], + sweeps=sweep_plans, + ) + ) + + # ------------------------------------------------------------------ + # 5. Return final experiment plan + # ------------------------------------------------------------------ + + return ExperimentPlan( + experiment_id=experiment_id, + segments=segments, + ) diff --git a/core_runtime/backtest/orchestrator/planner_models.py b/core_runtime/backtest/orchestrator/planner_models.py new file mode 100644 index 0000000..d6f36d9 --- /dev/null +++ b/core_runtime/backtest/orchestrator/planner_models.py @@ -0,0 +1,38 @@ +""" +Planning model definitions. + +This module contains immutable planning structures used to describe +experiments, segments, and sweeps. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from core_runtime.backtest.orchestrator.sweeps import SweepPlan + + +@dataclass(frozen=True, slots=True) +class SegmentPlan: + """ + Execution plan for a single segment of data. 
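+
+    A segment is a bundle of data files that fits the configured scratch
+    budget; every sweep in `sweeps` runs once against the files in
+    `files`, so a plan expands to len(segments) * len(sweeps) backtests.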
+ """ + + segment_id: str + start_ts_ns: int + end_ts_ns: int + estimated_bytes: int + files: list[str] + sweeps: list[SweepPlan] + + +@dataclass(frozen=True, slots=True) +class ExperimentPlan: + """ + High-level execution plan for an experiment. + """ + + experiment_id: str + segments: list[SegmentPlan] diff --git a/core_runtime/backtest/orchestrator/s3_manifest.py b/core_runtime/backtest/orchestrator/s3_manifest.py new file mode 100644 index 0000000..d3557fc --- /dev/null +++ b/core_runtime/backtest/orchestrator/s3_manifest.py @@ -0,0 +1,128 @@ +from __future__ import annotations + +import json + +from core_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim +from core_runtime.backtest.orchestrator.manifest import DataFileMeta, DatasetManifest + + +class S3DatasetManifest(DatasetManifest): + """ + DatasetManifest implementation backed by S3. + + Semantics: + - Manifests live under a canonical prefix (e.g. s3://data/canonical/) + - All filtering is semantic (venue, datatype, symbol, time) + - Path layout is NOT part of the contract + """ + + def __init__( + self, + *, + bucket: str, + stage: str, + ) -> None: + self._s3 = OCIObjectStorageS3Shim(region="eu-frankfurt-1") + self._bucket = bucket + self._prefix = stage.rstrip("/") + + # ------------------------------------------------------------------ + + def iter_files( + self, + *, + start_ts_ns: int, + end_ts_ns: int, + symbol: str, + venue: str, + datatype: str, + ) -> list[DataFileMeta]: + files: list[DataFileMeta] = [] + + for key in self._list_manifest_keys(): + manifest = self._load_manifest(key) + + dataset = manifest["dataset"] + + if dataset["venue"] != venue: + continue + + if dataset["datatype"] != datatype: + continue + + time_range = manifest["time_range_ns"] + if not self._overlaps( + start_ts_ns, + end_ts_ns, + time_range["start"], + time_range["end"], + ): + continue + + for entry in manifest["files"]: + if not self._overlaps( + start_ts_ns, + end_ts_ns, + entry["start_ts_ns"], + entry["end_ts_ns"], + ): + continue + + manifest_key = key + manifest_dir = manifest_key.rsplit("/", 1)[0] + object_key = f"{manifest_dir}/{entry['file_id']}" + + files.append( + DataFileMeta( + file_id=entry["file_id"], + object_key=object_key, + start_ts_ns=entry["start_ts_ns"], + end_ts_ns=entry["end_ts_ns"], + size_bytes=entry["size_bytes"], + symbol=symbol, + venue=venue, + datatype=datatype, + ) + ) + + return files + + # ------------------------------------------------------------------ + + def _list_manifest_keys(self) -> list[str]: + resp = self._s3.list_objects( + bucket=self._bucket, + prefix=self._prefix, + ) + + contents = resp.get("Contents", []) + + return [ + obj["Key"] + for obj in contents + if obj["Key"].endswith("/manifest.json") + ] + + def _load_manifest(self, key: str) -> dict: + resp = self._s3.get_object( + bucket=self._bucket, + key=key, + ) + + body = resp["Body"] + + if hasattr(body, "read"): + raw_bytes = body.read() + else: + raw_bytes = body + + return json.loads(raw_bytes) + + @staticmethod + def _overlaps( + a_start: int, + a_end: int, + b_start: int, + b_end: int, + ) -> bool: + return a_start < b_end and b_start < a_end diff --git a/core_runtime/backtest/orchestrator/segmenter.py b/core_runtime/backtest/orchestrator/segmenter.py new file mode 100644 index 0000000..637b603 --- /dev/null +++ b/core_runtime/backtest/orchestrator/segmenter.py @@ -0,0 +1,44 @@ +""" +File segmentation logic. + +This module contains utilities for splitting data files into +byte-size-constrained segments. 
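+
+Worked example (byte sizes are made up): with max_bytes=100 and files of
+40, 40, and 30 bytes in start-timestamp order, segmentation yields
+[[40, 40], [30]]: appending the third file would raise the running
+total to 110 bytes.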
+""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from core_runtime.backtest.orchestrator.manifest import DataFileMeta + + +def segment_files( + files: list[DataFileMeta], + max_bytes: int, +) -> list[list[DataFileMeta]]: + """ + Split files into ordered segments such that each segment does not + exceed the given maximum size in bytes. + """ + + segments: list[list[DataFileMeta]] = [] + current_segment: list[DataFileMeta] = [] + current_bytes = 0 + + # Sort files by start timestamp to ensure deterministic segmentation + for file_meta in sorted(files, key=lambda item: item.start_ts_ns): + exceeds_limit = current_bytes + file_meta.size_bytes > max_bytes + + if current_segment and exceeds_limit: + segments.append(current_segment) + current_segment = [] + current_bytes = 0 + + current_segment.append(file_meta) + current_bytes += file_meta.size_bytes + + if current_segment: + segments.append(current_segment) + + return segments diff --git a/core_runtime/backtest/orchestrator/summary.py b/core_runtime/backtest/orchestrator/summary.py new file mode 100644 index 0000000..f325a27 --- /dev/null +++ b/core_runtime/backtest/orchestrator/summary.py @@ -0,0 +1,141 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING, List + +if TYPE_CHECKING: + from core_runtime.backtest.orchestrator.planner_models import ExperimentPlan + + +# --------------------------------------------------------------------------- +# Data models +# --------------------------------------------------------------------------- + +@dataclass(frozen=True, slots=True) +class SegmentSummary: + segment_id: str + start_ts_ns: int + end_ts_ns: int + estimated_bytes: int + file_count: int + sweep_count: int + scratch_utilization: float # 0.0 - 1.0 + + +@dataclass(frozen=True, slots=True) +class ExperimentSummary: + experiment_id: str + segment_count: int + sweeps_per_segment: int + total_backtests: int + max_segment_bytes: int + segments: List[SegmentSummary] + warnings: List[str] + + +# --------------------------------------------------------------------------- +# Summary builder +# --------------------------------------------------------------------------- + +def summarize_experiment( + *, + plan: ExperimentPlan, + max_segment_bytes: int, +) -> ExperimentSummary: + warnings: list[str] = [] + segments: list[SegmentSummary] = [] + + if not plan.segments: + warnings.append("Experiment contains no segments") + + sweeps_per_segment = ( + len(plan.segments[0].sweeps) if plan.segments else 0 + ) + + total_backtests = len(plan.segments) * sweeps_per_segment + + if sweeps_per_segment == 0: + warnings.append("No sweeps defined (0 backtests will run)") + + if total_backtests > 500: + warnings.append( + f"High number of backtests ({total_backtests}); runtime may be long" + ) + + if len(plan.segments) > 50: + warnings.append( + f"High number of segments ({len(plan.segments)})" + ) + + for segment in plan.segments: + utilization = segment.estimated_bytes / max_segment_bytes + + if utilization > 1.0: + warnings.append( + f"{segment.segment_id} exceeds scratch size " + f"({utilization:.0%})" + ) + elif utilization > 0.9: + warnings.append( + f"{segment.segment_id} uses {utilization:.0%} of scratch size" + ) + + if segment.estimated_bytes < max_segment_bytes * 0.1: + warnings.append( + f"{segment.segment_id} is very small " + f"({utilization:.0%} of scratch)" + ) + + segments.append( + SegmentSummary( + segment_id=segment.segment_id, + 
start_ts_ns=segment.start_ts_ns, + end_ts_ns=segment.end_ts_ns, + estimated_bytes=segment.estimated_bytes, + file_count=len(segment.files), + sweep_count=len(segment.sweeps), + scratch_utilization=utilization, + ) + ) + + return ExperimentSummary( + experiment_id=plan.experiment_id, + segment_count=len(plan.segments), + sweeps_per_segment=sweeps_per_segment, + total_backtests=total_backtests, + max_segment_bytes=max_segment_bytes, + segments=segments, + warnings=warnings, + ) + + +# --------------------------------------------------------------------------- +# Pretty printer +# --------------------------------------------------------------------------- + +def print_experiment_summary(summary: ExperimentSummary) -> None: + max_gb = summary.max_segment_bytes / 1024**3 + + print(f"Experiment: {summary.experiment_id}") + print(f"Segments: {summary.segment_count}") + print(f"Sweeps per segment: {summary.sweeps_per_segment}") + print(f"Total backtests: {summary.total_backtests}") + print(f"Max segment size: {max_gb:.2f} GB") + print() + + if summary.warnings: + print("Warnings:") + for w in summary.warnings: + print(f" - {w}") + print() + + print("Segments:") + for s in summary.segments: + used_gb = s.estimated_bytes / 1024**3 + print( + f" - {s.segment_id}: " + f"{s.file_count} files | " + f"{used_gb:.2f} / {max_gb:.2f} GB | " + f"{s.sweep_count} sweeps | " + f"{s.scratch_utilization:.0%} scratch" + ) diff --git a/core_runtime/backtest/orchestrator/sweeps.py b/core_runtime/backtest/orchestrator/sweeps.py new file mode 100644 index 0000000..7eb2960 --- /dev/null +++ b/core_runtime/backtest/orchestrator/sweeps.py @@ -0,0 +1,83 @@ +""" +Parameter sweep utilities. + +This module provides helpers to expand parameter specifications into +concrete sweep plans. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from itertools import product +from typing import Any, Iterable + + +@dataclass(frozen=True, slots=True) +class RangeSpec: + """ + Numeric range specification used for parameter sweeps. + """ + + start: float + stop: float + step: float + + +@dataclass(frozen=True, slots=True) +class SweepPlan: + """ + Concrete parameter sweep configuration. + """ + + sweep_id: str + parameters: dict[str, Any] + + +def expand_ranges(spec: dict[str, Any]) -> dict[str, list[Any]]: + """ + Expand range and iterable specifications into explicit value lists. + """ + + expanded: dict[str, list[Any]] = {} + + for key, value in spec.items(): + if isinstance(value, RangeSpec): + values: list[Any] = [] + current = value.start + + # Add small epsilon to avoid floating point termination issues + while current <= value.stop + 1e-12: + values.append(round(current, 10)) + current += value.step + + expanded[key] = values + continue + + if isinstance(value, Iterable) and not isinstance(value, (str, bytes)): + expanded[key] = list(value) + continue + + expanded[key] = [value] + + return expanded + + +def expand_parameter_grid(grid: dict[str, list[Any]]) -> list[SweepPlan]: + """ + Generate all parameter combinations from a parameter grid. 
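+
+    Example (ordering is deterministic: keys are sorted, then
+    itertools.product enumerates combinations):
+
+        expand_parameter_grid({"a": [1, 2], "b": [10]})
+        # -> [SweepPlan("sweep_0000", {"a": 1, "b": 10}),
+        #     SweepPlan("sweep_0001", {"a": 2, "b": 10})]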
+ """ + + if not grid: + return [SweepPlan("sweep_0000", {})] + + keys = sorted(grid.keys()) + values = [grid[key] for key in keys] + + sweeps: list[SweepPlan] = [] + + for index, combination in enumerate(product(*values)): + parameters = dict(zip(keys, combination, strict=True)) + sweep_id = f"sweep_{index:04d}" + sweeps.append(SweepPlan(sweep_id, parameters)) + + return sweeps diff --git a/core_runtime/backtest/runtime/__init__.py b/core_runtime/backtest/runtime/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/core_runtime/backtest/runtime/context.py b/core_runtime/backtest/runtime/context.py new file mode 100644 index 0000000..e9301bf --- /dev/null +++ b/core_runtime/backtest/runtime/context.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +from dataclasses import dataclass +from datetime import datetime +from pathlib import Path +from typing import Mapping + + +@dataclass(frozen=True, slots=True) +class ExperimentContext: + experiment_id: str + + expected_segments: int + completed_segments: int + failed_segments: int + + experiment_started_at: datetime + + scratch_root: Path + + def __post_init__(self) -> None: + object.__setattr__(self, "scratch_root", Path(self.scratch_root)) + + @property + def scratch_experiment_dir(self) -> Path: + return self.scratch_root / self.experiment_id + + +@dataclass(frozen=True, slots=True) +class SegmentContext: + experiment_id: str + segment_id: str + + expected_sweeps: int + completed_sweeps: int + failed_sweeps: int + + segment_started_at: datetime + + scratch_root: Path + + def __post_init__(self) -> None: + object.__setattr__(self, "scratch_root", Path(self.scratch_root)) + + @property + def scratch_segment_dir(self) -> Path: + return ( + self.scratch_root + / self.experiment_id + / self.segment_id + ) + + +@dataclass(frozen=True, slots=True) +class SweepContext: + """ + Immutable runtime context for a single backtest sweep. + + One SweepContext == one Pod == one backtest execution. + """ + + # Identity + experiment_id: str + segment_id: str + sweep_id: str + + # Data + stage: str + venue: str + datatype: str + symbol: str + file_keys: tuple[str, ...] + + # Parameters + parameters: Mapping[str, object] + + # Runtime paths + scratch_root: Path + results_root: Path + + def __post_init__(self) -> None: + """ + Normalize runtime paths after JSON deserialization. + + JSON has no Path type, so scratch_root / results_root + may arrive as strings in worker pods. 
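+
+        Round-trip sketch (hypothetical `ctx`; assumes json, asdict and
+        Path are imported):
+
+            raw = json.dumps(asdict(ctx), default=str)
+            restored = SweepContext(**json.loads(raw))
+            assert isinstance(restored.scratch_root, Path)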
+ """ + object.__setattr__(self, "scratch_root", Path(self.scratch_root)) + object.__setattr__(self, "results_root", Path(self.results_root)) + + @property + def scratch_segment_dir(self) -> Path: + return ( + self.scratch_root + / self.experiment_id + / self.segment_id + ) + + @property + def scratch_data_dir(self) -> Path: + return self.scratch_segment_dir / "data" + + @property + def scratch_results_dir(self) -> Path: + return self.scratch_segment_dir / "results" / self.sweep_id diff --git a/core_runtime/backtest/runtime/core_configuration_mapper.py b/core_runtime/backtest/runtime/core_configuration_mapper.py new file mode 100644 index 0000000..f45c646 --- /dev/null +++ b/core_runtime/backtest/runtime/core_configuration_mapper.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +import math +from collections.abc import Collection, Mapping + +from tradingchassis_core.core.domain.configuration import CoreConfiguration + +_REQUIRED_METADATA_FIELDS = ("tick_size", "lot_size", "contract_size") + + +def _require_mapping(value: object, *, field_path: str) -> Mapping[str, object]: + if not isinstance(value, Mapping): + raise TypeError(f"{field_path} must be a mapping") + + normalized: dict[str, object] = {} + for key, nested in value.items(): + if not isinstance(key, str): + raise TypeError(f"{field_path} keys must be strings") + normalized[key] = nested + return normalized + + +def _require_non_empty_string(value: object, *, field_path: str) -> str: + if value is None: + raise ValueError(f"Missing required field: {field_path}") + if not isinstance(value, str): + raise TypeError(f"{field_path} must be a string") + if not value: + raise ValueError(f"{field_path} must be non-empty") + return value + + +def _require_positive_number(value: object, *, field_path: str) -> float: + if value is None: + raise ValueError(f"Missing required field: {field_path}") + if isinstance(value, bool) or not isinstance(value, (int, float)): + raise TypeError(f"{field_path} must be numeric") + + numeric = float(value) + if not math.isfinite(numeric): + raise ValueError(f"{field_path} must be finite") + if numeric <= 0.0: + raise ValueError(f"{field_path} must be > 0") + return numeric + + +def _validate_instrument_metadata( + *, + instruments: Mapping[str, object], + instrument: str, +) -> dict[str, float]: + instrument_raw = instruments.get(instrument) + if instrument_raw is None: + raise ValueError( + "Missing required core instrument entry: " + f"core.market.instruments.{instrument}" + ) + + instrument_cfg = _require_mapping( + instrument_raw, + field_path=f"core.market.instruments.{instrument}", + ) + + validated: dict[str, float] = {} + for field in _REQUIRED_METADATA_FIELDS: + validated[field] = _require_positive_number( + instrument_cfg.get(field), + field_path=f"core.market.instruments.{instrument}.{field}", + ) + return validated + + +def build_core_configuration_from_sections( + *, + core_section: Mapping[str, object], + engine_section: Mapping[str, object] | None = None, + processed_instruments: Collection[str] | None = None, +) -> CoreConfiguration: + core = _require_mapping(core_section, field_path="core") + version = _require_non_empty_string(core.get("version"), field_path="core.version") + + market_raw = core.get("market") + if market_raw is None: + raise ValueError("Missing required field: core.market") + market = _require_mapping(market_raw, field_path="core.market") + + instruments_raw = market.get("instruments") + if instruments_raw is None: + raise ValueError("Missing required field: 
core.market.instruments") + instruments = _require_mapping( + instruments_raw, + field_path="core.market.instruments", + ) + + if not instruments: + raise ValueError("core.market.instruments must contain at least one instrument") + + to_validate = set(instruments.keys()) + if processed_instruments is not None: + to_validate.update(processed_instruments) + + validated_core_values: dict[str, dict[str, float]] = {} + for instrument in sorted(to_validate): + validated_core_values[instrument] = _validate_instrument_metadata( + instruments=instruments, + instrument=instrument, + ) + + if engine_section is not None: + engine = _require_mapping(engine_section, field_path="engine") + instrument_raw = engine.get("instrument") + if instrument_raw is not None: + instrument = _require_non_empty_string( + instrument_raw, + field_path="engine.instrument", + ) + if instrument not in instruments: + raise ValueError( + "engine.instrument must exist in core.market.instruments: " + f"{instrument}" + ) + + core_values = validated_core_values[instrument] + for field in _REQUIRED_METADATA_FIELDS: + if field not in engine: + continue + engine_value = _require_positive_number( + engine[field], + field_path=f"engine.{field}", + ) + if engine_value != core_values[field]: + raise ValueError( + f"Conflicting duplicate field values for {field}: " + f"engine.{field}={engine_value} != " + f"core.market.instruments.{instrument}.{field}={core_values[field]}" + ) + + # CoreConfiguration is constructed from the explicit core section only. + payload = {k: v for k, v in core.items() if k != "version"} + return CoreConfiguration(version=version, payload=payload) + + +def build_core_configuration_from_run_config( + run_config: Mapping[str, object], +) -> CoreConfiguration: + config = _require_mapping(run_config, field_path="run_config") + if "core" not in config: + raise ValueError("Missing required top-level section: core") + + core_section = _require_mapping(config["core"], field_path="core") + + engine_section: Mapping[str, object] | None = None + processed_instruments: list[str] = [] + if "engine" in config: + engine_section = _require_mapping(config["engine"], field_path="engine") + if "instrument" in engine_section: + instrument = _require_non_empty_string( + engine_section["instrument"], + field_path="engine.instrument", + ) + processed_instruments.append(instrument) + + return build_core_configuration_from_sections( + core_section=core_section, + engine_section=engine_section, + processed_instruments=processed_instruments or None, + ) diff --git a/core_runtime/backtest/runtime/entrypoint.py b/core_runtime/backtest/runtime/entrypoint.py new file mode 100644 index 0000000..2932156 --- /dev/null +++ b/core_runtime/backtest/runtime/entrypoint.py @@ -0,0 +1,273 @@ +from __future__ import annotations + +import argparse +import json +import sys +from dataclasses import asdict +from pathlib import Path +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from core_runtime.backtest.orchestrator.planner_models import ExperimentPlan + +from core_runtime.backtest.orchestrator.planner import plan_experiment +from core_runtime.backtest.orchestrator.s3_manifest import S3DatasetManifest +from core_runtime.backtest.orchestrator.summary import ( + print_experiment_summary, + summarize_experiment, +) +from core_runtime.backtest.orchestrator.sweeps import RangeSpec +from core_runtime.backtest.runtime.context import SweepContext +from core_runtime.backtest.runtime.core_configuration_mapper import ( + 
build_core_configuration_from_run_config, +) + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _load_json(path: Path) -> dict[str, Any]: + if not path.exists(): + raise FileNotFoundError(path) + return json.loads(path.read_text(encoding="utf-8")) + + +def _parse_sweep_spec(raw: dict[str, Any]) -> dict[str, Any]: + """ + Same semantics as your planner CLI: + dict -> RangeSpec or explicit lists + """ + parsed: dict[str, Any] = {} + for key, value in raw.items(): + if isinstance(value, dict): + parsed[key] = RangeSpec( + start=value["start"], + stop=value["stop"], + step=value["step"], + ) + else: + parsed[key] = value + return parsed + + +def _emit_sweep_context( + *, + plan: ExperimentPlan, + base_cfg: dict[str, Any], + scratch_root: Path, + results_root: Path, + out_dir: Path, +) -> None: + """ + Emit one SweepContext JSON per sweep. + These JSON files are what Argo consumes. + """ + out_dir.mkdir(parents=True, exist_ok=True) + + experiment: dict = base_cfg["experiment"] + stage: str = experiment.get("stage", "derived") + venue: str = experiment["venue"] + datatype: str = experiment["datatype"] + symbol: str = experiment["symbol"] + + for segment in plan.segments: + for sweep in segment.sweeps: + ctx = SweepContext( + experiment_id=plan.experiment_id, + segment_id=segment.segment_id, + sweep_id=sweep.sweep_id, + stage=stage, + venue=venue, + datatype=datatype, + symbol=symbol, + file_keys=tuple(segment.files), + parameters={ + # pass through full engine/strategy/risk/core blocks + "engine": base_cfg["engine"], + "strategy": base_cfg["strategy"], + "risk": base_cfg["risk"], + "core": base_cfg["core"], + # plus sweep-specific parameters + "sweep": sweep.parameters, + }, + scratch_root=scratch_root, + results_root=results_root, + ) + + out_path = out_dir / f"{segment.segment_id}__{sweep.sweep_id}.json" + out_path.write_text( + json.dumps(asdict(ctx), indent=2, default=str), + encoding="utf-8", + ) + + +# --------------------------------------------------------------------------- +# Main +# --------------------------------------------------------------------------- + +def main() -> None: + parser = argparse.ArgumentParser( + description="Backtest entrypoint (plan or run via sweep fan-out)" + ) + + parser.add_argument( + "--config", + type=Path, + required=True, + help="Path to experiment JSON config (inside image or mounted).", + ) + + parser.add_argument( + "--plan", + action="store_true", + help="Plan experiment and print summary (no execution).", + ) + + parser.add_argument( + "--run", + action="store_true", + help="Plan experiment and emit sweep contexts for execution.", + ) + + parser.add_argument( + "--scratch-root", + type=Path, + default=Path("/mnt/scratch"), + help="Root directory for scratch volume.", + ) + + parser.add_argument( + "--results-root", + type=Path, + default=Path("/results"), + help="Logical results root (used for context only).", + ) + + parser.add_argument( + "--emit-dir", + type=Path, + default=Path("/mnt/scratch/sweeps"), + help="Directory where SweepContext JSONs are emitted.", + ) + + args = parser.parse_args() + + if not args.plan and not args.run: + print("Error: one of --plan or --run must be specified.", file=sys.stderr) + sys.exit(2) + + # ------------------------------------------------------------------ + # Load config + # ------------------------------------------------------------------ + + cfg = _load_json(args.config) + _ = 
build_core_configuration_from_run_config(cfg) + + experiment_id: str = cfg["id"] + experiment_cfg = cfg["experiment"] + + start_ts_ns: int = experiment_cfg["start_ts_ns"] + end_ts_ns: int = experiment_cfg["end_ts_ns"] + symbol: str = experiment_cfg["symbol"] + venue: str = experiment_cfg["venue"] + datatype: str = experiment_cfg["datatype"] + + segmentation: dict = experiment_cfg.get("segmentation", {}) + max_segment_gb: float = segmentation.get("max_segment_gb", 100) + max_segment_bytes = max_segment_gb * 1024**3 + + sweep_spec = _parse_sweep_spec(experiment_cfg.get("sweeps", {})) + + manifest = S3DatasetManifest( + bucket="data", + stage=experiment_cfg.get("stage", "derived"), + ) + + # ------------------------------------------------------------------ + # Planning + # ------------------------------------------------------------------ + + plan = plan_experiment( + experiment_id=experiment_id, + start_ts_ns=start_ts_ns, + end_ts_ns=end_ts_ns, + symbol=symbol, + venue=venue, + datatype=datatype, + sweep_spec=sweep_spec, + manifest=manifest, + max_segment_bytes=max_segment_bytes, + ) + + summary = summarize_experiment( + plan=plan, + max_segment_bytes=max_segment_bytes, + ) + + # Always show the plan (this is what you want in Argo logs) + print_experiment_summary(summary) + + if args.plan and not args.run: + # Plan-only mode: exit after printing + return + + # ------------------------------------------------------------------ + # Run preparation (emit sweep contexts) + # ------------------------------------------------------------------ + + index: list[str] = [] + segments_index: list[dict[str, object]] = [] + + out_dir: Path = args.emit_dir + out_dir.mkdir(parents=True, exist_ok=True) + + (out_dir / "experiment_id.txt").write_text( + plan.experiment_id, + encoding="utf-8", + ) + + expected_segments = len(plan.segments) + + (out_dir / "expected_segments.txt").write_text( + str(expected_segments), + encoding="utf-8", + ) + + _emit_sweep_context( + plan=plan, + base_cfg=cfg, + scratch_root=args.scratch_root, + results_root=args.results_root, + out_dir=out_dir, + ) + + for segment in plan.segments: + segments_index.append( + { + "segment_id": segment.segment_id, + "expected_sweeps": len(segment.sweeps), + } + ) + + (out_dir / "segments.json").write_text( + json.dumps(segments_index, indent=2), + encoding="utf-8", + ) + + for segment in plan.segments: + for sweep in segment.sweeps: + out_path = out_dir / f"{segment.segment_id}__{sweep.sweep_id}.json" + index.append(str(out_path)) + + (out_dir / "index.json").write_text( + json.dumps(index, indent=2), + encoding="utf-8", + ) + + print() + print(f"Emitted sweep contexts to: {args.emit_dir}") + print("Each JSON represents exactly one sweep (one Pod).") + + +if __name__ == "__main__": + main() diff --git a/core_runtime/backtest/runtime/experiment_finalize_entrypoint.py b/core_runtime/backtest/runtime/experiment_finalize_entrypoint.py new file mode 100644 index 0000000..b16640a --- /dev/null +++ b/core_runtime/backtest/runtime/experiment_finalize_entrypoint.py @@ -0,0 +1,197 @@ +from __future__ import annotations + +import argparse +import json +import logging +import os +import shutil +from datetime import datetime, timezone +from pathlib import Path + +from core_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim +from core_runtime.backtest.runtime.context import ExperimentContext +from core_runtime.backtest.runtime.prometheus_metrics import PrometheusMetricsClient + +LOGGER = logging.getLogger(__name__) + + +class ExperimentFinalizer: + 
""" + Finalizes an experiment after all segments have completed. + + Responsibilities: + - write experiment_metadata.json + - write _DONE marker + """ + + def finalize(self, *, ctx: ExperimentContext) -> None: + finished_at = datetime.now(timezone.utc) + + status = "success" + if ctx.failed_segments > 0: + status = "failed" + + metadata = { + "schema_version": "1.0", + "identity": { + "experiment_id": ctx.experiment_id, + }, + "lifecycle": { + "status": status, + "started_at": ctx.experiment_started_at.isoformat(), + "finished_at": finished_at.isoformat(), + "duration_seconds": ( + finished_at - ctx.experiment_started_at + ).total_seconds(), + }, + "segments": { + "expected": ctx.expected_segments, + "completed": ctx.completed_segments, + "failed": ctx.failed_segments, + }, + } + + experiment_dir = ctx.scratch_experiment_dir + experiment_dir.mkdir(parents=True, exist_ok=True) + + (experiment_dir / "experiment_metadata.json").write_text( + json.dumps(metadata, indent=2), + encoding="utf-8", + ) + + (experiment_dir / "_DONE").write_text( + finished_at.isoformat(), + encoding="utf-8", + ) + + # --- Prometheus metrics (side-effect only) --- + metrics = PrometheusMetricsClient() + + if metrics.is_enabled(): + try: + labels = { + "experiment_id": ctx.experiment_id, + "status": status, + } + + metrics.push_gauge( + name="backtest_experiment_duration_seconds", + value=metadata["lifecycle"]["duration_seconds"], + labels=labels, + ) + + metrics.push_gauge( + name="backtest_experiment_completed_segments", + value=float(ctx.completed_segments), + labels=labels, + ) + + metrics.push_gauge( + name="backtest_experiment_failed_segments", + value=float(ctx.failed_segments), + labels=labels, + ) + + metrics.push_all(job="backtest_experiment") + + except Exception: + LOGGER.exception("Prometheus push failed") + + +class ExperimentMetadataPersister: + def __init__( + self, + *, + bucket: str, + prefix: str = "backtests", + ) -> None: + self._s3 = OCIObjectStorageS3Shim(region="eu-frankfurt-1") + self._bucket = bucket + self._prefix = prefix + + def persist( + self, + *, + experiment_id: str, + experiment_dir: Path, + ) -> None: + prefix = f"{self._prefix}/{experiment_id}" + + for name in ("experiment_metadata.json", "_DONE"): + path = experiment_dir / name + if not path.exists(): + continue + + with path.open("rb") as fh: + self._s3.put_object( + bucket=self._bucket, + key=f"{prefix}/{name}", + body=fh, + ) + + +def _cleanup_scratch(*, experiment_id: str, scratch_root: Path) -> None: + """ + Remove all scratch data for this workflow + experiment. + This is safe to call ONLY after successful finalization. 
+ """ + + workflow_uid = os.environ.get("ARGO_WORKFLOW_UID") + if not workflow_uid: + raise RuntimeError("ARGO_WORKFLOW_UID is not set") + + sweeps_dir = scratch_root / "sweeps" / workflow_uid + experiment_dir = scratch_root / experiment_id + + if sweeps_dir.exists(): + shutil.rmtree(sweeps_dir) + + if experiment_dir.exists(): + shutil.rmtree(experiment_dir) + + +def main() -> None: + parser = argparse.ArgumentParser("finalize experiment") + + parser.add_argument("--experiment-id", type=str, required=True) + + parser.add_argument("--expected-segments", type=int, required=True) + parser.add_argument("--completed-segments", type=int, required=True) + parser.add_argument("--failed-segments", type=int, required=True) + + parser.add_argument( + "--experiment-started-at", + type=str, + required=True, + help="ISO-8601 timestamp (UTC)", + ) + + parser.add_argument("--scratch-root", type=Path, required=True) + + args = parser.parse_args() + + ctx = ExperimentContext( + experiment_id=args.experiment_id, + expected_segments=args.expected_segments, + completed_segments=args.completed_segments, + failed_segments=args.failed_segments, + experiment_started_at=datetime.fromisoformat(args.experiment_started_at), + scratch_root=args.scratch_root, + ) + + ExperimentFinalizer().finalize(ctx=ctx) + + persister = ExperimentMetadataPersister(bucket="data") + persister.persist( + experiment_id=ctx.experiment_id, + experiment_dir=ctx.scratch_experiment_dir, + ) + + _cleanup_scratch( + experiment_id=ctx.experiment_id, + scratch_root=ctx.scratch_root, + ) + + +if __name__ == "__main__": + main() diff --git a/core_runtime/backtest/runtime/mlflow_segment_logger.py b/core_runtime/backtest/runtime/mlflow_segment_logger.py new file mode 100644 index 0000000..78672fc --- /dev/null +++ b/core_runtime/backtest/runtime/mlflow_segment_logger.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import logging +import os +from typing import TYPE_CHECKING + +import mlflow + +if TYPE_CHECKING: + from core_runtime.backtest.runtime.context import SegmentContext + +LOGGER = logging.getLogger(__name__) + + +class MlflowSegmentLogger: + """Logs segment-level health & progress information to MLflow. + + Tracking is configured via environment variables (recommended for Kubernetes): + - MLFLOW_TRACKING_URI: HTTP(S) address of the MLflow tracking server. + Example: http://mlflow.ml.svc.cluster.local:5000 + + This logger is best-effort. Callers should catch exceptions and continue. + """ + + def __init__(self) -> None: + tracking_uri = os.environ.get("MLFLOW_TRACKING_URI") + if tracking_uri: + mlflow.set_tracking_uri(tracking_uri) + + def log( + self, + *, + ctx: SegmentContext, + duration_seconds: float, + status: str, + ) -> None: + """Log segment metadata as MLflow parameters/metrics/tags.""" + + # mlflow.set_experiment creates the experiment if it does not exist and + # avoids an explicit get/create race. 
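+        # Here ctx.experiment_id doubles as the MLflow experiment *name*;
+        # each segment becomes a run within it via run_name=ctx.segment_id.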
+ mlflow.set_experiment(ctx.experiment_id) + + with mlflow.start_run(run_name=ctx.segment_id): + # Parameters (stable, comparable) + mlflow.log_param("expected_sweeps", ctx.expected_sweeps) + mlflow.log_param("completed_sweeps", ctx.completed_sweeps) + mlflow.log_param("failed_sweeps", ctx.failed_sweeps) + + # Metrics + mlflow.log_metric("duration_seconds", duration_seconds) + + # Tags (UI / filtering) + mlflow.set_tag("status", status) + mlflow.set_tag("experiment_id", ctx.experiment_id) + mlflow.set_tag("segment_id", ctx.segment_id) + + LOGGER.info( + "MLflow segment log submitted", + extra={ + "experiment_id": ctx.experiment_id, + "segment_id": ctx.segment_id, + "status": status, + }, + ) \ No newline at end of file diff --git a/core_runtime/backtest/runtime/prometheus_metrics.py b/core_runtime/backtest/runtime/prometheus_metrics.py new file mode 100644 index 0000000..39ccda9 --- /dev/null +++ b/core_runtime/backtest/runtime/prometheus_metrics.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +import json +import logging +import os + +from prometheus_client import CollectorRegistry, Gauge, push_to_gateway + +LOGGER = logging.getLogger(__name__) + + +class PrometheusMetricsClient: + """Minimal Prometheus Pushgateway client for batch-style jobs. + + Expected environment: + - PROMETHEUS_PUSHGATEWAY_URL: URL to the Pushgateway. + Example: http://pushgateway.monitoring.svc.cluster.local:9091 + + Optional: + - PROMETHEUS_PUSHGATEWAY_GROUPING_KEY_JSON: JSON object used as grouping key. + If not set, metrics are grouped only by the 'job' argument, which often + causes pushes from different pods to overwrite each other. + + Example: + {"workflow_uid": "ARGO_WORKFLOW_UID"} + + This client is intentionally best-effort: callers should treat it as a + side-effect and never fail the workflow because of metrics delivery. 
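+
+    Usage sketch (metric name and label values are illustrative):
+
+        metrics = PrometheusMetricsClient()
+        if metrics.is_enabled():
+            metrics.push_gauge(
+                name="backtest_segment_duration_seconds",
+                value=12.5,
+                labels={"experiment_id": "exp-42"},
+            )
+            metrics.push_all(job="backtest_segment")
+
+    Caveat: every push_gauge call registers a fresh Gauge in the shared
+    registry, so reusing the same metric name on one client instance would
+    raise a duplicate-timeseries error from prometheus_client.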
+ """ + + def __init__(self) -> None: + self._pushgateway_url = os.environ.get("PROMETHEUS_PUSHGATEWAY_URL") + self._grouping_key = self._load_grouping_key() + self._registry = CollectorRegistry() + + def is_enabled(self) -> bool: + return self._pushgateway_url is not None + + @staticmethod + def _load_grouping_key() -> dict[str, str]: + raw = os.environ.get("PROMETHEUS_PUSHGATEWAY_GROUPING_KEY_JSON") + if not raw: + return {} + + try: + data = json.loads(raw) + except json.JSONDecodeError: + LOGGER.warning( + "Invalid PROMETHEUS_PUSHGATEWAY_GROUPING_KEY_JSON; ignoring" + ) + return {} + + if not isinstance(data, dict): + return {} + + grouping: dict[str, str] = {} + for key, value in data.items(): + if isinstance(key, str) and isinstance(value, str): + grouping[key] = value + return grouping + + def push_gauge( + self, + *, + name: str, + value: float, + labels: dict[str, str], + ) -> None: + if not self._pushgateway_url: + return + + gauge = Gauge( + name, + documentation=name, + labelnames=list(labels.keys()), + registry=self._registry, + ) + + gauge.labels(**labels).set(value) + + def push_all(self, *, job: str) -> None: + if not self._pushgateway_url: + return + + push_to_gateway( + gateway=self._pushgateway_url, + job=job, + registry=self._registry, + grouping_key=self._grouping_key, + ) + + LOGGER.info( + "Prometheus metrics pushed", + extra={"job": job, "grouping_key": self._grouping_key}, + ) diff --git a/core_runtime/backtest/runtime/run_sweep.py b/core_runtime/backtest/runtime/run_sweep.py new file mode 100644 index 0000000..30865d0 --- /dev/null +++ b/core_runtime/backtest/runtime/run_sweep.py @@ -0,0 +1,511 @@ +from __future__ import annotations + +import argparse +import importlib.metadata +import json +import os +import platform +import shutil +import sys +import tomllib +from dataclasses import replace +from datetime import datetime, timezone +from pathlib import Path +from typing import Any + +from tradingchassis_core.core.risk.risk_config import RiskConfig +from tradingchassis_core.strategies.strategy_config import StrategyConfig + +from core_runtime.backtest.engine.hft_engine import ( + HftBacktestConfig, + HftBacktestEngine, + HftEngineConfig, +) +from core_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim +from core_runtime.backtest.runtime.context import SweepContext +from core_runtime.backtest.runtime.core_configuration_mapper import ( + build_core_configuration_from_run_config, +) + + +class SweepMaterializer: + """ + Materializes sweep input data from S3 into a local scratch directory. + """ + + def __init__( + self, + *, + bucket: str, + ) -> None: + self._s3 = OCIObjectStorageS3Shim(region="eu-frankfurt-1") + self._bucket = bucket + + def materialize(self, ctx: SweepContext) -> None: + """ + Ensure all input files for the sweep are present locally. + + This operation is idempotent. + """ + data_dir = ctx.scratch_data_dir + ready_marker = data_dir / "_READY" + + if ready_marker.exists(): + return + + data_dir.mkdir(parents=True, exist_ok=True) + + for key in ctx.file_keys: + filename = Path(key).name + target_path = data_dir / filename + + if target_path.exists(): + continue + + self._s3.download_to_file( + bucket=self._bucket, + key=key, + destination=target_path, + ) + + ready_marker.touch() + + +class SweepEngineRunner: + """ + Runs exactly one HFT backtest sweep. + + One runner instance == one sweep == one engine.run(). 
+ """ + + def __init__( + self, + *, + engine_cfg: HftEngineConfig, + strategy_cfg: StrategyConfig, + risk_cfg: RiskConfig, + core_cfg: object, + ) -> None: + self._engine_cfg = engine_cfg + self._strategy_cfg = strategy_cfg + self._risk_cfg = risk_cfg + self._core_cfg = core_cfg + + def run(self, ctx: SweepContext) -> dict[str, Any]: + """ + Execute the backtest for this sweep. + + Returns lightweight metadata about the run. + """ + results_dir = ctx.scratch_results_dir + results_dir.mkdir(parents=True, exist_ok=True) + + # IMPORTANT: + # Engine expects a FIXED list of local file paths. + data_files = [ + str(ctx.scratch_data_dir / Path(key).name) + for key in ctx.file_keys + ] + + engine_cfg = self._build_engine_cfg(data_files, results_dir) + + # Defensive: numpy will not create parent directories for output files. + Path(engine_cfg.stats_npz_path).parent.mkdir(parents=True, exist_ok=True) + + backtest_cfg = HftBacktestConfig( + # Keep IDs filesystem-safe. Some engines/libraries may use the ID + # as part of output paths. + id=f"{ctx.experiment_id}__{ctx.segment_id}__{ctx.sweep_id}", + description="sweep execution", + engine_cfg=engine_cfg, + strategy_cfg=self._strategy_cfg, + risk_cfg=self._risk_cfg, + core_cfg=self._core_cfg, + ) + + engine = HftBacktestEngine(backtest_cfg) + + # Ensure any relative writes performed by the engine end up inside the + # scratch subtree of this sweep. + previous_cwd = Path.cwd() + try: + os.chdir(ctx.scratch_segment_dir) + result = engine.run() + finally: + os.chdir(previous_cwd) + + done_marker = ctx.scratch_results_dir / "_DONE" + done_marker.touch() + + return { + "experiment_id": ctx.experiment_id, + "segment_id": ctx.segment_id, + "sweep_id": ctx.sweep_id, + "stats_file": result.stats_file, + "extra_metadata": result.extra_metadata, + } + + def _build_engine_cfg( + self, + data_files: list[str], + results_dir: Path, + ) -> HftEngineConfig: + """ + Clone the base engine config and inject sweep-specific paths. + """ + cfg = replace(self._engine_cfg) + + # THIS is the critical binding to the engine semantics + cfg.data_files = data_files + cfg.stats_npz_path = str(results_dir / "stats.npz") + cfg.event_bus_path = str(results_dir / "events.jsonl") + + return cfg + + +class SweepMetadataWriter: + """Writes immutable metadata.json for a completed sweep.""" + + def __init__(self, *, runner: str) -> None: + self._runner = runner + + @staticmethod + def _read_pyproject_project_info(pyproject_path: Path) -> tuple[str | None, str | None]: + """Read [project] name/version from pyproject.toml. + + This is used as a fallback when the project is executed from source without + being installed as a distribution (importlib.metadata won't find it). 
+ """ + + try: + raw = pyproject_path.read_bytes() + except OSError: + return (None, None) + + try: + data = tomllib.loads(raw.decode("utf-8")) + except (UnicodeDecodeError, tomllib.TOMLDecodeError): + return (None, None) + + if "project" not in data: + return (None, None) + + project = data["project"] + name = project["name"] if isinstance(project, dict) and "name" in project else None + version = project["version"] if isinstance(project, dict) and "version" in project else None + + if not isinstance(name, str): + name = None + if not isinstance(version, str): + version = None + + return (name, version) + + @staticmethod + def _guess_repo_root(start: Path) -> Path | None: + """Walk upwards until pyproject.toml is found.""" + + current = start + for _ in range(20): + candidate = current / "pyproject.toml" + if candidate.exists(): + return current + if current.parent == current: + return None + current = current.parent + return None + + @classmethod + def _resolve_project_metadata(cls) -> dict[str, str | None]: + """Resolve project name/version without failing the sweep.""" + + repo_root = cls._guess_repo_root(Path(__file__).resolve()) + pyproject_path = (repo_root / "pyproject.toml") if repo_root is not None else None + + name_from_pyproject: str | None = None + version_from_pyproject: str | None = None + + if pyproject_path is not None: + name_from_pyproject, version_from_pyproject = cls._read_pyproject_project_info( + pyproject_path + ) + + distribution_name = name_from_pyproject or "tradingchassis-core" + + version: str | None + source: str + try: + version = importlib.metadata.version(distribution_name) + source = "importlib.metadata" + except importlib.metadata.PackageNotFoundError: + version = version_from_pyproject + source = "pyproject.toml" if version is not None else "unknown" + + return { + "name": distribution_name, + "version": version, + "source": source, + } + + def write( + self, + *, + ctx: SweepContext, + status: str, + started_at: datetime, + finished_at: datetime, + ) -> None: + duration_seconds = (finished_at - started_at).total_seconds() + + project_meta = self._resolve_project_metadata() + + metadata = { + "schema_version": "1.0", + "identity": { + "experiment_id": ctx.experiment_id, + "segment_id": ctx.segment_id, + "sweep_id": ctx.sweep_id, + }, + "lifecycle": { + "status": status, + "started_at": started_at.isoformat(), + "finished_at": finished_at.isoformat(), + "duration_seconds": duration_seconds, + "runner": self._runner, + }, + "parameters": ctx.parameters, + "code": { + "git": { + "commit": os.environ.get("GIT_COMMIT"), + "dirty": os.environ.get("GIT_DIRTY") == "1", + "branch": os.environ.get("GIT_BRANCH"), + }, + "project": { + "name": project_meta["name"], + "version": project_meta["version"], + "version_source": project_meta["source"], + }, + }, + "environment": { + "python": sys.version.split()[0], + "framework": platform.platform(), + "container_image": os.environ.get("IMAGE_TAG"), + }, + "artifacts": { + "stats": "stats.npz", + "events": "events.jsonl", + }, + "links": {}, + } + + target = ctx.scratch_results_dir / "sweep_metadata.json" + target.write_text(json.dumps(metadata, indent=2), encoding="utf-8") + + +class SweepResultPersister: + """ + Persists sweep results from scratch to S3. + + Upload is atomic at sweep level via a _DONE marker. 
+    """
+
+    def __init__(
+        self,
+        *,
+        bucket: str,
+        prefix: str = "backtests",
+    ) -> None:
+        self._s3 = OCIObjectStorageS3Shim(region="eu-frankfurt-1")
+        self._bucket = bucket
+        self._prefix = prefix.rstrip("/")
+
+    def persist(self, ctx: SweepContext) -> None:
+        results_dir = ctx.scratch_results_dir
+        done_marker = results_dir / "_DONE"
+
+        if not results_dir.exists():
+            raise RuntimeError(f"Results directory does not exist: {results_dir}")
+
+        if not done_marker.exists():
+            raise RuntimeError(
+                f"Sweep results not finalized (_DONE missing): {results_dir}"
+            )
+
+        s3_base = self._s3_base_scratch_prefix(ctx)
+
+        for path in results_dir.iterdir():
+            if path.is_dir():
+                continue
+
+            key = f"{s3_base}/{path.name}"
+            self._upload_file(path, key)
+
+    def _upload_file(self, path: Path, key: str) -> None:
+        with path.open("rb") as fh:
+            self._s3.put_object(
+                bucket=self._bucket,
+                key=key,
+                body=fh,
+            )
+
+    def _s3_base_scratch_prefix(self, ctx: SweepContext) -> str:
+        return (
+            f"{self._prefix}/"
+            f"{ctx.experiment_id}/"
+            f"{ctx.segment_id}/"
+            f"{ctx.sweep_id}"
+        )
+
+
+class SweepCleaner:
+    """
+    Handles safe cleanup of sweep scratch directories.
+
+    Invariant:
+    - Only sweep-private state may be removed during parallel execution.
+    - Segment-level directories are shared across sweeps and must not be
+      deleted by a single sweep.
+
+    Cleanup is allowed ONLY after successful persistence.
+    """
+
+    def __init__(self, *, keep_scratch: bool) -> None:
+        self._keep_scratch = keep_scratch
+
+    def cleanup(self, ctx: SweepContext) -> None:
+        """
+        Remove the sweep's private scratch subtree.
+
+        This deletes only:
+
+            <scratch_root>/<experiment_id>/<segment_id>/results/<sweep_id>/
+
+        It intentionally does NOT delete the segment directory itself, since that
+        directory is shared by all sweeps in the segment (parallel execution).
+        """
+        if self._keep_scratch:
+            return
+
+        sweep_results_dir = ctx.scratch_results_dir
+        if not sweep_results_dir.exists():
+            return
+
+        self._validate_target(ctx, sweep_results_dir)
+        shutil.rmtree(sweep_results_dir)
+
+    @staticmethod
+    def _validate_target(ctx: SweepContext, target_dir: Path) -> None:
+        """
+        Guard rails against accidental deletion of shared directories.
+
+        This method raises if the computed target does not match the expected
+        sweep results layout.
+        """
+        if target_dir.name != ctx.sweep_id:
+            raise RuntimeError(
+                "Refusing to delete: target_dir does not match sweep_id "
+                f"({target_dir} vs {ctx.sweep_id})"
+            )
+
+        if target_dir.parent.name != "results":
+            raise RuntimeError(
+                "Refusing to delete: target_dir is not under a 'results' folder "
+                f"({target_dir})"
+            )
+
+        segment_dir = ctx.scratch_segment_dir
+        try:
+            resolved_target = target_dir.resolve()
+            resolved_segment = segment_dir.resolve()
+        except FileNotFoundError:
+            # If a parent directory was removed concurrently, treat as no-op.
+            return
+
+        if not resolved_target.is_relative_to(resolved_segment):
+            raise RuntimeError(
+                "Refusing to delete: target_dir is outside scratch_segment_dir "
+                f"({resolved_target} not under {resolved_segment})"
+            )
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser("run single backtest sweep")
+    parser.add_argument("--context", type=Path, required=True)
+    parser.add_argument("--scratch-root", type=Path, required=True)
+    args = parser.parse_args()
+
+    # ------------------------------------------------------------------
+    # Load sweep context
+    # ------------------------------------------------------------------
+
+    if not args.context.exists():
+        raise FileNotFoundError(
+            f"SweepContext file does not exist: {args.context}. "
+            "Ensure it is mounted as an Argo artifact."
+        )
+
+    ctx = SweepContext(**json.loads(args.context.read_text(encoding="utf-8")))
+    ctx = replace(ctx, scratch_root=args.scratch_root)
+
+    run_config_for_core: dict[str, object] = {}
+    if "engine" in ctx.parameters:
+        run_config_for_core["engine"] = ctx.parameters["engine"]
+    if "core" in ctx.parameters:
+        run_config_for_core["core"] = ctx.parameters["core"]
+    core_cfg = build_core_configuration_from_run_config(run_config_for_core)
+
+    # ------------------------------------------------------------------
+    # Setup
+    # ------------------------------------------------------------------
+
+    materializer = SweepMaterializer(bucket="data")
+    materializer.materialize(ctx)
+
+    engine_cfg = HftEngineConfig(**ctx.parameters["engine"])
+    strategy_cfg = StrategyConfig(**ctx.parameters["strategy"])
+    risk_cfg = RiskConfig(**ctx.parameters["risk"])
+
+    runner = SweepEngineRunner(
+        engine_cfg=engine_cfg,
+        strategy_cfg=strategy_cfg,
+        risk_cfg=risk_cfg,
+        core_cfg=core_cfg,
+    )
+
+    persister = SweepResultPersister(bucket="data")
+
+    metadata_writer = SweepMetadataWriter(runner="argo")
+    cleaner = SweepCleaner(keep_scratch=False)
+
+    # ------------------------------------------------------------------
+    # Execute sweep
+    # ------------------------------------------------------------------
+
+    started_at = datetime.now(timezone.utc)
+    status = "success"
+
+    try:
+        print(runner.run(ctx))
+    except Exception:
+        status = "failed"
+        raise
+    finally:
+        finished_at = datetime.now(timezone.utc)
+
+        # Metadata is ALWAYS written, for failed sweeps as well
+        metadata_writer.write(
+            ctx=ctx,
+            status=status,
+            started_at=started_at,
+            finished_at=finished_at,
+        )
+
+        if status == "success":
+            # Persist results ONLY on success ...
+            persister.persist(ctx)
+
+            # ... and clean up scratch ONLY after successful persistence
+            cleaner.cleanup(ctx)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/core_runtime/backtest/runtime/segment_finalize_entrypoint.py b/core_runtime/backtest/runtime/segment_finalize_entrypoint.py
new file mode 100644
index 0000000..a126df5
--- /dev/null
+++ b/core_runtime/backtest/runtime/segment_finalize_entrypoint.py
@@ -0,0 +1,194 @@
+from __future__ import annotations
+
+import argparse
+import json
+import logging
+from datetime import datetime, timezone
+from pathlib import Path
+
+from core_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim
+from core_runtime.backtest.runtime.context import SegmentContext
+from core_runtime.backtest.runtime.mlflow_segment_logger import MlflowSegmentLogger
+from core_runtime.backtest.runtime.prometheus_metrics import PrometheusMetricsClient
+
+LOGGER = logging.getLogger(__name__)
+
+
+class SegmentFinalizer:
+    """
+    Finalizes a segment after all sweeps have completed.
+
+    Responsibilities:
+    - write segment_metadata.json
+    - write _DONE marker
+    - emit MLflow / Prometheus telemetry (best-effort side effects)
+    """
+
+    def finalize(
+        self,
+        *,
+        ctx: SegmentContext,
+    ) -> None:
+        finished_at = datetime.now(timezone.utc)
+
+        status = "success"
+        if ctx.failed_sweeps > 0:
+            status = "failed"
+
+        metadata = {
+            "schema_version": "1.0",
+            "identity": {
+                "experiment_id": ctx.experiment_id,
+                "segment_id": ctx.segment_id,
+            },
+            "lifecycle": {
+                "status": status,
+                "started_at": ctx.segment_started_at.isoformat(),
+                "finished_at": finished_at.isoformat(),
+                "duration_seconds": (
+                    finished_at - ctx.segment_started_at
+                ).total_seconds(),
+            },
+            "sweeps": {
+                "expected": ctx.expected_sweeps,
+                "completed": ctx.completed_sweeps,
+                "failed": ctx.failed_sweeps,
+            },
+        }
+
+        segment_dir = ctx.scratch_segment_dir
+        segment_dir.mkdir(parents=True, exist_ok=True)
+
+        (segment_dir / "segment_metadata.json").write_text(
+            json.dumps(metadata, indent=2),
+            encoding="utf-8",
+        )
+
+        (segment_dir / "_DONE").write_text(
+            finished_at.isoformat(),
+            encoding="utf-8",
+        )
+
+        # --- MLflow logging (side-effect only) ---
+        try:
+            MlflowSegmentLogger().log(
+                ctx=ctx,
+                duration_seconds=metadata["lifecycle"]["duration_seconds"],
+                status=status,
+            )
+        except Exception:
+            LOGGER.exception("MLflow logging failed")
+
+        # --- Prometheus metrics (side-effect only) ---
+        metrics = PrometheusMetricsClient()
+
+        if metrics.is_enabled():
+            try:
+                labels = {
+                    "experiment_id": ctx.experiment_id,
+                    "segment_id": ctx.segment_id,
+                    "status": status,
+                }
+
+                metrics.push_gauge(
+                    name="backtest_segment_duration_seconds",
+                    value=metadata["lifecycle"]["duration_seconds"],
+                    labels=labels,
+                )
+
+                metrics.push_gauge(
+                    name="backtest_segment_completed_sweeps",
+                    value=float(ctx.completed_sweeps),
+                    labels=labels,
+                )
+
+                metrics.push_gauge(
+                    name="backtest_segment_failed_sweeps",
+                    value=float(ctx.failed_sweeps),
+                    labels=labels,
+                )
+
+                metrics.push_all(job="backtest_segment")
+
+            except Exception:
+                LOGGER.exception("Prometheus push failed")
+
+
+class SegmentMetadataPersister:
+    """Uploads segment-level metadata artifacts (metadata + _DONE) to S3."""
+
+    def __init__(
+        self,
+        *,
+        bucket: str,
+        prefix: str = "backtests",
+    ) -> None:
+        self._s3 = OCIObjectStorageS3Shim(region="eu-frankfurt-1")
+        self._bucket = bucket
+        self._prefix = prefix.rstrip("/")
+
+    def persist(
+        self,
+        *,
+        experiment_id: str,
+        segment_id: str,
+        segment_dir: Path,
+    ) -> None:
+        prefix = f"{self._prefix}/{experiment_id}/{segment_id}"
+
+        for name in ("segment_metadata.json", "_DONE"):
+            path = segment_dir / name
+            if not path.exists():
+                continue
+
+            with path.open("rb") as fh:
+                self._s3.put_object(
+                    bucket=self._bucket,
+                    key=f"{prefix}/{name}",
+                    body=fh,
+                )
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser("finalize segment")
+
+    parser.add_argument("--experiment-id", type=str, required=True)
+    parser.add_argument("--segment-id", type=str, required=True)
+
+    parser.add_argument("--expected-sweeps", type=int, required=True)
+    parser.add_argument("--completed-sweeps", type=int, required=True)
+    parser.add_argument("--failed-sweeps", type=int, required=True)
+
+    parser.add_argument(
+        "--segment-started-at",
+        type=str,
+        required=True,
+        help="ISO-8601 timestamp (UTC)",
+    )
+
+    parser.add_argument("--scratch-root", type=Path, required=True)
+
+    args = parser.parse_args()
+
+    ctx = SegmentContext(
+        experiment_id=args.experiment_id,
+        segment_id=args.segment_id,
+        expected_sweeps=args.expected_sweeps,
+        completed_sweeps=args.completed_sweeps,
+        failed_sweeps=args.failed_sweeps,
+        segment_started_at=datetime.fromisoformat(args.segment_started_at),
+        scratch_root=args.scratch_root,
+    )
+
+    finalizer = SegmentFinalizer()
+    finalizer.finalize(
+        ctx=ctx,
+    )
+
+    persister = SegmentMetadataPersister(bucket="data")
+    persister.persist(
+        experiment_id=ctx.experiment_id,
+        segment_id=ctx.segment_id,
+        segment_dir=ctx.scratch_segment_dir,
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/core_runtime/core/__init__.py b/core_runtime/core/__init__.py
new file mode 100644
index 0000000..27d3575
--- /dev/null
+++ b/core_runtime/core/__init__.py
@@ -0,0 +1,2 @@
+"""Runtime-owned modules that are not part of the semantic core."""
+
diff --git a/core_runtime/core/events/__init__.py b/core_runtime/core/events/__init__.py
new file mode 100644
index 0000000..f8d3dcb
--- /dev/null
+++ b/core_runtime/core/events/__init__.py
@@ -0,0 +1,2 @@
+"""Runtime event plumbing (sinks, emitters, wiring)."""
+
diff --git a/core_runtime/core/events/sinks/__init__.py b/core_runtime/core/events/sinks/__init__.py
new file mode 100644
index 0000000..8749baf
--- /dev/null
+++ b/core_runtime/core/events/sinks/__init__.py
@@ -0,0 +1,2 @@
+"""Concrete runtime event sinks (I/O)."""
+
diff --git a/core_runtime/core/events/sinks/file_recorder.py b/core_runtime/core/events/sinks/file_recorder.py
new file mode 100644
index 0000000..bfe46bf
--- /dev/null
+++ b/core_runtime/core/events/sinks/file_recorder.py
@@ -0,0 +1,30 @@
+"""
+Append-only file recorder sink.
+"""
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Any
+
+
+class FileRecorderSink:
+    """Writes each event as a JSON line to a file."""
+
+    def __init__(self, path: str | Path) -> None:
+        self._path = Path(path)
+        self._path.parent.mkdir(parents=True, exist_ok=True)
+        self._fh = self._path.open("a", encoding="utf-8")
+        self._closed = False
+
+    def on_event(self, event: Any) -> None:
+        record = event.__dict__ if hasattr(event, "__dict__") else {"event": str(event)}
+        # default=str guards against non-JSON-serializable field values.
+        self._fh.write(json.dumps(record, default=str) + "\n")
+        self._fh.flush()
+
+    def close(self) -> None:
+        if self._closed:
+            return
+        self._fh.flush()
+        self._fh.close()
+        self._closed = True
diff --git a/core_runtime/local/__init__.py b/core_runtime/local/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/trading_runtime/local/backtest.py b/core_runtime/local/backtest.py
similarity index 80%
rename from trading_runtime/local/backtest.py
rename to core_runtime/local/backtest.py
index e978919..62a58fb 100644
--- a/trading_runtime/local/backtest.py
+++ b/core_runtime/local/backtest.py
@@ -8,14 +8,18 @@
 from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
-    from trading_framework import BacktestResult
+    from core_runtime.backtest.engine.engine_base import BacktestResult
 
-from trading_framework import (
+from tradingchassis_core.core.risk.risk_config import RiskConfig
+from tradingchassis_core.strategies.strategy_config import StrategyConfig
+
+from core_runtime.backtest.engine.hft_engine import (
     HftBacktestConfig,
     HftBacktestEngine,
     HftEngineConfig,
-    RiskConfig,
-    StrategyConfig,
+)
+from core_runtime.backtest.runtime.core_configuration_mapper import (
+    build_core_configuration_from_run_config,
 )
 
 
@@ -36,6 +40,7 @@ def load_config(path: str) -> HftBacktestConfig:
     engine_cfg = HftEngineConfig(**engine_raw)
     strategy_cfg = StrategyConfig(**strategy_raw)
     risk_cfg = RiskConfig(**risk_raw)
+    core_cfg = build_core_configuration_from_run_config(raw_json)
 
     return HftBacktestConfig(
         id=raw_json["id"],
@@ -43,6 +48,7 @@ def load_config(path: str)
-> HftBacktestConfig: engine_cfg=engine_cfg, strategy_cfg=strategy_cfg, risk_cfg=risk_cfg, + core_cfg=core_cfg, ) diff --git a/trading_runtime/local/local.json b/core_runtime/local/local.json similarity index 74% rename from trading_runtime/local/local.json rename to core_runtime/local/local.json index b596453..83ef50b 100644 --- a/trading_runtime/local/local.json +++ b/core_runtime/local/local.json @@ -5,9 +5,9 @@ "engine": { "initial_snapshot": null, "data_files": [ - "/workspaces/trading-runtime/tests/data/parts/part-000.npz", - "/workspaces/trading-runtime/tests/data/parts/part-001.npz", - "/workspaces/trading-runtime/tests/data/parts/part-002.npz" + "tests/data/parts/part-000.npz", + "tests/data/parts/part-001.npz", + "tests/data/parts/part-002.npz" ], "instrument": "BTC_USDC-PERPETUAL", @@ -32,8 +32,8 @@ "roi_lb": 40000, "roi_ub": 80000, - "stats_npz_path": "/workspaces/trading-runtime/tests/data/results/stats.npz", - "event_bus_path": "/workspaces/trading-runtime/tests/data/results/events.json" + "stats_npz_path": ".runtime/local/results/stats.npz", + "event_bus_path": ".runtime/local/results/events.json" }, "risk": { @@ -78,11 +78,24 @@ }, "strategy": { - "class_path": "trading_runtime.strategies.debug_strategy:DebugStrategyV1", + "class_path": "core_runtime.strategies.debug_strategy:DebugStrategyV1", "spread": 5.0, "order_qty": 0.1, "use_price_tick_levels": 3, "post_only": true + }, + + "core": { + "version": "v1", + "market": { + "instruments": { + "BTC_USDC-PERPETUAL": { + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1 + } + } + } } } diff --git a/core_runtime/local/oci.config.example b/core_runtime/local/oci.config.example new file mode 100644 index 0000000..031317e --- /dev/null +++ b/core_runtime/local/oci.config.example @@ -0,0 +1,6 @@ +[DEFAULT] +user=ocid1.user.oc1..REPLACE_ME +tenancy=ocid1.tenancy.oc1..REPLACE_ME +region=eu-frankfurt-1 +fingerprint=aa:bb:cc:dd:REPLACE_ME +key_file=/absolute/path/to/.oci/oci_api_key.pem diff --git a/core_runtime/strategies/__init__.py b/core_runtime/strategies/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/trading_runtime/strategies/debug_strategy.py b/core_runtime/strategies/debug_strategy.py similarity index 98% rename from trading_runtime/strategies/debug_strategy.py rename to core_runtime/strategies/debug_strategy.py index 4ce1738..42486d1 100644 --- a/trading_runtime/strategies/debug_strategy.py +++ b/core_runtime/strategies/debug_strategy.py @@ -3,7 +3,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from trading_framework import ( + from tradingchassis_core import ( EngineContext, GateDecision, MarketEvent, @@ -11,7 +11,7 @@ StrategyState, ) -from trading_framework import ( +from tradingchassis_core import ( NewOrderIntent, OrderIntent, Price, diff --git a/docs/venue-adapter-abstraction-design-v1.md b/docs/venue-adapter-abstraction-design-v1.md new file mode 100644 index 0000000..e43d623 --- /dev/null +++ b/docs/venue-adapter-abstraction-design-v1.md @@ -0,0 +1,203 @@ +# Venue Adapter Abstraction Design v1 (core-runtime) + +--- + +## Purpose and scope + +This document defines an implementation-facing design note for a future +Venue Adapter Abstraction in `core-runtime` using split capability protocols. 
+ +This is a docs-only slice: + +- it does not implement adapter APIs or protocols; +- it does not modify production code or tests; +- it does not change runtime behavior; +- it does not implement canonical `FillEvent` ingress; +- it does not canonicalize `OrderStateEvent`; +- it does not change `DerivedFillEvent` behavior; +- it does not change snapshot ingestion behavior; +- it does not change reducers or event taxonomy; +- it does not implement `ProcessingContext`; +- it does not implement replay/storage/EventStream persistence; +- it does not rename packages or directories. + +`VADN-01` - `core` remains venue-agnostic and must continue to consume canonical +Event Stream input and explicit configuration through existing boundaries. + +`VADN-02` - Adapters expose source capabilities; runtime owns orchestration and +maps capability outputs into canonical `EventStreamEntry` or compatibility paths. + +`VADN-03` - This note follows the split capability direction from Phase 7A and +does not introduce implementation API changes in this slice. + +--- + +## Contract references + +This note is implementation-facing and must stay consistent with: + +- `core/docs/venue-adapter-capability-model-v1.md` +- `core/docs/semantic-core-upgrade-milestone-closure-v1.md` +- `core/docs/runtime-execution-feedback-contract-v1.md` +- `core/docs/runtime-adapter-execution-feedback-source-contract-v1.md` +- `core/docs/post-submission-lifecycle-compatibility-map-v1.md` +- `core/docs/event-stream-cursor-characterization-v1.md` + +Current runtime anchors: + +- `core-runtime/core_runtime/backtest/adapters/venue.py` +- `core-runtime/core_runtime/backtest/adapters/execution.py` +- `core-runtime/core_runtime/backtest/engine/strategy_runner.py` +- `core-runtime/core_runtime/backtest/engine/event_stream_cursor.py` + +--- + +## Proposed split capability protocols (future, non-implemented) + +`VADN-04` - Future abstraction should be split by source responsibility and +authority class rather than one monolithic adapter interface. + +Conceptual capability names for future implementation planning: + +- `VenueEventWaiter` (or `WakeupSource`) +- `VenueClock` (or runtime clock boundary view) +- `MarketInputSource` +- `OrderSubmissionGateway` +- `OrderSnapshotSource` +- `AccountSnapshotSource` +- `ExecutionFeedbackRecordSource` + +`VADN-05` - Names above are conceptual and documentation-facing in this slice; +they do not define production protocol signatures yet. 
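+
+To make the conceptual split concrete, the sketch below renders a few of the
+capability names as `typing.Protocol` stubs. This is purely illustrative and
+non-normative: method names mirror the current runtime wrappers referenced in
+the matrix below (`wait_next`, `current_timestamp_ns`, `read_market_snapshot`,
+`apply_intents`, `read_orders_snapshot`), while argument and return types are
+placeholder assumptions, not production signatures for this slice.
+
+```python
+from __future__ import annotations
+
+from typing import Any, Protocol, Sequence
+
+
+class VenueEventWaiter(Protocol):
+    """Wakeup signaling for runtime loop progression (runtime/internal only)."""
+
+    def wait_next(self) -> int: ...
+
+
+class VenueClock(Protocol):
+    """Adopted venue-local timestamp axis; never ProcessingOrder authority."""
+
+    def current_timestamp_ns(self) -> int: ...
+
+
+class MarketInputSource(Protocol):
+    """Market snapshots/deltas destined for canonical market mapping."""
+
+    def read_market_snapshot(self) -> Any: ...
+
+
+class OrderSubmissionGateway(Protocol):
+    """Outbound intent dispatch; submission boundary only, no execution authority."""
+
+    def apply_intents(self, intents: Sequence[Any]) -> None: ...
+
+
+class OrderSnapshotSource(Protocol):
+    """Order snapshots for compatibility lifecycle materialization."""
+
+    def read_orders_snapshot(self) -> Sequence[Any]: ...
+```
+
+Splitting along these seams lets an adapter declare only the capabilities its
+venue can actually honor, instead of stubbing out a monolithic interface.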
+ +--- + +## Capability classification matrix + +| capability | responsibility | authority classification | current hftbacktest mapping | future live venue possibility | guardrails / non-goals | +| --- | --- | --- | --- | --- | --- | +| `VenueEventWaiter` / `WakeupSource` | wakeup signaling and wait control for runtime loop progression | runtime/internal only | mapped by `wait_next(...)` wrapper calling `wait_next_feed(...)` | may support richer wakeup sources while preserving runner loop ownership | wakeup signaling is not canonical Event authority; no branch ordering changes in this slice | +| `VenueClock` (runtime clock boundary view) | provide adopted venue-local timestamp axis used by runtime timestamp update | runtime/internal only | mapped by `current_timestamp_ns()` wrapper | may expose richer venue receipt/event-time metadata while runtime keeps canonical ordering by `ProcessingPosition` | clock/timestamp must not be treated as `ProcessingOrder` authority | +| `MarketInputSource` | provide market snapshots/deltas for canonical market mapping | canonical event capable | `read_market_snapshot()` mapped to canonical `MarketEvent` in runner | live adapters may map native book/trade feeds into canonical market events under runtime mapping | no hidden mutable snapshot promotion to canonical semantics outside boundary mapping | +| `OrderSubmissionGateway` | submit/modify/cancel outbound intents and expose dispatch result boundary | canonical event capable (submission boundary), plus runtime/internal transport | `apply_intents(...)`; successful `new` dispatch leads to canonical `OrderSubmittedEvent` | live adapters may provide richer dispatch metadata while preserving current canonical submission boundary semantics | no post-submission execution authority from synchronous return codes | +| `OrderSnapshotSource` | provide order snapshots for compatibility lifecycle materialization | compatibility projection only | `read_orders_snapshot()` -> `ingest_order_snapshots()` -> `OrderStateEvent` path | may remain compatibility sidecar where canonical execution feedback is unavailable | no `OrderStateEvent` canonicalization; no snapshot-to-canonical promotion | +| `AccountSnapshotSource` | provide account snapshots for runtime/account views and compatibility projections | compatibility projection only / runtime/internal only | `state_values` adoption into `update_account(...)` | live adapters may offer richer account views without canonical authority by default | no implicit canonical account event expansion in this slice | +| `ExecutionFeedbackRecordSource` | provide authoritative execution-feedback records for future canonical `FillEvent` mapping | optional future capability (canonical only after REFC/RAEFSC gates) | unsupported/ineligible today for hftbacktest integration | live adapters may satisfy this with native execution reports and deterministic source sequencing | no `FillEvent` ingress implementation here; no synthetic required-field authority | + +--- + +## hftbacktest capability map (current snapshot) + +`VADN-06` - Current hftbacktest integration under this model: + +- `MarketInputSource`: supported; canonical `MarketEvent` mapping path exists. +- `OrderSubmissionGateway`: supported for successful `new` dispatch boundary via + canonical `OrderSubmittedEvent` path. +- `OrderSnapshotSource`: supported; remains compatibility-only. +- `AccountSnapshotSource`: supported for compatibility/runtime-internal account + snapshot adoption. 
+- `VenueEventWaiter` + `VenueClock`: supported through existing wrappers. +- `ExecutionFeedbackRecordSource`: unsupported/ineligible today. + +`VADN-07` - Compatibility authority remains frozen for post-submission lifecycle +progression (`OrderStateEvent` / `DerivedFillEvent` path unchanged). + +--- + +## hftbacktest internals that remain internal + +`VADN-08` - The following are adapter/runtime internals and must not be treated +as canonical source semantics: + +- rc wakeup codes and branch signaling (`rc == 1/2/3`); +- hftbacktest order/depth object schemas; +- numeric enum mapping (time-in-force/order type) in execution adapter; +- string-to-`int64` order id adaptation at adapter boundary; +- recorder plumbing (`record(...)` wrapper behavior). + +--- + +## Future live venue expansion (non-implemented) + +`VADN-09` - A future live adapter may satisfy additional capabilities without +changing `core` semantics: + +- native execution reports exposed as `ExecutionFeedbackRecordSource`; +- source-authoritative `liquidity_flag` values; +- stable canonical correlation to `instrument + client_order_id`; +- deterministic strictly monotone non-timestamp `source_sequence`. + +`VADN-10` - Runner remains owner of global merge into `EventStreamEntry` with +`ProcessingPosition` ordering authority across canonical categories. + +`VADN-11` - `core` remains unchanged and venue-agnostic under this expansion. + +--- + +## Boundary rules + +`VADN-12` - `core` must not import runtime adapter classes. + +`VADN-13` - `core` must not know hftbacktest-specific APIs or structures. + +`VADN-14` - Runtime owns adapter orchestration and capability composition. + +`VADN-15` - Runtime owns mapping from adapter capability outputs to canonical +`EventStreamEntry` or compatibility ingestion paths. + +`VADN-16` - Adapter capabilities must not mutate `StrategyState` directly. + +`VADN-17` - Adapter capabilities must not call `process_event_entry` directly. + +`VADN-18` - Adapters expose source capabilities only; semantic authority is +decided at runtime boundary mapping under existing contracts. + +--- + +## Explicit non-goals for this slice + +`VADN-19` - No adapter protocol implementation. + +`VADN-20` - No runtime branch ordering or wakeup behavior changes. + +`VADN-21` - No canonical `FillEvent` ingress implementation. + +`VADN-22` - No `OrderStateEvent` canonicalization. + +`VADN-23` - No `DerivedFillEvent` behavior change. + +`VADN-24` - No snapshot lifecycle rewrite. + +`VADN-25` - No reducer or event taxonomy changes. + +`VADN-26` - No `ProcessingContext` implementation. + +`VADN-27` - No replay/storage/EventStream persistence implementation. + +`VADN-28` - No package or directory rename. + +--- + +## Future implementation prerequisites (before protocol introduction) + +`VADN-29` - Define a protocol-by-protocol migration strategy (incremental, +behavior-preserving) before introducing concrete protocol interfaces. + +`VADN-30` - Prepare characterization-first test plan for current behavior and +ordering invariants before abstraction refactors. + +`VADN-31` - Require hftbacktest parity tests proving no behavior drift across: + +- wakeup semantics; +- market mapping path; +- snapshot compatibility path; +- submission boundary behavior; +- canonical cursor/position sequencing invariants. + +`VADN-32` - Require explicit no-behavior-change proof for each migration step. + +`VADN-33` - Keep `ExecutionFeedbackRecordSource` gated by REFC/RAEFSC contracts; +no implementation planning should bypass those gate clauses. 
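+
+As a concrete (but non-normative) illustration of the characterization-first
+direction in `VADN-30`/`VADN-31`, a parity check can diff two recorded event
+logs entry by entry. The sketch below assumes only that runs record events as
+JSON lines (as `FileRecorderSink` does); the log paths and the way a candidate
+run is produced are placeholders, not part of this slice.
+
+```python
+import json
+from pathlib import Path
+
+
+def load_event_log(path: Path) -> list[dict]:
+    """Parse a JSONL event log into an ordered list of records."""
+    return [
+        json.loads(line)
+        for line in path.read_text(encoding="utf-8").splitlines()
+        if line.strip()
+    ]
+
+
+def assert_event_parity(baseline: Path, candidate: Path) -> None:
+    """Fail on any ordering or content drift between two recorded runs."""
+    base_events = load_event_log(baseline)
+    cand_events = load_event_log(candidate)
+    assert len(cand_events) == len(base_events), (
+        f"event count drifted: {len(base_events)} -> {len(cand_events)}"
+    )
+    for i, (expected, actual) in enumerate(zip(base_events, cand_events)):
+        assert expected == actual, f"event {i} drifted: {expected!r} != {actual!r}"
+```
+
+Such a check deliberately treats the event stream as opaque: it proves "no
+behavior drift" without interpreting event semantics, which keeps it valid
+across every migration step required by `VADN-32`.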
+ +--- + diff --git a/pyproject.toml b/pyproject.toml index a538ec5..89b4a75 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,9 +3,9 @@ requires = ["setuptools>=69", "wheel"] build-backend = "setuptools.build_meta" [project] -name = "trading-runtime" +name = "tradingchassis-core-runtime" version = "0.1.0" -description = "Runtime execution and Kubernetes orchestration layer for the trading-framework with reproducible environments and deterministic backtesting." +description = "Runtime execution and Kubernetes orchestration layer for the core with reproducible environments and deterministic backtesting." readme = "README.md" requires-python = ">=3.11" authors = [{ name = "tradingeng@protonmail.com" }] @@ -18,6 +18,13 @@ classifiers = [ "Operating System :: OS Independent" ] +dependencies = [ + "hftbacktest>=2,<3", + "mlflow>=3,<4", + "oci>=2,<3", + "prometheus-client>=0.24,<1", +] + [project.optional-dependencies] dev = [ "pytest>=9,<10", @@ -30,13 +37,13 @@ dev = [ # Explicit package discovery # -------------------------------------------------- [tool.setuptools.packages.find] -include = ["trading_runtime*"] +include = ["core_runtime*"] # -------------------------------------------------- # Include runtime assets # -------------------------------------------------- [tool.setuptools.package-data] -trading_runtime = ["**/*"] +core_runtime = ["**/*"] [tool.setuptools] include-package-data = true @@ -75,5 +82,5 @@ ignore_errors = true # Import Linter # -------------------------------------------------- [tool.importlinter] -root_package = "trading_runtime" +root_package = "core_runtime" include_external_packages = true diff --git a/requirements-dev.txt b/requirements-dev.txt index 9c929cb..f98bc2b 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -12,10 +12,6 @@ annotated-types==0.7.0 # via pydantic anyio==4.12.1 # via starlette -attrs==25.4.0 - # via - # jsonschema - # referencing bleach==6.3.0 # via panel blinker==1.9.0 @@ -99,7 +95,7 @@ gunicorn==23.0.0 h11==0.16.0 # via uvicorn hftbacktest==2.4.4 - # via trading-framework + # via tradingchassis-core-runtime (pyproject.toml) holoviews==1.22.1 # via hftbacktest huey==2.6.0 @@ -109,7 +105,7 @@ idna==3.11 # anyio # requests import-linter==1.12.1 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) importlib-metadata==8.7.1 # via # mlflow-skinny @@ -124,10 +120,6 @@ jinja2==3.1.6 # flask joblib==1.5.3 # via scikit-learn -jsonschema==4.26.0 - # via trading-framework -jsonschema-specifications==2025.9.1 - # via jsonschema kiwisolver==1.4.9 # via matplotlib librt==0.8.0 @@ -159,13 +151,13 @@ mdit-py-plugins==0.5.0 mdurl==0.1.2 # via markdown-it-py mlflow==3.9.0 - # via trading-framework + # via tradingchassis-core-runtime (pyproject.toml) mlflow-skinny==3.9.0 # via mlflow mlflow-tracing==3.9.0 # via mlflow mypy==1.19.1 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) mypy-extensions==1.1.0 # via mypy narwhals==2.16.0 @@ -189,9 +181,8 @@ numpy==2.2.6 # scikit-learn # scipy # skops - # trading-framework oci==2.167.1 - # via trading-framework + # via tradingchassis-core-runtime (pyproject.toml) opentelemetry-api==1.39.1 # via # mlflow-skinny @@ -247,7 +238,7 @@ polars-runtime-32==1.38.1 prettytable==3.17.0 # via skops prometheus-client==0.24.1 - # via trading-framework + # via tradingchassis-core-runtime (pyproject.toml) protobuf==6.33.5 # via # databricks-sdk @@ -255,9 +246,7 @@ protobuf==6.33.5 # mlflow-tracing # opentelemetry-proto 
pyarrow==16.1.0 - # via - # mlflow - # trading-framework + # via mlflow pyasn1==0.6.2 # via # pyasn1-modules @@ -271,7 +260,7 @@ pydantic==2.12.5 # fastapi # mlflow-skinny # mlflow-tracing - # trading-framework + # tradingchassis-core pydantic-core==2.41.5 # via pydantic pygments==2.19.2 @@ -281,7 +270,7 @@ pyopenssl==25.1.0 pyparsing==3.3.2 # via matplotlib pytest==9.0.2 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) python-dateutil==2.9.0.post0 # via # graphene @@ -303,24 +292,16 @@ pyyaml==6.0.3 # via # bokeh # mlflow-skinny -referencing==0.37.0 - # via - # jsonschema - # jsonschema-specifications requests==2.32.5 # via # databricks-sdk # docker # mlflow-skinny # panel -rpds-py==0.30.0 - # via - # jsonschema - # referencing rsa==4.9.1 # via google-auth ruff==0.15.1 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) scikit-learn==1.8.0 # via # mlflow @@ -349,10 +330,8 @@ threadpoolctl==3.6.0 tornado==6.5.4 # via bokeh tqdm==4.67.3 - # via - # panel - # trading-framework -trading-framework @ git+https://github.com/trading-engineering/trading-framework.git@934d332c21bef56fa76c19f477143d8d438238c2 + # via panel +tradingchassis-core @ git+https://github.com/TradingChassis/core.git@10b09aac06881b0f8f942e08104a09d86ba2d2ca # via -r _git_deps.in typing-extensions==4.15.0 # via @@ -371,7 +350,6 @@ typing-extensions==4.15.0 # pydantic # pydantic-core # pyopenssl - # referencing # sqlalchemy # starlette # typing-inspection @@ -383,8 +361,6 @@ tzdata==2025.3 # via pandas uc-micro-py==1.0.3 # via linkify-it-py -ujson==5.11.0 - # via trading-framework urllib3==2.6.3 # via # docker diff --git a/requirements.txt b/requirements.txt index 4259c45..d3c302f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,10 +12,6 @@ annotated-types==0.7.0 # via pydantic anyio==4.12.1 # via starlette -attrs==25.4.0 - # via - # jsonschema - # referencing bleach==6.3.0 # via panel blinker==1.9.0 @@ -96,7 +92,7 @@ gunicorn==23.0.0 h11==0.16.0 # via uvicorn hftbacktest==2.4.4 - # via trading-framework + # via tradingchassis-core-runtime (pyproject.toml) holoviews==1.22.1 # via hftbacktest huey==2.6.0 @@ -117,10 +113,6 @@ jinja2==3.1.6 # flask joblib==1.5.3 # via scikit-learn -jsonschema==4.26.0 - # via trading-framework -jsonschema-specifications==2025.9.1 - # via jsonschema kiwisolver==1.4.9 # via matplotlib linkify-it-py==2.0.3 @@ -150,7 +142,7 @@ mdit-py-plugins==0.5.0 mdurl==0.1.2 # via markdown-it-py mlflow==3.9.0 - # via trading-framework + # via tradingchassis-core-runtime (pyproject.toml) mlflow-skinny==3.9.0 # via mlflow mlflow-tracing==3.9.0 @@ -176,9 +168,8 @@ numpy==2.2.6 # scikit-learn # scipy # skops - # trading-framework oci==2.167.1 - # via trading-framework + # via tradingchassis-core-runtime (pyproject.toml) opentelemetry-api==1.39.1 # via # mlflow-skinny @@ -229,7 +220,7 @@ polars-runtime-32==1.38.1 prettytable==3.17.0 # via skops prometheus-client==0.24.1 - # via trading-framework + # via tradingchassis-core-runtime (pyproject.toml) protobuf==6.33.5 # via # databricks-sdk @@ -237,9 +228,7 @@ protobuf==6.33.5 # mlflow-tracing # opentelemetry-proto pyarrow==16.1.0 - # via - # mlflow - # trading-framework + # via mlflow pyasn1==0.6.2 # via # pyasn1-modules @@ -253,7 +242,7 @@ pydantic==2.12.5 # fastapi # mlflow-skinny # mlflow-tracing - # trading-framework + # tradingchassis-core pydantic-core==2.41.5 # via pydantic pyopenssl==25.1.0 @@ -281,20 +270,12 @@ pyyaml==6.0.3 # via # bokeh # mlflow-skinny 
-referencing==0.37.0 - # via - # jsonschema - # jsonschema-specifications requests==2.32.5 # via # databricks-sdk # docker # mlflow-skinny # panel -rpds-py==0.30.0 - # via - # jsonschema - # referencing rsa==4.9.1 # via google-auth scikit-learn==1.8.0 @@ -325,10 +306,8 @@ threadpoolctl==3.6.0 tornado==6.5.4 # via bokeh tqdm==4.67.3 - # via - # panel - # trading-framework -trading-framework @ git+https://github.com/trading-engineering/trading-framework.git@934d332c21bef56fa76c19f477143d8d438238c2 + # via panel +tradingchassis-core @ git+https://github.com/TradingChassis/core.git@10b09aac06881b0f8f942e08104a09d86ba2d2ca # via -r _git_deps.in typing-extensions==4.15.0 # via @@ -344,7 +323,6 @@ typing-extensions==4.15.0 # pydantic # pydantic-core # pyopenssl - # referencing # sqlalchemy # starlette # typing-inspection @@ -356,8 +334,6 @@ tzdata==2025.3 # via pandas uc-micro-py==1.0.3 # via linkify-it-py -ujson==5.11.0 - # via trading-framework urllib3==2.6.3 # via # docker diff --git a/scripts/check.sh b/scripts/check.sh index 1ba9c8a..4e560e3 100755 --- a/scripts/check.sh +++ b/scripts/check.sh @@ -5,12 +5,12 @@ echo "๐Ÿ” Running import-linter..." lint-imports --verbose echo "โšก Running ruff (check only)..." -ruff check trading_runtime tests +ruff check core_runtime tests echo "๐Ÿง  Running mypy..." -mypy trading_runtime tests +mypy core_runtime tests echo "๐Ÿงช Running pytest..." -pytest +python -m pytest echo "โœ… All checks passed!" diff --git a/scripts/compile-requirements.sh b/scripts/compile-requirements.sh index 1bfdcf4..5439f90 100755 --- a/scripts/compile-requirements.sh +++ b/scripts/compile-requirements.sh @@ -6,10 +6,10 @@ set -a source .env set +a -: "${TRADING_FRAMEWORK_COMMIT:?Missing TRADING_FRAMEWORK_COMMIT in .env}" +: "${TRADINGCHASSIS_CORE_COMMIT:?Missing TRADINGCHASSIS_CORE_COMMIT in .env}" echo "๐Ÿ”ง Compiling requirements with pip-tools..." 
-echo "๐Ÿ“Œ Pinning trading-framework at commit: $TRADING_FRAMEWORK_COMMIT" +echo "๐Ÿ“Œ Pinning core at commit: $TRADINGCHASSIS_CORE_COMMIT" python -m pip install --upgrade \ "pip>=23.3,<25" \ @@ -19,7 +19,7 @@ python -m pip install --upgrade \ # Temporary requirements input for git dependency cat > _git_deps.in < None: + workspace_root = Path(__file__).resolve().parents[2] + runtime_root = workspace_root / "core-runtime" + core_root = workspace_root / "core" + + for path in (runtime_root, core_root): + path_str = str(path) + if path_str not in sys.path: + sys.path.insert(0, path_str) + + +_ensure_workspace_import_paths() diff --git a/tests/data/results/events.json b/tests/data/results/events.json deleted file mode 100644 index afb305a..0000000 --- a/tests/data/results/events.json +++ /dev/null @@ -1,412 +0,0 @@ -{"event": "RiskDecisionEvent(ts_ns_local=1723161256101000000, accepted=6, queued=0, rejected=0, handled=0, reject_reasons={})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state=None, next_state='working')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='working')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='working')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1078790734324421344', prev_state=None, next_state='working')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='working')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='7581400325422276892', prev_state=None, next_state='working')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256201000000, accepted=0, queued=0, rejected=0, handled=6, reject_reasons={})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state='working', next_state='expired')"} -{"event": "DerivedFillEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', side='buy', delta_qty=0.08, cum_qty=0.08, price=59999.9)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state='working', next_state='expired')"} -{"event": "DerivedFillEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', side='sell', delta_qty=0.01, cum_qty=0.01, price=60000.100000000006)"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256301000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161256310000000, instrument='BTC_USDC-PERPETUAL', exposure=1200.0000000000002, delta_exposure=1200.0000000000002)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, 
instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='expired')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256401000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256501000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256601000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256701000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='expired')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256801000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "DerivedPnLEvent(ts_ns_local=1723161256810000000, instrument='BTC_USDC-PERPETUAL', delta_pnl=0.0009999999999763531, cum_realized_pnl=0.0009999999999763531)"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161256810000000, instrument='BTC_USDC-PERPETUAL', exposure=599.9999999999997, delta_exposure=-600.0000000000006)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256901000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257001000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257101000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257201000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257301000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257401000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257501000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257601000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257701000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257801000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} 
-{"event": "RiskDecisionEvent(ts_ns_local=1723161257901000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258001000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258101000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258201000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258301000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258401000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258501000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258601000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258701000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258801000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258901000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259001000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259101000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259201000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259301000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259401000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "DerivedPnLEvent(ts_ns_local=1723161259410000000, instrument='BTC_USDC-PERPETUAL', delta_pnl=-0.0004999999999881766, cum_realized_pnl=0.0004999999999881766)"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161259410000000, instrument='BTC_USDC-PERPETUAL', exposure=599.9994999999997, delta_exposure=-0.0004999999999881766)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state='working', next_state='expired')"} -{"event": "DerivedFillEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', side='buy', delta_qty=0.02, cum_qty=0.02, price=59999.8)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', 
client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259501000000, accepted=2, queued=0, rejected=3, handled=1, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 3})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='expired')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259601000000, accepted=0, queued=0, rejected=3, handled=3, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 3})"} -{"event": "DerivedPnLEvent(ts_ns_local=1723161259610000000, instrument='BTC_USDC-PERPETUAL', delta_pnl=0.012000000000284672, cum_realized_pnl=0.012500000000272848)"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161259610000000, instrument='BTC_USDC-PERPETUAL', exposure=5399.9955, delta_exposure=4799.996)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259701000000, accepted=0, queued=0, rejected=3, handled=3, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 3})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259801000000, accepted=0, queued=0, rejected=3, handled=3, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 3})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state='working', next_state='expired')"} -{"event": "DerivedFillEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', side='sell', delta_qty=0.02, cum_qty=0.02, price=60000.100000000006)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259901000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state=None, next_state='expired')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, 
next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260001000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260101000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260201000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "DerivedPnLEvent(ts_ns_local=1723161260210000000, instrument='BTC_USDC-PERPETUAL', delta_pnl=0.007500000000163709, cum_realized_pnl=0.020000000000436557)"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161260210000000, instrument='BTC_USDC-PERPETUAL', exposure=2399.9979999999996, delta_exposure=-2999.9975000000004)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260301000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260401000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260501000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260601000000, accepted=6, queued=0, rejected=0, handled=0, reject_reasons={})"} -{"event": "DerivedPnLEvent(ts_ns_local=1723161260621000000, instrument='BTC_USDC-PERPETUAL', delta_pnl=0.00400000000036016, cum_realized_pnl=0.024000000000796717)"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161260621000000, instrument='BTC_USDC-PERPETUAL', exposure=2400.002, delta_exposure=0.00400000000036016)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260701000000, accepted=2, queued=2, rejected=0, handled=2, reject_reasons={})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161260751000000, accepted=1, queued=1, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260801000000, accepted=1, queued=3, rejected=1, handled=2, reject_reasons={'ORDER_NOT_FOUND': 1})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260851000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260901000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260951000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261001000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261051000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261101000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261151000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261201000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261251000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261301000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261351000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261401000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261451000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261501000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261551000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261601000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261651000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261701000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261751000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261801000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261851000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261901000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261951000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262001000000, accepted=1, queued=3, rejected=2, 
handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262051000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262101000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262151000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262201000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262251000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262301000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262351000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262401000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262451000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262501000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262551000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262601000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262651000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262701000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262751000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262801000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262851000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262901000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262951000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263001000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263051000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263101000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263151000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263201000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263251000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161263301000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263351000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263401000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263451000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263501000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263551000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263601000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263651000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263701000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263751000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263801000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263851000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263901000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263951000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264001000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264051000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264101000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264151000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264201000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264251000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264301000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264351000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264401000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264451000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264501000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264551000000, accepted=1, 
queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264601000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264651000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264701000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264751000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264801000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264851000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264901000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264951000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265001000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265051000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265101000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265151000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265201000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265251000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265301000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265351000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265401000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265451000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265501000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265551000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265601000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265651000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265701000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265751000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265801000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161265851000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "DerivedPnLEvent(ts_ns_local=1723161265851000000, instrument='BTC_USDC-PERPETUAL', delta_pnl=0.020000000000436557, cum_realized_pnl=0.044000000001233275)"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161265851000000, instrument='BTC_USDC-PERPETUAL', exposure=7200.006, delta_exposure=4800.004000000001)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, 
accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=2, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, 
reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=2, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, 
rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} 
-{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, 
queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=2, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 
4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=2, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, 
accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, 
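The deleted fixture above is a stream of repr-style structured log records: hundreds of near-identical `RiskDecisionEvent` lines at a 50 ms cadence, followed by a burst of `MAX_GROSS_QUOTE_NOTIONAL` rejects. A small illustrative parser for one such record — the helper below is only a sketch of how the format decomposes, not part of this change:

```python
# Illustrative only: parse a repr-style RiskDecisionEvent log line back into
# its fields. Field names come from the deleted log above; this helper is
# not part of the diff.
import ast
import json
import re


def parse_risk_decision(line: str) -> dict[str, object]:
    payload = json.loads(line)["event"]  # "RiskDecisionEvent(ts_ns_local=..., ...)"
    body = re.fullmatch(r"RiskDecisionEvent\((.*)\)", payload).group(1)
    # Split on top-level commas only (reject_reasons holds a dict literal).
    fields = re.split(r",\s(?![^{]*})", body)
    out: dict[str, object] = {}
    for item in fields:
        key, value = item.split("=", 1)
        out[key] = ast.literal_eval(value)
    return out


record = parse_risk_decision(
    '{"event": "RiskDecisionEvent(ts_ns_local=1723161260901000000, '
    "accepted=1, queued=3, rejected=2, handled=2, "
    "reject_reasons={'ORDER_NOT_FOUND': 2})\"}"
)
assert record["rejected"] == 2
assert record["reject_reasons"] == {"ORDER_NOT_FOUND": 2}
```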
diff --git a/tests/data/results/stats.npz b/tests/data/results/stats.npz
deleted file mode 100644
index da56e62..0000000
Binary files a/tests/data/results/stats.npz and /dev/null differ
diff --git a/tests/runtime/test_adapter_boundary_guards.py b/tests/runtime/test_adapter_boundary_guards.py
new file mode 100644
index 0000000..b1dfbe3
--- /dev/null
+++ b/tests/runtime/test_adapter_boundary_guards.py
@@ -0,0 +1,91 @@
+"""Static boundary guard tests for adapter/core layering.
+
+These checks intentionally use lightweight text scanning to catch boundary
+drift early without introducing new tooling dependencies.
+""" + +from __future__ import annotations + +import re +from pathlib import Path + +import pytest + +CORE_RUNTIME_ROOT = Path(__file__).resolve().parents[2] +WORKSPACE_ROOT = CORE_RUNTIME_ROOT.parent + +ADAPTER_FILES = [ + CORE_RUNTIME_ROOT / "core_runtime/backtest/adapters/venue.py", + CORE_RUNTIME_ROOT / "core_runtime/backtest/adapters/execution.py", + CORE_RUNTIME_ROOT / "core_runtime/backtest/adapters/protocols.py", +] + + +def _read_text(path: Path) -> str: + return path.read_text(encoding="utf-8") + + +def _assert_no_matches(path: Path, patterns: list[str], description: str) -> None: + content = _read_text(path) + for pattern in patterns: + match = re.search(pattern, content, flags=re.MULTILINE) + assert match is None, f"{path} violates {description}: matched /{pattern}/" + + +def test_adapters_do_not_import_or_reference_strategy_state() -> None: + patterns = [ + r"\bStrategyState\b", + ] + for path in ADAPTER_FILES: + _assert_no_matches(path, patterns, "StrategyState boundary") + + +def test_adapters_do_not_import_or_call_canonical_processing_boundaries() -> None: + patterns = [ + r"\bprocess_event_entry\b", + r"\bprocess_canonical_event\b", + r"\bfold_event_stream_entries\b", + ] + for path in ADAPTER_FILES: + _assert_no_matches(path, patterns, "canonical processing boundary") + + +def test_adapters_do_not_import_or_construct_fill_event() -> None: + patterns = [ + r"^\s*from\s+[^\n]*\s+import\s+[^\n]*\bFillEvent\b", + r"^\s*import\s+[^\n]*\bFillEvent\b", + r"\bFillEvent\s*\(", + ] + for path in ADAPTER_FILES: + _assert_no_matches(path, patterns, "FillEvent ingress boundary") + + +def test_core_production_package_does_not_import_trading_runtime() -> None: + core_pkg = WORKSPACE_ROOT / "core/tradingchassis_core" + if not core_pkg.exists(): + pytest.skip( + f"core package not present in this checkout layout: {core_pkg}" + ) + + patterns = [ + r"^\s*import\s+trading_runtime(\.|$)", + r"^\s*from\s+trading_runtime(\.|\s+import\b)", + r"^\s*import\s+core_runtime(\.|$)", + r"^\s*from\s+core_runtime(\.|\s+import\b)", + ] + + for path in core_pkg.rglob("*.py"): + _assert_no_matches(path, patterns, "core->core-runtime import boundary") + + +def test_protocols_does_not_define_execution_feedback_record_source_yet() -> None: + protocols_py = CORE_RUNTIME_ROOT / "core_runtime/backtest/adapters/protocols.py" + patterns = [ + r"^\s*class\s+ExecutionFeedbackRecordSource\b", + r"^\s*ExecutionFeedbackRecordSource\s*=", + ] + _assert_no_matches( + protocols_py, + patterns, + "deferred ExecutionFeedbackRecordSource capability", + ) diff --git a/tests/runtime/test_core_configuration_mapper.py b/tests/runtime/test_core_configuration_mapper.py new file mode 100644 index 0000000..76c33d2 --- /dev/null +++ b/tests/runtime/test_core_configuration_mapper.py @@ -0,0 +1,170 @@ +from __future__ import annotations + +import pytest +from tradingchassis_core.core.domain.configuration import CoreConfiguration + +from core_runtime.backtest.runtime.core_configuration_mapper import ( + build_core_configuration_from_run_config, +) + + +def _valid_run_config() -> dict[str, object]: + return { + "engine": { + "instrument": "BTC_USDC-PERPETUAL", + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1.0, + }, + "core": { + "version": "v1", + "market": { + "instruments": { + "BTC_USDC-PERPETUAL": { + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1.0, + } + } + }, + }, + } + + +def test_valid_explicit_core_builds_core_configuration() -> None: + cfg = 
diff --git a/tests/runtime/test_core_configuration_mapper.py b/tests/runtime/test_core_configuration_mapper.py
new file mode 100644
index 0000000..76c33d2
--- /dev/null
+++ b/tests/runtime/test_core_configuration_mapper.py
@@ -0,0 +1,170 @@
+from __future__ import annotations
+
+import pytest
+from tradingchassis_core.core.domain.configuration import CoreConfiguration
+
+from core_runtime.backtest.runtime.core_configuration_mapper import (
+    build_core_configuration_from_run_config,
+)
+
+
+def _valid_run_config() -> dict[str, object]:
+    return {
+        "engine": {
+            "instrument": "BTC_USDC-PERPETUAL",
+            "tick_size": 0.1,
+            "lot_size": 0.01,
+            "contract_size": 1.0,
+        },
+        "core": {
+            "version": "v1",
+            "market": {
+                "instruments": {
+                    "BTC_USDC-PERPETUAL": {
+                        "tick_size": 0.1,
+                        "lot_size": 0.01,
+                        "contract_size": 1.0,
+                    }
+                }
+            },
+        },
+    }
+
+
+def test_valid_explicit_core_builds_core_configuration() -> None:
+    cfg = build_core_configuration_from_run_config(_valid_run_config())
+
+    assert isinstance(cfg, CoreConfiguration)
+    assert cfg.version == "v1"
+    assert cfg.payload["market"]["instruments"]["BTC_USDC-PERPETUAL"]["tick_size"] == 0.1
+
+
+def test_missing_core_fails() -> None:
+    run_config = _valid_run_config()
+    run_config.pop("core")
+
+    with pytest.raises(ValueError, match="Missing required top-level section: core"):
+        build_core_configuration_from_run_config(run_config)
+
+
+def test_missing_version_fails() -> None:
+    run_config = _valid_run_config()
+    run_config["core"] = {
+        "market": run_config["core"]["market"],  # type: ignore[index]
+    }
+
+    with pytest.raises(ValueError, match="core.version"):
+        build_core_configuration_from_run_config(run_config)
+
+
+def test_missing_market_instruments_fails() -> None:
+    run_config = _valid_run_config()
+    run_config["core"] = {"version": "v1", "market": {}}
+
+    with pytest.raises(ValueError, match="core.market.instruments"):
+        build_core_configuration_from_run_config(run_config)
+
+
+def test_missing_instrument_entry_fails() -> None:
+    run_config = _valid_run_config()
+    run_config["core"] = {
+        "version": "v1",
+        "market": {
+            "instruments": {
+                "ETH_USDC-PERPETUAL": {
+                    "tick_size": 0.1,
+                    "lot_size": 0.01,
+                    "contract_size": 1.0,
+                }
+            }
+        },
+    }
+
+    with pytest.raises(ValueError, match="core.market.instruments.BTC_USDC-PERPETUAL"):
+        build_core_configuration_from_run_config(run_config)
+
+
+@pytest.mark.parametrize("field_name", ["tick_size", "lot_size", "contract_size"])
+def test_missing_required_metadata_field_fails(field_name: str) -> None:
+    run_config = _valid_run_config()
+    instrument_cfg = run_config["core"]["market"]["instruments"]["BTC_USDC-PERPETUAL"]  # type: ignore[index]
+    instrument_cfg.pop(field_name)
+
+    with pytest.raises(ValueError, match=field_name):
+        build_core_configuration_from_run_config(run_config)
+
+
+@pytest.mark.parametrize("field_name", ["tick_size", "lot_size", "contract_size"])
+def test_none_value_fails(field_name: str) -> None:
+    run_config = _valid_run_config()
+    run_config["core"]["market"]["instruments"]["BTC_USDC-PERPETUAL"][field_name] = None  # type: ignore[index]
+
+    with pytest.raises(ValueError, match=field_name):
+        build_core_configuration_from_run_config(run_config)
+
+
+@pytest.mark.parametrize("field_name", ["tick_size", "lot_size", "contract_size"])
+def test_bool_value_fails(field_name: str) -> None:
+    run_config = _valid_run_config()
+    run_config["core"]["market"]["instruments"]["BTC_USDC-PERPETUAL"][field_name] = True  # type: ignore[index]
+
+    with pytest.raises(TypeError, match="must be numeric"):
+        build_core_configuration_from_run_config(run_config)
+
+
+@pytest.mark.parametrize("field_name", ["tick_size", "lot_size", "contract_size"])
+def test_non_numeric_value_fails(field_name: str) -> None:
+    run_config = _valid_run_config()
+    run_config["core"]["market"]["instruments"]["BTC_USDC-PERPETUAL"][field_name] = "x"  # type: ignore[index]
+
+    with pytest.raises(TypeError, match="must be numeric"):
+        build_core_configuration_from_run_config(run_config)
+
+
+@pytest.mark.parametrize("bad", [float("nan"), float("inf"), float("-inf")])
+def test_non_finite_value_fails(bad: float) -> None:
+    run_config = _valid_run_config()
+    run_config["core"]["market"]["instruments"]["BTC_USDC-PERPETUAL"]["tick_size"] = bad  # type: ignore[index]
+
+    with pytest.raises(ValueError, match="must be finite"):
+        build_core_configuration_from_run_config(run_config)
+
+
+@pytest.mark.parametrize("bad", [0.0, -1.0])
+def test_non_positive_value_fails(bad: float) -> None:
+    run_config = _valid_run_config()
+    run_config["core"]["market"]["instruments"]["BTC_USDC-PERPETUAL"]["tick_size"] = bad  # type: ignore[index]
+
+    with pytest.raises(ValueError, match="must be > 0"):
+        build_core_configuration_from_run_config(run_config)
+
+
+def test_no_fallback_from_engine_when_core_missing() -> None:
+    run_config = {
+        "engine": {
+            "instrument": "BTC_USDC-PERPETUAL",
+            "tick_size": 0.1,
+            "lot_size": 0.01,
+            "contract_size": 1.0,
+        }
+    }
+
+    with pytest.raises(ValueError, match="Missing required top-level section: core"):
+        build_core_configuration_from_run_config(run_config)
+
+
+def test_engine_duplicate_exact_match_allowed() -> None:
+    run_config = _valid_run_config()
+
+    cfg = build_core_configuration_from_run_config(run_config)
+
+    assert isinstance(cfg, CoreConfiguration)
+
+
+@pytest.mark.parametrize("field_name", ["tick_size", "lot_size", "contract_size"])
+def test_engine_duplicate_mismatch_fails(field_name: str) -> None:
+    run_config = _valid_run_config()
+    run_config["engine"][field_name] = 999.0  # type: ignore[index]
+
+    with pytest.raises(ValueError, match="Conflicting duplicate field values"):
+        build_core_configuration_from_run_config(run_config)
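Together these tests pin the mapper's contract: an explicit `core` section is mandatory with no fallback from `engine`; every required instrument needs numeric, finite, strictly positive `tick_size`, `lot_size`, and `contract_size` (booleans rejected despite being `int` subclasses); and duplicated `engine` fields are tolerated only when they agree exactly. A rough sketch of a validator meeting that contract — illustrative only, not the implementation in `core_runtime.backtest.runtime.core_configuration_mapper`:

```python
# Illustrative sketch of the contract the tests above lock in; not the
# actual implementation in core_runtime.backtest.runtime.core_configuration_mapper.
import math
from typing import Any

REQUIRED_FIELDS = ("tick_size", "lot_size", "contract_size")


def _validate_field(path: str, value: Any) -> float:
    # bool is a subclass of int, so reject it before the numeric check.
    if isinstance(value, bool) or not isinstance(value, (int, float)):
        raise TypeError(f"{path} must be numeric")
    if not math.isfinite(value):
        raise ValueError(f"{path} must be finite")
    if value <= 0:
        raise ValueError(f"{path} must be > 0")
    return float(value)


def build_sketch(run_config: dict[str, Any]) -> dict[str, Any]:
    if "core" not in run_config:
        raise ValueError("Missing required top-level section: core")
    core = run_config["core"]
    if "version" not in core:
        raise ValueError("core.version is required")
    instruments = core.get("market", {}).get("instruments")
    if not instruments:
        raise ValueError("core.market.instruments is required")
    engine = run_config.get("engine", {})
    instrument = engine.get("instrument")
    if instrument is not None and instrument not in instruments:
        raise ValueError(f"core.market.instruments.{instrument} is required")
    for name, meta in instruments.items():
        for field_name in REQUIRED_FIELDS:
            if meta.get(field_name) is None:
                raise ValueError(f"{name} is missing required field: {field_name}")
            value = _validate_field(
                f"core.market.instruments.{name}.{field_name}", meta[field_name]
            )
            # Duplicated engine fields must agree exactly with core.
            if name == instrument and field_name in engine and engine[field_name] != value:
                raise ValueError(f"Conflicting duplicate field values for {field_name}")
    return {"version": core["version"], "payload": core}
```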
diff --git a/tests/runtime/test_event_stream_cursor.py b/tests/runtime/test_event_stream_cursor.py
new file mode 100644
index 0000000..b94b9b7
--- /dev/null
+++ b/tests/runtime/test_event_stream_cursor.py
@@ -0,0 +1,44 @@
+from __future__ import annotations
+
+import pytest
+from tradingchassis_core.core.domain.processing_order import ProcessingPosition
+
+from core_runtime.backtest.engine.event_stream_cursor import EventStreamCursor
+
+
+def test_event_stream_cursor_starts_at_zero() -> None:
+    cursor = EventStreamCursor()
+    assert cursor.next_index == 0
+
+
+def test_attempt_position_does_not_advance_cursor() -> None:
+    cursor = EventStreamCursor()
+    attempted = cursor.attempt_position()
+    assert attempted.index == 0
+    assert cursor.next_index == 0
+
+
+def test_commit_success_advances_by_one() -> None:
+    cursor = EventStreamCursor()
+    attempted = cursor.attempt_position()
+    cursor.commit_success(attempted)
+    assert cursor.next_index == 1
+
+
+def test_commit_success_rejects_mismatched_position() -> None:
+    cursor = EventStreamCursor()
+    with pytest.raises(ValueError, match="Committed position does not match expected next index"):
+        cursor.commit_success(ProcessingPosition(index=1))
+    assert cursor.next_index == 0
+
+
+def test_repeated_attempt_commit_produces_sequential_positions() -> None:
+    cursor = EventStreamCursor()
+    observed: list[int] = []
+    for _ in range(3):
+        position = cursor.attempt_position()
+        observed.append(position.index)
+        cursor.commit_success(position)
+
+    assert observed == [0, 1, 2]
+    assert cursor.next_index == 3
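The cursor tested above is a two-phase read/commit protocol: `attempt_position()` never advances state, and `commit_success()` accepts only the exact expected position. A sketch consistent with those tests, with `ProcessingPosition` stubbed as a frozen index holder for self-containment (the real class lives in `tradingchassis_core.core.domain.processing_order`):

```python
# Sketch consistent with the tests above; the real implementation lives in
# core_runtime.backtest.engine.event_stream_cursor. ProcessingPosition is
# stubbed here purely so the sketch runs standalone.
from dataclasses import dataclass


@dataclass(frozen=True)
class ProcessingPosition:
    index: int


class EventStreamCursor:
    def __init__(self) -> None:
        self.next_index = 0

    def attempt_position(self) -> ProcessingPosition:
        # Reading the next position never mutates cursor state.
        return ProcessingPosition(index=self.next_index)

    def commit_success(self, position: ProcessingPosition) -> None:
        # Only the exact expected position may be committed.
        if position.index != self.next_index:
            raise ValueError("Committed position does not match expected next index")
        self.next_index += 1
```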
+""" + +from __future__ import annotations + +import inspect +from dataclasses import dataclass, field +from typing import Any + +from tradingchassis_core.core.domain.reject_reasons import RejectReason +from tradingchassis_core.core.domain.types import ( + CancelOrderIntent, + NewOrderIntent, + Price, + Quantity, + ReplaceOrderIntent, +) + +from core_runtime.backtest.adapters.execution import ( + HftBacktestExecutionAdapter, + _to_i64_order_id, +) + + +@dataclass +class _FakeHbt: + """Minimal fake hftbacktest object with configurable outcomes.""" + + result_codes: dict[str, int] = field(default_factory=dict) + raise_on: set[str] = field(default_factory=set) + calls: list[tuple[str, tuple[Any, ...]]] = field(default_factory=list) + + def _maybe_raise(self, method: str) -> None: + if method in self.raise_on: + raise RuntimeError(f"{method} failed") + + def _result_code(self, method: str) -> int: + return self.result_codes.get(method, 0) + + def submit_buy_order( + self, + asset_no: int, + order_id: int, + price: float, + quantity: float, + tif: int, + order_type: int, + post_only_flag: bool, + ) -> int: + self.calls.append( + ( + "submit_buy_order", + ( + asset_no, + order_id, + price, + quantity, + tif, + order_type, + post_only_flag, + ), + ) + ) + self._maybe_raise("submit_buy_order") + return self._result_code("submit_buy_order") + + def submit_sell_order( + self, + asset_no: int, + order_id: int, + price: float, + quantity: float, + tif: int, + order_type: int, + post_only_flag: bool, + ) -> int: + self.calls.append( + ( + "submit_sell_order", + ( + asset_no, + order_id, + price, + quantity, + tif, + order_type, + post_only_flag, + ), + ) + ) + self._maybe_raise("submit_sell_order") + return self._result_code("submit_sell_order") + + def modify( + self, + asset_no: int, + order_id: int, + new_price: float, + new_quantity: float, + post_only_flag: bool, + ) -> int: + self.calls.append( + ( + "modify", + (asset_no, order_id, new_price, new_quantity, post_only_flag), + ) + ) + self._maybe_raise("modify") + return self._result_code("modify") + + def cancel( + self, + asset_no: int, + order_id: int, + post_only_flag: bool, + ) -> int: + self.calls.append(("cancel", (asset_no, order_id, post_only_flag))) + self._maybe_raise("cancel") + return self._result_code("cancel") + + +def _new_intent( + *, + side: str, + client_order_id: str, + order_type: str = "limit", + tif: str = "GTC", + intended_price: Price | None = None, +) -> NewOrderIntent: + return NewOrderIntent( + ts_ns_local=1, + instrument="BTC_USDC-PERPETUAL", + client_order_id=client_order_id, + intents_correlation_id=f"corr-{client_order_id}", + side=side, + order_type=order_type, + intended_qty=Quantity(value=2.5, unit="contracts"), + intended_price=( + intended_price + if intended_price is not None + else Price(currency="USDC", value=100.5) + ), + time_in_force=tif, + ) + + +def _replace_intent(*, client_order_id: str = "cid-replace-1") -> ReplaceOrderIntent: + return ReplaceOrderIntent( + ts_ns_local=2, + instrument="BTC_USDC-PERPETUAL", + client_order_id=client_order_id, + intents_correlation_id=f"corr-{client_order_id}", + side="buy", + order_type="limit", + intended_qty=Quantity(value=3.0, unit="contracts"), + intended_price=Price(currency="USDC", value=101.25), + ) + + +def _cancel_intent(*, client_order_id: str = "cid-cancel-1") -> CancelOrderIntent: + return CancelOrderIntent( + ts_ns_local=3, + instrument="BTC_USDC-PERPETUAL", + client_order_id=client_order_id, + intents_correlation_id=f"corr-{client_order_id}", + ) + + +def 
+
+
+def test_new_buy_and_sell_submissions_call_expected_hbt_methods_and_map_arguments() -> None:
+    fake_hbt = _FakeHbt()
+    adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=7)
+
+    buy_intent = _new_intent(
+        side="buy",
+        client_order_id="123",
+        tif="IOC",
+        order_type="limit",
+        intended_price=Price(currency="USDC", value=99.75),
+    )
+    sell_intent = _new_intent(
+        side="sell",
+        client_order_id="cid-sell-1",
+        tif="POST_ONLY",
+        order_type="market",
+        intended_price=Price(currency="USDC", value=100.5),
+    )
+
+    execution_errors = adapter.apply_intents([buy_intent, sell_intent])
+
+    assert execution_errors == []
+    assert len(fake_hbt.calls) == 2
+
+    method_buy, args_buy = fake_hbt.calls[0]
+    assert method_buy == "submit_buy_order"
+    assert args_buy == (
+        7,
+        _to_i64_order_id("123"),
+        99.75,
+        2.5,
+        3,  # IOC
+        0,  # limit
+        False,
+    )
+
+    method_sell, args_sell = fake_hbt.calls[1]
+    assert method_sell == "submit_sell_order"
+    assert args_sell == (
+        7,
+        _to_i64_order_id("cid-sell-1"),
+        100.5,
+        2.5,
+        1,  # POST_ONLY -> GTX
+        1,  # market
+        False,
+    )
+
+
+def test_new_submission_nonzero_result_code_returns_exchange_reject() -> None:
+    fake_hbt = _FakeHbt(result_codes={"submit_buy_order": 9})
+    adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=1)
+    intent = _new_intent(side="buy", client_order_id="cid-new-reject")
+
+    execution_errors = adapter.apply_intents([intent])
+
+    assert execution_errors == [(intent, RejectReason.EXCHANGE_REJECT)]
+
+
+def test_new_submission_exception_returns_exchange_error() -> None:
+    fake_hbt = _FakeHbt(raise_on={"submit_sell_order"})
+    adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=1)
+    intent = _new_intent(side="sell", client_order_id="cid-new-error")
+
+    execution_errors = adapter.apply_intents([intent])
+
+    assert execution_errors == [(intent, RejectReason.EXCHANGE_ERROR)]
+
+
+def test_replace_calls_modify_with_expected_mapping_and_success_behavior() -> None:
+    fake_hbt = _FakeHbt()
+    adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=3)
+    intent = _replace_intent(client_order_id="cid-replace-ok")
+
+    execution_errors = adapter.apply_intents([intent])
+
+    assert execution_errors == []
+    assert fake_hbt.calls == [
+        (
+            "modify",
+            (
+                3,
+                _to_i64_order_id("cid-replace-ok"),
+                101.25,
+                3.0,
+                False,
+            ),
+        )
+    ]
+
+
+def test_replace_nonzero_result_code_returns_exchange_reject() -> None:
+    fake_hbt = _FakeHbt(result_codes={"modify": 4})
+    adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=0)
+    intent = _replace_intent(client_order_id="cid-replace-reject")
+
+    execution_errors = adapter.apply_intents([intent])
+
+    assert execution_errors == [(intent, RejectReason.EXCHANGE_REJECT)]
+
+
+def test_replace_exception_returns_exchange_error() -> None:
+    fake_hbt = _FakeHbt(raise_on={"modify"})
+    adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=0)
+    intent = _replace_intent(client_order_id="cid-replace-error")
+
+    execution_errors = adapter.apply_intents([intent])
+
+    assert execution_errors == [(intent, RejectReason.EXCHANGE_ERROR)]
+
+
+def test_cancel_calls_cancel_with_expected_mapping_and_success_behavior() -> None:
+    fake_hbt = _FakeHbt()
+    adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=5)
+    intent = _cancel_intent(client_order_id="cid-cancel-ok")
+
+    execution_errors = adapter.apply_intents([intent])
+
+    assert execution_errors == []
+    assert fake_hbt.calls == [
+        ("cancel", (5, _to_i64_order_id("cid-cancel-ok"), False))
+    ]
+
+
+def test_cancel_nonzero_result_code_returns_exchange_reject() -> None:
+    fake_hbt = _FakeHbt(result_codes={"cancel": 2})
+    adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=0)
+    intent = _cancel_intent(client_order_id="cid-cancel-reject")
+
+    execution_errors = adapter.apply_intents([intent])
+
+    assert execution_errors == [(intent, RejectReason.EXCHANGE_REJECT)]
+
+
+def test_cancel_exception_returns_exchange_error() -> None:
+    fake_hbt = _FakeHbt(raise_on={"cancel"})
+    adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=0)
+    intent = _cancel_intent(client_order_id="cid-cancel-error")
+
+    execution_errors = adapter.apply_intents([intent])
+
+    assert execution_errors == [(intent, RejectReason.EXCHANGE_ERROR)]
+
+
+def test_to_i64_order_id_numeric_and_deterministic_non_numeric_behavior() -> None:
+    assert _to_i64_order_id("42") == 42
+    assert _to_i64_order_id(" 77 ") == 77
+
+    alpha_a = _to_i64_order_id("cid-alpha")
+    alpha_b = _to_i64_order_id("cid-alpha")
+    beta = _to_i64_order_id("cid-beta")
+
+    assert alpha_a == alpha_b
+    assert alpha_a != beta
+    assert 0 <= alpha_a < (1 << 63)
+    assert 0 <= beta < (1 << 63)
+
+
+def test_characterization_scope_excludes_feedback_source_and_fill_ingress_implications() -> None:
+    public_methods = {
+        name
+        for name, member in inspect.getmembers(HftBacktestExecutionAdapter)
+        if callable(member) and not name.startswith("_")
+    }
+    apply_intents_source = inspect.getsource(HftBacktestExecutionAdapter.apply_intents)
+
+    assert "drain_execution_feedback_records" not in public_methods
+    assert "ExecutionFeedbackRecordSource" not in apply_intents_source
+    assert "FillEvent" not in apply_intents_source
+
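The `_to_i64_order_id` test above pins the mapping only behaviorally: numeric strings (ignoring surrounding whitespace) parse directly, and non-numeric IDs map deterministically into the non-negative signed-64-bit range. One way to meet that contract — the SHA-256 choice is an assumption, not necessarily the adapter's actual scheme:

```python
# Sketch meeting the behavioral contract pinned by the test above. The
# SHA-256 hash choice is an assumption; the adapter's actual scheme lives in
# core_runtime.backtest.adapters.execution._to_i64_order_id.
import hashlib


def to_i64_order_id_sketch(client_order_id: str) -> int:
    text = client_order_id.strip()
    if text.isdigit():
        return int(text)  # "42" -> 42, " 77 " -> 77
    digest = hashlib.sha256(text.encode("utf-8")).digest()
    # Deterministic, collision-resistant mapping into [0, 2**63).
    return int.from_bytes(digest[:8], "big") & ((1 << 63) - 1)


assert to_i64_order_id_sketch(" 77 ") == 77
assert to_i64_order_id_sketch("cid-alpha") == to_i64_order_id_sketch("cid-alpha")
assert 0 <= to_i64_order_id_sketch("cid-beta") < (1 << 63)
```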
test_cancel_nonzero_result_code_returns_exchange_reject() -> None: + fake_hbt = _FakeHbt(result_codes={"cancel": 2}) + adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=0) + intent = _cancel_intent(client_order_id="cid-cancel-reject") + + execution_errors = adapter.apply_intents([intent]) + + assert execution_errors == [(intent, RejectReason.EXCHANGE_REJECT)] + + +def test_cancel_exception_returns_exchange_error() -> None: + fake_hbt = _FakeHbt(raise_on={"cancel"}) + adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=0) + intent = _cancel_intent(client_order_id="cid-cancel-error") + + execution_errors = adapter.apply_intents([intent]) + + assert execution_errors == [(intent, RejectReason.EXCHANGE_ERROR)] + + +def test_to_i64_order_id_numeric_and_deterministic_non_numeric_behavior() -> None: + assert _to_i64_order_id("42") == 42 + assert _to_i64_order_id(" 77 ") == 77 + + alpha_a = _to_i64_order_id("cid-alpha") + alpha_b = _to_i64_order_id("cid-alpha") + beta = _to_i64_order_id("cid-beta") + + assert alpha_a == alpha_b + assert alpha_a != beta + assert 0 <= alpha_a < (1 << 63) + assert 0 <= beta < (1 << 63) + + +def test_characterization_scope_excludes_feedback_source_and_fill_ingress_implications() -> None: + public_methods = { + name + for name, member in inspect.getmembers(HftBacktestExecutionAdapter) + if callable(member) and not name.startswith("_") + } + apply_intents_source = inspect.getsource(HftBacktestExecutionAdapter.apply_intents) + + assert "drain_execution_feedback_records" not in public_methods + assert "ExecutionFeedbackRecordSource" not in apply_intents_source + assert "FillEvent" not in apply_intents_source + diff --git a/tests/runtime/test_hftbacktest_execution_feedback_probe.py b/tests/runtime/test_hftbacktest_execution_feedback_probe.py new file mode 100644 index 0000000..bbd7869 --- /dev/null +++ b/tests/runtime/test_hftbacktest_execution_feedback_probe.py @@ -0,0 +1,215 @@ +"""Phase 4K probe: hftbacktest execution feedback feasibility surface.""" + +from __future__ import annotations + +import inspect +from dataclasses import dataclass +from typing import Any + +import pytest + +from core_runtime.backtest.adapters.execution import ( + HftBacktestExecutionAdapter, + _to_i64_order_id, +) +from core_runtime.backtest.adapters.venue import HftBacktestVenueAdapter +from core_runtime.backtest.engine.strategy_runner import HftStrategyRunner + +hftbacktest = pytest.importorskip("hftbacktest") +from hftbacktest import types as hbt_types # type: ignore # noqa: E402 +from hftbacktest.binding import ROIVectorMarketDepthBacktest # type: ignore # noqa: E402 + + +@dataclass(frozen=True) +class ProbeRow: + source: str + explicit_feedback_boundary: bool + authoritative_filled_price: bool + authoritative_cum_filled_qty: bool + authoritative_liquidity_flag: bool + deterministic_client_order_id_correlation: bool + deterministic_source_sequence: bool + batching_safe: bool + eligible_for_execution_feedback_record_source: bool + + +def _public_methods(cls: type[Any]) -> set[str]: + return { + name + for name, member in inspect.getmembers(cls) + if callable(member) and not name.startswith("_") + } + + +def test_probe_wrapper_surface_is_snapshot_only_for_rc3_branch() -> None: + venue_methods = _public_methods(HftBacktestVenueAdapter) + execution_methods = _public_methods(HftBacktestExecutionAdapter) + runner_source = inspect.getsource(HftStrategyRunner.run) + + assert "read_orders_snapshot" in venue_methods + assert "wait_next" in venue_methods + assert "apply_intents" in 
execution_methods + + # Probe fact: no adapter-facing execution feedback source is currently exposed. + assert "wait_order_response" not in venue_methods + assert "drain_execution_feedback_records" not in venue_methods + assert "drain_execution_feedback_records" not in execution_methods + + # Probe fact: strategy runner rc==3 branch is snapshot materialization. + assert "if rc == 3" in runner_source + assert "read_orders_snapshot()" in runner_source + assert "state_values, orders = venue.read_orders_snapshot()" in runner_source + assert "wait_order_response" not in runner_source + + +def test_probe_hftbacktest_binding_response_surface() -> None: + _ = hftbacktest + binding_methods = _public_methods(ROIVectorMarketDepthBacktest) + + assert "wait_next_feed" in binding_methods + assert "wait_order_response" in binding_methods + assert "orders" in binding_methods + assert "state_values" in binding_methods + assert "last_trades" in binding_methods + + # Probe fact: there is no direct structured feedback drain API. + assert "drain_execution_feedback_records" not in binding_methods + assert "execution_feedback_records" not in binding_methods + + +def test_probe_order_dtype_diagnostics_and_contract_gaps() -> None: + field_names = set(hbt_types.order_dtype.names or ()) + + # Snapshot/order fields currently visible through `orders()`. + assert {"order_id", "exec_price_tick", "exec_qty", "status", "maker"} <= field_names + + # Required for the conceptual source contract but not present on raw order dtype. + assert "client_order_id" not in field_names + assert "source_sequence" not in field_names + assert "liquidity_flag" not in field_names + + +def test_probe_wait_order_response_is_status_code_only_and_timeout_ambiguous() -> None: + source = inspect.getsource(ROIVectorMarketDepthBacktest.wait_order_response) + + assert "def wait_order_response" in source + assert "-> int64" in source + assert "reaches the timeout" in source + assert "Returns:" in source + assert "order response" in source + + # Probe fact: no structured payload object is returned from this method. + assert "dict" not in source + assert "payload" not in source + assert "record" not in source + + +def test_probe_wait_next_feed_response_signal_is_any_order_response_only() -> None: + source = inspect.getsource(ROIVectorMarketDepthBacktest.wait_next_feed) + + assert "include_order_resp" in source + assert "`3` when it receives an order response" in source + assert "any order response" in source + assert "source_sequence" not in source + assert "order_id" not in source + + +def test_probe_immediate_order_lookup_fields_and_missing_boundary_fields() -> None: + field_names = set(hbt_types.order_dtype.names or ()) + + # Immediate lookup provides current order state fields. + assert { + "order_id", + "status", + "req", + "exec_qty", + "exec_price_tick", + "maker", + "local_timestamp", + "exch_timestamp", + "leaves_qty", + } <= field_names + + # Missing record-boundary/correlation fields for source contract. + assert "client_order_id" not in field_names + assert "source_sequence" not in field_names + assert "explicit_update_kind" not in field_names + assert "response_sequence" not in field_names + assert "cum_filled_qty" not in field_names + + +def test_probe_client_order_id_correlation_is_one_way_without_reverse_map() -> None: + # Deterministic forward mapping exists. 
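+    # (Per the assertions in this suite: non-numeric ids are hashed with
+    # blake2b and constrained to the non-negative i64 range, so the mapping
+    # is stable across calls but not reversible without an explicit map.)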
+ assert _to_i64_order_id("cid-123") == _to_i64_order_id("cid-123") + assert _to_i64_order_id("42") == 42 + + adapter_fields = set(HftBacktestExecutionAdapter.__dataclass_fields__.keys()) + apply_intents_source = inspect.getsource(HftBacktestExecutionAdapter.apply_intents) + id_mapping_source = inspect.getsource(_to_i64_order_id) + + # Probe fact: adapter persists no reverse order_id -> client_order_id correlation map. + assert adapter_fields == {"hbt", "asset_no"} + assert "blake2b" in id_mapping_source + assert "_to_i64_order_id(intent.client_order_id)" in apply_intents_source + assert "reverse" not in apply_intents_source + assert "mapping" not in apply_intents_source + + +def test_probe_wait_order_response_plus_immediate_lookup_candidate_stays_ineligible() -> None: + row = ProbeRow( + source="L: wait_order_response + immediate orders().get(order_id)", + explicit_feedback_boundary=False, + authoritative_filled_price=False, + authoritative_cum_filled_qty=False, + authoritative_liquidity_flag=False, + deterministic_client_order_id_correlation=False, + deterministic_source_sequence=False, + batching_safe=False, + eligible_for_execution_feedback_record_source=False, + ) + + assert row.eligible_for_execution_feedback_record_source is False + assert row.explicit_feedback_boundary is False + assert row.deterministic_source_sequence is False + assert row.deterministic_client_order_id_correlation is False + + +def test_probe_contract_matrix_for_candidates_a_b_c() -> None: + matrix = [ + ProbeRow( + source="A: direct structured order-response channel", + explicit_feedback_boundary=False, + authoritative_filled_price=False, + authoritative_cum_filled_qty=False, + authoritative_liquidity_flag=False, + deterministic_client_order_id_correlation=False, + deterministic_source_sequence=False, + batching_safe=False, + eligible_for_execution_feedback_record_source=False, + ), + ProbeRow( + source="B: rc==3 wakeup + immediate structured lookup", + explicit_feedback_boundary=False, + authoritative_filled_price=False, + authoritative_cum_filled_qty=False, + authoritative_liquidity_flag=False, + deterministic_client_order_id_correlation=False, + deterministic_source_sequence=False, + batching_safe=False, + eligible_for_execution_feedback_record_source=False, + ), + ProbeRow( + source="C: snapshot deltas (diagnostic only)", + explicit_feedback_boundary=False, + authoritative_filled_price=False, + authoritative_cum_filled_qty=False, + authoritative_liquidity_flag=False, + deterministic_client_order_id_correlation=False, + deterministic_source_sequence=False, + batching_safe=False, + eligible_for_execution_feedback_record_source=False, + ), + ] + + assert all(not row.eligible_for_execution_feedback_record_source for row in matrix) + assert all(not row.explicit_feedback_boundary for row in matrix) diff --git a/tests/runtime/test_import_compatibility_shim.py b/tests/runtime/test_import_compatibility_shim.py new file mode 100644 index 0000000..0b619f8 --- /dev/null +++ b/tests/runtime/test_import_compatibility_shim.py @@ -0,0 +1,29 @@ +from __future__ import annotations + +import warnings + + +def test_nested_runtime_modules_share_identity_across_import_sites() -> None: + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + import core_runtime.backtest.engine.strategy_runner as old_strategy_runner + import core_runtime.strategies.debug_strategy as old_debug_strategy + + import core_runtime.backtest.engine.strategy_runner as new_strategy_runner + import 
core_runtime.strategies.debug_strategy as new_debug_strategy + + assert old_strategy_runner is new_strategy_runner + assert old_debug_strategy is new_debug_strategy + + +def test_runtime_symbols_share_identity_across_import_sites() -> None: + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + from core_runtime.backtest.engine.strategy_runner import HftStrategyRunner as OldRunner + from core_runtime.strategies.debug_strategy import DebugStrategyV1 as OldStrategy + + from core_runtime.backtest.engine.strategy_runner import HftStrategyRunner as NewRunner + from core_runtime.strategies.debug_strategy import DebugStrategyV1 as NewStrategy + + assert OldRunner is NewRunner + assert OldStrategy is NewStrategy diff --git a/tests/runtime/test_runtime_core_configuration_integration.py b/tests/runtime/test_runtime_core_configuration_integration.py new file mode 100644 index 0000000..f9a4ee2 --- /dev/null +++ b/tests/runtime/test_runtime_core_configuration_integration.py @@ -0,0 +1,213 @@ +from __future__ import annotations + +import json +import sys +import types +from pathlib import Path + +import pytest +from tradingchassis_core.core.domain.configuration import CoreConfiguration + +from core_runtime.local.backtest import load_config + + +def _repo_root() -> Path: + return Path(__file__).resolve().parents[2] + + +def _load_sample_config(path: Path) -> dict[str, object]: + return json.loads(path.read_text(encoding="utf-8")) + + +def _install_oci_stubs(monkeypatch: pytest.MonkeyPatch) -> None: + oci_mod = types.ModuleType("oci") + auth_mod = types.ModuleType("oci.auth") + signers_mod = types.ModuleType("oci.auth.signers") + config_mod = types.ModuleType("oci.config") + object_storage_mod = types.ModuleType("oci.object_storage") + signer_mod = types.ModuleType("oci.signer") + + class _InstancePrincipalsSecurityTokenSigner: # pragma: no cover - stub only + pass + + class _ObjectStorageClient: # pragma: no cover - stub only + pass + + class _Signer: # pragma: no cover - stub only + pass + + def _from_file(*, file_location: str, profile_name: str) -> dict[str, object]: + _ = (file_location, profile_name) + return {} + + signers_mod.InstancePrincipalsSecurityTokenSigner = _InstancePrincipalsSecurityTokenSigner + config_mod.from_file = _from_file + object_storage_mod.ObjectStorageClient = _ObjectStorageClient + signer_mod.Signer = _Signer + + monkeypatch.setitem(sys.modules, "oci", oci_mod) + monkeypatch.setitem(sys.modules, "oci.auth", auth_mod) + monkeypatch.setitem(sys.modules, "oci.auth.signers", signers_mod) + monkeypatch.setitem(sys.modules, "oci.config", config_mod) + monkeypatch.setitem(sys.modules, "oci.object_storage", object_storage_mod) + monkeypatch.setitem(sys.modules, "oci.signer", signer_mod) + + +def test_local_loader_fails_early_when_core_missing(tmp_path: Path) -> None: + sample_path = _repo_root() / "core_runtime/local/local.json" + config = _load_sample_config(sample_path) + config.pop("core", None) + + test_path = tmp_path / "missing-core.json" + test_path.write_text(json.dumps(config), encoding="utf-8") + + with pytest.raises(ValueError, match="Missing required top-level section: core"): + load_config(str(test_path)) + + +def test_local_loader_succeeds_with_valid_core() -> None: + sample_path = _repo_root() / "core_runtime/local/local.json" + cfg = load_config(str(sample_path)) + + assert isinstance(cfg.core_cfg, CoreConfiguration) + assert cfg.core_cfg.version == "v1" + + +def test_argo_entrypoint_rejects_invalid_run_config_before_planning( + 
tmp_path: Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + _install_oci_stubs(monkeypatch) + + from core_runtime.backtest.runtime.entrypoint import main as argo_entrypoint_main + + sample_path = _repo_root() / "core_runtime/argo/argo.json" + config = _load_sample_config(sample_path) + config.pop("core", None) + + config_path = tmp_path / "argo-missing-core.json" + config_path.write_text(json.dumps(config), encoding="utf-8") + + monkeypatch.setattr( + sys, + "argv", + [ + "entrypoint.py", + "--config", + str(config_path), + "--plan", + ], + ) + + with pytest.raises(ValueError, match="Missing required top-level section: core"): + argo_entrypoint_main() + + +def test_argo_sweep_worker_rejects_context_missing_core( + tmp_path: Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + _install_oci_stubs(monkeypatch) + + from core_runtime.backtest.runtime.run_sweep import main as run_sweep_main + + context = { + "experiment_id": "exp-1", + "segment_id": "seg-1", + "sweep_id": "sweep-1", + "stage": "derived", + "venue": "deribit", + "datatype": "mixed", + "symbol": "BTC_USDC-PERPETUAL", + "file_keys": [], + "parameters": { + "engine": { + "instrument": "BTC_USDC-PERPETUAL", + }, + "strategy": {}, + "risk": {}, + }, + "scratch_root": str(tmp_path / "scratch"), + "results_root": str(tmp_path / "results"), + } + + context_path = tmp_path / "context.json" + context_path.write_text(json.dumps(context), encoding="utf-8") + + monkeypatch.setattr( + sys, + "argv", + [ + "run_sweep.py", + "--context", + str(context_path), + "--scratch-root", + str(tmp_path / "scratch"), + ], + ) + + with pytest.raises(ValueError, match="Missing required top-level section: core"): + run_sweep_main() + + +def test_argo_emit_includes_core_section_in_sweep_context( + tmp_path: Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + _install_oci_stubs(monkeypatch) + + from core_runtime.backtest.orchestrator.planner_models import ( + ExperimentPlan, + SegmentPlan, + ) + from core_runtime.backtest.orchestrator.sweeps import SweepPlan + from core_runtime.backtest.runtime.entrypoint import _emit_sweep_context + + plan = ExperimentPlan( + experiment_id="exp-1", + segments=[ + SegmentPlan( + segment_id="seg-1", + start_ts_ns=1, + end_ts_ns=2, + estimated_bytes=123, + files=["file-1.npz"], + sweeps=[SweepPlan(sweep_id="sweep-0000", parameters={})], + ) + ], + ) + base_cfg = { + "experiment": { + "venue": "deribit", + "datatype": "mixed", + "symbol": "BTC_USDC-PERPETUAL", + }, + "engine": {"instrument": "BTC_USDC-PERPETUAL"}, + "strategy": {"class_path": "x:y"}, + "risk": {"scope": "s", "notional_limits": {"currency": "USDC", "max_gross_notional": 1.0}}, + "core": { + "version": "v1", + "market": { + "instruments": { + "BTC_USDC-PERPETUAL": { + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1.0, + } + } + }, + }, + } + + _emit_sweep_context( + plan=plan, + base_cfg=base_cfg, + scratch_root=tmp_path / "scratch", + results_root=tmp_path / "results", + out_dir=tmp_path / "emit", + ) + + emitted = json.loads( + (tmp_path / "emit" / "seg-1__sweep-0000.json").read_text(encoding="utf-8") + ) + assert emitted["parameters"]["core"] == base_cfg["core"] diff --git a/tests/runtime/test_strategy_runner_canonical_market_adoption.py b/tests/runtime/test_strategy_runner_canonical_market_adoption.py new file mode 100644 index 0000000..d3b315d --- /dev/null +++ b/tests/runtime/test_strategy_runner_canonical_market_adoption.py @@ -0,0 +1,1089 @@ +from __future__ import annotations + +from collections import deque +from types import 
SimpleNamespace +from typing import Any + +import pytest +from tradingchassis_core.core.domain.configuration import CoreConfiguration +from tradingchassis_core.core.domain.state import StrategyState +from tradingchassis_core.core.domain.types import ( + BookLevel, + BookPayload, + CancelOrderIntent, + ControlTimeEvent, + FillEvent, + MarketEvent, + NewOrderIntent, + OrderSubmittedEvent, + Price, + Quantity, + ReplaceOrderIntent, +) +from tradingchassis_core.core.events.event_bus import EventBus +from tradingchassis_core.core.risk.risk_config import RiskConfig +from tradingchassis_core.core.risk.risk_engine import GateDecision +from tradingchassis_core.strategies.base import Strategy + +import core_runtime.backtest.engine.strategy_runner as strategy_runner_module +from core_runtime.backtest.engine.event_stream_cursor import EventStreamCursor +from core_runtime.backtest.engine.hft_engine import HftEngineConfig +from core_runtime.backtest.engine.strategy_runner import ( + MAX_TIMEOUT_NS, + HftStrategyRunner, +) + + +class _NoopStrategy(Strategy): + def on_feed(self, state: Any, event: Any, engine_cfg: Any, constraints: Any) -> list[Any]: + _ = (state, event, engine_cfg, constraints) + return [] + + def on_order_update(self, state: Any, engine_cfg: Any, constraints: Any) -> list[Any]: + _ = (state, engine_cfg, constraints) + return [] + + def on_risk_decision(self, decision: Any) -> None: + _ = decision + + +class _NoopExecution: + def apply_intents(self, intents: list[Any]) -> list[tuple[Any, str]]: + _ = intents + return [] + + +class _RecorderWrapper: + recorder: Any + + def __init__(self) -> None: + self.recorder = SimpleNamespace(record=lambda _hbt: None) + + +class _StubVenue: + def __init__( + self, + *, + rc_sequence: list[int], + ts_sequence: list[int], + depth: object | None = None, + state_values: object | None = None, + orders: object | None = None, + ) -> None: + self._rc = list(rc_sequence) + self._ts = list(ts_sequence) + self._depth = depth + self._state_values = state_values + self._orders = orders + self._current_ts = 0 + self.wait_calls: list[tuple[int, bool]] = [] + + def wait_next(self, *, timeout_ns: int, include_order_resp: bool) -> int: + self.wait_calls.append((timeout_ns, include_order_resp)) + self._current_ts = self._ts.pop(0) + return self._rc.pop(0) + + def current_timestamp_ns(self) -> int: + return self._current_ts + + def read_market_snapshot(self) -> object: + return self._depth + + def read_orders_snapshot(self) -> tuple[object, object]: + return self._state_values, self._orders + + def record(self, recorder: Any) -> None: + recorder.recorder.record(self) + + +def _core_cfg() -> CoreConfiguration: + return CoreConfiguration( + version="v1", + payload={ + "market": { + "instruments": { + "BTC_USDC-PERPETUAL": { + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1.0, + } + } + } + }, + ) + + +def _engine_cfg() -> HftEngineConfig: + return HftEngineConfig( + initial_snapshot=None, + data_files=[], + instrument="BTC_USDC-PERPETUAL", + tick_size=0.1, + lot_size=0.01, + contract_size=1.0, + maker_fee_rate=0.0, + taker_fee_rate=0.0, + entry_latency_ns=0, + response_latency_ns=0, + use_risk_adverse_queue_model=False, + partial_fill_venue=False, + max_steps=1, + last_trades_capacity=1, + max_price_tick_levels=1, + roi_lb=0, + roi_ub=1, + stats_npz_path="/tmp/stats.npz", + event_bus_path="/tmp/events.jsonl", + ) + + +def _risk_cfg() -> RiskConfig: + return RiskConfig( + scope="test", + notional_limits={"currency": "USDC", "max_gross_notional": 1.0}, + ) + + 
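+# Return-code convention exercised by the _StubVenue sequences in this module,
+# matching the runner branches asserted here and in the probe suite: rc==0 is
+# a plain wakeup (bootstrap and control-time deadlines), rc==1 terminates the
+# run loop, rc==2 signals a market-depth update, and rc==3 signals an order
+# response that the runner handles via snapshot materialization.
+#
+# Minimal usage sketch of the stub, assuming the helpers defined in this
+# module:
+#
+#     venue = _StubVenue(rc_sequence=[0, 2, 1], ts_sequence=[1, 2, 3],
+#                        depth=_depth_snapshot())
+#     runner.run(venue=venue, execution=_NoopExecution(),
+#                recorder=_RecorderWrapper())
+
+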
+def _market_event(ts_ns: int) -> MarketEvent: + return MarketEvent( + ts_ns_exch=ts_ns, + ts_ns_local=ts_ns, + instrument="BTC_USDC-PERPETUAL", + event_type="book", + book=BookPayload( + book_type="snapshot", + bids=[ + BookLevel( + price=Price(currency="UNKNOWN", value=100.0), + quantity=Quantity(value=1.0, unit="contracts"), + ) + ], + asks=[ + BookLevel( + price=Price(currency="UNKNOWN", value=101.0), + quantity=Quantity(value=1.0, unit="contracts"), + ) + ], + depth=1, + ), + ) + + +def _depth_snapshot() -> object: + return SimpleNamespace( + roi_lb_tick=100, + tick_size=0.1, + best_ask_tick=101, + best_bid_tick=100, + ask_depth=[1.0, 0.0], + bid_depth=[1.0, 0.0], + best_bid=100.0, + best_ask=101.0, + best_bid_qty=1.0, + best_ask_qty=1.0, + ) + + +def _new_intent(ts_ns_local: int = 2) -> NewOrderIntent: + return NewOrderIntent( + ts_ns_local=ts_ns_local, + instrument="BTC_USDC-PERPETUAL", + client_order_id="cid-new-1", + intents_correlation_id="corr-new-1", + side="buy", + order_type="limit", + intended_qty=Quantity(value=1.0, unit="contracts"), + intended_price=Price(currency="USDC", value=100.0), + time_in_force="GTC", + ) + + +def _replace_intent(ts_ns_local: int = 2) -> ReplaceOrderIntent: + return ReplaceOrderIntent( + ts_ns_local=ts_ns_local, + instrument="BTC_USDC-PERPETUAL", + client_order_id="cid-existing-1", + intents_correlation_id="corr-replace-1", + side="buy", + order_type="limit", + intended_qty=Quantity(value=2.0, unit="contracts"), + intended_price=Price(currency="USDC", value=101.0), + ) + + +def _cancel_intent(ts_ns_local: int = 2) -> CancelOrderIntent: + return CancelOrderIntent( + ts_ns_local=ts_ns_local, + instrument="BTC_USDC-PERPETUAL", + client_order_id="cid-existing-1", + intents_correlation_id="corr-cancel-1", + ) + + +class _EmitIntentsStrategy(Strategy): + def __init__(self, intents: list[object]) -> None: + self._intents = intents + + def on_feed(self, state: Any, event: Any, engine_cfg: Any, constraints: Any) -> list[Any]: + _ = (state, event, engine_cfg, constraints) + return list(self._intents) + + def on_order_update(self, state: Any, engine_cfg: Any, constraints: Any) -> list[Any]: + _ = (state, engine_cfg, constraints) + return [] + + def on_risk_decision(self, decision: Any) -> None: + _ = decision + + +def _decision_for(accepted_now: list[Any]) -> GateDecision: + return GateDecision( + ts_ns_local=2, + accepted_now=accepted_now, + queued=[], + rejected=[], + replaced_in_queue=[], + dropped_in_queue=[], + handled_in_queue=[], + execution_rejected=[], + next_send_ts_ns_local=None, + ) + + +def test_process_market_event_routes_through_event_entry_with_core_configuration( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = object.__new__(HftStrategyRunner) + runner.strategy_state = object() + runner._core_cfg = _core_cfg() + runner._event_stream_cursor = EventStreamCursor() + + captured: list[tuple[int, object]] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = state + captured.append((entry.position.index, configuration)) + + monkeypatch.setattr( + strategy_runner_module, + "process_event_entry", + _spy_process_event_entry, + ) + + runner._process_canonical_market_event(_market_event(1)) + runner._process_canonical_market_event(_market_event(2)) + + assert [idx for idx, _ in captured] == [0, 1] + assert captured[0][1] is runner._core_cfg + assert captured[1][1] is runner._core_cfg + assert runner._event_stream_cursor.next_index == 2 + + +def 
test_first_canonical_event_uses_processing_position_zero( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = object.__new__(HftStrategyRunner) + runner.strategy_state = object() + runner._core_cfg = _core_cfg() + runner._event_stream_cursor = EventStreamCursor() + + captured: list[int] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = state + assert configuration is runner._core_cfg + captured.append(entry.position.index) + + monkeypatch.setattr( + strategy_runner_module, + "process_event_entry", + _spy_process_event_entry, + ) + + runner._process_canonical_market_event(_market_event(1)) + + assert captured == [0] + assert runner._event_stream_cursor.next_index == 1 + + +def test_market_branch_calls_canonical_boundary_not_update_market( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + monkeypatch.setattr( + runner.strategy_state, + "update_market", + lambda *args, **kwargs: (_ for _ in ()).throw(AssertionError("update_market must not be called")), + ) + monkeypatch.setattr( + runner.strategy_state, + "apply_fill_event", + lambda *args, **kwargs: (_ for _ in ()).throw(AssertionError("apply_fill_event must not be called")), + ) + + captured: list[tuple[int, object]] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = state + captured.append((entry.position.index, configuration)) + + monkeypatch.setattr( + strategy_runner_module, + "process_event_entry", + _spy_process_event_entry, + ) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1, 2, 3], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert captured == [(0, runner._core_cfg)] + + +def test_wait_next_bootstrap_uses_include_order_resp_false_then_true_in_loop() -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1, 2, 3], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert len(venue.wait_calls) >= 2 + first_timeout_ns, first_include_order_resp = venue.wait_calls[0] + assert first_timeout_ns == MAX_TIMEOUT_NS + assert first_include_order_resp is False + assert all(include_order_resp is True for _, include_order_resp in venue.wait_calls[1:]) + + +def test_market_mapping_from_depth_snapshot_is_deterministic_golden( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + + captured_market_events: list[MarketEvent] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = (state, configuration) + if isinstance(entry.event, MarketEvent): + captured_market_events.append(entry.event) + + monkeypatch.setattr( + strategy_runner_module, + "process_event_entry", + _spy_process_event_entry, + ) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1, 2_000_000_000, 2_000_000_001], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert len(captured_market_events) == 1 + market_event = captured_market_events[0] + assert 
market_event.instrument == "BTC_USDC-PERPETUAL" + assert market_event.ts_ns_local == 2_000_000_000 + assert market_event.ts_ns_exch == 2_000_000_000 + assert market_event.book is not None + assert market_event.book.bids[0].price.value == 10.0 + assert market_event.book.asks[0].price.value == 10.100000000000001 + assert market_event.book.bids[0].quantity.value == 1.0 + assert market_event.book.asks[0].quantity.value == 0.0 + + +def test_missing_core_cfg_fails_before_market_mutation() -> None: + runner = object.__new__(HftStrategyRunner) + runner.strategy_state = StrategyState(event_bus=EventBus(sinks=[])) + runner._core_cfg = None + runner._event_stream_cursor = EventStreamCursor() + + with pytest.raises(ValueError, match="CoreConfiguration is required"): + runner._process_canonical_market_event(_market_event(42)) + + assert runner.strategy_state.market == {} + assert runner.strategy_state._last_processing_position_index is None + assert runner._event_stream_cursor.next_index == 0 + + +def test_invalid_core_cfg_type_fails_before_market_mutation() -> None: + runner = object.__new__(HftStrategyRunner) + runner.strategy_state = StrategyState(event_bus=EventBus(sinks=[])) + runner._core_cfg = object() + runner._event_stream_cursor = EventStreamCursor() + + with pytest.raises(TypeError, match="configuration must be CoreConfiguration or None"): + runner._process_canonical_market_event(_market_event(42)) + + assert runner.strategy_state.market == {} + assert runner.strategy_state._last_processing_position_index is None + assert runner._event_stream_cursor.next_index == 0 + + +def test_order_snapshot_branch_keeps_compatibility_path( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + monkeypatch.setattr( + runner.strategy_state, + "apply_fill_event", + lambda *args, **kwargs: (_ for _ in ()).throw(AssertionError("apply_fill_event must not be called")), + ) + + calls = {"update_account": 0, "ingest_order_snapshots": 0} + + def _spy_update_account(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["update_account"] += 1 + + def _spy_ingest_order_snapshots(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["ingest_order_snapshots"] += 1 + + monkeypatch.setattr(runner.strategy_state, "update_account", _spy_update_account) + monkeypatch.setattr( + runner.strategy_state, + "ingest_order_snapshots", + _spy_ingest_order_snapshots, + ) + + venue = _StubVenue( + rc_sequence=[0, 3, 1], + ts_sequence=[1, 2, 3], + state_values=SimpleNamespace( + position=0.0, + balance=1000.0, + fee=0.0, + trading_volume=0.0, + trading_value=0.0, + num_trades=0, + ), + orders={}, + ) + + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert calls["update_account"] == 1 + assert calls["ingest_order_snapshots"] == 1 + assert runner._event_stream_cursor.next_index == 0 + + +def test_snapshot_only_rc3_does_not_consume_canonical_cursor_position( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + + calls = {"update_account": 0, "ingest_order_snapshots": 0, "canonical": 0} + + def _spy_update_account(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["update_account"] += 1 + + def _spy_ingest_order_snapshots(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + 
calls["ingest_order_snapshots"] += 1 + + def _spy_process_event_entry(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["canonical"] += 1 + + monkeypatch.setattr(runner.strategy_state, "update_account", _spy_update_account) + monkeypatch.setattr( + runner.strategy_state, + "ingest_order_snapshots", + _spy_ingest_order_snapshots, + ) + monkeypatch.setattr( + strategy_runner_module, + "process_event_entry", + _spy_process_event_entry, + ) + + venue = _StubVenue( + rc_sequence=[0, 3, 1], + ts_sequence=[1, 2, 3], + state_values=SimpleNamespace( + position=0.0, + balance=1000.0, + fee=0.0, + trading_volume=0.0, + trading_value=0.0, + num_trades=0, + ), + orders={}, + ) + + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert calls == { + "update_account": 1, + "ingest_order_snapshots": 1, + "canonical": 0, + } + assert runner._event_stream_cursor.next_index == 0 + + +def test_rc2_rc3_paths_never_emit_fill_event_through_process_event_entry( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + + calls = {"update_account": 0, "ingest_order_snapshots": 0} + emitted_fill_events = 0 + + def _spy_update_account(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["update_account"] += 1 + + def _spy_ingest_order_snapshots(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["ingest_order_snapshots"] += 1 + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + nonlocal emitted_fill_events + _ = (state, configuration) + if isinstance(entry.event, FillEvent): + emitted_fill_events += 1 + + monkeypatch.setattr(runner.strategy_state, "update_account", _spy_update_account) + monkeypatch.setattr( + runner.strategy_state, + "ingest_order_snapshots", + _spy_ingest_order_snapshots, + ) + monkeypatch.setattr( + strategy_runner_module, + "process_event_entry", + _spy_process_event_entry, + ) + + venue = _StubVenue( + rc_sequence=[0, 2, 3, 1], + ts_sequence=[1, 2, 3, 4], + depth=_depth_snapshot(), + state_values=SimpleNamespace( + position=0.0, + balance=1000.0, + fee=0.0, + trading_volume=0.0, + trading_value=0.0, + num_trades=0, + ), + orders={}, + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert emitted_fill_events == 0 + assert calls["update_account"] == 1 + assert calls["ingest_order_snapshots"] == 1 + + +def test_successful_new_dispatch_processes_order_submitted_before_mark_sent( + monkeypatch: pytest.MonkeyPatch, +) -> None: + new_intent = _new_intent() + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_EmitIntentsStrategy([new_intent]), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + monkeypatch.setattr(runner.strategy_state, "apply_fill_event", lambda *args, **kwargs: None) + monkeypatch.setattr( + runner.risk, + "decide_intents", + lambda **_: _decision_for([new_intent]), + ) + + ordering: list[str] = [] + submitted_events: list[OrderSubmittedEvent] = [] + marks: list[tuple[str, str, str]] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = (state, configuration) + if isinstance(entry.event, OrderSubmittedEvent): + ordering.append("submitted") + submitted_events.append(entry.event) + + def _spy_mark_intent_sent(instrument: str, client_order_id: str, intent_type: str) -> None: + ordering.append("mark") + 
marks.append((instrument, client_order_id, intent_type)) + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + monkeypatch.setattr(runner.strategy_state, "mark_intent_sent", _spy_mark_intent_sent) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1_111, 5_000_000_000, 5_000_000_001], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert len(submitted_events) == 1 + event = submitted_events[0] + assert event.instrument == new_intent.instrument + assert event.client_order_id == new_intent.client_order_id + assert event.side == new_intent.side + assert event.order_type == new_intent.order_type + assert event.intended_price == new_intent.intended_price + assert event.intended_qty == new_intent.intended_qty + assert event.time_in_force == new_intent.time_in_force + assert event.intent_correlation_id == new_intent.intents_correlation_id + assert event.dispatch_attempt_id is None + assert event.runtime_correlation is None + assert event.ts_ns_local_dispatch == 5_000_000_000 + assert ordering == ["submitted", "mark"] + assert marks == [(new_intent.instrument, new_intent.client_order_id, "new")] + + +def test_failed_new_dispatch_processes_no_order_submitted_event( + monkeypatch: pytest.MonkeyPatch, +) -> None: + new_intent = _new_intent() + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_EmitIntentsStrategy([new_intent]), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + monkeypatch.setattr( + runner.risk, + "decide_intents", + lambda **_: _decision_for([new_intent]), + ) + + submitted_event_count = 0 + marked_count = 0 + captured_decisions: list[GateDecision] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + nonlocal submitted_event_count + _ = (state, configuration) + if isinstance(entry.event, OrderSubmittedEvent): + submitted_event_count += 1 + + def _spy_mark_intent_sent(instrument: str, client_order_id: str, intent_type: str) -> None: + nonlocal marked_count + _ = (instrument, client_order_id, intent_type) + marked_count += 1 + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + monkeypatch.setattr(runner.strategy_state, "mark_intent_sent", _spy_mark_intent_sent) + monkeypatch.setattr( + runner.strategy, + "on_risk_decision", + lambda decision: captured_decisions.append(decision), + ) + + class _ExecutionFailNew: + def apply_intents(self, intents: list[Any]) -> list[tuple[Any, str]]: + _ = intents + return [(new_intent, "EXCHANGE_REJECT")] + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[10, 20, 30], + depth=_depth_snapshot(), + ) + runner.run( + venue=venue, + execution=_ExecutionFailNew(), + recorder=_RecorderWrapper(), + ) + + assert submitted_event_count == 0 + assert marked_count == 0 + assert len(captured_decisions) == 1 + assert len(captured_decisions[0].execution_rejected) == 1 + assert captured_decisions[0].execution_rejected[0].intent.client_order_id == new_intent.client_order_id + + +def test_successful_replace_cancel_dispatch_processes_no_order_submitted_event( + monkeypatch: pytest.MonkeyPatch, +) -> None: + replace_intent = _replace_intent() + cancel_intent = _cancel_intent() + accepted_now = [replace_intent, cancel_intent] + + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_EmitIntentsStrategy(accepted_now), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + monkeypatch.setattr( + 
runner.risk, + "decide_intents", + lambda **_: _decision_for(accepted_now), + ) + + submitted_event_count = 0 + marks: list[tuple[str, str, str]] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + nonlocal submitted_event_count + _ = (state, configuration) + if isinstance(entry.event, OrderSubmittedEvent): + submitted_event_count += 1 + + def _spy_mark_intent_sent(instrument: str, client_order_id: str, intent_type: str) -> None: + marks.append((instrument, client_order_id, intent_type)) + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + monkeypatch.setattr(runner.strategy_state, "mark_intent_sent", _spy_mark_intent_sent) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[100, 200, 300], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert submitted_event_count == 0 + assert marks == [ + ( + replace_intent.instrument, + replace_intent.client_order_id, + "replace", + ), + ( + cancel_intent.instrument, + cancel_intent.client_order_id, + "cancel", + ), + ] + + +def test_global_canonical_counter_shared_between_market_and_order_submitted( + monkeypatch: pytest.MonkeyPatch, +) -> None: + new_intent = _new_intent() + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_EmitIntentsStrategy([new_intent]), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + monkeypatch.setattr( + runner.risk, + "decide_intents", + lambda **_: _decision_for([new_intent]), + ) + + positions: list[tuple[int, str]] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = (state, configuration) + event_name = type(entry.event).__name__ + positions.append((entry.position.index, event_name)) + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[7, 9_999_999_999, 10_000_000_000], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert positions == [ + (0, "MarketEvent"), + (1, "OrderSubmittedEvent"), + ] + assert runner._event_stream_cursor.next_index == 2 + + +def test_canonical_counter_increments_only_after_successful_canonical_processing( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = object.__new__(HftStrategyRunner) + runner.strategy_state = object() + runner._core_cfg = _core_cfg() + runner._event_stream_cursor = EventStreamCursor() + + def _fail(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + raise RuntimeError("boom") + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _fail) + with pytest.raises(RuntimeError, match="boom"): + runner._process_canonical_market_event(_market_event(1)) + assert runner._event_stream_cursor.next_index == 0 + + called = {"count": 0} + + def _ok(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + called["count"] += 1 + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _ok) + runner._process_canonical_market_event(_market_event(2)) + assert called["count"] == 1 + assert runner._event_stream_cursor.next_index == 1 + + +def test_control_time_event_injected_when_scheduled_deadline_is_realized( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + 
runner._next_send_ts_ns_local = 5 + + control_events: list[ControlTimeEvent] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = (state, configuration) + if isinstance(entry.event, ControlTimeEvent): + control_events.append(entry.event) + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + venue = _StubVenue( + rc_sequence=[0, 0, 1], + ts_sequence=[1, 10, 11], + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert len(control_events) == 1 + event = control_events[0] + assert event.ts_ns_local_control == 10 + assert event.reason == "scheduled_control_recheck" + assert event.due_ts_ns_local == 5 + assert event.realized_ts_ns_local == 10 + assert event.obligation_reason == "rate_limit" + assert event.obligation_due_ts_ns_local == 5 + assert event.runtime_correlation is None + + +def test_no_control_time_event_when_no_deadline_scheduled( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + control_count = 0 + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + nonlocal control_count + _ = (state, configuration) + if isinstance(entry.event, ControlTimeEvent): + control_count += 1 + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1, 2, 3], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert control_count == 0 + + +def test_no_control_time_event_when_deadline_not_yet_realized( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + runner._next_send_ts_ns_local = 50 + control_count = 0 + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + nonlocal control_count + _ = (state, configuration) + if isinstance(entry.event, ControlTimeEvent): + control_count += 1 + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + venue = _StubVenue( + rc_sequence=[0, 0, 1], + ts_sequence=[1, 10, 20], + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert control_count == 0 + + +def test_control_time_deadline_injection_is_not_periodic_for_same_deadline( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + runner._next_send_ts_ns_local = 5 + control_count = 0 + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + nonlocal control_count + _ = (state, configuration) + if isinstance(entry.event, ControlTimeEvent): + control_count += 1 + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + venue = _StubVenue( + rc_sequence=[0, 0, 0, 1], + ts_sequence=[1, 10, 10, 11], + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert control_count == 1 + + +def test_control_time_event_processed_after_pop_and_before_gate( + monkeypatch: pytest.MonkeyPatch, +) -> None: + queued_intent = _new_intent() + runner = 
HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + runner._next_send_ts_ns_local = 5 + + ordering: list[str] = [] + captured_raw_inputs: list[list[Any]] = [] + + def _spy_pop_queued_intents(instrument: str) -> list[Any]: + _ = instrument + ordering.append("pop") + return [queued_intent] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = (state, configuration) + if isinstance(entry.event, ControlTimeEvent): + ordering.append("control") + + def _spy_decide_intents(**kwargs: Any) -> GateDecision: + ordering.append("gate") + captured_raw_inputs.append(list(kwargs["raw_intents"])) + return _decision_for([]) + + monkeypatch.setattr(runner.strategy_state, "pop_queued_intents", _spy_pop_queued_intents) + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + monkeypatch.setattr(runner.risk, "decide_intents", _spy_decide_intents) + + venue = _StubVenue( + rc_sequence=[0, 0, 1], + ts_sequence=[1, 10, 11], + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert ordering == ["pop", "control", "gate"] + assert len(captured_raw_inputs) == 1 + assert [it.client_order_id for it in captured_raw_inputs[0]] == [queued_intent.client_order_id] + + +def test_global_canonical_counter_shared_with_control_time_market_and_submitted( + monkeypatch: pytest.MonkeyPatch, +) -> None: + new_intent = _new_intent() + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_EmitIntentsStrategy([new_intent]), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + runner._next_send_ts_ns_local = 5 + + monkeypatch.setattr( + runner.risk, + "decide_intents", + lambda **_: _decision_for([new_intent]), + ) + + positions: list[tuple[int, str]] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = (state, configuration) + positions.append((entry.position.index, type(entry.event).__name__)) + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1, 10, 11], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert positions == [ + (0, "MarketEvent"), + (1, "ControlTimeEvent"), + (2, "OrderSubmittedEvent"), + ] + assert runner._event_stream_cursor.next_index == 3 + + +def test_fallback_second_boundary_wakeup_behavior_unchanged( + monkeypatch: pytest.MonkeyPatch, +) -> None: + intent = _new_intent() + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_EmitIntentsStrategy([intent]), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + runner.strategy_state.queued_intents.setdefault(runner.engine_cfg.instrument, deque()) + runner.strategy_state.queued_intents[runner.engine_cfg.instrument].append( + SimpleNamespace(intent=intent) + ) + + monkeypatch.setattr(runner.risk, "decide_intents", lambda **_: _decision_for([])) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1, 2_000_000_000, 2_000_000_001], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert runner._next_send_ts_ns_local == 3_000_000_000 diff --git a/tests/test_dummy.py b/tests/test_dummy.py index 13a78be..cbb29c5 100644 --- a/tests/test_dummy.py +++ b/tests/test_dummy.py @@ -27,10 +27,10 @@ def test_exception(): def 
test_package_import():
     """
-    Optional: verify that trading_runtime can be imported.
+    Optional: verify that core_runtime can be imported.
     Remove if not needed.
     """
     try:
-        import trading_runtime  # noqa: F401
+        import core_runtime  # noqa: F401
     except ImportError as exc:
-        pytest.fail(f"Failed to import trading_runtime: {exc}")
+        pytest.fail(f"Failed to import core_runtime: {exc}")