From 2de07b8e21148cb81bfcb1e30ba9cd64d884a3d2 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Wed, 29 Apr 2026 14:24:37 +0200 Subject: [PATCH 01/36] fix --- .devcontainer/devcontainer.json | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index bf21090..6415165 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,18 +1,17 @@ { - "name": "Trading Runtime Dev", + "name": "TradingChassis Core Runtime Dev", "build": { "dockerfile": "Dockerfile", "context": ".." }, - "postCreateCommand": "./scripts/post-create.sh", + "workspaceFolder": "/workspaces/core-runtime", + "remoteUser": "root", + "runArgs": ["--security-opt=label=disable"], + "containerEnv": { + "SHELL": "/bin/bash" + }, + + "postCreateCommand": "./scripts/post-create.sh" - "customizations": { - "vscode": { - "extensions": [ - "ms-python.python", - "ms-python.debugpy" - ] - } - } -} \ No newline at end of file +} From d15f4db37b59653542d54866f289438f989cc2fc Mon Sep 17 00:00:00 2001 From: bxvtr Date: Wed, 29 Apr 2026 14:25:42 +0200 Subject: [PATCH 02/36] fix --- trading_runtime/local/backtest.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/trading_runtime/local/backtest.py b/trading_runtime/local/backtest.py index e978919..1116a31 100644 --- a/trading_runtime/local/backtest.py +++ b/trading_runtime/local/backtest.py @@ -8,14 +8,14 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from trading_framework import BacktestResult + from trading_runtime.backtest.engine.engine_base import BacktestResult -from trading_framework import ( +from trading_framework.core.risk.risk_config import RiskConfig +from trading_framework.strategies.strategy_config import StrategyConfig +from trading_runtime.backtest.engine.hft_engine import ( HftBacktestConfig, HftBacktestEngine, HftEngineConfig, - RiskConfig, - StrategyConfig, ) From 
ff9e0eaf17b8de6cb34a64d5301466d0806a4de4 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Wed, 29 Apr 2026 14:47:19 +0200 Subject: [PATCH 03/36] m1-slice1-backtest-move --- argo/workflowtemplate-backtest-fanout.yaml | 8 +- trading_runtime/backtest/adapters/__init__.py | 0 .../backtest/adapters/execution.py | 164 ++++++ trading_runtime/backtest/adapters/venue.py | 49 ++ trading_runtime/backtest/engine/__init__.py | 0 .../backtest/engine/engine_base.py | 41 ++ trading_runtime/backtest/engine/hft_engine.py | 182 +++++++ .../backtest/engine/strategy_runner.py | 331 ++++++++++++ trading_runtime/backtest/io/__init__.py | 0 trading_runtime/backtest/io/s3_adapter.py | 290 ++++++++++ .../backtest/orchestrator/__init__.py | 0 .../backtest/orchestrator/manifest.py | 46 ++ .../backtest/orchestrator/planner.py | 151 ++++++ .../backtest/orchestrator/planner_models.py | 38 ++ .../backtest/orchestrator/s3_manifest.py | 128 +++++ .../backtest/orchestrator/segmenter.py | 44 ++ .../backtest/orchestrator/summary.py | 141 +++++ .../backtest/orchestrator/sweeps.py | 83 +++ trading_runtime/backtest/runtime/__init__.py | 0 trading_runtime/backtest/runtime/context.py | 105 ++++ .../backtest/runtime/entrypoint.py | 268 ++++++++++ .../runtime/experiment_finalize_entrypoint.py | 197 +++++++ .../backtest/runtime/mlflow_segment_logger.py | 64 +++ .../backtest/runtime/prometheus_metrics.py | 95 ++++ trading_runtime/backtest/runtime/run_sweep.py | 496 ++++++++++++++++++ .../runtime/segment_finalize_entrypoint.py | 194 +++++++ trading_runtime/core/__init__.py | 2 + trading_runtime/core/events/__init__.py | 2 + trading_runtime/core/events/sinks/__init__.py | 2 + .../core/events/sinks/file_recorder.py | 30 ++ 30 files changed, 3147 insertions(+), 4 deletions(-) create mode 100644 trading_runtime/backtest/adapters/__init__.py create mode 100644 trading_runtime/backtest/adapters/execution.py create mode 100644 trading_runtime/backtest/adapters/venue.py create mode 100644 
trading_runtime/backtest/engine/__init__.py create mode 100644 trading_runtime/backtest/engine/engine_base.py create mode 100644 trading_runtime/backtest/engine/hft_engine.py create mode 100644 trading_runtime/backtest/engine/strategy_runner.py create mode 100644 trading_runtime/backtest/io/__init__.py create mode 100644 trading_runtime/backtest/io/s3_adapter.py create mode 100644 trading_runtime/backtest/orchestrator/__init__.py create mode 100644 trading_runtime/backtest/orchestrator/manifest.py create mode 100644 trading_runtime/backtest/orchestrator/planner.py create mode 100644 trading_runtime/backtest/orchestrator/planner_models.py create mode 100644 trading_runtime/backtest/orchestrator/s3_manifest.py create mode 100644 trading_runtime/backtest/orchestrator/segmenter.py create mode 100644 trading_runtime/backtest/orchestrator/summary.py create mode 100644 trading_runtime/backtest/orchestrator/sweeps.py create mode 100644 trading_runtime/backtest/runtime/__init__.py create mode 100644 trading_runtime/backtest/runtime/context.py create mode 100644 trading_runtime/backtest/runtime/entrypoint.py create mode 100644 trading_runtime/backtest/runtime/experiment_finalize_entrypoint.py create mode 100644 trading_runtime/backtest/runtime/mlflow_segment_logger.py create mode 100644 trading_runtime/backtest/runtime/prometheus_metrics.py create mode 100644 trading_runtime/backtest/runtime/run_sweep.py create mode 100644 trading_runtime/backtest/runtime/segment_finalize_entrypoint.py create mode 100644 trading_runtime/core/__init__.py create mode 100644 trading_runtime/core/events/__init__.py create mode 100644 trading_runtime/core/events/sinks/__init__.py create mode 100644 trading_runtime/core/events/sinks/file_recorder.py diff --git a/argo/workflowtemplate-backtest-fanout.yaml b/argo/workflowtemplate-backtest-fanout.yaml index f2a140d..22f1067 100644 --- a/argo/workflowtemplate-backtest-fanout.yaml +++ b/argo/workflowtemplate-backtest-fanout.yaml @@ -104,7 +104,7 @@ 
spec: command: [python, -m] args: - - trading_framework.backtest.runtime.entrypoint + - trading_runtime.backtest.runtime.entrypoint - --config - "{{workflow.parameters.experiment_config}}" - --run @@ -156,7 +156,7 @@ spec: command: [python, -m] args: - - trading_framework.backtest.runtime.run_sweep + - trading_runtime.backtest.runtime.run_sweep - --context - "{{inputs.parameters.sweep-path}}" - --scratch-root @@ -204,7 +204,7 @@ spec: command: [python, -m] args: - - trading_framework.backtest.runtime.segment_finalize_entrypoint + - trading_runtime.backtest.runtime.segment_finalize_entrypoint - "--experiment-id" - "{{inputs.parameters.experiment-id}}" @@ -268,7 +268,7 @@ spec: command: [python, -m] args: - - trading_framework.backtest.runtime.experiment_finalize_entrypoint + - trading_runtime.backtest.runtime.experiment_finalize_entrypoint - "--experiment-id" - "{{inputs.parameters.experiment-id}}" diff --git a/trading_runtime/backtest/adapters/__init__.py b/trading_runtime/backtest/adapters/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/trading_runtime/backtest/adapters/execution.py b/trading_runtime/backtest/adapters/execution.py new file mode 100644 index 0000000..206f5e7 --- /dev/null +++ b/trading_runtime/backtest/adapters/execution.py @@ -0,0 +1,164 @@ +"""Execution adapter for hftbacktest backtests.""" + +from __future__ import annotations + +import hashlib +from dataclasses import dataclass +from typing import TYPE_CHECKING, Protocol + +if TYPE_CHECKING: + from hftbacktest import ROIVectorMarketDepthBacktest + + from trading_framework.core.domain.types import OrderIntent + +from trading_framework.core.domain.reject_reasons import RejectReason + + +class ExecutionAdapter(Protocol): + """Venue-facing execution boundary. + + Strategy, state, and risk layers must not depend on venue-specific + APIs. Only this adapter is allowed to call into the venue engine. 
+ """ + + def apply_intents( + self, intents: list[OrderIntent] + ) -> list[tuple[OrderIntent, str]]: + """Send a batch of intents to the venue. + + Returns: + List of (intent, reason) pairs for venue-side failures. + """ + + +def _to_i64_order_id(external_id: str) -> int: + """Convert an external string order ID into a signed 64-bit integer.""" + sanitized = external_id.strip() + if sanitized.isdigit(): + value = int(sanitized) + else: + digest = hashlib.blake2b( + sanitized.encode("utf-8"), digest_size=8 + ).digest() + value = int.from_bytes(digest, byteorder="big", signed=False) + return value & ((1 << 63) - 1) + + +@dataclass(frozen=True) +class HftBacktestExecutionAdapter(ExecutionAdapter): + """Execution adapter for hftbacktest.""" + + hbt: ROIVectorMarketDepthBacktest + asset_no: int + + def apply_intents( + self, intents: list[OrderIntent] + ) -> list[tuple[OrderIntent, str]]: + """Apply a batch of order intents to the backtest venue.""" + # pylint: disable=too-many-locals,too-many-branches + + # hftbacktest enums (kept local to the adapter) + gtc = 0 + gtx = 1 # post-only + fok = 2 + ioc = 3 + + limit = 0 + market = 1 + + tif_map = { + "GTC": gtc, + "IOC": ioc, + "FOK": fok, + "POST_ONLY": gtx, + } + order_type_map = {"limit": limit, "market": market} + + execution_errors: list[tuple[OrderIntent, str]] = [] + + for intent in intents: + if intent.intent_type == "new": + order_id = _to_i64_order_id(intent.client_order_id) + tif = tif_map[intent.time_in_force] + order_type = order_type_map[intent.order_type] + quantity = intent.intended_qty.value + price = ( + intent.intended_price.value + if intent.intended_price is not None + else 0.0 + ) + + try: + if intent.side == "buy": + result_code = self.hbt.submit_buy_order( + self.asset_no, + order_id, + price, + quantity, + tif, + order_type, + False, + ) + else: + result_code = self.hbt.submit_sell_order( + self.asset_no, + order_id, + price, + quantity, + tif, + order_type, + False, + ) + except Exception: # 
pylint: disable=broad-exception-caught + execution_errors.append( + (intent, RejectReason.EXCHANGE_ERROR) + ) + continue + + if result_code != 0: + execution_errors.append( + (intent, RejectReason.EXCHANGE_REJECT) + ) + + elif intent.intent_type == "cancel": + order_id = _to_i64_order_id(intent.client_order_id) + try: + result_code = self.hbt.cancel( + self.asset_no, order_id, False + ) + except Exception: # pylint: disable=broad-exception-caught + execution_errors.append( + (intent, RejectReason.EXCHANGE_ERROR) + ) + continue + + if result_code != 0: + execution_errors.append( + (intent, RejectReason.EXCHANGE_REJECT) + ) + + elif intent.intent_type == "replace": + order_id = _to_i64_order_id(intent.client_order_id) + new_price = intent.intended_price.value + new_quantity = intent.intended_qty.value + + try: + result_code = self.hbt.modify( + self.asset_no, + order_id, + new_price, + new_quantity, + False, + ) + except Exception: # pylint: disable=broad-exception-caught + execution_errors.append( + (intent, RejectReason.EXCHANGE_ERROR) + ) + continue + + if result_code != 0: + execution_errors.append( + (intent, RejectReason.EXCHANGE_REJECT) + ) + + return execution_errors diff --git a/trading_runtime/backtest/adapters/venue.py b/trading_runtime/backtest/adapters/venue.py new file mode 100644 index 0000000..09c524d --- /dev/null +++ b/trading_runtime/backtest/adapters/venue.py @@ -0,0 +1,49 @@ +"""Venue adapter implementation for hftbacktest backtests.""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from hftbacktest import ROIVectorMarketDepthBacktest + +from trading_framework.core.ports.venue_adapter import VenueAdapter + + +@dataclass(frozen=True) +class HftBacktestVenueAdapter(VenueAdapter): + """VenueAdapter implementation for hftbacktest. + + This adapter is the only place where the strategy loop is allowed to depend + on hftbacktest APIs. 
+ """ + + hbt: ROIVectorMarketDepthBacktest + asset_no: int + + def wait_next(self, *, timeout_ns: int, include_order_resp: bool) -> int: + """Wait for the next venue event and return its type.""" + # hftbacktest backends are frequently Numba jitclass objects. + # Those methods often do not support keyword arguments. + return self.hbt.wait_next_feed(include_order_resp, timeout_ns) + + def current_timestamp_ns(self) -> int: + """Return the current venue timestamp in nanoseconds.""" + return self.hbt.current_timestamp + + def read_market_snapshot(self) -> Any: + """Return the current market depth snapshot.""" + return self.hbt.depth(self.asset_no) + + def read_orders_snapshot(self) -> tuple[Any, Any]: + """Return the current orders and state snapshot.""" + return ( + self.hbt.state_values(self.asset_no), + self.hbt.orders(self.asset_no), + ) + + def record(self, recorder: Any) -> None: + """Record the current backtest state using the given recorder.""" + # hftbacktest recorder is a thin wrapper exposing .recorder.record(hbt). + recorder.recorder.record(self.hbt) diff --git a/trading_runtime/backtest/engine/__init__.py b/trading_runtime/backtest/engine/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/trading_runtime/backtest/engine/engine_base.py b/trading_runtime/backtest/engine/engine_base.py new file mode 100644 index 0000000..babed91 --- /dev/null +++ b/trading_runtime/backtest/engine/engine_base.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any + + +@dataclass +class BacktestConfig: + """Generic backtest configuration. + + Engine configs should subclass this + and add engine-specific fields. + """ + id: str + description: str + + +@dataclass +class BacktestResult: + """Lightweight container for backtest outputs. + + For now we only track the stats file path. + Can be extended with PnL curves, summary metrics, etc. 
+ """ + id: str + stats_file: str | None = None + extra_metadata: dict[str, Any] | None = None + + +class BacktestEngine: + """Abstract base class for all backtest engines.""" + + def __init__(self, config: BacktestConfig) -> None: + self.config = config + + def run(self) -> BacktestResult: + """Run the backtest and return a result object. + + Subclass engines must implement this method. + """ + raise NotImplementedError("run() must be implemented by subclasses") diff --git a/trading_runtime/backtest/engine/hft_engine.py b/trading_runtime/backtest/engine/hft_engine.py new file mode 100644 index 0000000..1a4f7eb --- /dev/null +++ b/trading_runtime/backtest/engine/hft_engine.py @@ -0,0 +1,182 @@ +"""HFT backtest engine implementation based on hftbacktest.""" + +from __future__ import annotations + +import importlib +from dataclasses import dataclass +from typing import TYPE_CHECKING + +from hftbacktest import ( + BacktestAsset, + Recorder, + ROIVectorMarketDepthBacktest, +) + +if TYPE_CHECKING: + from trading_framework.core.risk.risk_config import RiskConfig + +from trading_runtime.backtest.adapters.execution import HftBacktestExecutionAdapter +from trading_runtime.backtest.adapters.venue import HftBacktestVenueAdapter +from trading_runtime.backtest.engine.engine_base import ( + BacktestConfig, + BacktestEngine, + BacktestResult, +) +from trading_runtime.backtest.engine.strategy_runner import HftStrategyRunner +from trading_framework.strategies.base import Strategy +from trading_framework.strategies.strategy_config import StrategyConfig + + +# pylint: disable=too-many-instance-attributes +@dataclass +class HftEngineConfig: + """Configuration for the HFT backtest engine.""" + + # Data wiring + initial_snapshot: str | None + data_files: list[str] + + # Contract / microstructure parameters + instrument: str + tick_size: float + lot_size: float + contract_size: float + + # Simple fee model: maker / taker in rate on trading value + maker_fee_rate: float + taker_fee_rate: 
float + + # Latency model (constant latency) + entry_latency_ns: int + response_latency_ns: int + + # Queue model / venue model toggles + use_risk_adverse_queue_model: bool + partial_fill_venue: bool + + # Strategy loop timing + max_steps: int + + last_trades_capacity: int + max_price_tick_levels: int + + roi_lb: int + roi_ub: int + + # Output + stats_npz_path: str + event_bus_path: str + + +@dataclass +class HftBacktestConfig(BacktestConfig): + """Backtest configuration for the HFT engine.""" + + engine_cfg: HftEngineConfig + strategy_cfg: StrategyConfig + risk_cfg: RiskConfig + + +def _build_backtester(engine_cfg: HftEngineConfig) -> ROIVectorMarketDepthBacktest: + """Create an ROIVectorMarketDepthBacktest from the engine configuration.""" + asset = BacktestAsset() + + # For now we assume file paths. Later this can be replaced with an S3 resolver. + asset = asset.data(engine_cfg.data_files) + + if engine_cfg.initial_snapshot is not None: + asset = asset.initial_snapshot(engine_cfg.initial_snapshot) + + asset = ( + asset + .linear_asset(engine_cfg.contract_size) + .constant_latency(engine_cfg.entry_latency_ns, engine_cfg.response_latency_ns) + .tick_size(engine_cfg.tick_size) + .lot_size(engine_cfg.lot_size) + .trading_value_fee_model(engine_cfg.maker_fee_rate, engine_cfg.taker_fee_rate) + .last_trades_capacity(engine_cfg.last_trades_capacity) + .roi_lb(engine_cfg.roi_lb) + .roi_ub(engine_cfg.roi_ub) + ) + + if engine_cfg.use_risk_adverse_queue_model: + asset = asset.risk_adverse_queue_model() + + if engine_cfg.partial_fill_venue: + asset = asset.partial_fill_exchange() + else: + asset = asset.no_partial_fill_exchange() + + return ROIVectorMarketDepthBacktest([asset]) + + +class HftBacktestEngine(BacktestEngine): + """Backtest engine that uses hftbacktest internally.""" + + def __init__(self, config: HftBacktestConfig) -> None: + # pylint: disable=useless-super-delegation + super().__init__(config) + + def _load_strategy_class(self, class_path: str) -> 
type[Strategy]: + """Dynamically load a Strategy class from a module path.""" + module_path, class_name = class_path.split(":") + module = importlib.import_module(module_path) + cls = getattr(module, class_name) + + if not issubclass(cls, Strategy): + raise TypeError( + f"Loaded class {class_name} is not a subclass of Strategy." + ) + + return cls + + def _build_strategy(self, strategy_cfg: StrategyConfig) -> Strategy: + """Instantiate the strategy specified in the configuration.""" + cls = self._load_strategy_class(strategy_cfg.class_path) + return cls(**strategy_cfg.to_engine_params()) + + def run(self) -> BacktestResult: + """Run the backtest and return the aggregated result.""" + cfg: HftBacktestConfig = self.config + engine_cfg: HftEngineConfig = cfg.engine_cfg + strategy_cfg: StrategyConfig = cfg.strategy_cfg + risk_cfg: RiskConfig = cfg.risk_cfg + + # 1) Build hftbacktest backtester from engine config + hbt = _build_backtester(engine_cfg) + + # 2) Prepare recorder (single asset, record every step) + recorder = Recorder(1, engine_cfg.max_steps) + + # 3) Build strategy and runner + strategy = self._build_strategy(strategy_cfg) + runner = HftStrategyRunner( + engine_cfg=engine_cfg, + strategy=strategy, + risk_cfg=risk_cfg, + ) + + # 4) Backtest-only venue and execution adapters + asset_no = 0 + venue = HftBacktestVenueAdapter(hbt=hbt, asset_no=asset_no) + execution = HftBacktestExecutionAdapter(hbt=hbt, asset_no=asset_no) + + # 5) Run strategy loop (venue-agnostic) + runner.run(venue, execution, recorder) + + # 6) Close backtester and persist statistics + _ = hbt.close() + recorder.to_npz(engine_cfg.stats_npz_path) + + return BacktestResult( + id=cfg.id, + stats_file=engine_cfg.stats_npz_path, + extra_metadata={ + "engine": "hftbacktest", + "instrument": engine_cfg.instrument, + "strategy_name": strategy_cfg.class_path, + "strategy_params": strategy_cfg.params, + "risk_scope": risk_cfg.scope, + "risk_params": risk_cfg.params, + }, + ) diff --git 
a/trading_runtime/backtest/engine/strategy_runner.py b/trading_runtime/backtest/engine/strategy_runner.py new file mode 100644 index 0000000..51c0dbb --- /dev/null +++ b/trading_runtime/backtest/engine/strategy_runner.py @@ -0,0 +1,331 @@ +"""Strategy execution loop for HFT backtests.""" + +from __future__ import annotations + +import logging +from collections import deque +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from trading_framework.core.domain.state import StrategyState +from trading_framework.core.domain.types import ( + BookLevel, + BookPayload, + MarketEvent, + OrderIntent, + Price, + Quantity, +) +from trading_framework.core.events.event_bus import EventBus +from trading_runtime.core.events.sinks.file_recorder import FileRecorderSink +from trading_framework.core.events.sinks.sink_logging import LoggingEventSink +from trading_framework.core.ports.venue_adapter import VenueAdapter +from trading_framework.core.risk.risk_config import RiskConfig +from trading_framework.core.risk.risk_engine import RejectedIntent, RiskEngine + +if TYPE_CHECKING: + from trading_runtime.backtest.adapters.execution import HftBacktestExecutionAdapter + from trading_runtime.backtest.engine.hft_engine import HftEngineConfig + from trading_framework.strategies.base import Strategy + + +MAX_TIMEOUT_NS = 1 << 62 # Effectively "wait forever" without a heartbeat + + +class HftStrategyRunner: + """Strategy runner for HFT backtests. + + Invariant: + - One wait_next() wakeup corresponds to one fully committed timestamp block. + - Strategy is evaluated at most once per wakeup on a stable state. 
+ """ + # pylint: disable=too-many-instance-attributes + + def __init__( + self, + *, + engine_cfg: HftEngineConfig, + strategy: Strategy, + risk_cfg: RiskConfig, + ) -> None: + self.engine_cfg = engine_cfg + self.strategy = strategy + + event_bus = self._build_event_bus( + path=Path(engine_cfg.event_bus_path), + ) + + self.strategy_state = StrategyState( + event_bus=event_bus, + ) + + self.risk = RiskEngine( + risk_cfg=risk_cfg, + event_bus=event_bus, + ) + + self._next_send_ts_ns_local: int | None = None + + def _build_event_bus( + self, + *, + path: Path, + ) -> EventBus: + logger = logging.getLogger("bus") + + sinks = [ + LoggingEventSink(logger), + FileRecorderSink(path), + ] + + return EventBus(sinks=sinks) + + def _close_event_bus(self) -> None: + self.strategy_state._event_bus.close() + self.risk._event_bus.close() + + def _compute_timeout_ns(self, now_local_ns: int) -> int: + """Compute wait timeout in nanoseconds.""" + if self._next_send_ts_ns_local is None: + return MAX_TIMEOUT_NS + delta = self._next_send_ts_ns_local - now_local_ns + return 0 if delta <= 0 else delta + + def _sort_intents_for_gate(self, intents: list[OrderIntent]) -> list[OrderIntent]: + """Sort intents to ensure cancels are evaluated first.""" + + def intent_priority(intent: OrderIntent) -> int: + if intent.intent_type == "cancel": + return 0 + if intent.intent_type == "replace": + return 1 + if intent.intent_type == "new": + return 2 + return 9 + + return sorted(intents, key=lambda it: (intent_priority(it), it.ts_ns_local)) + + def run( + self, + venue: VenueAdapter, + execution: HftBacktestExecutionAdapter, + recorder: Any, + ) -> None: + """Run the backtest loop.""" + # pylint: disable=too-many-locals,too-many-branches,too-many-statements + + instrument = self.engine_cfg.instrument + contract_size = self.engine_cfg.contract_size + + # Initialize hftbacktest engine + # Fetch very first event block to set local timestamp + venue.wait_next(timeout_ns=MAX_TIMEOUT_NS, 
include_order_resp=False) + observed_local_ns = venue.current_timestamp_ns() + self.strategy_state.update_timestamp(observed_local_ns) + sim_now_ns = self.strategy_state.sim_ts_ns_local + + while True: + timeout_ns = self._compute_timeout_ns(self.strategy_state.sim_ts_ns_local) + rc = venue.wait_next(timeout_ns=timeout_ns, include_order_resp=True) + + if rc == 1: + self._close_event_bus() + break + + observed_local_ns = venue.current_timestamp_ns() + self.strategy_state.update_timestamp(observed_local_ns) + sim_now_ns = self.strategy_state.sim_ts_ns_local + + raw_intents: list[OrderIntent] = [] + + # ----------------------------------------------------------------- + # Market update + # ----------------------------------------------------------------- + if rc == 2: + depth = venue.read_market_snapshot() + + bids: list[BookLevel] = [] + asks: list[BookLevel] = [] + + max_levels = max(0, int(self.engine_cfg.max_price_tick_levels)) + if max_levels > 0: + roi_lb_tick = depth.roi_lb_tick + tick_size = depth.tick_size + + # ----------------------- + # ASK side (fixed ticks) + # ----------------------- + for offset in range(max_levels): + price_tick = depth.best_ask_tick + offset + i = price_tick - roi_lb_tick + + qty = 0.0 + if 0 <= i < len(depth.ask_depth): + qty = depth.ask_depth[i] + + asks.append( + BookLevel( + price=Price( + currency="UNKNOWN", + value=price_tick * tick_size, + ), + quantity=Quantity( + value=qty, + unit="contracts", + ), + ) + ) + + # ----------------------- + # BID side (fixed ticks) + # ----------------------- + for offset in range(max_levels): + price_tick = depth.best_bid_tick - offset + i = price_tick - roi_lb_tick + + qty = 0.0 + if 0 <= i < len(depth.bid_depth): + qty = depth.bid_depth[i] + + bids.append( + BookLevel( + price=Price( + currency="UNKNOWN", + value=price_tick * tick_size, + ), + quantity=Quantity( + value=qty, + unit="contracts", + ), + ) + ) + + market_event = MarketEvent( + ts_ns_exch=sim_now_ns, + ts_ns_local=sim_now_ns, + 
instrument=instrument, + event_type="book", + book=BookPayload( + book_type="snapshot", + bids=bids, + asks=asks, + depth=min(len(bids), len(asks)), + ), + ) + + self.strategy_state.update_market( + instrument=instrument, + best_bid=depth.best_bid, + best_ask=depth.best_ask, + best_bid_qty=depth.best_bid_qty, + best_ask_qty=depth.best_ask_qty, + tick_size=depth.tick_size, + lot_size=depth.lot_size, + contract_size=contract_size, + ts_ns_local=sim_now_ns, + ts_ns_exch=sim_now_ns, + ) + + constraints = self.risk.build_constraints(sim_now_ns) + raw_intents.extend( + self.strategy.on_feed( + self.strategy_state, + market_event, + self.engine_cfg, + constraints, + ) + ) + + # ----------------------------------------------------------------- + # Order / account update + # ----------------------------------------------------------------- + if rc == 3: + state_values, orders = venue.read_orders_snapshot() + + self.strategy_state.update_account( + instrument=instrument, + position=state_values.position, + balance=state_values.balance, + fee=state_values.fee, + trading_volume=state_values.trading_volume, + trading_value=state_values.trading_value, + num_trades=state_values.num_trades, + ) + self.strategy_state.ingest_order_snapshots( + instrument, + orders.values(), + ) + + constraints = self.risk.build_constraints(sim_now_ns) + raw_intents.extend( + self.strategy.on_order_update( + self.strategy_state, + self.engine_cfg, + constraints, + ) + ) + + # ----------------------------------------------------------------- + # Queue flush + # ----------------------------------------------------------------- + if ( + self._next_send_ts_ns_local is not None + and sim_now_ns >= self._next_send_ts_ns_local + ): + raw_intents.extend( + self.strategy_state.pop_queued_intents(instrument) + ) + + # ----------------------------------------------------------------- + # Gate + execution + # ----------------------------------------------------------------- + if raw_intents: + combined = 
self._sort_intents_for_gate(raw_intents) + + decision = self.risk.decide_intents( + raw_intents=combined, + state=self.strategy_state, + now_ts_ns_local=sim_now_ns, + ) + + execution_errors: list[tuple[OrderIntent, str]] = [] + if decision.accepted_now: + execution_errors = execution.apply_intents( + decision.accepted_now + ) + + failed_keys = { + (it.instrument, it.client_order_id) + for it, _ in execution_errors + } + + for it in decision.accepted_now: + if (it.instrument, it.client_order_id) in failed_keys: + continue + self.strategy_state.mark_intent_sent( + it.instrument, + it.client_order_id, + it.intent_type, + ) + + if execution_errors: + for it, reason in execution_errors: + decision.execution_rejected.append( + RejectedIntent(it, reason) + ) + + self.strategy.on_risk_decision(decision) + self._next_send_ts_ns_local = decision.next_send_ts_ns_local + + # If there are queued intents but the gate did not provide a next_send_ts_ns_local, + # wake up at the next second boundary to ensure progress. 
import io
from pathlib import Path

from oci.auth.signers import InstancePrincipalsSecurityTokenSigner
from oci.config import from_file
from oci.object_storage import ObjectStorageClient
from oci.signer import Signer


class OCIObjectStorageS3Shim:
    """
    Lightweight adapter that exposes a small, S3-like interface on top of
    Oracle Cloud Infrastructure (OCI) Object Storage.

    The goal of this class is *API shape compatibility*, not feature parity:
    it mimics a minimal subset of the boto3 S3 client that is sufficient for
    simple readers/writers and data pipelines.

    Authentication modes:
      - "instance_principal":
            Uses the OCI Instance Principal of the current Compute instance.
            Suitable only when running on OCI infrastructure.
      - "api_key":
            Uses a user-scoped OCI API key (private PEM key + config file).
            Suitable for local development, CI, and non-OCI environments.

    Implemented operations:
      - put_object: upload an object (write)
      - list_objects: list objects under a bucket/prefix (read)
      - get_object: download an object (read)
      - download_to_file: stream an object to disk with constant memory

    Design notes:
      - Method signatures and return shapes are intentionally boto3-like.
      - This adapter talks directly to OCI Object Storage APIs, NOT to the
        S3-compatibility HTTP endpoint.
      - Authorization is fully governed by OCI IAM policies.
    """

    def __init__(
        self,
        *,
        region: str | None = None,
        auth_mode: str = "instance_principal",
        oci_config_file: str | None = None,
        oci_profile: str = "DEFAULT",
    ) -> None:
        """
        Create a new Object Storage client wrapper.

        Parameters:
            region:
                OCI region identifier (e.g. "eu-frankfurt-1").
                If provided, it overrides the region in the OCI config file.
            auth_mode:
                Authentication strategy to use:
                  - "instance_principal": use the instance's identity (OCI-only)
                  - "api_key": use a user API key defined in an OCI config file
            oci_config_file:
                Path to an OCI CLI-style config file (required for api_key
                auth). Typically "~/.oci/config".
            oci_profile:
                Profile name inside the OCI config file to load credentials
                from.

        Raises:
            ValueError: on an unknown auth_mode, or api_key auth without a
                config file.
        """
        if auth_mode == "instance_principal":
            signer = InstancePrincipalsSecurityTokenSigner()
            config = {}

        elif auth_mode == "api_key":
            if oci_config_file is None:
                raise ValueError("oci_config_file is required for api_key auth")

            config = from_file(
                file_location=oci_config_file,
                profile_name=oci_profile,
            )
            signer = Signer(
                tenancy=config["tenancy"],
                user=config["user"],
                fingerprint=config["fingerprint"],
                private_key_file_location=config["key_file"],
                pass_phrase=config.get("pass_phrase"),
            )

        else:
            raise ValueError(f"Unknown auth_mode: {auth_mode}")

        client_kwargs = {}
        if region:
            client_kwargs["region"] = region

        self.client = ObjectStorageClient(
            config=config,
            signer=signer,
            **client_kwargs,
        )

        # The namespace is tenancy-wide and required by every subsequent
        # API call; resolve it once up-front.
        self.namespace = self.client.get_namespace().data

    @staticmethod
    def _header(response, name: str) -> str | None:
        """
        Best-effort read of a response header.

        Returns None when the response has no headers mapping or the header
        is absent. Narrow replacement for the previous broad
        `try/except Exception: pass` blocks, which could also hide real bugs.
        """
        headers = getattr(response, "headers", None)
        if headers is None:
            return None
        return headers.get(name)

    def put_object(self, bucket: str, key: str, body, content_type: str = "application/octet-stream"):
        """
        Upload an object to an OCI Object Storage bucket.

        Parameters (boto3-like semantics):
            bucket: bucket name
            key: object name (path-like)
            body: bytes or file-like object
            content_type: optional MIME type

        Returns a minimal boto3-like dict containing the object's ETag
        (None if OCI did not provide one).
        """
        resp = self.client.put_object(
            namespace_name=self.namespace,
            bucket_name=bucket,
            object_name=key,
            put_object_body=body,
            content_type=content_type,
        )
        return {"ETag": self._header(resp, "etag")}

    def list_objects(
        self,
        bucket: str,
        prefix: str | None = None,
        continuation_token: str | None = None,
        max_keys: int = 1000,
    ) -> dict[str, object]:
        """
        List objects in a bucket, optionally filtered by prefix.

        This method approximates boto3's list_objects behavior:
          - 'Prefix' filters object names
          - pagination is exposed via ContinuationToken /
            NextContinuationToken

        Internally, this maps to OCI's 'list_objects' API, using:
          - 'prefix' for filtering
          - 'start' for pagination

        Returns:
            A dict with keys:
              - Contents: list of {"Key", "Size"}
              - IsTruncated: whether more results are available
              - NextContinuationToken: token for the next page (or None)
        """
        kwargs = {
            "namespace_name": self.namespace,
            "bucket_name": bucket,
            "limit": max_keys,
        }
        if prefix:
            kwargs["prefix"] = prefix
        if continuation_token:
            kwargs["start"] = continuation_token

        resp = self.client.list_objects(**kwargs)

        objects = [
            {"Key": o.name, "Size": getattr(o, "size", None)}
            for o in resp.data.objects or []
        ]

        next_token = getattr(resp.data, "next_start_with", None)
        return {
            "Contents": objects,
            "IsTruncated": bool(next_token),
            "NextContinuationToken": next_token,
        }

    def get_object(self, bucket: str, key: str) -> dict[str, object]:
        """
        Download an object from OCI Object Storage.

        Returns a boto3-like response where:
          - 'Body' is a file-like object (io.BytesIO)
          - 'ContentLength' and 'ContentType' are best-effort metadata

        The OCI Python SDK exposes response bodies in different shapes
        depending on transport and SDK version; this method normalizes
        them into a single bytes buffer.

        Raises:
            TypeError: when no readable data attribute can be found on the
                SDK response.
        """
        resp = self.client.get_object(
            namespace_name=self.namespace,
            bucket_name=bucket,
            object_name=key,
        )

        data_bytes = None
        d = resp.data

        # Case 1: direct .read()
        if hasattr(d, "read") and callable(getattr(d, "read")):
            data_bytes = d.read()

        # Case 2: .content (bytes already)
        elif hasattr(d, "content"):
            data_bytes = d.content

        # Case 3: raw.read()
        elif hasattr(d, "raw") and hasattr(d.raw, "read") and callable(getattr(d.raw, "read")):
            data_bytes = d.raw.read()

        # Case 4: stream chunks (fallback)
        elif hasattr(d, "raw") and hasattr(d.raw, "stream") and callable(getattr(d.raw, "stream")):
            chunks = []
            for chunk in d.raw.stream(1024 * 1024, decode_content=False):
                chunks.append(chunk)
            data_bytes = b"".join(chunks)

        else:
            raise TypeError("Unsupported OCI get_object response type; no readable data attribute found.")

        # Content-Length (best effort): a missing or malformed header falls
        # back to the actual payload length.
        raw_length = self._header(resp, "content-length")
        try:
            content_length = int(raw_length) if raw_length is not None else 0
        except ValueError:
            content_length = 0
        if not content_length and data_bytes is not None:
            content_length = len(data_bytes)

        content_type = self._header(resp, "content-type") or ""

        return {
            "Body": io.BytesIO(data_bytes if data_bytes is not None else b""),
            "ContentLength": content_length or 0,
            "ContentType": content_type,
        }

    def download_to_file(
        self,
        bucket: str,
        key: str,
        destination: str | Path,
        *,
        chunk_size_bytes: int = 8 * 1024 * 1024,
    ) -> None:
        """
        Stream an object from OCI Object Storage directly to a local file.

        This method performs a chunked download over HTTPS and writes each
        chunk incrementally to disk. The entire object is never loaded into
        memory at once, ensuring constant and predictable RAM usage.

        Parameters:
            bucket:
                Name of the OCI Object Storage bucket.
            key:
                Object name (path-like key) within the bucket.
            destination:
                Local filesystem path where the object will be written.
                Parent directories must already exist.
            chunk_size_bytes:
                Size of each streamed chunk in bytes. Defaults to 8 MiB.

        Raises:
            RuntimeError:
                If the OCI response does not expose a streamable body.
        """
        destination_path = Path(destination)

        response = self.client.get_object(
            namespace_name=self.namespace,
            bucket_name=bucket,
            object_name=key,
        )

        data = response.data

        if not hasattr(data, "raw") or not hasattr(data.raw, "stream"):
            raise RuntimeError(
                "OCI get_object response does not expose a streamable body."
            )

        with destination_path.open("wb") as file_handle:
            for chunk in data.raw.stream(
                chunk_size_bytes,
                decode_content=False,
            ):
                file_handle.write(chunk)
+ """ + + def iter_files( + self, + *, + start_ts_ns: int, + end_ts_ns: int, + symbol: str, + venue: str, + datatype: str, + ) -> list[DataFileMeta]: + """ + Iterate over data files matching the given constraints. + """ diff --git a/trading_runtime/backtest/orchestrator/planner.py b/trading_runtime/backtest/orchestrator/planner.py new file mode 100644 index 0000000..01601e5 --- /dev/null +++ b/trading_runtime/backtest/orchestrator/planner.py @@ -0,0 +1,151 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from trading_runtime.backtest.orchestrator.manifest import DataFileMeta, DatasetManifest + +from trading_runtime.backtest.orchestrator.planner_models import ( + ExperimentPlan, + SegmentPlan, +) +from trading_runtime.backtest.orchestrator.segmenter import segment_files +from trading_runtime.backtest.orchestrator.sweeps import ( + expand_parameter_grid, + expand_ranges, +) + + +def plan_experiment( + *, + experiment_id: str, + start_ts_ns: int, + end_ts_ns: int, + symbol: str, + venue: str, + datatype: str, + sweep_spec: dict[str, Any], + manifest: DatasetManifest, + max_segment_bytes: int, +) -> ExperimentPlan: + """ + Build a deterministic execution plan for an experiment. + + This function performs *planning only*. + It does not access S3 directly, does not allocate scratch space, + and does not execute any backtests. + + Responsibilities: + - resolve relevant data files via the manifest + - segment data according to scratch size limits + - expand parameter sweeps + - produce a pure ExperimentPlan + + Parameters + ---------- + experiment_id: + Stable identifier for the experiment. + + start_ts_ns / end_ts_ns: + Experiment time range (unix timestamp, nanoseconds). + + symbol: + Instrument included in the experiment. + + sweep_spec: + User-facing sweep specification. May contain: + - explicit values + - iterables + - RangeSpec instances + + manifest: + Dataset manifest used to resolve physical data files. 
+ + max_segment_bytes: + Maximum total size (bytes) allowed per segment. + + Returns + ------- + ExperimentPlan + Fully expanded execution plan. + """ + + if start_ts_ns >= end_ts_ns: + raise ValueError("start_ts_ns must be < end_ts_ns") + + if max_segment_bytes <= 0: + raise ValueError("max_segment_bytes must be > 0") + + # ------------------------------------------------------------------ + # 1. Resolve all relevant data files + # ------------------------------------------------------------------ + + files: list[DataFileMeta] = manifest.iter_files( + start_ts_ns=start_ts_ns, + end_ts_ns=end_ts_ns, + symbol=symbol, + venue=venue, + datatype=datatype, + ) + + if not files: + raise RuntimeError("No data files found for given experiment range") + + # ------------------------------------------------------------------ + # 2. Segment files according to scratch constraints + # ------------------------------------------------------------------ + + file_segments: list[list[DataFileMeta]] = segment_files( + files=files, + max_bytes=max_segment_bytes, + ) + + if not file_segments: + raise RuntimeError("Segmenter produced no segments") + + # ------------------------------------------------------------------ + # 3. Expand parameter sweeps + # ------------------------------------------------------------------ + + normalized_grid = expand_ranges(sweep_spec) + sweep_plans = expand_parameter_grid(normalized_grid) + + # ------------------------------------------------------------------ + # 4. 
Build SegmentPlans + # ------------------------------------------------------------------ + + segments: list[SegmentPlan] = [] + + for index, segment in enumerate(file_segments): + segment_id = f"segment_{index:04d}" + + segment_start = min(f.start_ts_ns for f in segment) + segment_end = max(f.end_ts_ns for f in segment) + + estimated_bytes = sum(f.size_bytes for f in segment) + + if estimated_bytes > max_segment_bytes: + raise RuntimeError( + f"Segment {segment_id} exceeds max_segment_bytes " + f"({estimated_bytes} > {max_segment_bytes})" + ) + + segments.append( + SegmentPlan( + segment_id=segment_id, + start_ts_ns=segment_start, + end_ts_ns=segment_end, + estimated_bytes=estimated_bytes, + files=[f.object_key for f in segment], + sweeps=sweep_plans, + ) + ) + + # ------------------------------------------------------------------ + # 5. Return final experiment plan + # ------------------------------------------------------------------ + + return ExperimentPlan( + experiment_id=experiment_id, + segments=segments, + ) diff --git a/trading_runtime/backtest/orchestrator/planner_models.py b/trading_runtime/backtest/orchestrator/planner_models.py new file mode 100644 index 0000000..506f6a0 --- /dev/null +++ b/trading_runtime/backtest/orchestrator/planner_models.py @@ -0,0 +1,38 @@ +""" +Planning model definitions. + +This module contains immutable planning structures used to describe +experiments, segments, and sweeps. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from trading_runtime.backtest.orchestrator.sweeps import SweepPlan + + +@dataclass(frozen=True, slots=True) +class SegmentPlan: + """ + Execution plan for a single segment of data. 
+ """ + + segment_id: str + start_ts_ns: int + end_ts_ns: int + estimated_bytes: int + files: list[str] + sweeps: list[SweepPlan] + + +@dataclass(frozen=True, slots=True) +class ExperimentPlan: + """ + High-level execution plan for an experiment. + """ + + experiment_id: str + segments: list[SegmentPlan] diff --git a/trading_runtime/backtest/orchestrator/s3_manifest.py b/trading_runtime/backtest/orchestrator/s3_manifest.py new file mode 100644 index 0000000..cc55cd1 --- /dev/null +++ b/trading_runtime/backtest/orchestrator/s3_manifest.py @@ -0,0 +1,128 @@ +from __future__ import annotations + +import json + +from trading_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim +from trading_runtime.backtest.orchestrator.manifest import DataFileMeta, DatasetManifest + + +class S3DatasetManifest(DatasetManifest): + """ + DatasetManifest implementation backed by S3. + + Semantics: + - Manifests live under a canonical prefix (e.g. s3://data/canonical/) + - All filtering is semantic (venue, datatype, symbol, time) + - Path layout is NOT part of the contract + """ + + def __init__( + self, + *, + bucket: str, + stage: str, + ) -> None: + self._s3 = OCIObjectStorageS3Shim(region="eu-frankfurt-1") + self._bucket = bucket + self._prefix = stage.rstrip("/") + + # ------------------------------------------------------------------ + + def iter_files( + self, + *, + start_ts_ns: int, + end_ts_ns: int, + symbol: str, + venue: str, + datatype: str, + ) -> list[DataFileMeta]: + files: list[DataFileMeta] = [] + + for key in self._list_manifest_keys(): + manifest = self._load_manifest(key) + + dataset = manifest["dataset"] + + if dataset["venue"] != venue: + continue + + if dataset["datatype"] != datatype: + continue + + time_range = manifest["time_range_ns"] + if not self._overlaps( + start_ts_ns, + end_ts_ns, + time_range["start"], + time_range["end"], + ): + continue + + for entry in manifest["files"]: + if not self._overlaps( + start_ts_ns, + end_ts_ns, + 
entry["start_ts_ns"], + entry["end_ts_ns"], + ): + continue + + manifest_key = key + manifest_dir = manifest_key.rsplit("/", 1)[0] + object_key = f"{manifest_dir}/{entry['file_id']}" + + files.append( + DataFileMeta( + file_id=entry["file_id"], + object_key=object_key, + start_ts_ns=entry["start_ts_ns"], + end_ts_ns=entry["end_ts_ns"], + size_bytes=entry["size_bytes"], + symbol=symbol, + venue=venue, + datatype=datatype, + ) + ) + + return files + + # ------------------------------------------------------------------ + + def _list_manifest_keys(self) -> list[str]: + resp = self._s3.list_objects( + bucket=self._bucket, + prefix=self._prefix, + ) + + contents = resp.get("Contents", []) + + return [ + obj["Key"] + for obj in contents + if obj["Key"].endswith("/manifest.json") + ] + + def _load_manifest(self, key: str) -> dict: + resp = self._s3.get_object( + bucket=self._bucket, + key=key, + ) + + body = resp["Body"] + + if hasattr(body, "read"): + raw_bytes = body.read() + else: + raw_bytes = body + + return json.loads(raw_bytes) + + @staticmethod + def _overlaps( + a_start: int, + a_end: int, + b_start: int, + b_end: int, + ) -> bool: + return a_start < b_end and b_start < a_end diff --git a/trading_runtime/backtest/orchestrator/segmenter.py b/trading_runtime/backtest/orchestrator/segmenter.py new file mode 100644 index 0000000..d06bb27 --- /dev/null +++ b/trading_runtime/backtest/orchestrator/segmenter.py @@ -0,0 +1,44 @@ +""" +File segmentation logic. + +This module contains utilities for splitting data files into +byte-size-constrained segments. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from trading_runtime.backtest.orchestrator.manifest import DataFileMeta + + +def segment_files( + files: list[DataFileMeta], + max_bytes: int, +) -> list[list[DataFileMeta]]: + """ + Split files into ordered segments such that each segment does not + exceed the given maximum size in bytes. 
+ """ + + segments: list[list[DataFileMeta]] = [] + current_segment: list[DataFileMeta] = [] + current_bytes = 0 + + # Sort files by start timestamp to ensure deterministic segmentation + for file_meta in sorted(files, key=lambda item: item.start_ts_ns): + exceeds_limit = current_bytes + file_meta.size_bytes > max_bytes + + if current_segment and exceeds_limit: + segments.append(current_segment) + current_segment = [] + current_bytes = 0 + + current_segment.append(file_meta) + current_bytes += file_meta.size_bytes + + if current_segment: + segments.append(current_segment) + + return segments diff --git a/trading_runtime/backtest/orchestrator/summary.py b/trading_runtime/backtest/orchestrator/summary.py new file mode 100644 index 0000000..a6c9aee --- /dev/null +++ b/trading_runtime/backtest/orchestrator/summary.py @@ -0,0 +1,141 @@ +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING, List + +if TYPE_CHECKING: + from trading_runtime.backtest.orchestrator.planner_models import ExperimentPlan + + +# --------------------------------------------------------------------------- +# Data models +# --------------------------------------------------------------------------- + +@dataclass(frozen=True, slots=True) +class SegmentSummary: + segment_id: str + start_ts_ns: int + end_ts_ns: int + estimated_bytes: int + file_count: int + sweep_count: int + scratch_utilization: float # 0.0 - 1.0 + + +@dataclass(frozen=True, slots=True) +class ExperimentSummary: + experiment_id: str + segment_count: int + sweeps_per_segment: int + total_backtests: int + max_segment_bytes: int + segments: List[SegmentSummary] + warnings: List[str] + + +# --------------------------------------------------------------------------- +# Summary builder +# --------------------------------------------------------------------------- + +def summarize_experiment( + *, + plan: ExperimentPlan, + max_segment_bytes: int, +) -> ExperimentSummary: + warnings: 
list[str] = [] + segments: list[SegmentSummary] = [] + + if not plan.segments: + warnings.append("Experiment contains no segments") + + sweeps_per_segment = ( + len(plan.segments[0].sweeps) if plan.segments else 0 + ) + + total_backtests = len(plan.segments) * sweeps_per_segment + + if sweeps_per_segment == 0: + warnings.append("No sweeps defined (0 backtests will run)") + + if total_backtests > 500: + warnings.append( + f"High number of backtests ({total_backtests}); runtime may be long" + ) + + if len(plan.segments) > 50: + warnings.append( + f"High number of segments ({len(plan.segments)})" + ) + + for segment in plan.segments: + utilization = segment.estimated_bytes / max_segment_bytes + + if utilization > 1.0: + warnings.append( + f"{segment.segment_id} exceeds scratch size " + f"({utilization:.0%})" + ) + elif utilization > 0.9: + warnings.append( + f"{segment.segment_id} uses {utilization:.0%} of scratch size" + ) + + if segment.estimated_bytes < max_segment_bytes * 0.1: + warnings.append( + f"{segment.segment_id} is very small " + f"({utilization:.0%} of scratch)" + ) + + segments.append( + SegmentSummary( + segment_id=segment.segment_id, + start_ts_ns=segment.start_ts_ns, + end_ts_ns=segment.end_ts_ns, + estimated_bytes=segment.estimated_bytes, + file_count=len(segment.files), + sweep_count=len(segment.sweeps), + scratch_utilization=utilization, + ) + ) + + return ExperimentSummary( + experiment_id=plan.experiment_id, + segment_count=len(plan.segments), + sweeps_per_segment=sweeps_per_segment, + total_backtests=total_backtests, + max_segment_bytes=max_segment_bytes, + segments=segments, + warnings=warnings, + ) + + +# --------------------------------------------------------------------------- +# Pretty printer +# --------------------------------------------------------------------------- + +def print_experiment_summary(summary: ExperimentSummary) -> None: + max_gb = summary.max_segment_bytes / 1024**3 + + print(f"Experiment: {summary.experiment_id}") + 
print(f"Segments: {summary.segment_count}") + print(f"Sweeps per segment: {summary.sweeps_per_segment}") + print(f"Total backtests: {summary.total_backtests}") + print(f"Max segment size: {max_gb:.2f} GB") + print() + + if summary.warnings: + print("Warnings:") + for w in summary.warnings: + print(f" - {w}") + print() + + print("Segments:") + for s in summary.segments: + used_gb = s.estimated_bytes / 1024**3 + print( + f" - {s.segment_id}: " + f"{s.file_count} files | " + f"{used_gb:.2f} / {max_gb:.2f} GB | " + f"{s.sweep_count} sweeps | " + f"{s.scratch_utilization:.0%} scratch" + ) diff --git a/trading_runtime/backtest/orchestrator/sweeps.py b/trading_runtime/backtest/orchestrator/sweeps.py new file mode 100644 index 0000000..7eb2960 --- /dev/null +++ b/trading_runtime/backtest/orchestrator/sweeps.py @@ -0,0 +1,83 @@ +""" +Parameter sweep utilities. + +This module provides helpers to expand parameter specifications into +concrete sweep plans. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from itertools import product +from typing import Any, Iterable + + +@dataclass(frozen=True, slots=True) +class RangeSpec: + """ + Numeric range specification used for parameter sweeps. + """ + + start: float + stop: float + step: float + + +@dataclass(frozen=True, slots=True) +class SweepPlan: + """ + Concrete parameter sweep configuration. + """ + + sweep_id: str + parameters: dict[str, Any] + + +def expand_ranges(spec: dict[str, Any]) -> dict[str, list[Any]]: + """ + Expand range and iterable specifications into explicit value lists. 
+ """ + + expanded: dict[str, list[Any]] = {} + + for key, value in spec.items(): + if isinstance(value, RangeSpec): + values: list[Any] = [] + current = value.start + + # Add small epsilon to avoid floating point termination issues + while current <= value.stop + 1e-12: + values.append(round(current, 10)) + current += value.step + + expanded[key] = values + continue + + if isinstance(value, Iterable) and not isinstance(value, (str, bytes)): + expanded[key] = list(value) + continue + + expanded[key] = [value] + + return expanded + + +def expand_parameter_grid(grid: dict[str, list[Any]]) -> list[SweepPlan]: + """ + Generate all parameter combinations from a parameter grid. + """ + + if not grid: + return [SweepPlan("sweep_0000", {})] + + keys = sorted(grid.keys()) + values = [grid[key] for key in keys] + + sweeps: list[SweepPlan] = [] + + for index, combination in enumerate(product(*values)): + parameters = dict(zip(keys, combination, strict=True)) + sweep_id = f"sweep_{index:04d}" + sweeps.append(SweepPlan(sweep_id, parameters)) + + return sweeps diff --git a/trading_runtime/backtest/runtime/__init__.py b/trading_runtime/backtest/runtime/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/trading_runtime/backtest/runtime/context.py b/trading_runtime/backtest/runtime/context.py new file mode 100644 index 0000000..e9301bf --- /dev/null +++ b/trading_runtime/backtest/runtime/context.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +from dataclasses import dataclass +from datetime import datetime +from pathlib import Path +from typing import Mapping + + +@dataclass(frozen=True, slots=True) +class ExperimentContext: + experiment_id: str + + expected_segments: int + completed_segments: int + failed_segments: int + + experiment_started_at: datetime + + scratch_root: Path + + def __post_init__(self) -> None: + object.__setattr__(self, "scratch_root", Path(self.scratch_root)) + + @property + def scratch_experiment_dir(self) -> Path: + return 
self.scratch_root / self.experiment_id + + +@dataclass(frozen=True, slots=True) +class SegmentContext: + experiment_id: str + segment_id: str + + expected_sweeps: int + completed_sweeps: int + failed_sweeps: int + + segment_started_at: datetime + + scratch_root: Path + + def __post_init__(self) -> None: + object.__setattr__(self, "scratch_root", Path(self.scratch_root)) + + @property + def scratch_segment_dir(self) -> Path: + return ( + self.scratch_root + / self.experiment_id + / self.segment_id + ) + + +@dataclass(frozen=True, slots=True) +class SweepContext: + """ + Immutable runtime context for a single backtest sweep. + + One SweepContext == one Pod == one backtest execution. + """ + + # Identity + experiment_id: str + segment_id: str + sweep_id: str + + # Data + stage: str + venue: str + datatype: str + symbol: str + file_keys: tuple[str, ...] + + # Parameters + parameters: Mapping[str, object] + + # Runtime paths + scratch_root: Path + results_root: Path + + def __post_init__(self) -> None: + """ + Normalize runtime paths after JSON deserialization. + + JSON has no Path type, so scratch_root / results_root + may arrive as strings in worker pods. 
+ """ + object.__setattr__(self, "scratch_root", Path(self.scratch_root)) + object.__setattr__(self, "results_root", Path(self.results_root)) + + @property + def scratch_segment_dir(self) -> Path: + return ( + self.scratch_root + / self.experiment_id + / self.segment_id + ) + + @property + def scratch_data_dir(self) -> Path: + return self.scratch_segment_dir / "data" + + @property + def scratch_results_dir(self) -> Path: + return self.scratch_segment_dir / "results" / self.sweep_id diff --git a/trading_runtime/backtest/runtime/entrypoint.py b/trading_runtime/backtest/runtime/entrypoint.py new file mode 100644 index 0000000..22e49cf --- /dev/null +++ b/trading_runtime/backtest/runtime/entrypoint.py @@ -0,0 +1,268 @@ +from __future__ import annotations + +import argparse +import json +import sys +from dataclasses import asdict +from pathlib import Path +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from trading_runtime.backtest.orchestrator.planner_models import ExperimentPlan + +from trading_runtime.backtest.orchestrator.planner import plan_experiment +from trading_runtime.backtest.orchestrator.s3_manifest import S3DatasetManifest +from trading_runtime.backtest.orchestrator.summary import ( + print_experiment_summary, + summarize_experiment, +) +from trading_runtime.backtest.orchestrator.sweeps import RangeSpec +from trading_runtime.backtest.runtime.context import SweepContext + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + +def _load_json(path: Path) -> dict[str, Any]: + if not path.exists(): + raise FileNotFoundError(path) + return json.loads(path.read_text(encoding="utf-8")) + + +def _parse_sweep_spec(raw: dict[str, Any]) -> dict[str, Any]: + """ + Same semantics as your planner CLI: + dict -> RangeSpec or explicit lists + """ + parsed: dict[str, Any] = {} + for key, value in raw.items(): + if isinstance(value, dict): + 
def _emit_sweep_context(
    *,
    plan: "ExperimentPlan",
    base_cfg: dict[str, Any],
    scratch_root: Path,
    results_root: Path,
    out_dir: Path,
) -> None:
    """
    Write one SweepContext JSON per (segment, sweep) pair into *out_dir*.

    These JSON files are the fan-out unit consumed by Argo: one file maps
    to exactly one Pod / one backtest execution.
    """
    out_dir.mkdir(parents=True, exist_ok=True)

    experiment_cfg: dict = base_cfg["experiment"]

    # Dataset identity shared by every sweep of this experiment.
    dataset_kwargs = {
        "stage": experiment_cfg.get("stage", "derived"),
        "venue": experiment_cfg["venue"],
        "datatype": experiment_cfg["datatype"],
        "symbol": experiment_cfg["symbol"],
    }

    for segment in plan.segments:
        for sweep in segment.sweeps:
            context = SweepContext(
                experiment_id=plan.experiment_id,
                segment_id=segment.segment_id,
                sweep_id=sweep.sweep_id,
                file_keys=tuple(segment.files),
                parameters={
                    # full engine/strategy/risk blocks pass straight through
                    "engine": base_cfg["engine"],
                    "strategy": base_cfg["strategy"],
                    "risk": base_cfg["risk"],
                    # plus the parameters specific to this sweep
                    "sweep": sweep.parameters,
                },
                scratch_root=scratch_root,
                results_root=results_root,
                **dataset_kwargs,
            )

            target = out_dir / f"{segment.segment_id}__{sweep.sweep_id}.json"
            target.write_text(
                json.dumps(asdict(context), indent=2, default=str),
                encoding="utf-8",
            )


# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

def _build_arg_parser() -> argparse.ArgumentParser:
    """Construct the CLI parser (kept separate from main for readability)."""
    parser = argparse.ArgumentParser(
        description="Backtest entrypoint (plan or run via sweep fan-out)"
    )
    parser.add_argument(
        "--config",
        type=Path,
        required=True,
        help="Path to experiment JSON config (inside image or mounted).",
    )
    parser.add_argument(
        "--plan",
        action="store_true",
        help="Plan experiment and print summary (no execution).",
    )
    parser.add_argument(
        "--run",
        action="store_true",
        help="Plan experiment and emit sweep contexts for execution.",
    )
    parser.add_argument(
        "--scratch-root",
        type=Path,
        default=Path("/mnt/scratch"),
        help="Root directory for scratch volume.",
    )
    parser.add_argument(
        "--results-root",
        type=Path,
        default=Path("/results"),
        help="Logical results root (used for context only).",
    )
    parser.add_argument(
        "--emit-dir",
        type=Path,
        default=Path("/mnt/scratch/sweeps"),
        help="Directory where SweepContext JSONs are emitted.",
    )
    return parser


def main() -> None:
    """
    CLI entrypoint.

    --plan : plan the experiment and print its summary, then exit.
    --run  : additionally emit one SweepContext JSON per sweep plus the
             index/bookkeeping files Argo consumes.
    """
    args = _build_arg_parser().parse_args()

    if not (args.plan or args.run):
        print("Error: one of --plan or --run must be specified.", file=sys.stderr)
        sys.exit(2)

    # --- load config --------------------------------------------------
    config = _load_json(args.config)
    experiment_cfg = config["experiment"]

    segmentation: dict = experiment_cfg.get("segmentation", {})
    max_segment_bytes = segmentation.get("max_segment_gb", 100) * 1024**3

    manifest = S3DatasetManifest(
        bucket="data",
        stage=experiment_cfg.get("stage", "derived"),
    )

    # --- planning -----------------------------------------------------
    plan = plan_experiment(
        experiment_id=config["id"],
        start_ts_ns=experiment_cfg["start_ts_ns"],
        end_ts_ns=experiment_cfg["end_ts_ns"],
        symbol=experiment_cfg["symbol"],
        venue=experiment_cfg["venue"],
        datatype=experiment_cfg["datatype"],
        sweep_spec=_parse_sweep_spec(experiment_cfg.get("sweeps", {})),
        manifest=manifest,
        max_segment_bytes=max_segment_bytes,
    )

    # Always show the plan — this is what ends up in the Argo logs.
    print_experiment_summary(
        summarize_experiment(plan=plan, max_segment_bytes=max_segment_bytes)
    )

    if args.plan and not args.run:
        # Plan-only mode: exit after printing.
        return

    # --- run preparation (emit sweep contexts) ------------------------
    out_dir: Path = args.emit_dir
    out_dir.mkdir(parents=True, exist_ok=True)

    (out_dir / "experiment_id.txt").write_text(
        plan.experiment_id,
        encoding="utf-8",
    )
    (out_dir / "expected_segments.txt").write_text(
        str(len(plan.segments)),
        encoding="utf-8",
    )

    _emit_sweep_context(
        plan=plan,
        base_cfg=config,
        scratch_root=args.scratch_root,
        results_root=args.results_root,
        out_dir=out_dir,
    )

    segments_index = [
        {
            "segment_id": segment.segment_id,
            "expected_sweeps": len(segment.sweeps),
        }
        for segment in plan.segments
    ]
    (out_dir / "segments.json").write_text(
        json.dumps(segments_index, indent=2),
        encoding="utf-8",
    )

    emitted_paths = [
        str(out_dir / f"{segment.segment_id}__{sweep.sweep_id}.json")
        for segment in plan.segments
        for sweep in segment.sweeps
    ]
    (out_dir / "index.json").write_text(
        json.dumps(emitted_paths, indent=2),
        encoding="utf-8",
    )

    print()
    print(f"Emitted sweep contexts to: {args.emit_dir}")
    print("Each JSON represents exactly one sweep (one Pod).")


if __name__ == "__main__":
    main()
import logging
import os
import shutil
from datetime import datetime, timezone
from pathlib import Path

from trading_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim
from trading_runtime.backtest.runtime.context import ExperimentContext
from trading_runtime.backtest.runtime.prometheus_metrics import PrometheusMetricsClient

LOGGER = logging.getLogger(__name__)


class ExperimentFinalizer:
    """
    Finalizes an experiment after all of its segments have completed.

    Responsibilities:
      - write experiment_metadata.json
      - write the _DONE marker
      - push experiment-level gauges to Prometheus (best effort)
    """

    def finalize(self, *, ctx: ExperimentContext) -> None:
        """
        Write metadata and the _DONE marker into the scratch experiment
        directory, then emit Prometheus gauges.

        The experiment counts as failed as soon as a single segment failed;
        metrics failures are logged but never propagate.
        """
        finished_at = datetime.now(timezone.utc)
        status = "failed" if ctx.failed_segments > 0 else "success"

        duration_seconds = (
            finished_at - ctx.experiment_started_at
        ).total_seconds()

        metadata = {
            "schema_version": "1.0",
            "identity": {
                "experiment_id": ctx.experiment_id,
            },
            "lifecycle": {
                "status": status,
                "started_at": ctx.experiment_started_at.isoformat(),
                "finished_at": finished_at.isoformat(),
                "duration_seconds": duration_seconds,
            },
            "segments": {
                "expected": ctx.expected_segments,
                "completed": ctx.completed_segments,
                "failed": ctx.failed_segments,
            },
        }

        target_dir = ctx.scratch_experiment_dir
        target_dir.mkdir(parents=True, exist_ok=True)

        (target_dir / "experiment_metadata.json").write_text(
            json.dumps(metadata, indent=2),
            encoding="utf-8",
        )
        (target_dir / "_DONE").write_text(
            finished_at.isoformat(),
            encoding="utf-8",
        )

        self._push_metrics(
            ctx=ctx,
            status=status,
            duration_seconds=duration_seconds,
        )

    @staticmethod
    def _push_metrics(
        *,
        ctx: ExperimentContext,
        status: str,
        duration_seconds: float,
    ) -> None:
        """Best-effort Prometheus push; failures are logged, never raised."""
        metrics = PrometheusMetricsClient()
        if not metrics.is_enabled():
            return

        try:
            labels = {
                "experiment_id": ctx.experiment_id,
                "status": status,
            }

            metrics.push_gauge(
                name="backtest_experiment_duration_seconds",
                value=duration_seconds,
                labels=labels,
            )
            metrics.push_gauge(
                name="backtest_experiment_completed_segments",
                value=float(ctx.completed_segments),
                labels=labels,
            )
            metrics.push_gauge(
                name="backtest_experiment_failed_segments",
                value=float(ctx.failed_segments),
                labels=labels,
            )

            metrics.push_all(job="backtest_experiment")
        except Exception:
            # Metrics must never break finalization.
            LOGGER.exception("Prometheus push failed")


class ExperimentMetadataPersister:
    """Uploads the finalization artifacts to object storage."""

    # Artifacts produced by ExperimentFinalizer that should be persisted.
    _ARTIFACTS = ("experiment_metadata.json", "_DONE")

    def __init__(
        self,
        *,
        bucket: str,
        prefix: str = "backtests",
    ) -> None:
        self._s3 = OCIObjectStorageS3Shim(region="eu-frankfurt-1")
        self._bucket = bucket
        self._prefix = prefix

    def persist(
        self,
        *,
        experiment_id: str,
        experiment_dir: Path,
    ) -> None:
        """
        Upload each finalization artifact under <prefix>/<experiment_id>/.

        Missing artifacts are skipped silently (best effort).
        """
        destination = f"{self._prefix}/{experiment_id}"

        for name in self._ARTIFACTS:
            source = experiment_dir / name
            if not source.exists():
                continue

            with source.open("rb") as fh:
                self._s3.put_object(
                    bucket=self._bucket,
                    key=f"{destination}/{name}",
                    body=fh,
                )


def _cleanup_scratch(*, experiment_id: str, scratch_root: Path) -> None:
    """
    Remove all scratch data for this workflow + experiment.

    This is safe to call ONLY after successful finalization.

    Raises:
        RuntimeError: if ARGO_WORKFLOW_UID is missing from the environment.
    """
    workflow_uid = os.environ.get("ARGO_WORKFLOW_UID")
    if not workflow_uid:
        raise RuntimeError("ARGO_WORKFLOW_UID is not set")

    for stale_dir in (
        scratch_root / "sweeps" / workflow_uid,
        scratch_root / experiment_id,
    ):
        if stale_dir.exists():
            shutil.rmtree(stale_dir)


def main() -> None:
    """CLI entrypoint: finalize the experiment, persist, then clean up."""
    parser = argparse.ArgumentParser("finalize experiment")

    parser.add_argument("--experiment-id", type=str, required=True)
    parser.add_argument("--expected-segments", type=int, required=True)
    parser.add_argument("--completed-segments", type=int, required=True)
    parser.add_argument("--failed-segments", type=int, required=True)
    parser.add_argument(
        "--experiment-started-at",
        type=str,
        required=True,
        help="ISO-8601 timestamp (UTC)",
    )
    parser.add_argument("--scratch-root", type=Path, required=True)

    args = parser.parse_args()

    ctx = ExperimentContext(
        experiment_id=args.experiment_id,
        expected_segments=args.expected_segments,
        completed_segments=args.completed_segments,
        failed_segments=args.failed_segments,
        experiment_started_at=datetime.fromisoformat(args.experiment_started_at),
        scratch_root=args.scratch_root,
    )

    ExperimentFinalizer().finalize(ctx=ctx)

    ExperimentMetadataPersister(bucket="data").persist(
        experiment_id=ctx.experiment_id,
        experiment_dir=ctx.scratch_experiment_dir,
    )

    _cleanup_scratch(
        experiment_id=ctx.experiment_id,
        scratch_root=ctx.scratch_root,
    )


if __name__ == "__main__":
    main()
TYPE_CHECKING: + from trading_runtime.backtest.runtime.context import SegmentContext + +LOGGER = logging.getLogger(__name__) + + +class MlflowSegmentLogger: + """Logs segment-level health & progress information to MLflow. + + Tracking is configured via environment variables (recommended for Kubernetes): + - MLFLOW_TRACKING_URI: HTTP(S) address of the MLflow tracking server. + Example: http://mlflow.ml.svc.cluster.local:5000 + + This logger is best-effort. Callers should catch exceptions and continue. + """ + + def __init__(self) -> None: + tracking_uri = os.environ.get("MLFLOW_TRACKING_URI") + if tracking_uri: + mlflow.set_tracking_uri(tracking_uri) + + def log( + self, + *, + ctx: SegmentContext, + duration_seconds: float, + status: str, + ) -> None: + """Log segment metadata as MLflow parameters/metrics/tags.""" + + # mlflow.set_experiment creates the experiment if it does not exist and + # avoids an explicit get/create race. + mlflow.set_experiment(ctx.experiment_id) + + with mlflow.start_run(run_name=ctx.segment_id): + # Parameters (stable, comparable) + mlflow.log_param("expected_sweeps", ctx.expected_sweeps) + mlflow.log_param("completed_sweeps", ctx.completed_sweeps) + mlflow.log_param("failed_sweeps", ctx.failed_sweeps) + + # Metrics + mlflow.log_metric("duration_seconds", duration_seconds) + + # Tags (UI / filtering) + mlflow.set_tag("status", status) + mlflow.set_tag("experiment_id", ctx.experiment_id) + mlflow.set_tag("segment_id", ctx.segment_id) + + LOGGER.info( + "MLflow segment log submitted", + extra={ + "experiment_id": ctx.experiment_id, + "segment_id": ctx.segment_id, + "status": status, + }, + ) \ No newline at end of file diff --git a/trading_runtime/backtest/runtime/prometheus_metrics.py b/trading_runtime/backtest/runtime/prometheus_metrics.py new file mode 100644 index 0000000..39ccda9 --- /dev/null +++ b/trading_runtime/backtest/runtime/prometheus_metrics.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +import json +import logging 
class PrometheusMetricsClient:
    """Minimal Prometheus Pushgateway client for batch-style jobs.

    Expected environment:
    - PROMETHEUS_PUSHGATEWAY_URL: URL to the Pushgateway.
      Example: http://pushgateway.monitoring.svc.cluster.local:9091

    Optional:
    - PROMETHEUS_PUSHGATEWAY_GROUPING_KEY_JSON: JSON object used as grouping key.
      If not set, metrics are grouped only by the 'job' argument, which often
      causes pushes from different pods to overwrite each other.

      Example:
          {"workflow_uid": "ARGO_WORKFLOW_UID"}

    This client is intentionally best-effort: callers should treat it as a
    side-effect and never fail the workflow because of metrics delivery.
    """

    def __init__(self) -> None:
        self._pushgateway_url = os.environ.get("PROMETHEUS_PUSHGATEWAY_URL")
        self._grouping_key = self._load_grouping_key()
        self._registry = CollectorRegistry()
        # Collectors already registered on self._registry, keyed by metric
        # name. prometheus_client raises ValueError on duplicate registration,
        # so repeated push_gauge() calls for the same name must reuse the
        # existing Gauge instead of creating a new one.
        self._gauges = {}

    def is_enabled(self) -> bool:
        """True when a Pushgateway URL is configured."""
        return self._pushgateway_url is not None

    @staticmethod
    def _load_grouping_key() -> dict[str, str]:
        """Parse the optional grouping-key JSON; any invalid input yields {}."""
        raw = os.environ.get("PROMETHEUS_PUSHGATEWAY_GROUPING_KEY_JSON")
        if not raw:
            return {}

        try:
            data = json.loads(raw)
        except json.JSONDecodeError:
            LOGGER.warning(
                "Invalid PROMETHEUS_PUSHGATEWAY_GROUPING_KEY_JSON; ignoring"
            )
            return {}

        if not isinstance(data, dict):
            return {}

        # Keep only str -> str pairs; anything else is silently dropped.
        grouping: dict[str, str] = {}
        for key, value in data.items():
            if isinstance(key, str) and isinstance(value, str):
                grouping[key] = value
        return grouping

    def push_gauge(
        self,
        *,
        name: str,
        value: float,
        labels: dict[str, str],
    ) -> None:
        """Stage a gauge sample in the local registry (no network I/O yet).

        Fix: the Gauge collector is created at most once per metric name.
        The previous implementation registered a fresh Gauge on every call,
        so pushing the same metric name twice raised
        ValueError("Duplicated timeseries in CollectorRegistry") from
        prometheus_client.
        """
        if not self._pushgateway_url:
            return

        gauge = self._gauges.get(name)
        if gauge is None:
            gauge = Gauge(
                name,
                documentation=name,
                labelnames=list(labels.keys()),
                registry=self._registry,
            )
            self._gauges[name] = gauge

        gauge.labels(**labels).set(value)

    def push_all(self, *, job: str) -> None:
        """Push every staged metric to the Pushgateway in one request."""
        if not self._pushgateway_url:
            return

        push_to_gateway(
            gateway=self._pushgateway_url,
            job=job,
            registry=self._registry,
            grouping_key=self._grouping_key,
        )

        LOGGER.info(
            "Prometheus metrics pushed",
            extra={"job": job, "grouping_key": self._grouping_key},
        )
+ """ + data_dir = ctx.scratch_data_dir + ready_marker = data_dir / "_READY" + + if ready_marker.exists(): + return + + data_dir.mkdir(parents=True, exist_ok=True) + + for key in ctx.file_keys: + filename = Path(key).name + target_path = data_dir / filename + + if target_path.exists(): + continue + + self._s3.download_to_file( + bucket=self._bucket, + key=key, + destination=target_path, + ) + + ready_marker.touch() + + +class SweepEngineRunner: + """ + Runs exactly one HFT backtest sweep. + + One runner instance == one sweep == one engine.run(). + """ + + def __init__( + self, + *, + engine_cfg: HftEngineConfig, + strategy_cfg: StrategyConfig, + risk_cfg: RiskConfig, + ) -> None: + self._engine_cfg = engine_cfg + self._strategy_cfg = strategy_cfg + self._risk_cfg = risk_cfg + + def run(self, ctx: SweepContext) -> dict[str, Any]: + """ + Execute the backtest for this sweep. + + Returns lightweight metadata about the run. + """ + results_dir = ctx.scratch_results_dir + results_dir.mkdir(parents=True, exist_ok=True) + + # IMPORTANT: + # Engine expects a FIXED list of local file paths. + data_files = [ + str(ctx.scratch_data_dir / Path(key).name) + for key in ctx.file_keys + ] + + engine_cfg = self._build_engine_cfg(data_files, results_dir) + + # Defensive: numpy will not create parent directories for output files. + Path(engine_cfg.stats_npz_path).parent.mkdir(parents=True, exist_ok=True) + + backtest_cfg = HftBacktestConfig( + # Keep IDs filesystem-safe. Some engines/libraries may use the ID + # as part of output paths. + id=f"{ctx.experiment_id}__{ctx.segment_id}__{ctx.sweep_id}", + description="sweep execution", + engine_cfg=engine_cfg, + strategy_cfg=self._strategy_cfg, + risk_cfg=self._risk_cfg, + ) + + engine = HftBacktestEngine(backtest_cfg) + + # Ensure any relative writes performed by the engine end up inside the + # scratch subtree of this sweep. 
+ previous_cwd = Path.cwd() + try: + os.chdir(ctx.scratch_segment_dir) + result = engine.run() + finally: + os.chdir(previous_cwd) + + done_marker = ctx.scratch_results_dir / "_DONE" + done_marker.touch() + + return { + "experiment_id": ctx.experiment_id, + "segment_id": ctx.segment_id, + "sweep_id": ctx.sweep_id, + "stats_file": result.stats_file, + "extra_metadata": result.extra_metadata, + } + + def _build_engine_cfg( + self, + data_files: list[str], + results_dir: Path, + ) -> HftEngineConfig: + """ + Clone the base engine config and inject sweep-specific paths. + """ + cfg = replace(self._engine_cfg) + + # THIS is the critical binding to the engine semantics + cfg.data_files = data_files + cfg.stats_npz_path = str(results_dir / "stats.npz") + cfg.event_bus_path = str(results_dir / "events.jsonl") + + return cfg + + +class SweepMetadataWriter: + """Writes immutable metadata.json for a completed sweep.""" + + def __init__(self, *, runner: str) -> None: + self._runner = runner + + @staticmethod + def _read_pyproject_project_info(pyproject_path: Path) -> tuple[str | None, str | None]: + """Read [project] name/version from pyproject.toml. + + This is used as a fallback when the project is executed from source without + being installed as a distribution (importlib.metadata won't find it). 
+ """ + + try: + raw = pyproject_path.read_bytes() + except OSError: + return (None, None) + + try: + data = tomllib.loads(raw.decode("utf-8")) + except (UnicodeDecodeError, tomllib.TOMLDecodeError): + return (None, None) + + if "project" not in data: + return (None, None) + + project = data["project"] + name = project["name"] if isinstance(project, dict) and "name" in project else None + version = project["version"] if isinstance(project, dict) and "version" in project else None + + if not isinstance(name, str): + name = None + if not isinstance(version, str): + version = None + + return (name, version) + + @staticmethod + def _guess_repo_root(start: Path) -> Path | None: + """Walk upwards until pyproject.toml is found.""" + + current = start + for _ in range(20): + candidate = current / "pyproject.toml" + if candidate.exists(): + return current + if current.parent == current: + return None + current = current.parent + return None + + @classmethod + def _resolve_project_metadata(cls) -> dict[str, str | None]: + """Resolve project name/version without failing the sweep.""" + + repo_root = cls._guess_repo_root(Path(__file__).resolve()) + pyproject_path = (repo_root / "pyproject.toml") if repo_root is not None else None + + name_from_pyproject: str | None = None + version_from_pyproject: str | None = None + + if pyproject_path is not None: + name_from_pyproject, version_from_pyproject = cls._read_pyproject_project_info( + pyproject_path + ) + + distribution_name = name_from_pyproject or "trading-framework" + + version: str | None + source: str + try: + version = importlib.metadata.version(distribution_name) + source = "importlib.metadata" + except importlib.metadata.PackageNotFoundError: + version = version_from_pyproject + source = "pyproject.toml" if version is not None else "unknown" + + return { + "name": distribution_name, + "version": version, + "source": source, + } + + def write( + self, + *, + ctx: SweepContext, + status: str, + started_at: datetime, + 
finished_at: datetime, + ) -> None: + duration_seconds = (finished_at - started_at).total_seconds() + + project_meta = self._resolve_project_metadata() + + metadata = { + "schema_version": "1.0", + "identity": { + "experiment_id": ctx.experiment_id, + "segment_id": ctx.segment_id, + "sweep_id": ctx.sweep_id, + }, + "lifecycle": { + "status": status, + "started_at": started_at.isoformat(), + "finished_at": finished_at.isoformat(), + "duration_seconds": duration_seconds, + "runner": self._runner, + }, + "parameters": ctx.parameters, + "code": { + "git": { + "commit": os.environ.get("GIT_COMMIT"), + "dirty": os.environ.get("GIT_DIRTY") == "1", + "branch": os.environ.get("GIT_BRANCH"), + }, + "project": { + "name": project_meta["name"], + "version": project_meta["version"], + "version_source": project_meta["source"], + }, + }, + "environment": { + "python": sys.version.split()[0], + "framework": platform.platform(), + "container_image": os.environ.get("IMAGE_TAG"), + }, + "artifacts": { + "stats": "stats.npz", + "events": "events.jsonl", + }, + "links": {}, + } + + target = ctx.scratch_results_dir / "sweep_metadata.json" + target.write_text(json.dumps(metadata, indent=2), encoding="utf-8") + + +class SweepResultPersister: + """ + Persists sweep results from scratch to S3. + + Upload is atomic at sweep level via a _DONE marker. 
+ """ + + def __init__( + self, + *, + bucket: str, + prefix: str = "backtests", + ) -> None: + self._s3 = OCIObjectStorageS3Shim(region="eu-frankfurt-1") + self._bucket = bucket + self._prefix = prefix.rstrip("/") + + def persist(self, ctx: SweepContext) -> None: + results_dir = ctx.scratch_results_dir + done_marker = results_dir / "_DONE" + + if not results_dir.exists(): + raise RuntimeError(f"Results directory does not exist: {results_dir}") + + if not done_marker.exists(): + raise RuntimeError( + f"Sweep results not finalized (_DONE missing): {results_dir}" + ) + + s3_base = self._s3_base_scratch_prefix(ctx) + + for path in results_dir.iterdir(): + if path.is_dir(): + continue + + key = f"{s3_base}/{path.name}" + self._upload_file(path, key) + + def _upload_file(self, path: Path, key: str) -> None: + with path.open("rb") as fh: + self._s3.put_object( + bucket=self._bucket, + key=key, + body=fh, + ) + + def _s3_base_scratch_prefix(self, ctx: SweepContext) -> str: + return ( + f"{self._prefix}/" + f"{ctx.experiment_id}/" + f"{ctx.segment_id}/" + f"{ctx.sweep_id}" + ) + + +class SweepCleaner: + """ + Handles safe cleanup of sweep scratch directories. + + Invariant: + - Only sweep-private state may be removed during parallel execution. + - Segment-level directories are shared across sweeps and must not be + deleted by a single sweep. + + Cleanup is allowed ONLY after successful persistence. + """ + + def __init__(self, *, keep_scratch: bool) -> None: + self._keep_scratch = keep_scratch + + def cleanup(self, ctx: SweepContext) -> None: + """ + Remove the sweep's private scratch subtree. + + This deletes only: + + ///results// + + It intentionally does NOT delete the segment directory itself, since that + directory is shared by all sweeps in the segment (parallel execution). 
+ """ + if self._keep_scratch: + return + + sweep_results_dir = ctx.scratch_results_dir + if not sweep_results_dir.exists(): + return + + self._validate_target(ctx, sweep_results_dir) + shutil.rmtree(sweep_results_dir) + + @staticmethod + def _validate_target(ctx: SweepContext, target_dir: Path) -> None: + """ + Guard rails against accidental deletion of shared directories. + + This method raises if the computed target does not match the expected + sweep results layout. + """ + if target_dir.name != ctx.sweep_id: + raise RuntimeError( + "Refusing to delete: target_dir does not match sweep_id " + f"({target_dir} vs {ctx.sweep_id})" + ) + + if target_dir.parent.name != "results": + raise RuntimeError( + "Refusing to delete: target_dir is not under a 'results' folder " + f"({target_dir})" + ) + + segment_dir = ctx.scratch_segment_dir + try: + resolved_target = target_dir.resolve() + resolved_segment = segment_dir.resolve() + except FileNotFoundError: + # If a parent directory was removed concurrently, treat as no-op. + return + + if not resolved_target.is_relative_to(resolved_segment): + raise RuntimeError( + "Refusing to delete: target_dir is outside scratch_segment_dir " + f"({resolved_target} not under {resolved_segment})" + ) + + +def main() -> None: + parser = argparse.ArgumentParser("run single backtest sweep") + parser.add_argument("--context", type=Path, required=True) + parser.add_argument("--scratch-root", type=Path, required=True) + args = parser.parse_args() + + # ------------------------------------------------------------------ + # Load sweep context + # ------------------------------------------------------------------ + + if not args.context.exists(): + raise FileNotFoundError( + f"SweepContext file does not exist: {args.context}. " + "Ensure it is mounted as an Argo artifact." 
+ ) + + ctx = SweepContext(**json.loads(args.context.read_text(encoding="utf-8"))) + ctx = replace(ctx, scratch_root=args.scratch_root) + + # ------------------------------------------------------------------ + # Setup + # ------------------------------------------------------------------ + + materializer = SweepMaterializer(bucket="data") + materializer.materialize(ctx) + + engine_cfg = HftEngineConfig(**ctx.parameters["engine"]) + strategy_cfg = StrategyConfig(**ctx.parameters["strategy"]) + risk_cfg = RiskConfig(**ctx.parameters["risk"]) + + runner = SweepEngineRunner( + engine_cfg=engine_cfg, + strategy_cfg=strategy_cfg, + risk_cfg=risk_cfg, + ) + + persister = SweepResultPersister(bucket="data") + + metadata_writer = SweepMetadataWriter(runner="argo") + cleaner = SweepCleaner(keep_scratch=False) + + # ------------------------------------------------------------------ + # Execute sweep + # ------------------------------------------------------------------ + + started_at = datetime.now(timezone.utc) + status = "success" + + try: + print(runner.run(ctx)) + except Exception: + status = "failed" + raise + else: + finished_at = datetime.now(timezone.utc) + + # Metadata is ALWAYS written + metadata_writer.write( + ctx=ctx, + status=status, + started_at=started_at, + finished_at=finished_at, + ) + + # Persist results ONLY on success + persister.persist(ctx) + finally: + # Sweep-level cleanup is ALWAYS allowed + cleaner.cleanup(ctx) + + +if __name__ == "__main__": + main() diff --git a/trading_runtime/backtest/runtime/segment_finalize_entrypoint.py b/trading_runtime/backtest/runtime/segment_finalize_entrypoint.py new file mode 100644 index 0000000..063de9a --- /dev/null +++ b/trading_runtime/backtest/runtime/segment_finalize_entrypoint.py @@ -0,0 +1,194 @@ +from __future__ import annotations + +import argparse +import json +import logging +from datetime import datetime, timezone +from pathlib import Path + +from trading_runtime.backtest.io.s3_adapter import 
class SegmentFinalizer:
    """
    Finalizes a segment after all sweeps have completed.

    Responsibilities:
    - write segment_metadata.json
    - write the _DONE marker
    - best-effort MLflow / Prometheus reporting
    """

    def finalize(self, *, ctx: SegmentContext) -> None:
        """Write segment-level metadata/marker files and report health."""
        finished_at = datetime.now(timezone.utc)
        status = "failed" if ctx.failed_sweeps > 0 else "success"
        duration = (finished_at - ctx.segment_started_at).total_seconds()

        metadata = {
            "schema_version": "1.0",
            "identity": {
                "experiment_id": ctx.experiment_id,
                "segment_id": ctx.segment_id,
            },
            "lifecycle": {
                "status": status,
                "started_at": ctx.segment_started_at.isoformat(),
                "finished_at": finished_at.isoformat(),
                "duration_seconds": duration,
            },
            "sweeps": {
                "expected": ctx.expected_sweeps,
                "completed": ctx.completed_sweeps,
                "failed": ctx.failed_sweeps,
            },
        }

        segment_dir = ctx.scratch_segment_dir
        segment_dir.mkdir(parents=True, exist_ok=True)

        (segment_dir / "segment_metadata.json").write_text(
            json.dumps(metadata, indent=2),
            encoding="utf-8",
        )
        (segment_dir / "_DONE").write_text(
            finished_at.isoformat(),
            encoding="utf-8",
        )

        # --- MLflow logging (side-effect only) ---
        try:
            MlflowSegmentLogger().log(
                ctx=ctx,
                duration_seconds=duration,
                status=status,
            )
        except Exception:
            LOGGER.exception("MLflow logging failed")

        # --- Prometheus metrics (side-effect only) ---
        metrics = PrometheusMetricsClient()
        if metrics.is_enabled():
            try:
                labels = {
                    "experiment_id": ctx.experiment_id,
                    "segment_id": ctx.segment_id,
                    "status": status,
                }

                for name, value in (
                    ("backtest_segment_duration_seconds", duration),
                    ("backtest_segment_completed_sweeps", float(ctx.completed_sweeps)),
                    ("backtest_segment_failed_sweeps", float(ctx.failed_sweeps)),
                ):
                    metrics.push_gauge(name=name, value=value, labels=labels)

                metrics.push_all(job="backtest_segment")
            except Exception:
                LOGGER.exception("Prometheus push failed")


class SegmentMetadataPersister:
    """Uploads segment-level metadata files to object storage."""

    def __init__(
        self,
        *,
        bucket: str,
        prefix: str = "backtests",
    ) -> None:
        self._s3 = OCIObjectStorageS3Shim(region="eu-frankfurt-1")
        self._bucket = bucket
        self._prefix = prefix

    def persist(
        self,
        *,
        experiment_id: str,
        segment_id: str,
        segment_dir: Path,
    ) -> None:
        """Upload metadata + _DONE marker; files that are absent are skipped."""
        base = f"{self._prefix}/{experiment_id}/{segment_id}"

        for name in ("segment_metadata.json", "_DONE"):
            source = segment_dir / name
            if not source.exists():
                continue

            with source.open("rb") as fh:
                self._s3.put_object(
                    bucket=self._bucket,
                    key=f"{base}/{name}",
                    body=fh,
                )


def main() -> None:
    """CLI entrypoint: finalize one segment and persist its metadata."""
    parser = argparse.ArgumentParser("finalize segment")
    parser.add_argument("--experiment-id", type=str, required=True)
    parser.add_argument("--segment-id", type=str, required=True)
    parser.add_argument("--expected-sweeps", type=int, required=True)
    parser.add_argument("--completed-sweeps", type=int, required=True)
    parser.add_argument("--failed-sweeps", type=int, required=True)
    parser.add_argument(
        "--segment-started-at",
        type=str,
        required=True,
        help="ISO-8601 timestamp (UTC)",
    )
    parser.add_argument("--scratch-root", type=Path, required=True)
    args = parser.parse_args()

    ctx = SegmentContext(
        experiment_id=args.experiment_id,
        segment_id=args.segment_id,
        expected_sweeps=args.expected_sweeps,
        completed_sweeps=args.completed_sweeps,
        failed_sweeps=args.failed_sweeps,
        segment_started_at=datetime.fromisoformat(args.segment_started_at),
        scratch_root=args.scratch_root,
    )

    SegmentFinalizer().finalize(ctx=ctx)

    SegmentMetadataPersister(bucket="data").persist(
        experiment_id=ctx.experiment_id,
        segment_id=ctx.segment_id,
        segment_dir=ctx.scratch_segment_dir,
    )


if __name__ == "__main__":
    main()
+""" +from __future__ import annotations + +import json +from pathlib import Path +from typing import Any + + +class FileRecorderSink: + """Writes each event as a JSON line to a file.""" + + def __init__(self, path: str | Path) -> None: + self._path = Path(path) + self._path.parent.mkdir(parents=True, exist_ok=True) + self._fh = self._path.open("a", encoding="utf-8") + self._closed = False + + def on_event(self, event: Any) -> None: + record = event.__dict__ if hasattr(event, "__dict__") else {"event": str(event)} + self._fh.write(json.dumps(record) + "\n") + self._fh.flush() + + def close(self) -> None: + if self._closed: + return + self._fh.flush() + self._fh.close() + self._closed = True From ca05be101cd0d48819b11a723e7df94881264956 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Wed, 29 Apr 2026 15:21:32 +0200 Subject: [PATCH 04/36] m1-slice2b-clean-core-runtime --- pyproject.toml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index a538ec5..49b83d8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,6 +18,13 @@ classifiers = [ "Operating System :: OS Independent" ] +dependencies = [ + "hftbacktest>=2,<3", + "mlflow>=3,<4", + "oci>=2,<3", + "prometheus-client>=0.24,<1", +] + [project.optional-dependencies] dev = [ "pytest>=9,<10", From 1c7a5c09172c45cd1347fce7eefdfd2d916b08b0 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Wed, 29 Apr 2026 14:28:35 +0000 Subject: [PATCH 05/36] m1 slice2: done --- requirements-dev.txt | 18 +++++++++++++----- requirements.txt | 18 +++++++++++++----- scripts/compile-requirements.sh | 4 ++-- 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 9c929cb..f88d77f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -99,7 +99,9 @@ gunicorn==23.0.0 h11==0.16.0 # via uvicorn hftbacktest==2.4.4 - # via trading-framework + # via + # trading-framework + # trading-runtime (pyproject.toml) holoviews==1.22.1 # via hftbacktest huey==2.6.0 
@@ -159,7 +161,9 @@ mdit-py-plugins==0.5.0 mdurl==0.1.2 # via markdown-it-py mlflow==3.9.0 - # via trading-framework + # via + # trading-framework + # trading-runtime (pyproject.toml) mlflow-skinny==3.9.0 # via mlflow mlflow-tracing==3.9.0 @@ -191,7 +195,9 @@ numpy==2.2.6 # skops # trading-framework oci==2.167.1 - # via trading-framework + # via + # trading-framework + # trading-runtime (pyproject.toml) opentelemetry-api==1.39.1 # via # mlflow-skinny @@ -247,7 +253,9 @@ polars-runtime-32==1.38.1 prettytable==3.17.0 # via skops prometheus-client==0.24.1 - # via trading-framework + # via + # trading-framework + # trading-runtime (pyproject.toml) protobuf==6.33.5 # via # databricks-sdk @@ -352,7 +360,7 @@ tqdm==4.67.3 # via # panel # trading-framework -trading-framework @ git+https://github.com/trading-engineering/trading-framework.git@934d332c21bef56fa76c19f477143d8d438238c2 +trading-framework @ git+https://github.com/TradingChassis/core.git@95fff6a5e8bb52856efe37632187ee450fa52a3e # via -r _git_deps.in typing-extensions==4.15.0 # via diff --git a/requirements.txt b/requirements.txt index 4259c45..7f2013f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -96,7 +96,9 @@ gunicorn==23.0.0 h11==0.16.0 # via uvicorn hftbacktest==2.4.4 - # via trading-framework + # via + # trading-framework + # trading-runtime (pyproject.toml) holoviews==1.22.1 # via hftbacktest huey==2.6.0 @@ -150,7 +152,9 @@ mdit-py-plugins==0.5.0 mdurl==0.1.2 # via markdown-it-py mlflow==3.9.0 - # via trading-framework + # via + # trading-framework + # trading-runtime (pyproject.toml) mlflow-skinny==3.9.0 # via mlflow mlflow-tracing==3.9.0 @@ -178,7 +182,9 @@ numpy==2.2.6 # skops # trading-framework oci==2.167.1 - # via trading-framework + # via + # trading-framework + # trading-runtime (pyproject.toml) opentelemetry-api==1.39.1 # via # mlflow-skinny @@ -229,7 +235,9 @@ polars-runtime-32==1.38.1 prettytable==3.17.0 # via skops prometheus-client==0.24.1 - # via trading-framework + # via + # 
trading-framework + # trading-runtime (pyproject.toml) protobuf==6.33.5 # via # databricks-sdk @@ -328,7 +336,7 @@ tqdm==4.67.3 # via # panel # trading-framework -trading-framework @ git+https://github.com/trading-engineering/trading-framework.git@934d332c21bef56fa76c19f477143d8d438238c2 +trading-framework @ git+https://github.com/TradingChassis/core.git@95fff6a5e8bb52856efe37632187ee450fa52a3e # via -r _git_deps.in typing-extensions==4.15.0 # via diff --git a/scripts/compile-requirements.sh b/scripts/compile-requirements.sh index 1bfdcf4..495a7a8 100755 --- a/scripts/compile-requirements.sh +++ b/scripts/compile-requirements.sh @@ -9,7 +9,7 @@ set +a : "${TRADING_FRAMEWORK_COMMIT:?Missing TRADING_FRAMEWORK_COMMIT in .env}" echo "🔧 Compiling requirements with pip-tools..." -echo "📌 Pinning trading-framework at commit: $TRADING_FRAMEWORK_COMMIT" +echo "📌 Pinning core at commit: $TRADING_FRAMEWORK_COMMIT" python -m pip install --upgrade \ "pip>=23.3,<25" \ @@ -19,7 +19,7 @@ python -m pip install --upgrade \ # Temporary requirements input for git dependency cat > _git_deps.in < Date: Wed, 29 Apr 2026 15:22:15 +0000 Subject: [PATCH 06/36] m1 slice3: move examples --- examples/__init__.py | 0 examples/argo/argo.json | 106 +++++++++++++++ examples/local/__init__.py | 0 examples/local/backtest.py | 84 ++++++++++++ examples/local/local.json | 88 ++++++++++++ examples/local/oci.config.example | 6 + examples/strategies/__init__.py | 0 examples/strategies/debug_strategy.py | 184 ++++++++++++++++++++++++++ 8 files changed, 468 insertions(+) create mode 100644 examples/__init__.py create mode 100644 examples/argo/argo.json create mode 100644 examples/local/__init__.py create mode 100644 examples/local/backtest.py create mode 100644 examples/local/local.json create mode 100644 examples/local/oci.config.example create mode 100644 examples/strategies/__init__.py create mode 100644 examples/strategies/debug_strategy.py diff --git a/examples/__init__.py b/examples/__init__.py new 
file mode 100644 index 0000000..e69de29 diff --git a/examples/argo/argo.json b/examples/argo/argo.json new file mode 100644 index 0000000..0e30d6e --- /dev/null +++ b/examples/argo/argo.json @@ -0,0 +1,106 @@ +{ + "id": "debug_strategy_v0", + "description": "Debug Strategy V0", + + "engine": { + "initial_snapshot": null, + "data_files": null, + + "instrument": "BTC_USDC-PERPETUAL", + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1, + + "maker_fee_rate": 0.0, + "taker_fee_rate": 0.0, + + "entry_latency_ns": 10000000, + "response_latency_ns": 10000000, + + "use_risk_adverse_queue_model": true, + "partial_fill_venue": true, + + "max_steps": 5000000, + + "last_trades_capacity": 10, + "max_price_tick_levels": 20, + + "roi_lb": 40000, + "roi_ub": 80000, + + "stats_npz_path": null, + "event_bus_path": null + }, + + "risk": { + "scope": "debug_strategy_v0", + + "position_limits": { + "currency": "USDC", + "max_position": 10 + }, + + "notional_limits": { + "currency": "USDC", + "max_gross_notional": 200000.0, + "max_single_order_notional": 10000.0 + }, + + "quote_limits": { + "currency": "USDC", + "max_gross_quote_notional": 20000.0, + "max_net_quote_notional": 10000.0, + "max_active_quotes": 20000 + }, + + "order_rate_limits": { + "max_orders_per_second": 20, + "max_cancels_per_second": 20 + }, + + "max_loss": { + "currency": "USDC", + "max_drawdown": -2000.0, + "rolling_loss": -200.0, + "rolling_loss_window": 60 + }, + + "extra": { + "venue_policy": { + "min_order_notional": 5.0, + "post_only_mode": "reject" + } + } + }, + + "strategy": { + "class_path": "examples.strategies.debug_strategy:DebugStrategyV0", + "spread": 5.0, + "order_qty": 0.1, + "use_price_tick_levels": 3, + "post_only": true + }, + + "experiment": { + "start_ts_ns": 1636035200000000000, + "end_ts_ns": 1836121600000000000, + "symbol": "BTC_USDC-PERPETUAL", + + "venue": "deribit", + "datatype": "mixed", + + "segmentation": { + "max_segment_gb": 0.00001 + }, + + "sweeps": { + "strategy.spread": { 
+ "start": 2.0, + "stop": 3.0, + "step": 1.0 + }, + "strategy.order_qty": [0.1, 0.2] + } + } + +} diff --git a/examples/local/__init__.py b/examples/local/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/local/backtest.py b/examples/local/backtest.py new file mode 100644 index 0000000..cd723fe --- /dev/null +++ b/examples/local/backtest.py @@ -0,0 +1,84 @@ +"""Command-line interface for running backtests in devcontainer.""" + +from __future__ import annotations + +import argparse +import json +import sys +from pathlib import Path +from typing import TYPE_CHECKING + +# Enable importing plugin-style modules outside the core package (e.g. examples/) +if __name__ == "__main__" or True: + PROJECT_ROOT = Path(__file__).resolve().parents[2] + sys.path.insert(0, str(PROJECT_ROOT)) + +if TYPE_CHECKING: + from trading_runtime.backtest.engine.engine_base import BacktestResult + +from trading_framework.core.risk.risk_config import RiskConfig +from trading_framework.strategies.strategy_config import StrategyConfig +from trading_runtime.backtest.engine.hft_engine import ( + HftBacktestConfig, + HftBacktestEngine, + HftEngineConfig, +) + + +def load_config(path: str) -> HftBacktestConfig: + """Load a backtest configuration from a JSON file.""" + config_path = Path(path) + raw_json = json.loads(config_path.read_text(encoding="utf-8")) + + try: + engine_raw = raw_json["engine"] + strategy_raw = raw_json["strategy"] + risk_raw = raw_json["risk"] + except KeyError as exc: + raise ValueError( + f"Missing top-level section in {config_path}: {exc}" + ) from exc + + engine_cfg = HftEngineConfig(**engine_raw) + strategy_cfg = StrategyConfig(**strategy_raw) + risk_cfg = RiskConfig(**risk_raw) + + return HftBacktestConfig( + id=raw_json["id"], + description=raw_json.get("description", ""), + engine_cfg=engine_cfg, + strategy_cfg=strategy_cfg, + risk_cfg=risk_cfg, + ) + + +def main() -> None: + """Entry point for the backtest command-line interface.""" + parser = 
argparse.ArgumentParser( + description="Run a strategy-based hftbacktest backtest." + ) + parser.add_argument( + "--config", + type=str, + required=True, + help="Path to JSON config file (HftBacktestConfig).", + ) + args = parser.parse_args() + + cfg = load_config(args.config) + engine = HftBacktestEngine(cfg) + + print("Backtest started.") + result: BacktestResult = engine.run() + + print("Backtest finished.") + print(f" id: {result.id}") + print(f" stats_npz: {result.stats_file}") + if result.extra_metadata is not None: + print(" metadata:") + for key, value in result.extra_metadata.items(): + print(f" {key}: {value}") + + +if __name__ == "__main__": + main() diff --git a/examples/local/local.json b/examples/local/local.json new file mode 100644 index 0000000..7cdb2de --- /dev/null +++ b/examples/local/local.json @@ -0,0 +1,88 @@ +{ + "id": "debug_strategy_v0", + "description": "Debug Strategy V0", + + "engine": { + "initial_snapshot": null, + "data_files": [ + "/workspaces/core-runtime/tests/data/parts/part-000.npz", + "/workspaces/core-runtime/tests/data/parts/part-001.npz", + "/workspaces/core-runtime/tests/data/parts/part-002.npz" + ], + + "instrument": "BTC_USDC-PERPETUAL", + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1, + + "maker_fee_rate": 0.0, + "taker_fee_rate": 0.0, + + "entry_latency_ns": 10000000, + "response_latency_ns": 10000000, + + "use_risk_adverse_queue_model": true, + "partial_fill_venue": true, + + "max_steps": 5000000, + + "last_trades_capacity": 10, + "max_price_tick_levels": 20, + + "roi_lb": 40000, + "roi_ub": 80000, + + "stats_npz_path": "/workspaces/core-runtime/tests/data/results/stats.npz", + "event_bus_path": "/workspaces/core-runtime/tests/data/results/events.json" + }, + + "risk": { + "scope": "debug_strategy_v0", + + "position_limits": { + "currency": "USDC", + "max_position": 10 + }, + + "notional_limits": { + "currency": "USDC", + "max_gross_notional": 200000.0, + "max_single_order_notional": 10000.0 + }, + + 
"quote_limits": { + "currency": "USDC", + "max_gross_quote_notional": 20000.0, + "max_net_quote_notional": 10000.0, + "max_active_quotes": 20000 + }, + + "order_rate_limits": { + "max_orders_per_second": 20, + "max_cancels_per_second": 20 + }, + + "max_loss": { + "currency": "USDC", + "max_drawdown": -2000.0, + "rolling_loss": -200.0, + "rolling_loss_window": 60 + }, + + "extra": { + "venue_policy": { + "min_order_notional": 5.0, + "post_only_mode": "reject" + } + } + }, + + "strategy": { + "class_path": "examples.strategies.debug_strategy:DebugStrategyV0", + "spread": 5.0, + "order_qty": 0.1, + "use_price_tick_levels": 3, + "post_only": true + } + +} diff --git a/examples/local/oci.config.example b/examples/local/oci.config.example new file mode 100644 index 0000000..031317e --- /dev/null +++ b/examples/local/oci.config.example @@ -0,0 +1,6 @@ +[DEFAULT] +user=ocid1.user.oc1..REPLACE_ME +tenancy=ocid1.tenancy.oc1..REPLACE_ME +region=eu-frankfurt-1 +fingerprint=aa:bb:cc:dd:REPLACE_ME +key_file=/absolute/path/to/.oci/oci_api_key.pem diff --git a/examples/strategies/__init__.py b/examples/strategies/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/examples/strategies/debug_strategy.py b/examples/strategies/debug_strategy.py new file mode 100644 index 0000000..4789159 --- /dev/null +++ b/examples/strategies/debug_strategy.py @@ -0,0 +1,184 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from trading_framework import ( + EngineContext, + GateDecision, + MarketEvent, + RiskConstraints, + StrategyState, + ) + +from trading_framework import ( + NewOrderIntent, + OrderIntent, + Price, + Quantity, + ReplaceOrderIntent, + SlotKey, + Strategy, + stable_slot_order_id, +) + +_SLOT_NAMESPACE = "debug_strategy_v0" + + +class DebugStrategyV0(Strategy): + """Very simple market making example strategy.""" + + def __init__( + self, + spread: float, + order_qty: float, + use_price_tick_levels: int, + post_only: 
bool, + ) -> None: + self.spread = spread + self.order_qty = order_qty + self.use_price_tick_levels = use_price_tick_levels + self.post_only = post_only + + self.intents_on_event: list[OrderIntent] = [] + self.intents_after_risk: list[OrderIntent] = [] + + def round_to_tick(self, price: float, tick: float) -> float: + if tick <= 0: + raise ValueError("tick must be positive") + return round(price / tick) * tick + + def on_feed( + self, + state: StrategyState, + event: MarketEvent, + engine_cfg: EngineContext, + constraints: RiskConstraints, + ) -> list[OrderIntent]: + """Feed-triggered logic (rc=2). Inputs are read-only for Strategy, otherwise considered a bug.""" + + self.intents_on_event = [] + + # NOTE: keep existing logic as-is for now; we will align field names/types later. + # This block is only to satisfy the new interface. + if not constraints.trading_enabled: + return self.intents_on_event + + if not event.is_book() or event.book is None: + return self.intents_on_event + + if not event.book.bids or not event.book.asks: + return self.intents_on_event + + best_bid = float(event.book.bids[0].price.value) + best_ask = float(event.book.asks[0].price.value) + mid = 0.5 * (best_bid + best_ask) + + tick = float(engine_cfg.tick_size) + tif = "POST_ONLY" if self.post_only else "GTC" + + num_levels = int(self.use_price_tick_levels) + if num_levels <= 0: + num_levels = 1 + + instrument = str(event.instrument) + + def is_slot_busy(client_order_id: str) -> bool: + return state.is_order_id_busy(instrument, client_order_id) + + def bid_price_for_level(level_index: int) -> float: + if level_index < len(event.book.bids): + px = float(event.book.bids[level_index].price.value) + else: + px = mid - (self.spread * 0.5) - (float(level_index) * tick) + return self.round_to_tick(px, tick) + + def ask_price_for_level(level_index: int) -> float: + if level_index < len(event.book.asks): + px = float(event.book.asks[level_index].price.value) + else: + px = mid + (self.spread * 0.5) + 
(float(level_index) * tick) + return self.round_to_tick(px, tick) + + intents: list[OrderIntent] = [] + + for level in range(num_levels): + bid_slot = SlotKey(instrument=instrument, side="buy", level_index=level) + ask_slot = SlotKey(instrument=instrument, side="sell", level_index=level) + + bid_id = stable_slot_order_id(bid_slot, namespace=_SLOT_NAMESPACE) + ask_id = stable_slot_order_id(ask_slot, namespace=_SLOT_NAMESPACE) + + bid_px = bid_price_for_level(level) + ask_px = ask_price_for_level(level) + + if is_slot_busy(bid_id): + intents.append( + ReplaceOrderIntent( + ts_ns_local=event.ts_ns_local, + instrument=instrument, + client_order_id=bid_id, + intent_type="replace", + order_type="limit", + side="buy", + intended_price=Price(currency="UNKNOWN", value=bid_px), + intended_qty=Quantity(value=self.order_qty, unit="contracts"), + ) + ) + else: + intents.append( + NewOrderIntent( + ts_ns_local=event.ts_ns_local, + instrument=instrument, + client_order_id=bid_id, + intent_type="new", + order_type="limit", + side="buy", + intended_price=Price(currency="UNKNOWN", value=bid_px), + intended_qty=Quantity(value=self.order_qty, unit="contracts"), + time_in_force=tif, + ) + ) + + if is_slot_busy(ask_id): + intents.append( + ReplaceOrderIntent( + ts_ns_local=event.ts_ns_local, + instrument=instrument, + client_order_id=ask_id, + intent_type="replace", + order_type="limit", + side="sell", + intended_price=Price(currency="UNKNOWN", value=ask_px), + intended_qty=Quantity(value=self.order_qty, unit="contracts"), + ) + ) + else: + intents.append( + NewOrderIntent( + ts_ns_local=event.ts_ns_local, + instrument=instrument, + client_order_id=ask_id, + intent_type="new", + order_type="limit", + side="sell", + intended_price=Price(currency="UNKNOWN", value=ask_px), + intended_qty=Quantity(value=self.order_qty, unit="contracts"), + time_in_force=tif, + ) + ) + + self.intents_on_event.extend(intents) + return self.intents_on_event + + def on_order_update( + self, + state: 
StrategyState, + engine_cfg: EngineContext, + constraints: RiskConstraints, + ) -> list[OrderIntent]: + """Order-update-triggered logic (rc=3). Inputs are read-only for Strategy, otherwise considered a bug.""" + return [] + + def on_risk_decision(self, decision: GateDecision) -> None: + self.intents_after_risk = decision.accepted_now From c4680b60d6ff33326dc48952dd5b8f704700663a Mon Sep 17 00:00:00 2001 From: bxvtr Date: Wed, 29 Apr 2026 19:58:37 +0200 Subject: [PATCH 07/36] m1 slice3: done --- trading_runtime/local/local.json | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/trading_runtime/local/local.json b/trading_runtime/local/local.json index b596453..eb2d32d 100644 --- a/trading_runtime/local/local.json +++ b/trading_runtime/local/local.json @@ -5,9 +5,9 @@ "engine": { "initial_snapshot": null, "data_files": [ - "/workspaces/trading-runtime/tests/data/parts/part-000.npz", - "/workspaces/trading-runtime/tests/data/parts/part-001.npz", - "/workspaces/trading-runtime/tests/data/parts/part-002.npz" + "/workspaces/core-runtime/tests/data/parts/part-000.npz", + "/workspaces/core-runtime/tests/data/parts/part-001.npz", + "/workspaces/core-runtime/tests/data/parts/part-002.npz" ], "instrument": "BTC_USDC-PERPETUAL", @@ -32,8 +32,8 @@ "roi_lb": 40000, "roi_ub": 80000, - "stats_npz_path": "/workspaces/trading-runtime/tests/data/results/stats.npz", - "event_bus_path": "/workspaces/trading-runtime/tests/data/results/events.json" + "stats_npz_path": "/workspaces/core-runtime/tests/data/results/stats.npz", + "event_bus_path": "/workspaces/core-runtime/tests/data/results/events.json" }, "risk": { From f52d79f9973fb1e2f6f97a68f35ea13df8e16610 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sat, 2 May 2026 16:10:27 +0000 Subject: [PATCH 08/36] m2 p3 sliceB4: add strict runtime mapping from run config to CoreConfiguration --- .env.example | 2 +- examples/argo/argo.json | 13 ++ examples/local/backtest.py | 5 + examples/local/local.json | 13 ++ 
requirements-dev.txt | 46 +--- requirements.txt | 46 +--- tests/conftest.py | 18 ++ .../runtime/test_core_configuration_mapper.py | 170 ++++++++++++++ ..._runtime_core_configuration_integration.py | 213 ++++++++++++++++++ trading_runtime/argo/argo.json | 13 ++ .../backtest/adapters/execution.py | 1 - trading_runtime/backtest/engine/hft_engine.py | 8 +- .../backtest/engine/strategy_runner.py | 6 +- .../runtime/core_configuration_mapper.py | 167 ++++++++++++++ .../backtest/runtime/entrypoint.py | 7 +- trading_runtime/backtest/runtime/run_sweep.py | 19 +- trading_runtime/local/backtest.py | 6 + trading_runtime/local/local.json | 13 ++ 18 files changed, 679 insertions(+), 87 deletions(-) create mode 100644 tests/conftest.py create mode 100644 tests/runtime/test_core_configuration_mapper.py create mode 100644 tests/runtime/test_runtime_core_configuration_integration.py create mode 100644 trading_runtime/backtest/runtime/core_configuration_mapper.py diff --git a/.env.example b/.env.example index 6fc9aa8..e06d920 100644 --- a/.env.example +++ b/.env.example @@ -1 +1 @@ -TRADING_FRAMEWORK_COMMIT=934d332c21bef56fa76c19f477143d8d438238c2 +TRADING_FRAMEWORK_COMMIT=934d332c21bef5... 
diff --git a/examples/argo/argo.json b/examples/argo/argo.json index 0e30d6e..1f26301 100644 --- a/examples/argo/argo.json +++ b/examples/argo/argo.json @@ -81,6 +81,19 @@ "post_only": true }, + "core": { + "version": "v1", + "market": { + "instruments": { + "BTC_USDC-PERPETUAL": { + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1 + } + } + } + }, + "experiment": { "start_ts_ns": 1636035200000000000, "end_ts_ns": 1836121600000000000, diff --git a/examples/local/backtest.py b/examples/local/backtest.py index cd723fe..1d2d115 100644 --- a/examples/local/backtest.py +++ b/examples/local/backtest.py @@ -23,6 +23,9 @@ HftBacktestEngine, HftEngineConfig, ) +from trading_runtime.backtest.runtime.core_configuration_mapper import ( + build_core_configuration_from_run_config, +) def load_config(path: str) -> HftBacktestConfig: @@ -42,6 +45,7 @@ def load_config(path: str) -> HftBacktestConfig: engine_cfg = HftEngineConfig(**engine_raw) strategy_cfg = StrategyConfig(**strategy_raw) risk_cfg = RiskConfig(**risk_raw) + core_cfg = build_core_configuration_from_run_config(raw_json) return HftBacktestConfig( id=raw_json["id"], @@ -49,6 +53,7 @@ def load_config(path: str) -> HftBacktestConfig: engine_cfg=engine_cfg, strategy_cfg=strategy_cfg, risk_cfg=risk_cfg, + core_cfg=core_cfg, ) diff --git a/examples/local/local.json b/examples/local/local.json index 7cdb2de..c34145f 100644 --- a/examples/local/local.json +++ b/examples/local/local.json @@ -83,6 +83,19 @@ "order_qty": 0.1, "use_price_tick_levels": 3, "post_only": true + }, + + "core": { + "version": "v1", + "market": { + "instruments": { + "BTC_USDC-PERPETUAL": { + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1 + } + } + } } } diff --git a/requirements-dev.txt b/requirements-dev.txt index f88d77f..cd7028e 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -12,10 +12,6 @@ annotated-types==0.7.0 # via pydantic anyio==4.12.1 # via starlette -attrs==25.4.0 - # via - # jsonschema - # referencing 
bleach==6.3.0 # via panel blinker==1.9.0 @@ -99,9 +95,7 @@ gunicorn==23.0.0 h11==0.16.0 # via uvicorn hftbacktest==2.4.4 - # via - # trading-framework - # trading-runtime (pyproject.toml) + # via trading-runtime (pyproject.toml) holoviews==1.22.1 # via hftbacktest huey==2.6.0 @@ -126,10 +120,6 @@ jinja2==3.1.6 # flask joblib==1.5.3 # via scikit-learn -jsonschema==4.26.0 - # via trading-framework -jsonschema-specifications==2025.9.1 - # via jsonschema kiwisolver==1.4.9 # via matplotlib librt==0.8.0 @@ -161,9 +151,7 @@ mdit-py-plugins==0.5.0 mdurl==0.1.2 # via markdown-it-py mlflow==3.9.0 - # via - # trading-framework - # trading-runtime (pyproject.toml) + # via trading-runtime (pyproject.toml) mlflow-skinny==3.9.0 # via mlflow mlflow-tracing==3.9.0 @@ -193,11 +181,8 @@ numpy==2.2.6 # scikit-learn # scipy # skops - # trading-framework oci==2.167.1 - # via - # trading-framework - # trading-runtime (pyproject.toml) + # via trading-runtime (pyproject.toml) opentelemetry-api==1.39.1 # via # mlflow-skinny @@ -253,9 +238,7 @@ polars-runtime-32==1.38.1 prettytable==3.17.0 # via skops prometheus-client==0.24.1 - # via - # trading-framework - # trading-runtime (pyproject.toml) + # via trading-runtime (pyproject.toml) protobuf==6.33.5 # via # databricks-sdk @@ -263,9 +246,7 @@ protobuf==6.33.5 # mlflow-tracing # opentelemetry-proto pyarrow==16.1.0 - # via - # mlflow - # trading-framework + # via mlflow pyasn1==0.6.2 # via # pyasn1-modules @@ -311,20 +292,12 @@ pyyaml==6.0.3 # via # bokeh # mlflow-skinny -referencing==0.37.0 - # via - # jsonschema - # jsonschema-specifications requests==2.32.5 # via # databricks-sdk # docker # mlflow-skinny # panel -rpds-py==0.30.0 - # via - # jsonschema - # referencing rsa==4.9.1 # via google-auth ruff==0.15.1 @@ -357,10 +330,8 @@ threadpoolctl==3.6.0 tornado==6.5.4 # via bokeh tqdm==4.67.3 - # via - # panel - # trading-framework -trading-framework @ git+https://github.com/TradingChassis/core.git@95fff6a5e8bb52856efe37632187ee450fa52a3e + # 
via panel +trading-framework @ git+https://github.com/TradingChassis/core.git@2a73a08cf352bdb7eb357eaca25af0d9858883de # via -r _git_deps.in typing-extensions==4.15.0 # via @@ -379,7 +350,6 @@ typing-extensions==4.15.0 # pydantic # pydantic-core # pyopenssl - # referencing # sqlalchemy # starlette # typing-inspection @@ -391,8 +361,6 @@ tzdata==2025.3 # via pandas uc-micro-py==1.0.3 # via linkify-it-py -ujson==5.11.0 - # via trading-framework urllib3==2.6.3 # via # docker diff --git a/requirements.txt b/requirements.txt index 7f2013f..1117ca1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,10 +12,6 @@ annotated-types==0.7.0 # via pydantic anyio==4.12.1 # via starlette -attrs==25.4.0 - # via - # jsonschema - # referencing bleach==6.3.0 # via panel blinker==1.9.0 @@ -96,9 +92,7 @@ gunicorn==23.0.0 h11==0.16.0 # via uvicorn hftbacktest==2.4.4 - # via - # trading-framework - # trading-runtime (pyproject.toml) + # via trading-runtime (pyproject.toml) holoviews==1.22.1 # via hftbacktest huey==2.6.0 @@ -119,10 +113,6 @@ jinja2==3.1.6 # flask joblib==1.5.3 # via scikit-learn -jsonschema==4.26.0 - # via trading-framework -jsonschema-specifications==2025.9.1 - # via jsonschema kiwisolver==1.4.9 # via matplotlib linkify-it-py==2.0.3 @@ -152,9 +142,7 @@ mdit-py-plugins==0.5.0 mdurl==0.1.2 # via markdown-it-py mlflow==3.9.0 - # via - # trading-framework - # trading-runtime (pyproject.toml) + # via trading-runtime (pyproject.toml) mlflow-skinny==3.9.0 # via mlflow mlflow-tracing==3.9.0 @@ -180,11 +168,8 @@ numpy==2.2.6 # scikit-learn # scipy # skops - # trading-framework oci==2.167.1 - # via - # trading-framework - # trading-runtime (pyproject.toml) + # via trading-runtime (pyproject.toml) opentelemetry-api==1.39.1 # via # mlflow-skinny @@ -235,9 +220,7 @@ polars-runtime-32==1.38.1 prettytable==3.17.0 # via skops prometheus-client==0.24.1 - # via - # trading-framework - # trading-runtime (pyproject.toml) + # via trading-runtime (pyproject.toml) protobuf==6.33.5 # via 
# databricks-sdk @@ -245,9 +228,7 @@ protobuf==6.33.5 # mlflow-tracing # opentelemetry-proto pyarrow==16.1.0 - # via - # mlflow - # trading-framework + # via mlflow pyasn1==0.6.2 # via # pyasn1-modules @@ -289,20 +270,12 @@ pyyaml==6.0.3 # via # bokeh # mlflow-skinny -referencing==0.37.0 - # via - # jsonschema - # jsonschema-specifications requests==2.32.5 # via # databricks-sdk # docker # mlflow-skinny # panel -rpds-py==0.30.0 - # via - # jsonschema - # referencing rsa==4.9.1 # via google-auth scikit-learn==1.8.0 @@ -333,10 +306,8 @@ threadpoolctl==3.6.0 tornado==6.5.4 # via bokeh tqdm==4.67.3 - # via - # panel - # trading-framework -trading-framework @ git+https://github.com/TradingChassis/core.git@95fff6a5e8bb52856efe37632187ee450fa52a3e + # via panel +trading-framework @ git+https://github.com/TradingChassis/core.git@2a73a08cf352bdb7eb357eaca25af0d9858883de # via -r _git_deps.in typing-extensions==4.15.0 # via @@ -352,7 +323,6 @@ typing-extensions==4.15.0 # pydantic # pydantic-core # pyopenssl - # referencing # sqlalchemy # starlette # typing-inspection @@ -364,8 +334,6 @@ tzdata==2025.3 # via pandas uc-micro-py==1.0.3 # via linkify-it-py -ujson==5.11.0 - # via trading-framework urllib3==2.6.3 # via # docker diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..3b815c2 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,18 @@ +from __future__ import annotations + +import sys +from pathlib import Path + + +def _ensure_workspace_import_paths() -> None: + workspace_root = Path(__file__).resolve().parents[2] + runtime_root = workspace_root / "core-runtime" + core_root = workspace_root / "core" + + for path in (runtime_root, core_root): + path_str = str(path) + if path_str not in sys.path: + sys.path.insert(0, path_str) + + +_ensure_workspace_import_paths() diff --git a/tests/runtime/test_core_configuration_mapper.py b/tests/runtime/test_core_configuration_mapper.py new file mode 100644 index 0000000..ac93136 --- /dev/null +++ 
b/tests/runtime/test_core_configuration_mapper.py @@ -0,0 +1,170 @@ +from __future__ import annotations + +import pytest +from trading_framework.core.domain.configuration import CoreConfiguration + +from trading_runtime.backtest.runtime.core_configuration_mapper import ( + build_core_configuration_from_run_config, +) + + +def _valid_run_config() -> dict[str, object]: + return { + "engine": { + "instrument": "BTC_USDC-PERPETUAL", + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1.0, + }, + "core": { + "version": "v1", + "market": { + "instruments": { + "BTC_USDC-PERPETUAL": { + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1.0, + } + } + }, + }, + } + + +def test_valid_explicit_core_builds_core_configuration() -> None: + cfg = build_core_configuration_from_run_config(_valid_run_config()) + + assert isinstance(cfg, CoreConfiguration) + assert cfg.version == "v1" + assert cfg.payload["market"]["instruments"]["BTC_USDC-PERPETUAL"]["tick_size"] == 0.1 + + +def test_missing_core_fails() -> None: + run_config = _valid_run_config() + run_config.pop("core") + + with pytest.raises(ValueError, match="Missing required top-level section: core"): + build_core_configuration_from_run_config(run_config) + + +def test_missing_version_fails() -> None: + run_config = _valid_run_config() + run_config["core"] = { + "market": run_config["core"]["market"], # type: ignore[index] + } + + with pytest.raises(ValueError, match="core.version"): + build_core_configuration_from_run_config(run_config) + + +def test_missing_market_instruments_fails() -> None: + run_config = _valid_run_config() + run_config["core"] = {"version": "v1", "market": {}} + + with pytest.raises(ValueError, match="core.market.instruments"): + build_core_configuration_from_run_config(run_config) + + +def test_missing_instrument_entry_fails() -> None: + run_config = _valid_run_config() + run_config["core"] = { + "version": "v1", + "market": { + "instruments": { + "ETH_USDC-PERPETUAL": { + "tick_size": 
0.1, + "lot_size": 0.01, + "contract_size": 1.0, + } + } + }, + } + + with pytest.raises(ValueError, match="core.market.instruments.BTC_USDC-PERPETUAL"): + build_core_configuration_from_run_config(run_config) + + +@pytest.mark.parametrize("field_name", ["tick_size", "lot_size", "contract_size"]) +def test_missing_required_metadata_field_fails(field_name: str) -> None: + run_config = _valid_run_config() + instrument_cfg = run_config["core"]["market"]["instruments"]["BTC_USDC-PERPETUAL"] # type: ignore[index] + instrument_cfg.pop(field_name) + + with pytest.raises(ValueError, match=field_name): + build_core_configuration_from_run_config(run_config) + + +@pytest.mark.parametrize("field_name", ["tick_size", "lot_size", "contract_size"]) +def test_none_value_fails(field_name: str) -> None: + run_config = _valid_run_config() + run_config["core"]["market"]["instruments"]["BTC_USDC-PERPETUAL"][field_name] = None # type: ignore[index] + + with pytest.raises(ValueError, match=field_name): + build_core_configuration_from_run_config(run_config) + + +@pytest.mark.parametrize("field_name", ["tick_size", "lot_size", "contract_size"]) +def test_bool_value_fails(field_name: str) -> None: + run_config = _valid_run_config() + run_config["core"]["market"]["instruments"]["BTC_USDC-PERPETUAL"][field_name] = True # type: ignore[index] + + with pytest.raises(TypeError, match="must be numeric"): + build_core_configuration_from_run_config(run_config) + + +@pytest.mark.parametrize("field_name", ["tick_size", "lot_size", "contract_size"]) +def test_non_numeric_value_fails(field_name: str) -> None: + run_config = _valid_run_config() + run_config["core"]["market"]["instruments"]["BTC_USDC-PERPETUAL"][field_name] = "x" # type: ignore[index] + + with pytest.raises(TypeError, match="must be numeric"): + build_core_configuration_from_run_config(run_config) + + +@pytest.mark.parametrize("bad", [float("nan"), float("inf"), float("-inf")]) +def test_non_finite_value_fails(bad: float) -> None: + 
run_config = _valid_run_config() + run_config["core"]["market"]["instruments"]["BTC_USDC-PERPETUAL"]["tick_size"] = bad # type: ignore[index] + + with pytest.raises(ValueError, match="must be finite"): + build_core_configuration_from_run_config(run_config) + + +@pytest.mark.parametrize("bad", [0.0, -1.0]) +def test_non_positive_value_fails(bad: float) -> None: + run_config = _valid_run_config() + run_config["core"]["market"]["instruments"]["BTC_USDC-PERPETUAL"]["tick_size"] = bad # type: ignore[index] + + with pytest.raises(ValueError, match="must be > 0"): + build_core_configuration_from_run_config(run_config) + + +def test_no_fallback_from_engine_when_core_missing() -> None: + run_config = { + "engine": { + "instrument": "BTC_USDC-PERPETUAL", + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1.0, + } + } + + with pytest.raises(ValueError, match="Missing required top-level section: core"): + build_core_configuration_from_run_config(run_config) + + +def test_engine_duplicate_exact_match_allowed() -> None: + run_config = _valid_run_config() + + cfg = build_core_configuration_from_run_config(run_config) + + assert isinstance(cfg, CoreConfiguration) + + +@pytest.mark.parametrize("field_name", ["tick_size", "lot_size", "contract_size"]) +def test_engine_duplicate_mismatch_fails(field_name: str) -> None: + run_config = _valid_run_config() + run_config["engine"][field_name] = 999.0 # type: ignore[index] + + with pytest.raises(ValueError, match="Conflicting duplicate field values"): + build_core_configuration_from_run_config(run_config) diff --git a/tests/runtime/test_runtime_core_configuration_integration.py b/tests/runtime/test_runtime_core_configuration_integration.py new file mode 100644 index 0000000..5a9c6b1 --- /dev/null +++ b/tests/runtime/test_runtime_core_configuration_integration.py @@ -0,0 +1,213 @@ +from __future__ import annotations + +import json +import sys +import types +from pathlib import Path + +import pytest +from 
trading_framework.core.domain.configuration import CoreConfiguration + +from trading_runtime.local.backtest import load_config + + +def _repo_root() -> Path: + return Path(__file__).resolve().parents[2] + + +def _load_sample_config(path: Path) -> dict[str, object]: + return json.loads(path.read_text(encoding="utf-8")) + + +def _install_oci_stubs(monkeypatch: pytest.MonkeyPatch) -> None: + oci_mod = types.ModuleType("oci") + auth_mod = types.ModuleType("oci.auth") + signers_mod = types.ModuleType("oci.auth.signers") + config_mod = types.ModuleType("oci.config") + object_storage_mod = types.ModuleType("oci.object_storage") + signer_mod = types.ModuleType("oci.signer") + + class _InstancePrincipalsSecurityTokenSigner: # pragma: no cover - stub only + pass + + class _ObjectStorageClient: # pragma: no cover - stub only + pass + + class _Signer: # pragma: no cover - stub only + pass + + def _from_file(*, file_location: str, profile_name: str) -> dict[str, object]: + _ = (file_location, profile_name) + return {} + + signers_mod.InstancePrincipalsSecurityTokenSigner = _InstancePrincipalsSecurityTokenSigner + config_mod.from_file = _from_file + object_storage_mod.ObjectStorageClient = _ObjectStorageClient + signer_mod.Signer = _Signer + + monkeypatch.setitem(sys.modules, "oci", oci_mod) + monkeypatch.setitem(sys.modules, "oci.auth", auth_mod) + monkeypatch.setitem(sys.modules, "oci.auth.signers", signers_mod) + monkeypatch.setitem(sys.modules, "oci.config", config_mod) + monkeypatch.setitem(sys.modules, "oci.object_storage", object_storage_mod) + monkeypatch.setitem(sys.modules, "oci.signer", signer_mod) + + +def test_local_loader_fails_early_when_core_missing(tmp_path: Path) -> None: + sample_path = _repo_root() / "trading_runtime/local/local.json" + config = _load_sample_config(sample_path) + config.pop("core", None) + + test_path = tmp_path / "missing-core.json" + test_path.write_text(json.dumps(config), encoding="utf-8") + + with pytest.raises(ValueError, match="Missing 
required top-level section: core"): + load_config(str(test_path)) + + +def test_local_loader_succeeds_with_valid_core() -> None: + sample_path = _repo_root() / "trading_runtime/local/local.json" + cfg = load_config(str(sample_path)) + + assert isinstance(cfg.core_cfg, CoreConfiguration) + assert cfg.core_cfg.version == "v1" + + +def test_argo_entrypoint_rejects_invalid_run_config_before_planning( + tmp_path: Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + _install_oci_stubs(monkeypatch) + + from trading_runtime.backtest.runtime.entrypoint import main as argo_entrypoint_main + + sample_path = _repo_root() / "trading_runtime/argo/argo.json" + config = _load_sample_config(sample_path) + config.pop("core", None) + + config_path = tmp_path / "argo-missing-core.json" + config_path.write_text(json.dumps(config), encoding="utf-8") + + monkeypatch.setattr( + sys, + "argv", + [ + "entrypoint.py", + "--config", + str(config_path), + "--plan", + ], + ) + + with pytest.raises(ValueError, match="Missing required top-level section: core"): + argo_entrypoint_main() + + +def test_argo_sweep_worker_rejects_context_missing_core( + tmp_path: Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + _install_oci_stubs(monkeypatch) + + from trading_runtime.backtest.runtime.run_sweep import main as run_sweep_main + + context = { + "experiment_id": "exp-1", + "segment_id": "seg-1", + "sweep_id": "sweep-1", + "stage": "derived", + "venue": "deribit", + "datatype": "mixed", + "symbol": "BTC_USDC-PERPETUAL", + "file_keys": [], + "parameters": { + "engine": { + "instrument": "BTC_USDC-PERPETUAL", + }, + "strategy": {}, + "risk": {}, + }, + "scratch_root": str(tmp_path / "scratch"), + "results_root": str(tmp_path / "results"), + } + + context_path = tmp_path / "context.json" + context_path.write_text(json.dumps(context), encoding="utf-8") + + monkeypatch.setattr( + sys, + "argv", + [ + "run_sweep.py", + "--context", + str(context_path), + "--scratch-root", + str(tmp_path / "scratch"), + ], 
+ ) + + with pytest.raises(ValueError, match="Missing required top-level section: core"): + run_sweep_main() + + +def test_argo_emit_includes_core_section_in_sweep_context( + tmp_path: Path, + monkeypatch: pytest.MonkeyPatch, +) -> None: + _install_oci_stubs(monkeypatch) + + from trading_runtime.backtest.orchestrator.planner_models import ( + ExperimentPlan, + SegmentPlan, + ) + from trading_runtime.backtest.orchestrator.sweeps import SweepPlan + from trading_runtime.backtest.runtime.entrypoint import _emit_sweep_context + + plan = ExperimentPlan( + experiment_id="exp-1", + segments=[ + SegmentPlan( + segment_id="seg-1", + start_ts_ns=1, + end_ts_ns=2, + estimated_bytes=123, + files=["file-1.npz"], + sweeps=[SweepPlan(sweep_id="sweep-0000", parameters={})], + ) + ], + ) + base_cfg = { + "experiment": { + "venue": "deribit", + "datatype": "mixed", + "symbol": "BTC_USDC-PERPETUAL", + }, + "engine": {"instrument": "BTC_USDC-PERPETUAL"}, + "strategy": {"class_path": "x:y"}, + "risk": {"scope": "s", "notional_limits": {"currency": "USDC", "max_gross_notional": 1.0}}, + "core": { + "version": "v1", + "market": { + "instruments": { + "BTC_USDC-PERPETUAL": { + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1.0, + } + } + }, + }, + } + + _emit_sweep_context( + plan=plan, + base_cfg=base_cfg, + scratch_root=tmp_path / "scratch", + results_root=tmp_path / "results", + out_dir=tmp_path / "emit", + ) + + emitted = json.loads( + (tmp_path / "emit" / "seg-1__sweep-0000.json").read_text(encoding="utf-8") + ) + assert emitted["parameters"]["core"] == base_cfg["core"] diff --git a/trading_runtime/argo/argo.json b/trading_runtime/argo/argo.json index a59f091..604c9c5 100644 --- a/trading_runtime/argo/argo.json +++ b/trading_runtime/argo/argo.json @@ -81,6 +81,19 @@ "post_only": true }, + "core": { + "version": "v1", + "market": { + "instruments": { + "BTC_USDC-PERPETUAL": { + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1 + } + } + } + }, + "experiment": { 
"start_ts_ns": 1636035200000000000, "end_ts_ns": 1836121600000000000, diff --git a/trading_runtime/backtest/adapters/execution.py b/trading_runtime/backtest/adapters/execution.py index 206f5e7..1563cdd 100644 --- a/trading_runtime/backtest/adapters/execution.py +++ b/trading_runtime/backtest/adapters/execution.py @@ -8,7 +8,6 @@ if TYPE_CHECKING: from hftbacktest import ROIVectorMarketDepthBacktest - from trading_framework.core.domain.types import OrderIntent from trading_framework.core.domain.reject_reasons import RejectReason diff --git a/trading_runtime/backtest/engine/hft_engine.py b/trading_runtime/backtest/engine/hft_engine.py index 1a4f7eb..f8a6e7a 100644 --- a/trading_runtime/backtest/engine/hft_engine.py +++ b/trading_runtime/backtest/engine/hft_engine.py @@ -13,8 +13,12 @@ ) if TYPE_CHECKING: + from trading_framework.core.domain.configuration import CoreConfiguration from trading_framework.core.risk.risk_config import RiskConfig +from trading_framework.strategies.base import Strategy +from trading_framework.strategies.strategy_config import StrategyConfig + from trading_runtime.backtest.adapters.execution import HftBacktestExecutionAdapter from trading_runtime.backtest.adapters.venue import HftBacktestVenueAdapter from trading_runtime.backtest.engine.engine_base import ( @@ -23,8 +27,6 @@ BacktestResult, ) from trading_runtime.backtest.engine.strategy_runner import HftStrategyRunner -from trading_framework.strategies.base import Strategy -from trading_framework.strategies.strategy_config import StrategyConfig # pylint: disable=too-many-instance-attributes @@ -75,6 +77,8 @@ class HftBacktestConfig(BacktestConfig): engine_cfg: HftEngineConfig strategy_cfg: StrategyConfig risk_cfg: RiskConfig + # Boundary-prepared config for canonical core processing adoption. 
+ core_cfg: CoreConfiguration def _build_backtester(engine_cfg: HftEngineConfig) -> ROIVectorMarketDepthBacktest: diff --git a/trading_runtime/backtest/engine/strategy_runner.py b/trading_runtime/backtest/engine/strategy_runner.py index 51c0dbb..014a11b 100644 --- a/trading_runtime/backtest/engine/strategy_runner.py +++ b/trading_runtime/backtest/engine/strategy_runner.py @@ -17,16 +17,18 @@ Quantity, ) from trading_framework.core.events.event_bus import EventBus -from trading_runtime.core.events.sinks.file_recorder import FileRecorderSink from trading_framework.core.events.sinks.sink_logging import LoggingEventSink from trading_framework.core.ports.venue_adapter import VenueAdapter from trading_framework.core.risk.risk_config import RiskConfig from trading_framework.core.risk.risk_engine import RejectedIntent, RiskEngine +from trading_runtime.core.events.sinks.file_recorder import FileRecorderSink + if TYPE_CHECKING: + from trading_framework.strategies.base import Strategy + from trading_runtime.backtest.adapters.execution import HftBacktestExecutionAdapter from trading_runtime.backtest.engine.hft_engine import HftEngineConfig - from trading_framework.strategies.base import Strategy MAX_TIMEOUT_NS = 1 << 62 # Effectively "wait forever" without a heartbeat diff --git a/trading_runtime/backtest/runtime/core_configuration_mapper.py b/trading_runtime/backtest/runtime/core_configuration_mapper.py new file mode 100644 index 0000000..51ae3e1 --- /dev/null +++ b/trading_runtime/backtest/runtime/core_configuration_mapper.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +import math +from collections.abc import Collection, Mapping + +from trading_framework.core.domain.configuration import CoreConfiguration + +_REQUIRED_METADATA_FIELDS = ("tick_size", "lot_size", "contract_size") + + +def _require_mapping(value: object, *, field_path: str) -> Mapping[str, object]: + if not isinstance(value, Mapping): + raise TypeError(f"{field_path} must be a mapping") + + 
normalized: dict[str, object] = {} + for key, nested in value.items(): + if not isinstance(key, str): + raise TypeError(f"{field_path} keys must be strings") + normalized[key] = nested + return normalized + + +def _require_non_empty_string(value: object, *, field_path: str) -> str: + if value is None: + raise ValueError(f"Missing required field: {field_path}") + if not isinstance(value, str): + raise TypeError(f"{field_path} must be a string") + if not value: + raise ValueError(f"{field_path} must be non-empty") + return value + + +def _require_positive_number(value: object, *, field_path: str) -> float: + if value is None: + raise ValueError(f"Missing required field: {field_path}") + if isinstance(value, bool) or not isinstance(value, (int, float)): + raise TypeError(f"{field_path} must be numeric") + + numeric = float(value) + if not math.isfinite(numeric): + raise ValueError(f"{field_path} must be finite") + if numeric <= 0.0: + raise ValueError(f"{field_path} must be > 0") + return numeric + + +def _validate_instrument_metadata( + *, + instruments: Mapping[str, object], + instrument: str, +) -> dict[str, float]: + instrument_raw = instruments.get(instrument) + if instrument_raw is None: + raise ValueError( + "Missing required core instrument entry: " + f"core.market.instruments.{instrument}" + ) + + instrument_cfg = _require_mapping( + instrument_raw, + field_path=f"core.market.instruments.{instrument}", + ) + + validated: dict[str, float] = {} + for field in _REQUIRED_METADATA_FIELDS: + validated[field] = _require_positive_number( + instrument_cfg.get(field), + field_path=f"core.market.instruments.{instrument}.{field}", + ) + return validated + + +def build_core_configuration_from_sections( + *, + core_section: Mapping[str, object], + engine_section: Mapping[str, object] | None = None, + processed_instruments: Collection[str] | None = None, +) -> CoreConfiguration: + core = _require_mapping(core_section, field_path="core") + version = 
_require_non_empty_string(core.get("version"), field_path="core.version") + + market_raw = core.get("market") + if market_raw is None: + raise ValueError("Missing required field: core.market") + market = _require_mapping(market_raw, field_path="core.market") + + instruments_raw = market.get("instruments") + if instruments_raw is None: + raise ValueError("Missing required field: core.market.instruments") + instruments = _require_mapping( + instruments_raw, + field_path="core.market.instruments", + ) + + if not instruments: + raise ValueError("core.market.instruments must contain at least one instrument") + + to_validate = set(instruments.keys()) + if processed_instruments is not None: + to_validate.update(processed_instruments) + + validated_core_values: dict[str, dict[str, float]] = {} + for instrument in sorted(to_validate): + validated_core_values[instrument] = _validate_instrument_metadata( + instruments=instruments, + instrument=instrument, + ) + + if engine_section is not None: + engine = _require_mapping(engine_section, field_path="engine") + instrument_raw = engine.get("instrument") + if instrument_raw is not None: + instrument = _require_non_empty_string( + instrument_raw, + field_path="engine.instrument", + ) + if instrument not in instruments: + raise ValueError( + "engine.instrument must exist in core.market.instruments: " + f"{instrument}" + ) + + core_values = validated_core_values[instrument] + for field in _REQUIRED_METADATA_FIELDS: + if field not in engine: + continue + engine_value = _require_positive_number( + engine[field], + field_path=f"engine.{field}", + ) + if engine_value != core_values[field]: + raise ValueError( + f"Conflicting duplicate field values for {field}: " + f"engine.{field}={engine_value} != " + f"core.market.instruments.{instrument}.{field}={core_values[field]}" + ) + + # CoreConfiguration is constructed from the explicit core section only. 
+ payload = {k: v for k, v in core.items() if k != "version"} + return CoreConfiguration(version=version, payload=payload) + + +def build_core_configuration_from_run_config( + run_config: Mapping[str, object], +) -> CoreConfiguration: + config = _require_mapping(run_config, field_path="run_config") + if "core" not in config: + raise ValueError("Missing required top-level section: core") + + core_section = _require_mapping(config["core"], field_path="core") + + engine_section: Mapping[str, object] | None = None + processed_instruments: list[str] = [] + if "engine" in config: + engine_section = _require_mapping(config["engine"], field_path="engine") + if "instrument" in engine_section: + instrument = _require_non_empty_string( + engine_section["instrument"], + field_path="engine.instrument", + ) + processed_instruments.append(instrument) + + return build_core_configuration_from_sections( + core_section=core_section, + engine_section=engine_section, + processed_instruments=processed_instruments or None, + ) diff --git a/trading_runtime/backtest/runtime/entrypoint.py b/trading_runtime/backtest/runtime/entrypoint.py index 22e49cf..4a7bcd0 100644 --- a/trading_runtime/backtest/runtime/entrypoint.py +++ b/trading_runtime/backtest/runtime/entrypoint.py @@ -18,6 +18,9 @@ ) from trading_runtime.backtest.orchestrator.sweeps import RangeSpec from trading_runtime.backtest.runtime.context import SweepContext +from trading_runtime.backtest.runtime.core_configuration_mapper import ( + build_core_configuration_from_run_config, +) # --------------------------------------------------------------------------- # Helpers @@ -79,10 +82,11 @@ def _emit_sweep_context( symbol=symbol, file_keys=tuple(segment.files), parameters={ - # pass through full engine/strategy/risk blocks + # pass through full engine/strategy/risk/core blocks "engine": base_cfg["engine"], "strategy": base_cfg["strategy"], "risk": base_cfg["risk"], + "core": base_cfg["core"], # plus sweep-specific parameters "sweep": 
sweep.parameters, }, @@ -157,6 +161,7 @@ def main() -> None: # ------------------------------------------------------------------ cfg = _load_json(args.config) + _ = build_core_configuration_from_run_config(cfg) experiment_id: str = cfg["id"] experiment_cfg = cfg["experiment"] diff --git a/trading_runtime/backtest/runtime/run_sweep.py b/trading_runtime/backtest/runtime/run_sweep.py index 37ebfc5..2c845ed 100644 --- a/trading_runtime/backtest/runtime/run_sweep.py +++ b/trading_runtime/backtest/runtime/run_sweep.py @@ -13,6 +13,9 @@ from pathlib import Path from typing import Any +from trading_framework.core.risk.risk_config import RiskConfig +from trading_framework.strategies.strategy_config import StrategyConfig + from trading_runtime.backtest.engine.hft_engine import ( HftBacktestConfig, HftBacktestEngine, @@ -20,8 +23,9 @@ ) from trading_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim from trading_runtime.backtest.runtime.context import SweepContext -from trading_framework.core.risk.risk_config import RiskConfig -from trading_framework.strategies.strategy_config import StrategyConfig +from trading_runtime.backtest.runtime.core_configuration_mapper import ( + build_core_configuration_from_run_config, +) class SweepMaterializer: @@ -80,10 +84,12 @@ def __init__( engine_cfg: HftEngineConfig, strategy_cfg: StrategyConfig, risk_cfg: RiskConfig, + core_cfg: object, ) -> None: self._engine_cfg = engine_cfg self._strategy_cfg = strategy_cfg self._risk_cfg = risk_cfg + self._core_cfg = core_cfg def run(self, ctx: SweepContext) -> dict[str, Any]: """ @@ -114,6 +120,7 @@ def run(self, ctx: SweepContext) -> dict[str, Any]: engine_cfg=engine_cfg, strategy_cfg=self._strategy_cfg, risk_cfg=self._risk_cfg, + core_cfg=self._core_cfg, ) engine = HftBacktestEngine(backtest_cfg) @@ -440,6 +447,13 @@ def main() -> None: ctx = SweepContext(**json.loads(args.context.read_text(encoding="utf-8"))) ctx = replace(ctx, scratch_root=args.scratch_root) + run_config_for_core: 
dict[str, object] = {} + if "engine" in ctx.parameters: + run_config_for_core["engine"] = ctx.parameters["engine"] + if "core" in ctx.parameters: + run_config_for_core["core"] = ctx.parameters["core"] + core_cfg = build_core_configuration_from_run_config(run_config_for_core) + # ------------------------------------------------------------------ # Setup # ------------------------------------------------------------------ @@ -455,6 +469,7 @@ def main() -> None: engine_cfg=engine_cfg, strategy_cfg=strategy_cfg, risk_cfg=risk_cfg, + core_cfg=core_cfg, ) persister = SweepResultPersister(bucket="data") diff --git a/trading_runtime/local/backtest.py b/trading_runtime/local/backtest.py index 1116a31..a8495c6 100644 --- a/trading_runtime/local/backtest.py +++ b/trading_runtime/local/backtest.py @@ -12,11 +12,15 @@ from trading_framework.core.risk.risk_config import RiskConfig from trading_framework.strategies.strategy_config import StrategyConfig + from trading_runtime.backtest.engine.hft_engine import ( HftBacktestConfig, HftBacktestEngine, HftEngineConfig, ) +from trading_runtime.backtest.runtime.core_configuration_mapper import ( + build_core_configuration_from_run_config, +) def load_config(path: str) -> HftBacktestConfig: @@ -36,6 +40,7 @@ def load_config(path: str) -> HftBacktestConfig: engine_cfg = HftEngineConfig(**engine_raw) strategy_cfg = StrategyConfig(**strategy_raw) risk_cfg = RiskConfig(**risk_raw) + core_cfg = build_core_configuration_from_run_config(raw_json) return HftBacktestConfig( id=raw_json["id"], @@ -43,6 +48,7 @@ def load_config(path: str) -> HftBacktestConfig: engine_cfg=engine_cfg, strategy_cfg=strategy_cfg, risk_cfg=risk_cfg, + core_cfg=core_cfg, ) diff --git a/trading_runtime/local/local.json b/trading_runtime/local/local.json index eb2d32d..153b81e 100644 --- a/trading_runtime/local/local.json +++ b/trading_runtime/local/local.json @@ -83,6 +83,19 @@ "order_qty": 0.1, "use_price_tick_levels": 3, "post_only": true + }, + + "core": { + "version": 
"v1", + "market": { + "instruments": { + "BTC_USDC-PERPETUAL": { + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1 + } + } + } } } From aace2d51856ba83fd23e04aecfcbda44cdd13220 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sat, 2 May 2026 16:34:13 +0000 Subject: [PATCH 09/36] m2 p3 sliceB5: adopt the core canonical processing boundary for runtime MarketEvent processing --- ...rategy_runner_canonical_market_adoption.py | 322 ++++++++++++++++++ trading_runtime/backtest/engine/hft_engine.py | 1 + .../backtest/engine/strategy_runner.py | 38 ++- 3 files changed, 347 insertions(+), 14 deletions(-) create mode 100644 tests/runtime/test_strategy_runner_canonical_market_adoption.py diff --git a/tests/runtime/test_strategy_runner_canonical_market_adoption.py b/tests/runtime/test_strategy_runner_canonical_market_adoption.py new file mode 100644 index 0000000..2e83542 --- /dev/null +++ b/tests/runtime/test_strategy_runner_canonical_market_adoption.py @@ -0,0 +1,322 @@ +from __future__ import annotations + +from types import SimpleNamespace +from typing import Any + +import pytest +from trading_framework.core.domain.configuration import CoreConfiguration +from trading_framework.core.domain.state import StrategyState +from trading_framework.core.domain.types import ( + BookLevel, + BookPayload, + MarketEvent, + Price, + Quantity, +) +from trading_framework.core.events.event_bus import EventBus +from trading_framework.core.risk.risk_config import RiskConfig +from trading_framework.strategies.base import Strategy + +import trading_runtime.backtest.engine.strategy_runner as strategy_runner_module +from trading_runtime.backtest.engine.hft_engine import HftEngineConfig +from trading_runtime.backtest.engine.strategy_runner import HftStrategyRunner + + +class _NoopStrategy(Strategy): + def on_feed(self, state: Any, event: Any, engine_cfg: Any, constraints: Any) -> list[Any]: + _ = (state, event, engine_cfg, constraints) + return [] + + def on_order_update(self, state: Any, 
engine_cfg: Any, constraints: Any) -> list[Any]: + _ = (state, engine_cfg, constraints) + return [] + + def on_risk_decision(self, decision: Any) -> None: + _ = decision + + +class _NoopExecution: + def apply_intents(self, intents: list[Any]) -> list[tuple[Any, str]]: + _ = intents + return [] + + +class _RecorderWrapper: + recorder: Any + + def __init__(self) -> None: + self.recorder = SimpleNamespace(record=lambda _hbt: None) + + +class _StubVenue: + def __init__( + self, + *, + rc_sequence: list[int], + ts_sequence: list[int], + depth: object | None = None, + state_values: object | None = None, + orders: object | None = None, + ) -> None: + self._rc = list(rc_sequence) + self._ts = list(ts_sequence) + self._depth = depth + self._state_values = state_values + self._orders = orders + self._current_ts = 0 + + def wait_next(self, *, timeout_ns: int, include_order_resp: bool) -> int: + _ = (timeout_ns, include_order_resp) + self._current_ts = self._ts.pop(0) + return self._rc.pop(0) + + def current_timestamp_ns(self) -> int: + return self._current_ts + + def read_market_snapshot(self) -> object: + return self._depth + + def read_orders_snapshot(self) -> tuple[object, object]: + return self._state_values, self._orders + + def record(self, recorder: Any) -> None: + recorder.recorder.record(self) + + +def _core_cfg() -> CoreConfiguration: + return CoreConfiguration( + version="v1", + payload={ + "market": { + "instruments": { + "BTC_USDC-PERPETUAL": { + "tick_size": 0.1, + "lot_size": 0.01, + "contract_size": 1.0, + } + } + } + }, + ) + + +def _engine_cfg() -> HftEngineConfig: + return HftEngineConfig( + initial_snapshot=None, + data_files=[], + instrument="BTC_USDC-PERPETUAL", + tick_size=0.1, + lot_size=0.01, + contract_size=1.0, + maker_fee_rate=0.0, + taker_fee_rate=0.0, + entry_latency_ns=0, + response_latency_ns=0, + use_risk_adverse_queue_model=False, + partial_fill_venue=False, + max_steps=1, + last_trades_capacity=1, + max_price_tick_levels=1, + roi_lb=0, + 
roi_ub=1, + stats_npz_path="/tmp/stats.npz", + event_bus_path="/tmp/events.jsonl", + ) + + +def _risk_cfg() -> RiskConfig: + return RiskConfig( + scope="test", + notional_limits={"currency": "USDC", "max_gross_notional": 1.0}, + ) + + +def _market_event(ts_ns: int) -> MarketEvent: + return MarketEvent( + ts_ns_exch=ts_ns, + ts_ns_local=ts_ns, + instrument="BTC_USDC-PERPETUAL", + event_type="book", + book=BookPayload( + book_type="snapshot", + bids=[ + BookLevel( + price=Price(currency="UNKNOWN", value=100.0), + quantity=Quantity(value=1.0, unit="contracts"), + ) + ], + asks=[ + BookLevel( + price=Price(currency="UNKNOWN", value=101.0), + quantity=Quantity(value=1.0, unit="contracts"), + ) + ], + depth=1, + ), + ) + + +def _depth_snapshot() -> object: + return SimpleNamespace( + roi_lb_tick=100, + tick_size=0.1, + best_ask_tick=101, + best_bid_tick=100, + ask_depth=[1.0, 0.0], + bid_depth=[1.0, 0.0], + best_bid=100.0, + best_ask=101.0, + best_bid_qty=1.0, + best_ask_qty=1.0, + ) + + +def test_process_market_event_routes_through_event_entry_with_core_configuration( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = object.__new__(HftStrategyRunner) + runner.strategy_state = object() + runner._core_cfg = _core_cfg() + runner._next_market_processing_position_index = 0 + + captured: list[tuple[int, object]] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = state + captured.append((entry.position.index, configuration)) + + monkeypatch.setattr( + strategy_runner_module, + "process_event_entry", + _spy_process_event_entry, + ) + + runner._process_canonical_market_event(_market_event(1)) + runner._process_canonical_market_event(_market_event(2)) + + assert [idx for idx, _ in captured] == [0, 1] + assert captured[0][1] is runner._core_cfg + assert captured[1][1] is runner._core_cfg + assert runner._next_market_processing_position_index == 2 + + +def 
test_market_branch_calls_canonical_boundary_not_update_market( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + monkeypatch.setattr( + runner.strategy_state, + "update_market", + lambda *args, **kwargs: (_ for _ in ()).throw(AssertionError("update_market must not be called")), + ) + monkeypatch.setattr( + runner.strategy_state, + "apply_fill_event", + lambda *args, **kwargs: (_ for _ in ()).throw(AssertionError("apply_fill_event must not be called")), + ) + + captured: list[tuple[int, object]] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = state + captured.append((entry.position.index, configuration)) + + monkeypatch.setattr( + strategy_runner_module, + "process_event_entry", + _spy_process_event_entry, + ) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1, 2, 3], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert captured == [(0, runner._core_cfg)] + + +def test_missing_core_cfg_fails_before_market_mutation() -> None: + runner = object.__new__(HftStrategyRunner) + runner.strategy_state = StrategyState(event_bus=EventBus(sinks=[])) + runner._core_cfg = None + runner._next_market_processing_position_index = 0 + + with pytest.raises(ValueError, match="CoreConfiguration is required"): + runner._process_canonical_market_event(_market_event(42)) + + assert runner.strategy_state.market == {} + assert runner.strategy_state._last_processing_position_index is None + assert runner._next_market_processing_position_index == 0 + + +def test_invalid_core_cfg_type_fails_before_market_mutation() -> None: + runner = object.__new__(HftStrategyRunner) + runner.strategy_state = StrategyState(event_bus=EventBus(sinks=[])) + runner._core_cfg = object() + runner._next_market_processing_position_index = 
0 + + with pytest.raises(TypeError, match="configuration must be CoreConfiguration or None"): + runner._process_canonical_market_event(_market_event(42)) + + assert runner.strategy_state.market == {} + assert runner.strategy_state._last_processing_position_index is None + assert runner._next_market_processing_position_index == 0 + + +def test_order_snapshot_branch_keeps_compatibility_path( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + monkeypatch.setattr( + runner.strategy_state, + "apply_fill_event", + lambda *args, **kwargs: (_ for _ in ()).throw(AssertionError("apply_fill_event must not be called")), + ) + + calls = {"update_account": 0, "ingest_order_snapshots": 0} + + def _spy_update_account(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["update_account"] += 1 + + def _spy_ingest_order_snapshots(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["ingest_order_snapshots"] += 1 + + monkeypatch.setattr(runner.strategy_state, "update_account", _spy_update_account) + monkeypatch.setattr( + runner.strategy_state, + "ingest_order_snapshots", + _spy_ingest_order_snapshots, + ) + + venue = _StubVenue( + rc_sequence=[0, 3, 1], + ts_sequence=[1, 2, 3], + state_values=SimpleNamespace( + position=0.0, + balance=1000.0, + fee=0.0, + trading_volume=0.0, + trading_value=0.0, + num_trades=0, + ), + orders={}, + ) + + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert calls["update_account"] == 1 + assert calls["ingest_order_snapshots"] == 1 diff --git a/trading_runtime/backtest/engine/hft_engine.py b/trading_runtime/backtest/engine/hft_engine.py index f8a6e7a..c327d5a 100644 --- a/trading_runtime/backtest/engine/hft_engine.py +++ b/trading_runtime/backtest/engine/hft_engine.py @@ -158,6 +158,7 @@ def run(self) -> BacktestResult: engine_cfg=engine_cfg, 
strategy=strategy, risk_cfg=risk_cfg, + core_cfg=cfg.core_cfg, ) # 4) Backtest-only venue and execution adapters diff --git a/trading_runtime/backtest/engine/strategy_runner.py b/trading_runtime/backtest/engine/strategy_runner.py index 014a11b..db2503a 100644 --- a/trading_runtime/backtest/engine/strategy_runner.py +++ b/trading_runtime/backtest/engine/strategy_runner.py @@ -7,6 +7,12 @@ from pathlib import Path from typing import TYPE_CHECKING, Any +from trading_framework.core.domain.configuration import CoreConfiguration +from trading_framework.core.domain.processing import process_event_entry +from trading_framework.core.domain.processing_order import ( + EventStreamEntry, + ProcessingPosition, +) from trading_framework.core.domain.state import StrategyState from trading_framework.core.domain.types import ( BookLevel, @@ -49,9 +55,11 @@ def __init__( engine_cfg: HftEngineConfig, strategy: Strategy, risk_cfg: RiskConfig, + core_cfg: CoreConfiguration, ) -> None: self.engine_cfg = engine_cfg self.strategy = strategy + self._core_cfg = core_cfg event_bus = self._build_event_bus( path=Path(engine_cfg.event_bus_path), @@ -67,6 +75,7 @@ def __init__( ) self._next_send_ts_ns_local: int | None = None + self._next_market_processing_position_index: int = 0 def _build_event_bus( self, @@ -107,6 +116,20 @@ def intent_priority(intent: OrderIntent) -> int: return sorted(intents, key=lambda it: (intent_priority(it), it.ts_ns_local)) + def _process_canonical_market_event(self, market_event: MarketEvent) -> None: + entry = EventStreamEntry( + position=ProcessingPosition( + index=self._next_market_processing_position_index, + ), + event=market_event, + ) + process_event_entry( + self.strategy_state, + entry, + configuration=self._core_cfg, + ) + self._next_market_processing_position_index += 1 + def run( self, venue: VenueAdapter, @@ -117,8 +140,6 @@ def run( # pylint: disable=too-many-locals,too-many-branches,too-many-statements instrument = self.engine_cfg.instrument - 
contract_size = self.engine_cfg.contract_size - # Initialize hftbacktest engine # Fetch very first event block to set local timestamp venue.wait_next(timeout_ns=MAX_TIMEOUT_NS, include_order_resp=False) @@ -215,18 +236,7 @@ def run( ), ) - self.strategy_state.update_market( - instrument=instrument, - best_bid=depth.best_bid, - best_ask=depth.best_ask, - best_bid_qty=depth.best_bid_qty, - best_ask_qty=depth.best_ask_qty, - tick_size=depth.tick_size, - lot_size=depth.lot_size, - contract_size=contract_size, - ts_ns_local=sim_now_ns, - ts_ns_exch=sim_now_ns, - ) + self._process_canonical_market_event(market_event) constraints = self.risk.build_constraints(sim_now_ns) raw_intents.extend( From e11dbf3d645f0a30d0d1be60d6abfe196fbd7c4c Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sat, 2 May 2026 18:37:56 +0000 Subject: [PATCH 10/36] m2 p3 sliceD6: wire runtime successful NEW dispatch into canonical OrderSubmittedEvent processing --- requirements-dev.txt | 2 +- requirements.txt | 2 +- ...rategy_runner_canonical_market_adoption.py | 327 +++++++++++++++++- .../backtest/engine/strategy_runner.py | 55 ++- 4 files changed, 366 insertions(+), 20 deletions(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index cd7028e..c0eb36a 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -331,7 +331,7 @@ tornado==6.5.4 # via bokeh tqdm==4.67.3 # via panel -trading-framework @ git+https://github.com/TradingChassis/core.git@2a73a08cf352bdb7eb357eaca25af0d9858883de +trading-framework @ git+https://github.com/TradingChassis/core.git@71dc8effdb6f9ffb9ae289fe58dcbbaf87793e68 # via -r _git_deps.in typing-extensions==4.15.0 # via diff --git a/requirements.txt b/requirements.txt index 1117ca1..4f326c5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -307,7 +307,7 @@ tornado==6.5.4 # via bokeh tqdm==4.67.3 # via panel -trading-framework @ git+https://github.com/TradingChassis/core.git@2a73a08cf352bdb7eb357eaca25af0d9858883de +trading-framework @ 
git+https://github.com/TradingChassis/core.git@71dc8effdb6f9ffb9ae289fe58dcbbaf87793e68 # via -r _git_deps.in typing-extensions==4.15.0 # via diff --git a/tests/runtime/test_strategy_runner_canonical_market_adoption.py b/tests/runtime/test_strategy_runner_canonical_market_adoption.py index 2e83542..2d1af52 100644 --- a/tests/runtime/test_strategy_runner_canonical_market_adoption.py +++ b/tests/runtime/test_strategy_runner_canonical_market_adoption.py @@ -9,12 +9,17 @@ from trading_framework.core.domain.types import ( BookLevel, BookPayload, + CancelOrderIntent, MarketEvent, + NewOrderIntent, + OrderSubmittedEvent, Price, Quantity, + ReplaceOrderIntent, ) from trading_framework.core.events.event_bus import EventBus from trading_framework.core.risk.risk_config import RiskConfig +from trading_framework.core.risk.risk_engine import GateDecision from trading_framework.strategies.base import Strategy import trading_runtime.backtest.engine.strategy_runner as strategy_runner_module @@ -171,13 +176,79 @@ def _depth_snapshot() -> object: ) +def _new_intent(ts_ns_local: int = 2) -> NewOrderIntent: + return NewOrderIntent( + ts_ns_local=ts_ns_local, + instrument="BTC_USDC-PERPETUAL", + client_order_id="cid-new-1", + intents_correlation_id="corr-new-1", + side="buy", + order_type="limit", + intended_qty=Quantity(value=1.0, unit="contracts"), + intended_price=Price(currency="USDC", value=100.0), + time_in_force="GTC", + ) + + +def _replace_intent(ts_ns_local: int = 2) -> ReplaceOrderIntent: + return ReplaceOrderIntent( + ts_ns_local=ts_ns_local, + instrument="BTC_USDC-PERPETUAL", + client_order_id="cid-existing-1", + intents_correlation_id="corr-replace-1", + side="buy", + order_type="limit", + intended_qty=Quantity(value=2.0, unit="contracts"), + intended_price=Price(currency="USDC", value=101.0), + ) + + +def _cancel_intent(ts_ns_local: int = 2) -> CancelOrderIntent: + return CancelOrderIntent( + ts_ns_local=ts_ns_local, + instrument="BTC_USDC-PERPETUAL", + 
client_order_id="cid-existing-1", + intents_correlation_id="corr-cancel-1", + ) + + +class _EmitIntentsStrategy(Strategy): + def __init__(self, intents: list[object]) -> None: + self._intents = intents + + def on_feed(self, state: Any, event: Any, engine_cfg: Any, constraints: Any) -> list[Any]: + _ = (state, event, engine_cfg, constraints) + return list(self._intents) + + def on_order_update(self, state: Any, engine_cfg: Any, constraints: Any) -> list[Any]: + _ = (state, engine_cfg, constraints) + return [] + + def on_risk_decision(self, decision: Any) -> None: + _ = decision + + +def _decision_for(accepted_now: list[Any]) -> GateDecision: + return GateDecision( + ts_ns_local=2, + accepted_now=accepted_now, + queued=[], + rejected=[], + replaced_in_queue=[], + dropped_in_queue=[], + handled_in_queue=[], + execution_rejected=[], + next_send_ts_ns_local=None, + ) + + def test_process_market_event_routes_through_event_entry_with_core_configuration( monkeypatch: pytest.MonkeyPatch, ) -> None: runner = object.__new__(HftStrategyRunner) runner.strategy_state = object() runner._core_cfg = _core_cfg() - runner._next_market_processing_position_index = 0 + runner._next_canonical_processing_position_index = 0 captured: list[tuple[int, object]] = [] @@ -197,7 +268,7 @@ def _spy_process_event_entry(state: object, entry: object, *, configuration: obj assert [idx for idx, _ in captured] == [0, 1] assert captured[0][1] is runner._core_cfg assert captured[1][1] is runner._core_cfg - assert runner._next_market_processing_position_index == 2 + assert runner._next_canonical_processing_position_index == 2 def test_market_branch_calls_canonical_boundary_not_update_market( @@ -246,28 +317,28 @@ def test_missing_core_cfg_fails_before_market_mutation() -> None: runner = object.__new__(HftStrategyRunner) runner.strategy_state = StrategyState(event_bus=EventBus(sinks=[])) runner._core_cfg = None - runner._next_market_processing_position_index = 0 + 
runner._next_canonical_processing_position_index = 0 with pytest.raises(ValueError, match="CoreConfiguration is required"): runner._process_canonical_market_event(_market_event(42)) assert runner.strategy_state.market == {} assert runner.strategy_state._last_processing_position_index is None - assert runner._next_market_processing_position_index == 0 + assert runner._next_canonical_processing_position_index == 0 def test_invalid_core_cfg_type_fails_before_market_mutation() -> None: runner = object.__new__(HftStrategyRunner) runner.strategy_state = StrategyState(event_bus=EventBus(sinks=[])) runner._core_cfg = object() - runner._next_market_processing_position_index = 0 + runner._next_canonical_processing_position_index = 0 with pytest.raises(TypeError, match="configuration must be CoreConfiguration or None"): runner._process_canonical_market_event(_market_event(42)) assert runner.strategy_state.market == {} assert runner.strategy_state._last_processing_position_index is None - assert runner._next_market_processing_position_index == 0 + assert runner._next_canonical_processing_position_index == 0 def test_order_snapshot_branch_keeps_compatibility_path( @@ -320,3 +391,247 @@ def _spy_ingest_order_snapshots(*args: object, **kwargs: object) -> None: assert calls["update_account"] == 1 assert calls["ingest_order_snapshots"] == 1 + + +def test_successful_new_dispatch_processes_order_submitted_before_mark_sent( + monkeypatch: pytest.MonkeyPatch, +) -> None: + new_intent = _new_intent() + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_EmitIntentsStrategy([new_intent]), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + monkeypatch.setattr(runner.strategy_state, "apply_fill_event", lambda *args, **kwargs: None) + monkeypatch.setattr( + runner.risk, + "decide_intents", + lambda **_: _decision_for([new_intent]), + ) + + ordering: list[str] = [] + submitted_events: list[OrderSubmittedEvent] = [] + marks: list[tuple[str, str, str]] = [] + + def 
_spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = (state, configuration) + if isinstance(entry.event, OrderSubmittedEvent): + ordering.append("submitted") + submitted_events.append(entry.event) + + def _spy_mark_intent_sent(instrument: str, client_order_id: str, intent_type: str) -> None: + ordering.append("mark") + marks.append((instrument, client_order_id, intent_type)) + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + monkeypatch.setattr(runner.strategy_state, "mark_intent_sent", _spy_mark_intent_sent) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1_111, 5_000_000_000, 5_000_000_001], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert len(submitted_events) == 1 + event = submitted_events[0] + assert event.instrument == new_intent.instrument + assert event.client_order_id == new_intent.client_order_id + assert event.side == new_intent.side + assert event.order_type == new_intent.order_type + assert event.intended_price == new_intent.intended_price + assert event.intended_qty == new_intent.intended_qty + assert event.time_in_force == new_intent.time_in_force + assert event.intent_correlation_id == new_intent.intents_correlation_id + assert event.dispatch_attempt_id is None + assert event.runtime_correlation is None + assert event.ts_ns_local_dispatch == 5_000_000_000 + assert ordering == ["submitted", "mark"] + assert marks == [(new_intent.instrument, new_intent.client_order_id, "new")] + + +def test_failed_new_dispatch_processes_no_order_submitted_event( + monkeypatch: pytest.MonkeyPatch, +) -> None: + new_intent = _new_intent() + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_EmitIntentsStrategy([new_intent]), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + monkeypatch.setattr( + runner.risk, + "decide_intents", + lambda **_: 
_decision_for([new_intent]), + ) + + submitted_event_count = 0 + marked_count = 0 + captured_decisions: list[GateDecision] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + nonlocal submitted_event_count + _ = (state, configuration) + if isinstance(entry.event, OrderSubmittedEvent): + submitted_event_count += 1 + + def _spy_mark_intent_sent(instrument: str, client_order_id: str, intent_type: str) -> None: + nonlocal marked_count + _ = (instrument, client_order_id, intent_type) + marked_count += 1 + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + monkeypatch.setattr(runner.strategy_state, "mark_intent_sent", _spy_mark_intent_sent) + monkeypatch.setattr( + runner.strategy, + "on_risk_decision", + lambda decision: captured_decisions.append(decision), + ) + + class _ExecutionFailNew: + def apply_intents(self, intents: list[Any]) -> list[tuple[Any, str]]: + _ = intents + return [(new_intent, "EXCHANGE_REJECT")] + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[10, 20, 30], + depth=_depth_snapshot(), + ) + runner.run( + venue=venue, + execution=_ExecutionFailNew(), + recorder=_RecorderWrapper(), + ) + + assert submitted_event_count == 0 + assert marked_count == 0 + assert len(captured_decisions) == 1 + assert len(captured_decisions[0].execution_rejected) == 1 + assert captured_decisions[0].execution_rejected[0].intent.client_order_id == new_intent.client_order_id + + +def test_successful_replace_cancel_dispatch_processes_no_order_submitted_event( + monkeypatch: pytest.MonkeyPatch, +) -> None: + replace_intent = _replace_intent() + cancel_intent = _cancel_intent() + accepted_now = [replace_intent, cancel_intent] + + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_EmitIntentsStrategy(accepted_now), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + monkeypatch.setattr( + runner.risk, + "decide_intents", + lambda **_: 
_decision_for(accepted_now), + ) + + submitted_event_count = 0 + marks: list[tuple[str, str, str]] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + nonlocal submitted_event_count + _ = (state, configuration) + if isinstance(entry.event, OrderSubmittedEvent): + submitted_event_count += 1 + + def _spy_mark_intent_sent(instrument: str, client_order_id: str, intent_type: str) -> None: + marks.append((instrument, client_order_id, intent_type)) + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + monkeypatch.setattr(runner.strategy_state, "mark_intent_sent", _spy_mark_intent_sent) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[100, 200, 300], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert submitted_event_count == 0 + assert marks == [ + ( + replace_intent.instrument, + replace_intent.client_order_id, + "replace", + ), + ( + cancel_intent.instrument, + cancel_intent.client_order_id, + "cancel", + ), + ] + + +def test_global_canonical_counter_shared_between_market_and_order_submitted( + monkeypatch: pytest.MonkeyPatch, +) -> None: + new_intent = _new_intent() + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_EmitIntentsStrategy([new_intent]), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + monkeypatch.setattr( + runner.risk, + "decide_intents", + lambda **_: _decision_for([new_intent]), + ) + + positions: list[tuple[int, str]] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = (state, configuration) + event_name = type(entry.event).__name__ + positions.append((entry.position.index, event_name)) + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[7, 9_999_999_999, 10_000_000_000], + 
depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert positions == [ + (0, "MarketEvent"), + (1, "OrderSubmittedEvent"), + ] + assert runner._next_canonical_processing_position_index == 2 + + +def test_canonical_counter_increments_only_after_successful_canonical_processing( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = object.__new__(HftStrategyRunner) + runner.strategy_state = object() + runner._core_cfg = _core_cfg() + runner._next_canonical_processing_position_index = 0 + + def _fail(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + raise RuntimeError("boom") + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _fail) + with pytest.raises(RuntimeError, match="boom"): + runner._process_canonical_market_event(_market_event(1)) + assert runner._next_canonical_processing_position_index == 0 + + called = {"count": 0} + + def _ok(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + called["count"] += 1 + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _ok) + runner._process_canonical_market_event(_market_event(2)) + assert called["count"] == 1 + assert runner._next_canonical_processing_position_index == 1 diff --git a/trading_runtime/backtest/engine/strategy_runner.py b/trading_runtime/backtest/engine/strategy_runner.py index db2503a..f14599c 100644 --- a/trading_runtime/backtest/engine/strategy_runner.py +++ b/trading_runtime/backtest/engine/strategy_runner.py @@ -18,7 +18,9 @@ BookLevel, BookPayload, MarketEvent, + NewOrderIntent, OrderIntent, + OrderSubmittedEvent, Price, Quantity, ) @@ -75,7 +77,21 @@ def __init__( ) self._next_send_ts_ns_local: int | None = None - self._next_market_processing_position_index: int = 0 + self._next_canonical_processing_position_index: int = 0 + + def _process_canonical_event(self, event: object) -> None: + entry = EventStreamEntry( + position=ProcessingPosition( + 
index=self._next_canonical_processing_position_index, + ), + event=event, + ) + process_event_entry( + self.strategy_state, + entry, + configuration=self._core_cfg, + ) + self._next_canonical_processing_position_index += 1 def _build_event_bus( self, @@ -117,18 +133,28 @@ def intent_priority(intent: OrderIntent) -> int: return sorted(intents, key=lambda it: (intent_priority(it), it.ts_ns_local)) def _process_canonical_market_event(self, market_event: MarketEvent) -> None: - entry = EventStreamEntry( - position=ProcessingPosition( - index=self._next_market_processing_position_index, - ), - event=market_event, - ) - process_event_entry( - self.strategy_state, - entry, - configuration=self._core_cfg, + self._process_canonical_event(market_event) + + def _process_canonical_order_submitted_event( + self, + intent: NewOrderIntent, + *, + ts_ns_local_dispatch: int, + ) -> None: + order_submitted_event = OrderSubmittedEvent( + ts_ns_local_dispatch=ts_ns_local_dispatch, + instrument=intent.instrument, + client_order_id=intent.client_order_id, + side=intent.side, + order_type=intent.order_type, + intended_price=intent.intended_price, + intended_qty=intent.intended_qty, + time_in_force=intent.time_in_force, + intent_correlation_id=intent.intents_correlation_id, + dispatch_attempt_id=None, + runtime_correlation=None, ) - self._next_market_processing_position_index += 1 + self._process_canonical_event(order_submitted_event) def run( self, @@ -314,6 +340,11 @@ def run( for it in decision.accepted_now: if (it.instrument, it.client_order_id) in failed_keys: continue + if it.intent_type == "new": + self._process_canonical_order_submitted_event( + it, + ts_ns_local_dispatch=sim_now_ns, + ) self.strategy_state.mark_intent_sent( it.instrument, it.client_order_id, From 8157bc10a4b0162257dfa7d4813882a51ad4d8e0 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sat, 2 May 2026 19:43:56 +0000 Subject: [PATCH 11/36] m2 p3 sliceE3: wire runtime scheduled control wakeup realization into canonical 
ControlTimeEvent processing without changing existing scheduling/queue behavior --- requirements-dev.txt | 2 +- requirements.txt | 2 +- ...rategy_runner_canonical_market_adoption.py | 234 ++++++++++++++++++ .../backtest/engine/strategy_runner.py | 30 +++ 4 files changed, 266 insertions(+), 2 deletions(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index c0eb36a..bdceaef 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -331,7 +331,7 @@ tornado==6.5.4 # via bokeh tqdm==4.67.3 # via panel -trading-framework @ git+https://github.com/TradingChassis/core.git@71dc8effdb6f9ffb9ae289fe58dcbbaf87793e68 +trading-framework @ git+https://github.com/TradingChassis/core.git@d36347965f33e2735d233daa59fd8e5840604523 # via -r _git_deps.in typing-extensions==4.15.0 # via diff --git a/requirements.txt b/requirements.txt index 4f326c5..1732f9f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -307,7 +307,7 @@ tornado==6.5.4 # via bokeh tqdm==4.67.3 # via panel -trading-framework @ git+https://github.com/TradingChassis/core.git@71dc8effdb6f9ffb9ae289fe58dcbbaf87793e68 +trading-framework @ git+https://github.com/TradingChassis/core.git@d36347965f33e2735d233daa59fd8e5840604523 # via -r _git_deps.in typing-extensions==4.15.0 # via diff --git a/tests/runtime/test_strategy_runner_canonical_market_adoption.py b/tests/runtime/test_strategy_runner_canonical_market_adoption.py index 2d1af52..e3e5145 100644 --- a/tests/runtime/test_strategy_runner_canonical_market_adoption.py +++ b/tests/runtime/test_strategy_runner_canonical_market_adoption.py @@ -1,5 +1,6 @@ from __future__ import annotations +from collections import deque from types import SimpleNamespace from typing import Any @@ -10,6 +11,7 @@ BookLevel, BookPayload, CancelOrderIntent, + ControlTimeEvent, MarketEvent, NewOrderIntent, OrderSubmittedEvent, @@ -635,3 +637,235 @@ def _ok(*args: object, **kwargs: object) -> None: runner._process_canonical_market_event(_market_event(2)) assert called["count"] == 1 
assert runner._next_canonical_processing_position_index == 1 + + +def test_control_time_event_injected_when_scheduled_deadline_is_realized( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + runner._next_send_ts_ns_local = 5 + + control_events: list[ControlTimeEvent] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = (state, configuration) + if isinstance(entry.event, ControlTimeEvent): + control_events.append(entry.event) + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + venue = _StubVenue( + rc_sequence=[0, 0, 1], + ts_sequence=[1, 10, 11], + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert len(control_events) == 1 + event = control_events[0] + assert event.ts_ns_local_control == 10 + assert event.reason == "scheduled_control_recheck" + assert event.due_ts_ns_local == 5 + assert event.realized_ts_ns_local == 10 + assert event.obligation_reason == "rate_limit" + assert event.obligation_due_ts_ns_local == 5 + assert event.runtime_correlation is None + + +def test_no_control_time_event_when_no_deadline_scheduled( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + control_count = 0 + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + nonlocal control_count + _ = (state, configuration) + if isinstance(entry.event, ControlTimeEvent): + control_count += 1 + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1, 2, 3], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), 
recorder=_RecorderWrapper()) + + assert control_count == 0 + + +def test_no_control_time_event_when_deadline_not_yet_realized( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + runner._next_send_ts_ns_local = 50 + control_count = 0 + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + nonlocal control_count + _ = (state, configuration) + if isinstance(entry.event, ControlTimeEvent): + control_count += 1 + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + venue = _StubVenue( + rc_sequence=[0, 0, 1], + ts_sequence=[1, 10, 20], + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert control_count == 0 + + +def test_control_time_deadline_injection_is_not_periodic_for_same_deadline( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + runner._next_send_ts_ns_local = 5 + control_count = 0 + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + nonlocal control_count + _ = (state, configuration) + if isinstance(entry.event, ControlTimeEvent): + control_count += 1 + + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + venue = _StubVenue( + rc_sequence=[0, 0, 0, 1], + ts_sequence=[1, 10, 10, 11], + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert control_count == 1 + + +def test_control_time_event_processed_after_pop_and_before_gate( + monkeypatch: pytest.MonkeyPatch, +) -> None: + queued_intent = _new_intent() + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), 
+ ) + runner._next_send_ts_ns_local = 5 + + ordering: list[str] = [] + captured_raw_inputs: list[list[Any]] = [] + + def _spy_pop_queued_intents(instrument: str) -> list[Any]: + _ = instrument + ordering.append("pop") + return [queued_intent] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = (state, configuration) + if isinstance(entry.event, ControlTimeEvent): + ordering.append("control") + + def _spy_decide_intents(**kwargs: Any) -> GateDecision: + ordering.append("gate") + captured_raw_inputs.append(list(kwargs["raw_intents"])) + return _decision_for([]) + + monkeypatch.setattr(runner.strategy_state, "pop_queued_intents", _spy_pop_queued_intents) + monkeypatch.setattr(strategy_runner_module, "process_event_entry", _spy_process_event_entry) + monkeypatch.setattr(runner.risk, "decide_intents", _spy_decide_intents) + + venue = _StubVenue( + rc_sequence=[0, 0, 1], + ts_sequence=[1, 10, 11], + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert ordering == ["pop", "control", "gate"] + assert len(captured_raw_inputs) == 1 + assert [it.client_order_id for it in captured_raw_inputs[0]] == [queued_intent.client_order_id] + + +def test_global_canonical_counter_shared_with_control_time_market_and_submitted( + monkeypatch: pytest.MonkeyPatch, +) -> None: + new_intent = _new_intent() + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_EmitIntentsStrategy([new_intent]), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + runner._next_send_ts_ns_local = 5 + + monkeypatch.setattr( + runner.risk, + "decide_intents", + lambda **_: _decision_for([new_intent]), + ) + + positions: list[tuple[int, str]] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = (state, configuration) + positions.append((entry.position.index, type(entry.event).__name__)) + + monkeypatch.setattr(strategy_runner_module, 
"process_event_entry", _spy_process_event_entry) + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1, 10, 11], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert positions == [ + (0, "MarketEvent"), + (1, "ControlTimeEvent"), + (2, "OrderSubmittedEvent"), + ] + assert runner._next_canonical_processing_position_index == 3 + + +def test_fallback_second_boundary_wakeup_behavior_unchanged( + monkeypatch: pytest.MonkeyPatch, +) -> None: + intent = _new_intent() + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_EmitIntentsStrategy([intent]), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + runner.strategy_state.queued_intents.setdefault(runner.engine_cfg.instrument, deque()) + runner.strategy_state.queued_intents[runner.engine_cfg.instrument].append( + SimpleNamespace(intent=intent) + ) + + monkeypatch.setattr(runner.risk, "decide_intents", lambda **_: _decision_for([])) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1, 2_000_000_000, 2_000_000_001], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert runner._next_send_ts_ns_local == 3_000_000_000 diff --git a/trading_runtime/backtest/engine/strategy_runner.py b/trading_runtime/backtest/engine/strategy_runner.py index f14599c..216063b 100644 --- a/trading_runtime/backtest/engine/strategy_runner.py +++ b/trading_runtime/backtest/engine/strategy_runner.py @@ -17,6 +17,7 @@ from trading_framework.core.domain.types import ( BookLevel, BookPayload, + ControlTimeEvent, MarketEvent, NewOrderIntent, OrderIntent, @@ -78,6 +79,7 @@ def __init__( self._next_send_ts_ns_local: int | None = None self._next_canonical_processing_position_index: int = 0 + self._last_injected_control_deadline_ns: int | None = None def _process_canonical_event(self, event: object) -> None: entry = EventStreamEntry( @@ -156,6 +158,23 @@ def 
_process_canonical_order_submitted_event( ) self._process_canonical_event(order_submitted_event) + def _process_canonical_control_time_event( + self, + *, + sim_now_ns: int, + scheduled_deadline_ns: int, + ) -> None: + control_time_event = ControlTimeEvent( + ts_ns_local_control=sim_now_ns, + reason="scheduled_control_recheck", + due_ts_ns_local=scheduled_deadline_ns, + realized_ts_ns_local=sim_now_ns, + obligation_reason="rate_limit", + obligation_due_ts_ns_local=scheduled_deadline_ns, + runtime_correlation=None, + ) + self._process_canonical_event(control_time_event) + def run( self, venue: VenueAdapter, @@ -306,13 +325,24 @@ def run( # ----------------------------------------------------------------- # Queue flush # ----------------------------------------------------------------- + scheduled_deadline_ns: int | None = None if ( self._next_send_ts_ns_local is not None and sim_now_ns >= self._next_send_ts_ns_local ): + scheduled_deadline_ns = self._next_send_ts_ns_local raw_intents.extend( self.strategy_state.pop_queued_intents(instrument) ) + if ( + scheduled_deadline_ns + != self._last_injected_control_deadline_ns + ): + self._process_canonical_control_time_event( + sim_now_ns=sim_now_ns, + scheduled_deadline_ns=scheduled_deadline_ns, + ) + self._last_injected_control_deadline_ns = scheduled_deadline_ns # ----------------------------------------------------------------- # Gate + execution From 3012516ef2837f444f4cb9f35585d68d5065e9c8 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 10:39:04 +0000 Subject: [PATCH 12/36] m2 p4 s5: run bounded probe to determine whether current hftbacktest bindings / current core-runtime wrappers can expose enough structured execution-feedback data to support a future ExecutionFeedbackRecordSource --- ...st_hftbacktest_execution_feedback_probe.py | 127 ++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 tests/runtime/test_hftbacktest_execution_feedback_probe.py diff --git 
a/tests/runtime/test_hftbacktest_execution_feedback_probe.py b/tests/runtime/test_hftbacktest_execution_feedback_probe.py new file mode 100644 index 0000000..87ced53 --- /dev/null +++ b/tests/runtime/test_hftbacktest_execution_feedback_probe.py @@ -0,0 +1,127 @@ +"""Phase 4K probe: hftbacktest execution feedback feasibility surface.""" + +from __future__ import annotations + +import inspect +from dataclasses import dataclass +from typing import Any + +import pytest + +from trading_runtime.backtest.adapters.execution import HftBacktestExecutionAdapter +from trading_runtime.backtest.adapters.venue import HftBacktestVenueAdapter +from trading_runtime.backtest.engine.strategy_runner import HftStrategyRunner + +hftbacktest = pytest.importorskip("hftbacktest") +from hftbacktest import types as hbt_types # type: ignore # noqa: E402 +from hftbacktest.binding import ROIVectorMarketDepthBacktest # type: ignore # noqa: E402 + + +@dataclass(frozen=True) +class ProbeRow: + source: str + explicit_feedback_boundary: bool + authoritative_filled_price: bool + authoritative_cum_filled_qty: bool + authoritative_liquidity_flag: bool + deterministic_client_order_id_correlation: bool + deterministic_source_sequence: bool + batching_safe: bool + eligible_for_execution_feedback_record_source: bool + + +def _public_methods(cls: type[Any]) -> set[str]: + return { + name + for name, member in inspect.getmembers(cls) + if callable(member) and not name.startswith("_") + } + + +def test_probe_wrapper_surface_is_snapshot_only_for_rc3_branch() -> None: + venue_methods = _public_methods(HftBacktestVenueAdapter) + execution_methods = _public_methods(HftBacktestExecutionAdapter) + runner_source = inspect.getsource(HftStrategyRunner.run) + + assert "read_orders_snapshot" in venue_methods + assert "wait_next" in venue_methods + assert "apply_intents" in execution_methods + + # Probe fact: no adapter-facing execution feedback source is currently exposed. 
+ assert "wait_order_response" not in venue_methods + assert "drain_execution_feedback_records" not in venue_methods + assert "drain_execution_feedback_records" not in execution_methods + + # Probe fact: strategy runner rc==3 branch is snapshot materialization. + assert "if rc == 3" in runner_source + assert "read_orders_snapshot()" in runner_source + assert "state_values, orders = venue.read_orders_snapshot()" in runner_source + assert "wait_order_response" not in runner_source + + +def test_probe_hftbacktest_binding_response_surface() -> None: + _ = hftbacktest + binding_methods = _public_methods(ROIVectorMarketDepthBacktest) + + assert "wait_next_feed" in binding_methods + assert "wait_order_response" in binding_methods + assert "orders" in binding_methods + assert "state_values" in binding_methods + assert "last_trades" in binding_methods + + # Probe fact: there is no direct structured feedback drain API. + assert "drain_execution_feedback_records" not in binding_methods + assert "execution_feedback_records" not in binding_methods + + +def test_probe_order_dtype_diagnostics_and_contract_gaps() -> None: + field_names = set(hbt_types.order_dtype.names or ()) + + # Snapshot/order fields currently visible through `orders()`. + assert {"order_id", "exec_price_tick", "exec_qty", "status", "maker"} <= field_names + + # Required for the conceptual source contract but not present on raw order dtype. 
+ assert "client_order_id" not in field_names + assert "source_sequence" not in field_names + assert "liquidity_flag" not in field_names + + +def test_probe_contract_matrix_for_candidates_a_b_c() -> None: + matrix = [ + ProbeRow( + source="A: direct structured order-response channel", + explicit_feedback_boundary=False, + authoritative_filled_price=False, + authoritative_cum_filled_qty=False, + authoritative_liquidity_flag=False, + deterministic_client_order_id_correlation=False, + deterministic_source_sequence=False, + batching_safe=False, + eligible_for_execution_feedback_record_source=False, + ), + ProbeRow( + source="B: rc==3 wakeup + immediate structured lookup", + explicit_feedback_boundary=False, + authoritative_filled_price=False, + authoritative_cum_filled_qty=False, + authoritative_liquidity_flag=False, + deterministic_client_order_id_correlation=False, + deterministic_source_sequence=False, + batching_safe=False, + eligible_for_execution_feedback_record_source=False, + ), + ProbeRow( + source="C: snapshot deltas (diagnostic only)", + explicit_feedback_boundary=False, + authoritative_filled_price=False, + authoritative_cum_filled_qty=False, + authoritative_liquidity_flag=False, + deterministic_client_order_id_correlation=False, + deterministic_source_sequence=False, + batching_safe=False, + eligible_for_execution_feedback_record_source=False, + ), + ] + + assert all(not row.eligible_for_execution_feedback_record_source for row in matrix) + assert all(not row.explicit_feedback_boundary for row in matrix) From 4db3579d86383aaea3032a72c64ad2851f91b2d4 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 10:44:56 +0000 Subject: [PATCH 13/36] m2 p4 s6: run one final targeted probe to determine whether wait_order_response(order_id, timeout) plus immediate orders().get(order_id) can expose a sufficiently explicit per-order response boundary, or whether it remains snapshot-derived and ineligible --- ...st_hftbacktest_execution_feedback_probe.py | 90 
++++++++++++++++++- 1 file changed, 89 insertions(+), 1 deletion(-) diff --git a/tests/runtime/test_hftbacktest_execution_feedback_probe.py b/tests/runtime/test_hftbacktest_execution_feedback_probe.py index 87ced53..d975588 100644 --- a/tests/runtime/test_hftbacktest_execution_feedback_probe.py +++ b/tests/runtime/test_hftbacktest_execution_feedback_probe.py @@ -8,7 +8,10 @@ import pytest -from trading_runtime.backtest.adapters.execution import HftBacktestExecutionAdapter +from trading_runtime.backtest.adapters.execution import ( + HftBacktestExecutionAdapter, + _to_i64_order_id, +) from trading_runtime.backtest.adapters.venue import HftBacktestVenueAdapter from trading_runtime.backtest.engine.strategy_runner import HftStrategyRunner @@ -86,6 +89,91 @@ def test_probe_order_dtype_diagnostics_and_contract_gaps() -> None: assert "liquidity_flag" not in field_names +def test_probe_wait_order_response_is_status_code_only_and_timeout_ambiguous() -> None: + source = inspect.getsource(ROIVectorMarketDepthBacktest.wait_order_response) + + assert "def wait_order_response" in source + assert "-> int64" in source + assert "reaches the timeout" in source + assert "Returns:" in source + assert "order response" in source + + # Probe fact: no structured payload object is returned from this method. + assert "dict" not in source + assert "payload" not in source + assert "record" not in source + + +def test_probe_wait_next_feed_response_signal_is_any_order_response_only() -> None: + source = inspect.getsource(ROIVectorMarketDepthBacktest.wait_next_feed) + + assert "include_order_resp" in source + assert "`3` when it receives an order response" in source + assert "any order response" in source + assert "source_sequence" not in source + assert "order_id" not in source + + +def test_probe_immediate_order_lookup_fields_and_missing_boundary_fields() -> None: + field_names = set(hbt_types.order_dtype.names or ()) + + # Immediate lookup provides current order state fields. 
+ assert { + "order_id", + "status", + "req", + "exec_qty", + "exec_price_tick", + "maker", + "local_timestamp", + "exch_timestamp", + "leaves_qty", + } <= field_names + + # Missing record-boundary/correlation fields for source contract. + assert "client_order_id" not in field_names + assert "source_sequence" not in field_names + assert "explicit_update_kind" not in field_names + assert "response_sequence" not in field_names + assert "cum_filled_qty" not in field_names + + +def test_probe_client_order_id_correlation_is_one_way_without_reverse_map() -> None: + # Deterministic forward mapping exists. + assert _to_i64_order_id("cid-123") == _to_i64_order_id("cid-123") + assert _to_i64_order_id("42") == 42 + + adapter_fields = set(HftBacktestExecutionAdapter.__dataclass_fields__.keys()) + apply_intents_source = inspect.getsource(HftBacktestExecutionAdapter.apply_intents) + id_mapping_source = inspect.getsource(_to_i64_order_id) + + # Probe fact: adapter persists no reverse order_id -> client_order_id correlation map. 
+ assert adapter_fields == {"hbt", "asset_no"} + assert "blake2b" in id_mapping_source + assert "_to_i64_order_id(intent.client_order_id)" in apply_intents_source + assert "reverse" not in apply_intents_source + assert "mapping" not in apply_intents_source + + +def test_probe_wait_order_response_plus_immediate_lookup_candidate_stays_ineligible() -> None: + row = ProbeRow( + source="L: wait_order_response + immediate orders().get(order_id)", + explicit_feedback_boundary=False, + authoritative_filled_price=False, + authoritative_cum_filled_qty=False, + authoritative_liquidity_flag=False, + deterministic_client_order_id_correlation=False, + deterministic_source_sequence=False, + batching_safe=False, + eligible_for_execution_feedback_record_source=False, + ) + + assert row.eligible_for_execution_feedback_record_source is False + assert row.explicit_feedback_boundary is False + assert row.deterministic_source_sequence is False + assert row.deterministic_client_order_id_correlation is False + + def test_probe_contract_matrix_for_candidates_a_b_c() -> None: matrix = [ ProbeRow( From 6a473bb1a51524bf45ed3fb980cfca4241b94bb1 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 16:37:48 +0000 Subject: [PATCH 14/36] m2 p6 s5: add the smallest runtime tests that directly cover the two EventStreamCursor characterization gaps --- ...rategy_runner_canonical_market_adoption.py | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/tests/runtime/test_strategy_runner_canonical_market_adoption.py b/tests/runtime/test_strategy_runner_canonical_market_adoption.py index e3e5145..19e6348 100644 --- a/tests/runtime/test_strategy_runner_canonical_market_adoption.py +++ b/tests/runtime/test_strategy_runner_canonical_market_adoption.py @@ -273,6 +273,33 @@ def _spy_process_event_entry(state: object, entry: object, *, configuration: obj assert runner._next_canonical_processing_position_index == 2 +def test_first_canonical_event_uses_processing_position_zero( + 
monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = object.__new__(HftStrategyRunner) + runner.strategy_state = object() + runner._core_cfg = _core_cfg() + runner._next_canonical_processing_position_index = 0 + + captured: list[int] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = state + assert configuration is runner._core_cfg + captured.append(entry.position.index) + + monkeypatch.setattr( + strategy_runner_module, + "process_event_entry", + _spy_process_event_entry, + ) + + runner._process_canonical_market_event(_market_event(1)) + + assert captured == [0] + assert runner._next_canonical_processing_position_index == 1 + + def test_market_branch_calls_canonical_boundary_not_update_market( monkeypatch: pytest.MonkeyPatch, ) -> None: @@ -393,6 +420,67 @@ def _spy_ingest_order_snapshots(*args: object, **kwargs: object) -> None: assert calls["update_account"] == 1 assert calls["ingest_order_snapshots"] == 1 + assert runner._next_canonical_processing_position_index == 0 + + +def test_snapshot_only_rc3_does_not_consume_canonical_cursor_position( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + + calls = {"update_account": 0, "ingest_order_snapshots": 0, "canonical": 0} + + def _spy_update_account(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["update_account"] += 1 + + def _spy_ingest_order_snapshots(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["ingest_order_snapshots"] += 1 + + def _spy_process_event_entry(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["canonical"] += 1 + + monkeypatch.setattr(runner.strategy_state, "update_account", _spy_update_account) + monkeypatch.setattr( + runner.strategy_state, + "ingest_order_snapshots", + _spy_ingest_order_snapshots, + ) + monkeypatch.setattr( + 
strategy_runner_module, + "process_event_entry", + _spy_process_event_entry, + ) + + venue = _StubVenue( + rc_sequence=[0, 3, 1], + ts_sequence=[1, 2, 3], + state_values=SimpleNamespace( + position=0.0, + balance=1000.0, + fee=0.0, + trading_volume=0.0, + trading_value=0.0, + num_trades=0, + ), + orders={}, + ) + + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert calls == { + "update_account": 1, + "ingest_order_snapshots": 1, + "canonical": 0, + } + assert runner._next_canonical_processing_position_index == 0 def test_successful_new_dispatch_processes_order_submitted_before_mark_sent( From 1dc20c3d9dd9d0be4fecac57281f22294c74309c Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 16:53:46 +0000 Subject: [PATCH 15/36] m2 p6 s6: replace current raw runtime integer cursor in HftStrategyRunner with runtime-owned EventStreamCursor object while preserving exact behavior --- tests/runtime/test_event_stream_cursor.py | 44 +++++++++++++++++++ ...rategy_runner_canonical_market_adoption.py | 31 ++++++------- .../backtest/engine/event_stream_cursor.py | 29 ++++++++++++ .../backtest/engine/strategy_runner.py | 11 +++-- 4 files changed, 94 insertions(+), 21 deletions(-) create mode 100644 tests/runtime/test_event_stream_cursor.py create mode 100644 trading_runtime/backtest/engine/event_stream_cursor.py diff --git a/tests/runtime/test_event_stream_cursor.py b/tests/runtime/test_event_stream_cursor.py new file mode 100644 index 0000000..9ef2d08 --- /dev/null +++ b/tests/runtime/test_event_stream_cursor.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +import pytest +from trading_framework.core.domain.processing_order import ProcessingPosition + +from trading_runtime.backtest.engine.event_stream_cursor import EventStreamCursor + + +def test_event_stream_cursor_starts_at_zero() -> None: + cursor = EventStreamCursor() + assert cursor.next_index == 0 + + +def test_attempt_position_does_not_advance_cursor() -> None: + cursor 
= EventStreamCursor() + attempted = cursor.attempt_position() + assert attempted.index == 0 + assert cursor.next_index == 0 + + +def test_commit_success_advances_by_one() -> None: + cursor = EventStreamCursor() + attempted = cursor.attempt_position() + cursor.commit_success(attempted) + assert cursor.next_index == 1 + + +def test_commit_success_rejects_mismatched_position() -> None: + cursor = EventStreamCursor() + with pytest.raises(ValueError, match="Committed position does not match expected next index"): + cursor.commit_success(ProcessingPosition(index=1)) + assert cursor.next_index == 0 + + +def test_repeated_attempt_commit_produces_sequential_positions() -> None: + cursor = EventStreamCursor() + observed: list[int] = [] + for _ in range(3): + position = cursor.attempt_position() + observed.append(position.index) + cursor.commit_success(position) + + assert observed == [0, 1, 2] + assert cursor.next_index == 3 diff --git a/tests/runtime/test_strategy_runner_canonical_market_adoption.py b/tests/runtime/test_strategy_runner_canonical_market_adoption.py index 19e6348..b14208a 100644 --- a/tests/runtime/test_strategy_runner_canonical_market_adoption.py +++ b/tests/runtime/test_strategy_runner_canonical_market_adoption.py @@ -25,6 +25,7 @@ from trading_framework.strategies.base import Strategy import trading_runtime.backtest.engine.strategy_runner as strategy_runner_module +from trading_runtime.backtest.engine.event_stream_cursor import EventStreamCursor from trading_runtime.backtest.engine.hft_engine import HftEngineConfig from trading_runtime.backtest.engine.strategy_runner import HftStrategyRunner @@ -250,7 +251,7 @@ def test_process_market_event_routes_through_event_entry_with_core_configuration runner = object.__new__(HftStrategyRunner) runner.strategy_state = object() runner._core_cfg = _core_cfg() - runner._next_canonical_processing_position_index = 0 + runner._event_stream_cursor = EventStreamCursor() captured: list[tuple[int, object]] = [] @@ -270,7 +271,7 
@@ def _spy_process_event_entry(state: object, entry: object, *, configuration: obj assert [idx for idx, _ in captured] == [0, 1] assert captured[0][1] is runner._core_cfg assert captured[1][1] is runner._core_cfg - assert runner._next_canonical_processing_position_index == 2 + assert runner._event_stream_cursor.next_index == 2 def test_first_canonical_event_uses_processing_position_zero( @@ -279,7 +280,7 @@ def test_first_canonical_event_uses_processing_position_zero( runner = object.__new__(HftStrategyRunner) runner.strategy_state = object() runner._core_cfg = _core_cfg() - runner._next_canonical_processing_position_index = 0 + runner._event_stream_cursor = EventStreamCursor() captured: list[int] = [] @@ -297,7 +298,7 @@ def _spy_process_event_entry(state: object, entry: object, *, configuration: obj runner._process_canonical_market_event(_market_event(1)) assert captured == [0] - assert runner._next_canonical_processing_position_index == 1 + assert runner._event_stream_cursor.next_index == 1 def test_market_branch_calls_canonical_boundary_not_update_market( @@ -346,28 +347,28 @@ def test_missing_core_cfg_fails_before_market_mutation() -> None: runner = object.__new__(HftStrategyRunner) runner.strategy_state = StrategyState(event_bus=EventBus(sinks=[])) runner._core_cfg = None - runner._next_canonical_processing_position_index = 0 + runner._event_stream_cursor = EventStreamCursor() with pytest.raises(ValueError, match="CoreConfiguration is required"): runner._process_canonical_market_event(_market_event(42)) assert runner.strategy_state.market == {} assert runner.strategy_state._last_processing_position_index is None - assert runner._next_canonical_processing_position_index == 0 + assert runner._event_stream_cursor.next_index == 0 def test_invalid_core_cfg_type_fails_before_market_mutation() -> None: runner = object.__new__(HftStrategyRunner) runner.strategy_state = StrategyState(event_bus=EventBus(sinks=[])) runner._core_cfg = object() - 
runner._next_canonical_processing_position_index = 0 + runner._event_stream_cursor = EventStreamCursor() with pytest.raises(TypeError, match="configuration must be CoreConfiguration or None"): runner._process_canonical_market_event(_market_event(42)) assert runner.strategy_state.market == {} assert runner.strategy_state._last_processing_position_index is None - assert runner._next_canonical_processing_position_index == 0 + assert runner._event_stream_cursor.next_index == 0 def test_order_snapshot_branch_keeps_compatibility_path( @@ -420,7 +421,7 @@ def _spy_ingest_order_snapshots(*args: object, **kwargs: object) -> None: assert calls["update_account"] == 1 assert calls["ingest_order_snapshots"] == 1 - assert runner._next_canonical_processing_position_index == 0 + assert runner._event_stream_cursor.next_index == 0 def test_snapshot_only_rc3_does_not_consume_canonical_cursor_position( @@ -480,7 +481,7 @@ def _spy_process_event_entry(*args: object, **kwargs: object) -> None: "ingest_order_snapshots": 1, "canonical": 0, } - assert runner._next_canonical_processing_position_index == 0 + assert runner._event_stream_cursor.next_index == 0 def test_successful_new_dispatch_processes_order_submitted_before_mark_sent( @@ -695,7 +696,7 @@ def _spy_process_event_entry(state: object, entry: object, *, configuration: obj (0, "MarketEvent"), (1, "OrderSubmittedEvent"), ] - assert runner._next_canonical_processing_position_index == 2 + assert runner._event_stream_cursor.next_index == 2 def test_canonical_counter_increments_only_after_successful_canonical_processing( @@ -704,7 +705,7 @@ def test_canonical_counter_increments_only_after_successful_canonical_processing runner = object.__new__(HftStrategyRunner) runner.strategy_state = object() runner._core_cfg = _core_cfg() - runner._next_canonical_processing_position_index = 0 + runner._event_stream_cursor = EventStreamCursor() def _fail(*args: object, **kwargs: object) -> None: _ = (args, kwargs) @@ -713,7 +714,7 @@ def _fail(*args: 
object, **kwargs: object) -> None: monkeypatch.setattr(strategy_runner_module, "process_event_entry", _fail) with pytest.raises(RuntimeError, match="boom"): runner._process_canonical_market_event(_market_event(1)) - assert runner._next_canonical_processing_position_index == 0 + assert runner._event_stream_cursor.next_index == 0 called = {"count": 0} @@ -724,7 +725,7 @@ def _ok(*args: object, **kwargs: object) -> None: monkeypatch.setattr(strategy_runner_module, "process_event_entry", _ok) runner._process_canonical_market_event(_market_event(2)) assert called["count"] == 1 - assert runner._next_canonical_processing_position_index == 1 + assert runner._event_stream_cursor.next_index == 1 def test_control_time_event_injected_when_scheduled_deadline_is_realized( @@ -929,7 +930,7 @@ def _spy_process_event_entry(state: object, entry: object, *, configuration: obj (1, "ControlTimeEvent"), (2, "OrderSubmittedEvent"), ] - assert runner._next_canonical_processing_position_index == 3 + assert runner._event_stream_cursor.next_index == 3 def test_fallback_second_boundary_wakeup_behavior_unchanged( diff --git a/trading_runtime/backtest/engine/event_stream_cursor.py b/trading_runtime/backtest/engine/event_stream_cursor.py new file mode 100644 index 0000000..2621f8a --- /dev/null +++ b/trading_runtime/backtest/engine/event_stream_cursor.py @@ -0,0 +1,29 @@ +"""Runtime-owned canonical processing position cursor.""" + +from __future__ import annotations + +from trading_framework.core.domain.processing_order import ProcessingPosition + + +class EventStreamCursor: + """Ordering-only helper for canonical ProcessingPosition allocation.""" + + def __init__(self, *, start_index: int = 0) -> None: + if start_index < 0: + raise ValueError("start_index must be >= 0") + self._next_index = start_index + + @property + def next_index(self) -> int: + return self._next_index + + def attempt_position(self) -> ProcessingPosition: + return ProcessingPosition(index=self._next_index) + + def 
commit_success(self, position: ProcessingPosition) -> None: + if position.index != self._next_index: + raise ValueError( + "Committed position does not match expected next index: " + f"expected={self._next_index} actual={position.index}" + ) + self._next_index += 1 diff --git a/trading_runtime/backtest/engine/strategy_runner.py b/trading_runtime/backtest/engine/strategy_runner.py index 216063b..3ab5eb5 100644 --- a/trading_runtime/backtest/engine/strategy_runner.py +++ b/trading_runtime/backtest/engine/strategy_runner.py @@ -11,7 +11,6 @@ from trading_framework.core.domain.processing import process_event_entry from trading_framework.core.domain.processing_order import ( EventStreamEntry, - ProcessingPosition, ) from trading_framework.core.domain.state import StrategyState from trading_framework.core.domain.types import ( @@ -31,6 +30,7 @@ from trading_framework.core.risk.risk_config import RiskConfig from trading_framework.core.risk.risk_engine import RejectedIntent, RiskEngine +from trading_runtime.backtest.engine.event_stream_cursor import EventStreamCursor from trading_runtime.core.events.sinks.file_recorder import FileRecorderSink if TYPE_CHECKING: @@ -78,14 +78,13 @@ def __init__( ) self._next_send_ts_ns_local: int | None = None - self._next_canonical_processing_position_index: int = 0 + self._event_stream_cursor = EventStreamCursor() self._last_injected_control_deadline_ns: int | None = None def _process_canonical_event(self, event: object) -> None: + position = self._event_stream_cursor.attempt_position() entry = EventStreamEntry( - position=ProcessingPosition( - index=self._next_canonical_processing_position_index, - ), + position=position, event=event, ) process_event_entry( @@ -93,7 +92,7 @@ def _process_canonical_event(self, event: object) -> None: entry, configuration=self._core_cfg, ) - self._next_canonical_processing_position_index += 1 + self._event_stream_cursor.commit_success(position) def _build_event_bus( self, From 
db6581ab90a7c907ac8a63f5a414f2dfee7a8c19 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 18:06:30 +0000 Subject: [PATCH 16/36] m2 p7 s1: add core-runtime implementation-facing design note for future Venue Adapter Abstraction using split capability protocols --- README.md | 5 + docs/venue-adapter-abstraction-design-v1.md | 203 ++++++++++++++++++++ 2 files changed, 208 insertions(+) create mode 100644 docs/venue-adapter-abstraction-design-v1.md diff --git a/README.md b/README.md index f2705b7..2bd4138 100644 --- a/README.md +++ b/README.md @@ -50,11 +50,16 @@ deterministic runtime environments. ``` .github/workflows/ CI pipelines (tests, Argo template deploy) argo/ Argo workflow templates +docs/ Runtime design notes (implementation-facing) scripts/ environment & build helper scripts trading_runtime/ Python runtime entrypoints tests/ deterministic test data & validation ``` +Implementation-facing design notes: + +- `docs/venue-adapter-abstraction-design-v1.md` + ### Key runtime modules ``` diff --git a/docs/venue-adapter-abstraction-design-v1.md b/docs/venue-adapter-abstraction-design-v1.md new file mode 100644 index 0000000..47fa162 --- /dev/null +++ b/docs/venue-adapter-abstraction-design-v1.md @@ -0,0 +1,203 @@ +# Venue Adapter Abstraction Design v1 (core-runtime) + +--- + +## Purpose and scope + +This document defines an implementation-facing design note for a future +Venue Adapter Abstraction in `core-runtime` using split capability protocols. 
+ +This is a docs-only slice: + +- it does not implement adapter APIs or protocols; +- it does not modify production code or tests; +- it does not change runtime behavior; +- it does not implement canonical `FillEvent` ingress; +- it does not canonicalize `OrderStateEvent`; +- it does not change `DerivedFillEvent` behavior; +- it does not change snapshot ingestion behavior; +- it does not change reducers or event taxonomy; +- it does not implement `ProcessingContext`; +- it does not implement replay/storage/EventStream persistence; +- it does not rename packages or directories. + +`VADN-01` - `core` remains venue-agnostic and must continue to consume canonical +Event Stream input and explicit configuration through existing boundaries. + +`VADN-02` - Adapters expose source capabilities; runtime owns orchestration and +maps capability outputs into canonical `EventStreamEntry` or compatibility paths. + +`VADN-03` - This note follows the split capability direction from Phase 7A and +does not introduce implementation API changes in this slice. 
+ +--- + +## Contract references + +This note is implementation-facing and must stay consistent with: + +- `core/docs/venue-adapter-capability-model-v1.md` +- `core/docs/semantic-core-upgrade-milestone-closure-v1.md` +- `core/docs/runtime-execution-feedback-contract-v1.md` +- `core/docs/runtime-adapter-execution-feedback-source-contract-v1.md` +- `core/docs/post-submission-lifecycle-compatibility-map-v1.md` +- `core/docs/event-stream-cursor-characterization-v1.md` + +Current runtime anchors: + +- `core-runtime/trading_runtime/backtest/adapters/venue.py` +- `core-runtime/trading_runtime/backtest/adapters/execution.py` +- `core-runtime/trading_runtime/backtest/engine/strategy_runner.py` +- `core-runtime/trading_runtime/backtest/engine/event_stream_cursor.py` + +--- + +## Proposed split capability protocols (future, non-implemented) + +`VADN-04` - Future abstraction should be split by source responsibility and +authority class rather than one monolithic adapter interface. + +Conceptual capability names for future implementation planning: + +- `VenueEventWaiter` (or `WakeupSource`) +- `VenueClock` (or runtime clock boundary view) +- `MarketInputSource` +- `OrderSubmissionGateway` +- `OrderSnapshotSource` +- `AccountSnapshotSource` +- `ExecutionFeedbackRecordSource` + +`VADN-05` - Names above are conceptual and documentation-facing in this slice; +they do not define production protocol signatures yet. 
+ +--- + +## Capability classification matrix + +| capability | responsibility | authority classification | current hftbacktest mapping | future live venue possibility | guardrails / non-goals | +| --- | --- | --- | --- | --- | --- | +| `VenueEventWaiter` / `WakeupSource` | wakeup signaling and wait control for runtime loop progression | runtime/internal only | mapped by `wait_next(...)` wrapper calling `wait_next_feed(...)` | may support richer wakeup sources while preserving runner loop ownership | wakeup signaling is not canonical Event authority; no branch ordering changes in this slice | +| `VenueClock` (runtime clock boundary view) | provide adopted venue-local timestamp axis used by runtime timestamp update | runtime/internal only | mapped by `current_timestamp_ns()` wrapper | may expose richer venue receipt/event-time metadata while runtime keeps canonical ordering by `ProcessingPosition` | clock/timestamp must not be treated as `ProcessingOrder` authority | +| `MarketInputSource` | provide market snapshots/deltas for canonical market mapping | canonical event capable | `read_market_snapshot()` mapped to canonical `MarketEvent` in runner | live adapters may map native book/trade feeds into canonical market events under runtime mapping | no hidden mutable snapshot promotion to canonical semantics outside boundary mapping | +| `OrderSubmissionGateway` | submit/modify/cancel outbound intents and expose dispatch result boundary | canonical event capable (submission boundary), plus runtime/internal transport | `apply_intents(...)`; successful `new` dispatch leads to canonical `OrderSubmittedEvent` | live adapters may provide richer dispatch metadata while preserving current canonical submission boundary semantics | no post-submission execution authority from synchronous return codes | +| `OrderSnapshotSource` | provide order snapshots for compatibility lifecycle materialization | compatibility projection only | `read_orders_snapshot()` -> 
`ingest_order_snapshots()` -> `OrderStateEvent` path | may remain compatibility sidecar where canonical execution feedback is unavailable | no `OrderStateEvent` canonicalization; no snapshot-to-canonical promotion | +| `AccountSnapshotSource` | provide account snapshots for runtime/account views and compatibility projections | compatibility projection only / runtime/internal only | `state_values` adoption into `update_account(...)` | live adapters may offer richer account views without canonical authority by default | no implicit canonical account event expansion in this slice | +| `ExecutionFeedbackRecordSource` | provide authoritative execution-feedback records for future canonical `FillEvent` mapping | optional future capability (canonical only after REFC/RAEFSC gates) | unsupported/ineligible today for hftbacktest integration | live adapters may satisfy this with native execution reports and deterministic source sequencing | no `FillEvent` ingress implementation here; no synthetic required-field authority | + +--- + +## hftbacktest capability map (current snapshot) + +`VADN-06` - Current hftbacktest integration under this model: + +- `MarketInputSource`: supported; canonical `MarketEvent` mapping path exists. +- `OrderSubmissionGateway`: supported for successful `new` dispatch boundary via + canonical `OrderSubmittedEvent` path. +- `OrderSnapshotSource`: supported; remains compatibility-only. +- `AccountSnapshotSource`: supported for compatibility/runtime-internal account + snapshot adoption. +- `VenueEventWaiter` + `VenueClock`: supported through existing wrappers. +- `ExecutionFeedbackRecordSource`: unsupported/ineligible today. + +`VADN-07` - Compatibility authority remains frozen for post-submission lifecycle +progression (`OrderStateEvent` / `DerivedFillEvent` path unchanged). 
+ +--- + +## hftbacktest internals that remain internal + +`VADN-08` - The following are adapter/runtime internals and must not be treated +as canonical source semantics: + +- rc wakeup codes and branch signaling (`rc == 1/2/3`); +- hftbacktest order/depth object schemas; +- numeric enum mapping (time-in-force/order type) in execution adapter; +- string-to-`int64` order id adaptation at adapter boundary; +- recorder plumbing (`record(...)` wrapper behavior). + +--- + +## Future live venue expansion (non-implemented) + +`VADN-09` - A future live adapter may satisfy additional capabilities without +changing `core` semantics: + +- native execution reports exposed as `ExecutionFeedbackRecordSource`; +- source-authoritative `liquidity_flag` values; +- stable canonical correlation to `instrument + client_order_id`; +- deterministic strictly monotone non-timestamp `source_sequence`. + +`VADN-10` - Runner remains owner of global merge into `EventStreamEntry` with +`ProcessingPosition` ordering authority across canonical categories. + +`VADN-11` - `core` remains unchanged and venue-agnostic under this expansion. + +--- + +## Boundary rules + +`VADN-12` - `core` must not import runtime adapter classes. + +`VADN-13` - `core` must not know hftbacktest-specific APIs or structures. + +`VADN-14` - Runtime owns adapter orchestration and capability composition. + +`VADN-15` - Runtime owns mapping from adapter capability outputs to canonical +`EventStreamEntry` or compatibility ingestion paths. + +`VADN-16` - Adapter capabilities must not mutate `StrategyState` directly. + +`VADN-17` - Adapter capabilities must not call `process_event_entry` directly. + +`VADN-18` - Adapters expose source capabilities only; semantic authority is +decided at runtime boundary mapping under existing contracts. + +--- + +## Explicit non-goals for this slice + +`VADN-19` - No adapter protocol implementation. + +`VADN-20` - No runtime branch ordering or wakeup behavior changes. 
+
+`VADN-21` - No canonical `FillEvent` ingress implementation.
+
+`VADN-22` - No `OrderStateEvent` canonicalization.
+
+`VADN-23` - No `DerivedFillEvent` behavior change.
+
+`VADN-24` - No snapshot lifecycle rewrite.
+
+`VADN-25` - No reducer or event taxonomy changes.
+
+`VADN-26` - No `ProcessingContext` implementation.
+
+`VADN-27` - No replay/storage/EventStream persistence implementation.
+
+`VADN-28` - No package or directory rename.
+
+---
+
+## Future implementation prerequisites (before protocol introduction)
+
+`VADN-29` - Define a protocol-by-protocol migration strategy (incremental,
+behavior-preserving) before introducing concrete protocol interfaces.
+
+`VADN-30` - Prepare a characterization-first test plan for current behavior and
+ordering invariants before abstraction refactors.
+
+`VADN-31` - Require hftbacktest parity tests proving no behavior drift across:
+
+- wakeup semantics;
+- market mapping path;
+- snapshot compatibility path;
+- submission boundary behavior;
+- canonical cursor/position sequencing invariants.
+
+`VADN-32` - Require explicit no-behavior-change proof for each migration step.
+
+`VADN-33` - Keep `ExecutionFeedbackRecordSource` gated by REFC/RAEFSC contracts;
+no implementation planning should bypass those gate clauses.
+ +--- + From 799153c35e296fb4ba34048230aa2223af83703a Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 18:22:22 +0000 Subject: [PATCH 17/36] m2 p7 s2: add runner-level characterization tests needed before split adapter capability protocol implementation --- ...rategy_runner_canonical_market_adoption.py | 133 +++++++++++++++++- 1 file changed, 131 insertions(+), 2 deletions(-) diff --git a/tests/runtime/test_strategy_runner_canonical_market_adoption.py b/tests/runtime/test_strategy_runner_canonical_market_adoption.py index b14208a..f0d8431 100644 --- a/tests/runtime/test_strategy_runner_canonical_market_adoption.py +++ b/tests/runtime/test_strategy_runner_canonical_market_adoption.py @@ -12,6 +12,7 @@ BookPayload, CancelOrderIntent, ControlTimeEvent, + FillEvent, MarketEvent, NewOrderIntent, OrderSubmittedEvent, @@ -27,7 +28,10 @@ import trading_runtime.backtest.engine.strategy_runner as strategy_runner_module from trading_runtime.backtest.engine.event_stream_cursor import EventStreamCursor from trading_runtime.backtest.engine.hft_engine import HftEngineConfig -from trading_runtime.backtest.engine.strategy_runner import HftStrategyRunner +from trading_runtime.backtest.engine.strategy_runner import ( + MAX_TIMEOUT_NS, + HftStrategyRunner, +) class _NoopStrategy(Strategy): @@ -72,9 +76,10 @@ def __init__( self._state_values = state_values self._orders = orders self._current_ts = 0 + self.wait_calls: list[tuple[int, bool]] = [] def wait_next(self, *, timeout_ns: int, include_order_resp: bool) -> int: - _ = (timeout_ns, include_order_resp) + self.wait_calls.append((timeout_ns, include_order_resp)) self._current_ts = self._ts.pop(0) return self._rc.pop(0) @@ -343,6 +348,70 @@ def _spy_process_event_entry(state: object, entry: object, *, configuration: obj assert captured == [(0, runner._core_cfg)] +def test_wait_next_bootstrap_uses_include_order_resp_false_then_true_in_loop() -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + 
strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1, 2, 3], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert len(venue.wait_calls) >= 2 + first_timeout_ns, first_include_order_resp = venue.wait_calls[0] + assert first_timeout_ns == MAX_TIMEOUT_NS + assert first_include_order_resp is False + assert all(include_order_resp is True for _, include_order_resp in venue.wait_calls[1:]) + + +def test_market_mapping_from_depth_snapshot_is_deterministic_golden( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + + captured_market_events: list[MarketEvent] = [] + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + _ = (state, configuration) + if isinstance(entry.event, MarketEvent): + captured_market_events.append(entry.event) + + monkeypatch.setattr( + strategy_runner_module, + "process_event_entry", + _spy_process_event_entry, + ) + + venue = _StubVenue( + rc_sequence=[0, 2, 1], + ts_sequence=[1, 2_000_000_000, 2_000_000_001], + depth=_depth_snapshot(), + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert len(captured_market_events) == 1 + market_event = captured_market_events[0] + assert market_event.instrument == "BTC_USDC-PERPETUAL" + assert market_event.ts_ns_local == 2_000_000_000 + assert market_event.ts_ns_exch == 2_000_000_000 + assert market_event.book is not None + assert market_event.book.bids[0].price.value == 10.0 + assert market_event.book.asks[0].price.value == 10.100000000000001 + assert market_event.book.bids[0].quantity.value == 1.0 + assert market_event.book.asks[0].quantity.value == 0.0 + + def test_missing_core_cfg_fails_before_market_mutation() -> None: runner = 
object.__new__(HftStrategyRunner) runner.strategy_state = StrategyState(event_bus=EventBus(sinks=[])) @@ -484,6 +553,66 @@ def _spy_process_event_entry(*args: object, **kwargs: object) -> None: assert runner._event_stream_cursor.next_index == 0 +def test_rc2_rc3_paths_never_emit_fill_event_through_process_event_entry( + monkeypatch: pytest.MonkeyPatch, +) -> None: + runner = HftStrategyRunner( + engine_cfg=_engine_cfg(), + strategy=_NoopStrategy(), + risk_cfg=_risk_cfg(), + core_cfg=_core_cfg(), + ) + + calls = {"update_account": 0, "ingest_order_snapshots": 0} + emitted_fill_events = 0 + + def _spy_update_account(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["update_account"] += 1 + + def _spy_ingest_order_snapshots(*args: object, **kwargs: object) -> None: + _ = (args, kwargs) + calls["ingest_order_snapshots"] += 1 + + def _spy_process_event_entry(state: object, entry: object, *, configuration: object) -> None: + nonlocal emitted_fill_events + _ = (state, configuration) + if isinstance(entry.event, FillEvent): + emitted_fill_events += 1 + + monkeypatch.setattr(runner.strategy_state, "update_account", _spy_update_account) + monkeypatch.setattr( + runner.strategy_state, + "ingest_order_snapshots", + _spy_ingest_order_snapshots, + ) + monkeypatch.setattr( + strategy_runner_module, + "process_event_entry", + _spy_process_event_entry, + ) + + venue = _StubVenue( + rc_sequence=[0, 2, 3, 1], + ts_sequence=[1, 2, 3, 4], + depth=_depth_snapshot(), + state_values=SimpleNamespace( + position=0.0, + balance=1000.0, + fee=0.0, + trading_volume=0.0, + trading_value=0.0, + num_trades=0, + ), + orders={}, + ) + runner.run(venue=venue, execution=_NoopExecution(), recorder=_RecorderWrapper()) + + assert emitted_fill_events == 0 + assert calls["update_account"] == 1 + assert calls["ingest_order_snapshots"] == 1 + + def test_successful_new_dispatch_processes_order_submitted_before_mark_sent( monkeypatch: pytest.MonkeyPatch, ) -> None: From 
ffd915e82a8aff6e40d9cb9d9ac19af507f9fcc1 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 18:42:46 +0000 Subject: [PATCH 18/36] m2 p7 s3: add low-risk split capability protocol definitions for current hftbacktest venue source capabilities --- .../backtest/adapters/protocols.py | 73 +++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 trading_runtime/backtest/adapters/protocols.py diff --git a/trading_runtime/backtest/adapters/protocols.py b/trading_runtime/backtest/adapters/protocols.py new file mode 100644 index 0000000..019a3c4 --- /dev/null +++ b/trading_runtime/backtest/adapters/protocols.py @@ -0,0 +1,73 @@ +"""Typing-only adapter capability protocols for backtest venue sources. + +This module introduces low-risk capability seams as ``typing.Protocol`` classes. +It is intentionally implementation-light: + +- no runtime behavior or orchestration changes; +- no runtime ``isinstance`` checks; +- no required explicit inheritance for concrete adapters; +- structural compatibility is sufficient. + +Current ``HftBacktestVenueAdapter`` already structurally conforms to all +protocols defined here. + +Notes on intentionally deferred capabilities: + +- ``OrderSubmissionGateway`` is excluded in this slice until additional + characterization covers concrete hftbacktest execution adapter behavior. +- ``ExecutionFeedbackRecordSource`` is excluded in this slice because + execution-feedback capability remains deferred and gated by existing + runtime/source contracts. +""" + +from __future__ import annotations + +from typing import Any, Protocol + + +class VenueEventWaiter(Protocol): + """Wakeup capability for runtime loop progression. + + This is a typing seam only. It does not alter wait semantics, call order, + timeout computation, or rc-branch interpretation. 
+ """ + + def wait_next(self, *, timeout_ns: int, include_order_resp: bool) -> int: + """Block until next wakeup and return venue-defined rc code.""" + + +class VenueClock(Protocol): + """Timestamp-read capability for runtime adoption.""" + + def current_timestamp_ns(self) -> int: + """Return current venue-local timestamp in nanoseconds.""" + + +class MarketInputSource(Protocol): + """Market snapshot read capability for canonical market mapping.""" + + def read_market_snapshot(self) -> Any: + """Return venue-specific market snapshot object.""" + + +class OrderSnapshotSource(Protocol): + """Order snapshot capability for compatibility materialization paths. + + The current compatibility boundary consumes a combined tuple from one call. + A future split may separate this source surface. + """ + + def read_orders_snapshot(self) -> tuple[Any, Any]: + """Return (state_values, orders) from current snapshot boundary.""" + + +class AccountSnapshotSource(Protocol): + """Account snapshot capability (currently shared tuple-return surface). + + This intentionally shares ``read_orders_snapshot`` with + ``OrderSnapshotSource`` in the current runtime shape. 
+ """ + + def read_orders_snapshot(self) -> tuple[Any, Any]: + """Return (state_values, orders) from current snapshot boundary.""" + From 02f13af4e402f64f5dbb29751b6d678803284295 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 18:53:22 +0000 Subject: [PATCH 19/36] m2 p7 s4: add focused adapter-level characterization tests for HftBacktestExecutionAdapter behavior before any OrderSubmissionGateway protocol implementation or extraction --- ...test_execution_adapter_characterization.py | 344 ++++++++++++++++++ 1 file changed, 344 insertions(+) create mode 100644 tests/runtime/test_hftbacktest_execution_adapter_characterization.py diff --git a/tests/runtime/test_hftbacktest_execution_adapter_characterization.py b/tests/runtime/test_hftbacktest_execution_adapter_characterization.py new file mode 100644 index 0000000..2b91593 --- /dev/null +++ b/tests/runtime/test_hftbacktest_execution_adapter_characterization.py @@ -0,0 +1,344 @@ +"""Characterization tests for HftBacktestExecutionAdapter. + +These tests lock current adapter submission behavior only. They do not imply: +- FillEvent ingress; +- ExecutionFeedbackRecordSource support; +- canonical execution-feedback authority; +- post-submission lifecycle migration. 
+""" + +from __future__ import annotations + +import inspect +from dataclasses import dataclass, field +from typing import Any + +from trading_framework.core.domain.reject_reasons import RejectReason +from trading_framework.core.domain.types import ( + CancelOrderIntent, + NewOrderIntent, + Price, + Quantity, + ReplaceOrderIntent, +) + +from trading_runtime.backtest.adapters.execution import ( + HftBacktestExecutionAdapter, + _to_i64_order_id, +) + + +@dataclass +class _FakeHbt: + """Minimal fake hftbacktest object with configurable outcomes.""" + + result_codes: dict[str, int] = field(default_factory=dict) + raise_on: set[str] = field(default_factory=set) + calls: list[tuple[str, tuple[Any, ...]]] = field(default_factory=list) + + def _maybe_raise(self, method: str) -> None: + if method in self.raise_on: + raise RuntimeError(f"{method} failed") + + def _result_code(self, method: str) -> int: + return self.result_codes.get(method, 0) + + def submit_buy_order( + self, + asset_no: int, + order_id: int, + price: float, + quantity: float, + tif: int, + order_type: int, + post_only_flag: bool, + ) -> int: + self.calls.append( + ( + "submit_buy_order", + ( + asset_no, + order_id, + price, + quantity, + tif, + order_type, + post_only_flag, + ), + ) + ) + self._maybe_raise("submit_buy_order") + return self._result_code("submit_buy_order") + + def submit_sell_order( + self, + asset_no: int, + order_id: int, + price: float, + quantity: float, + tif: int, + order_type: int, + post_only_flag: bool, + ) -> int: + self.calls.append( + ( + "submit_sell_order", + ( + asset_no, + order_id, + price, + quantity, + tif, + order_type, + post_only_flag, + ), + ) + ) + self._maybe_raise("submit_sell_order") + return self._result_code("submit_sell_order") + + def modify( + self, + asset_no: int, + order_id: int, + new_price: float, + new_quantity: float, + post_only_flag: bool, + ) -> int: + self.calls.append( + ( + "modify", + (asset_no, order_id, new_price, new_quantity, 
post_only_flag), + ) + ) + self._maybe_raise("modify") + return self._result_code("modify") + + def cancel( + self, + asset_no: int, + order_id: int, + post_only_flag: bool, + ) -> int: + self.calls.append(("cancel", (asset_no, order_id, post_only_flag))) + self._maybe_raise("cancel") + return self._result_code("cancel") + + +def _new_intent( + *, + side: str, + client_order_id: str, + order_type: str = "limit", + tif: str = "GTC", + intended_price: Price | None = None, +) -> NewOrderIntent: + return NewOrderIntent( + ts_ns_local=1, + instrument="BTC_USDC-PERPETUAL", + client_order_id=client_order_id, + intents_correlation_id=f"corr-{client_order_id}", + side=side, + order_type=order_type, + intended_qty=Quantity(value=2.5, unit="contracts"), + intended_price=( + intended_price + if intended_price is not None + else Price(currency="USDC", value=100.5) + ), + time_in_force=tif, + ) + + +def _replace_intent(*, client_order_id: str = "cid-replace-1") -> ReplaceOrderIntent: + return ReplaceOrderIntent( + ts_ns_local=2, + instrument="BTC_USDC-PERPETUAL", + client_order_id=client_order_id, + intents_correlation_id=f"corr-{client_order_id}", + side="buy", + order_type="limit", + intended_qty=Quantity(value=3.0, unit="contracts"), + intended_price=Price(currency="USDC", value=101.25), + ) + + +def _cancel_intent(*, client_order_id: str = "cid-cancel-1") -> CancelOrderIntent: + return CancelOrderIntent( + ts_ns_local=3, + instrument="BTC_USDC-PERPETUAL", + client_order_id=client_order_id, + intents_correlation_id=f"corr-{client_order_id}", + ) + + +def test_new_buy_and_sell_submissions_call_expected_hbt_methods_and_map_arguments() -> None: + fake_hbt = _FakeHbt() + adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=7) + + buy_intent = _new_intent( + side="buy", + client_order_id="123", + tif="IOC", + order_type="limit", + intended_price=Price(currency="USDC", value=99.75), + ) + sell_intent = _new_intent( + side="sell", + client_order_id="cid-sell-1", + 
tif="POST_ONLY", + order_type="market", + intended_price=Price(currency="USDC", value=100.5), + ) + + execution_errors = adapter.apply_intents([buy_intent, sell_intent]) + + assert execution_errors == [] + assert len(fake_hbt.calls) == 2 + + method_buy, args_buy = fake_hbt.calls[0] + assert method_buy == "submit_buy_order" + assert args_buy == ( + 7, + _to_i64_order_id("123"), + 99.75, + 2.5, + 3, # IOC + 0, # limit + False, + ) + + method_sell, args_sell = fake_hbt.calls[1] + assert method_sell == "submit_sell_order" + assert args_sell == ( + 7, + _to_i64_order_id("cid-sell-1"), + 100.5, + 2.5, + 1, # POST_ONLY -> GTX + 1, # market + False, + ) + + +def test_new_submission_nonzero_result_code_returns_exchange_reject() -> None: + fake_hbt = _FakeHbt(result_codes={"submit_buy_order": 9}) + adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=1) + intent = _new_intent(side="buy", client_order_id="cid-new-reject") + + execution_errors = adapter.apply_intents([intent]) + + assert execution_errors == [(intent, RejectReason.EXCHANGE_REJECT)] + + +def test_new_submission_exception_returns_exchange_error() -> None: + fake_hbt = _FakeHbt(raise_on={"submit_sell_order"}) + adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=1) + intent = _new_intent(side="sell", client_order_id="cid-new-error") + + execution_errors = adapter.apply_intents([intent]) + + assert execution_errors == [(intent, RejectReason.EXCHANGE_ERROR)] + + +def test_replace_calls_modify_with_expected_mapping_and_success_behavior() -> None: + fake_hbt = _FakeHbt() + adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=3) + intent = _replace_intent(client_order_id="cid-replace-ok") + + execution_errors = adapter.apply_intents([intent]) + + assert execution_errors == [] + assert fake_hbt.calls == [ + ( + "modify", + ( + 3, + _to_i64_order_id("cid-replace-ok"), + 101.25, + 3.0, + False, + ), + ) + ] + + +def test_replace_nonzero_result_code_returns_exchange_reject() -> None: + fake_hbt = 
_FakeHbt(result_codes={"modify": 4}) + adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=0) + intent = _replace_intent(client_order_id="cid-replace-reject") + + execution_errors = adapter.apply_intents([intent]) + + assert execution_errors == [(intent, RejectReason.EXCHANGE_REJECT)] + + +def test_replace_exception_returns_exchange_error() -> None: + fake_hbt = _FakeHbt(raise_on={"modify"}) + adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=0) + intent = _replace_intent(client_order_id="cid-replace-error") + + execution_errors = adapter.apply_intents([intent]) + + assert execution_errors == [(intent, RejectReason.EXCHANGE_ERROR)] + + +def test_cancel_calls_cancel_with_expected_mapping_and_success_behavior() -> None: + fake_hbt = _FakeHbt() + adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=5) + intent = _cancel_intent(client_order_id="cid-cancel-ok") + + execution_errors = adapter.apply_intents([intent]) + + assert execution_errors == [] + assert fake_hbt.calls == [ + ("cancel", (5, _to_i64_order_id("cid-cancel-ok"), False)) + ] + + +def test_cancel_nonzero_result_code_returns_exchange_reject() -> None: + fake_hbt = _FakeHbt(result_codes={"cancel": 2}) + adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=0) + intent = _cancel_intent(client_order_id="cid-cancel-reject") + + execution_errors = adapter.apply_intents([intent]) + + assert execution_errors == [(intent, RejectReason.EXCHANGE_REJECT)] + + +def test_cancel_exception_returns_exchange_error() -> None: + fake_hbt = _FakeHbt(raise_on={"cancel"}) + adapter = HftBacktestExecutionAdapter(hbt=fake_hbt, asset_no=0) + intent = _cancel_intent(client_order_id="cid-cancel-error") + + execution_errors = adapter.apply_intents([intent]) + + assert execution_errors == [(intent, RejectReason.EXCHANGE_ERROR)] + + +def test_to_i64_order_id_numeric_and_deterministic_non_numeric_behavior() -> None: + assert _to_i64_order_id("42") == 42 + assert _to_i64_order_id(" 77 ") == 77 + + 
alpha_a = _to_i64_order_id("cid-alpha") + alpha_b = _to_i64_order_id("cid-alpha") + beta = _to_i64_order_id("cid-beta") + + assert alpha_a == alpha_b + assert alpha_a != beta + assert 0 <= alpha_a < (1 << 63) + assert 0 <= beta < (1 << 63) + + +def test_characterization_scope_excludes_feedback_source_and_fill_ingress_implications() -> None: + public_methods = { + name + for name, member in inspect.getmembers(HftBacktestExecutionAdapter) + if callable(member) and not name.startswith("_") + } + apply_intents_source = inspect.getsource(HftBacktestExecutionAdapter.apply_intents) + + assert "drain_execution_feedback_records" not in public_methods + assert "ExecutionFeedbackRecordSource" not in apply_intents_source + assert "FillEvent" not in apply_intents_source + From 1be316225e55aaae15e0c6db7616ebe69eb2fee1 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 18:55:52 +0000 Subject: [PATCH 20/36] m2 p7 s5: add an OrderSubmissionGateway typing.Protocol to the existing adapter protocols module without changing runtime behavior --- .../backtest/adapters/protocols.py | 34 ++++++++++++++++--- 1 file changed, 29 insertions(+), 5 deletions(-) diff --git a/trading_runtime/backtest/adapters/protocols.py b/trading_runtime/backtest/adapters/protocols.py index 019a3c4..5b96f15 100644 --- a/trading_runtime/backtest/adapters/protocols.py +++ b/trading_runtime/backtest/adapters/protocols.py @@ -11,12 +11,12 @@ Current ``HftBacktestVenueAdapter`` already structurally conforms to all protocols defined here. -Notes on intentionally deferred capabilities: +Notes on capability scope: -- ``OrderSubmissionGateway`` is excluded in this slice until additional - characterization covers concrete hftbacktest execution adapter behavior. -- ``ExecutionFeedbackRecordSource`` is excluded in this slice because - execution-feedback capability remains deferred and gated by existing +- ``OrderSubmissionGateway`` is included as an outbound command-submission + typing seam only. 
+- ``ExecutionFeedbackRecordSource`` remains excluded in this slice because + execution-feedback capability is deferred and gated by existing runtime/source contracts. """ @@ -24,6 +24,9 @@ from typing import Any, Protocol +from trading_framework.core.domain.reject_reasons import RejectReason +from trading_framework.core.domain.types import OrderIntent + class VenueEventWaiter(Protocol): """Wakeup capability for runtime loop progression. @@ -71,3 +74,24 @@ class AccountSnapshotSource(Protocol): def read_orders_snapshot(self) -> tuple[Any, Any]: """Return (state_values, orders) from current snapshot boundary.""" + +class OrderSubmissionGateway(Protocol): + """Outbound order command submission capability. + + This protocol is strictly about dispatching outbound order commands and + reporting dispatch failures. It is not an execution-feedback source. + + Successful outbound submission may allow runtime to produce + ``OrderSubmittedEvent`` for ``new`` intents under existing runner semantics. + Failure rows represent command rejection/dispatch errors only. + + This protocol does not imply canonical execution-feedback authority, + ``FillEvent`` ingress, or post-submission lifecycle migration. + ``ExecutionFeedbackRecordSource`` remains a separate deferred capability. 
+ """ + + def apply_intents( + self, intents: list[OrderIntent] + ) -> list[tuple[OrderIntent, RejectReason]]: + """Submit intents and return per-intent dispatch failures.""" + From b75518f9feb63c3043cdc89f4957fd7519b794ef Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 19:16:10 +0000 Subject: [PATCH 21/36] m2 p7 s6: align OrderSubmissionGateway, type runner execution, keep venue unchanged --- trading_runtime/backtest/adapters/protocols.py | 6 +++--- trading_runtime/backtest/engine/strategy_runner.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/trading_runtime/backtest/adapters/protocols.py b/trading_runtime/backtest/adapters/protocols.py index 5b96f15..b7a788c 100644 --- a/trading_runtime/backtest/adapters/protocols.py +++ b/trading_runtime/backtest/adapters/protocols.py @@ -24,7 +24,6 @@ from typing import Any, Protocol -from trading_framework.core.domain.reject_reasons import RejectReason from trading_framework.core.domain.types import OrderIntent @@ -83,7 +82,8 @@ class OrderSubmissionGateway(Protocol): Successful outbound submission may allow runtime to produce ``OrderSubmittedEvent`` for ``new`` intents under existing runner semantics. - Failure rows represent command rejection/dispatch errors only. + Failure rows represent command rejection/dispatch errors only, where + reason values are string constants from the ``RejectReason`` namespace. This protocol does not imply canonical execution-feedback authority, ``FillEvent`` ingress, or post-submission lifecycle migration. 
@@ -92,6 +92,6 @@ class OrderSubmissionGateway(Protocol): def apply_intents( self, intents: list[OrderIntent] - ) -> list[tuple[OrderIntent, RejectReason]]: + ) -> list[tuple[OrderIntent, str]]: """Submit intents and return per-intent dispatch failures.""" diff --git a/trading_runtime/backtest/engine/strategy_runner.py b/trading_runtime/backtest/engine/strategy_runner.py index 3ab5eb5..96bda6f 100644 --- a/trading_runtime/backtest/engine/strategy_runner.py +++ b/trading_runtime/backtest/engine/strategy_runner.py @@ -30,13 +30,13 @@ from trading_framework.core.risk.risk_config import RiskConfig from trading_framework.core.risk.risk_engine import RejectedIntent, RiskEngine +from trading_runtime.backtest.adapters.protocols import OrderSubmissionGateway from trading_runtime.backtest.engine.event_stream_cursor import EventStreamCursor from trading_runtime.core.events.sinks.file_recorder import FileRecorderSink if TYPE_CHECKING: from trading_framework.strategies.base import Strategy - from trading_runtime.backtest.adapters.execution import HftBacktestExecutionAdapter from trading_runtime.backtest.engine.hft_engine import HftEngineConfig @@ -177,7 +177,7 @@ def _process_canonical_control_time_event( def run( self, venue: VenueAdapter, - execution: HftBacktestExecutionAdapter, + execution: OrderSubmissionGateway, recorder: Any, ) -> None: """Run the backtest loop.""" From 46f13d5d88d7cda16fd03804eb043154910b3d59 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 19:39:42 +0000 Subject: [PATCH 22/36] m2 p7 s7: add lightweight static boundary guard tests that prevent adapter/core boundary drift before moving to Core Runtime usability work --- requirements-dev.txt | 2 +- requirements.txt | 2 +- tests/runtime/test_adapter_boundary_guards.py | 89 +++++++++++++++++++ 3 files changed, 91 insertions(+), 2 deletions(-) create mode 100644 tests/runtime/test_adapter_boundary_guards.py diff --git a/requirements-dev.txt b/requirements-dev.txt index bdceaef..af15fc0 100644 --- 
a/requirements-dev.txt +++ b/requirements-dev.txt @@ -331,7 +331,7 @@ tornado==6.5.4 # via bokeh tqdm==4.67.3 # via panel -trading-framework @ git+https://github.com/TradingChassis/core.git@d36347965f33e2735d233daa59fd8e5840604523 +trading-framework @ git+https://github.com/TradingChassis/core.git@686dd949e74f6cd482e3e96236669058d9484899 # via -r _git_deps.in typing-extensions==4.15.0 # via diff --git a/requirements.txt b/requirements.txt index 1732f9f..5df1f58 100644 --- a/requirements.txt +++ b/requirements.txt @@ -307,7 +307,7 @@ tornado==6.5.4 # via bokeh tqdm==4.67.3 # via panel -trading-framework @ git+https://github.com/TradingChassis/core.git@d36347965f33e2735d233daa59fd8e5840604523 +trading-framework @ git+https://github.com/TradingChassis/core.git@686dd949e74f6cd482e3e96236669058d9484899 # via -r _git_deps.in typing-extensions==4.15.0 # via diff --git a/tests/runtime/test_adapter_boundary_guards.py b/tests/runtime/test_adapter_boundary_guards.py new file mode 100644 index 0000000..4619b0e --- /dev/null +++ b/tests/runtime/test_adapter_boundary_guards.py @@ -0,0 +1,89 @@ +"""Static boundary guard tests for adapter/core layering. + +These checks intentionally use lightweight text scanning to catch boundary +drift early without introducing new tooling dependencies. 
+""" + +from __future__ import annotations + +import re +from pathlib import Path + +import pytest + +CORE_RUNTIME_ROOT = Path(__file__).resolve().parents[2] +WORKSPACE_ROOT = CORE_RUNTIME_ROOT.parent + +ADAPTER_FILES = [ + CORE_RUNTIME_ROOT / "trading_runtime/backtest/adapters/venue.py", + CORE_RUNTIME_ROOT / "trading_runtime/backtest/adapters/execution.py", + CORE_RUNTIME_ROOT / "trading_runtime/backtest/adapters/protocols.py", +] + + +def _read_text(path: Path) -> str: + return path.read_text(encoding="utf-8") + + +def _assert_no_matches(path: Path, patterns: list[str], description: str) -> None: + content = _read_text(path) + for pattern in patterns: + match = re.search(pattern, content, flags=re.MULTILINE) + assert match is None, f"{path} violates {description}: matched /{pattern}/" + + +def test_adapters_do_not_import_or_reference_strategy_state() -> None: + patterns = [ + r"\bStrategyState\b", + ] + for path in ADAPTER_FILES: + _assert_no_matches(path, patterns, "StrategyState boundary") + + +def test_adapters_do_not_import_or_call_canonical_processing_boundaries() -> None: + patterns = [ + r"\bprocess_event_entry\b", + r"\bprocess_canonical_event\b", + r"\bfold_event_stream_entries\b", + ] + for path in ADAPTER_FILES: + _assert_no_matches(path, patterns, "canonical processing boundary") + + +def test_adapters_do_not_import_or_construct_fill_event() -> None: + patterns = [ + r"^\s*from\s+[^\n]*\s+import\s+[^\n]*\bFillEvent\b", + r"^\s*import\s+[^\n]*\bFillEvent\b", + r"\bFillEvent\s*\(", + ] + for path in ADAPTER_FILES: + _assert_no_matches(path, patterns, "FillEvent ingress boundary") + + +def test_core_production_package_does_not_import_trading_runtime() -> None: + core_pkg = WORKSPACE_ROOT / "core/trading_framework" + if not core_pkg.exists(): + pytest.skip( + f"core package not present in this checkout layout: {core_pkg}" + ) + + patterns = [ + r"^\s*import\s+trading_runtime(\.|$)", + r"^\s*from\s+trading_runtime(\.|\s+import\b)", + ] + + for path in 
core_pkg.rglob("*.py"): + _assert_no_matches(path, patterns, "core->core-runtime import boundary") + + +def test_protocols_does_not_define_execution_feedback_record_source_yet() -> None: + protocols_py = CORE_RUNTIME_ROOT / "trading_runtime/backtest/adapters/protocols.py" + patterns = [ + r"^\s*class\s+ExecutionFeedbackRecordSource\b", + r"^\s*ExecutionFeedbackRecordSource\s*=", + ] + _assert_no_matches( + protocols_py, + patterns, + "deferred ExecutionFeedbackRecordSource capability", + ) From 6cec8849d1e27745f1284092eadef7b80bd8cf45 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 19:55:51 +0000 Subject: [PATCH 23/36] m2 p8 s1: update core-runtime README so a contributor understands how to install, test, and attempt local backtests --- README.md | 97 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 96 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 2bd4138..992e55e 100644 --- a/README.md +++ b/README.md @@ -45,12 +45,23 @@ deterministic runtime environments. --- +## 🏷 Naming Clarification (current transitional state) + +- Repository/folder name in the monorepo can be `core-runtime`. +- Python import package in this repository is `trading_runtime`. +- Distribution/project name in packaging metadata is `trading-runtime`. +- Core package import remains `trading_framework`. +- Package/directory rename alignment is planned separately and is not part of this phase. 
+ +--- + ## 📁 Repository Structure ``` .github/workflows/ CI pipelines (tests, Argo template deploy) argo/ Argo workflow templates docs/ Runtime design notes (implementation-facing) +examples/ Alternative example runner/config/strategy paths scripts/ environment & build helper scripts trading_runtime/ Python runtime entrypoints tests/ deterministic test data & validation @@ -70,6 +81,85 @@ trading_runtime/strategies/ Example strategies --- +## 🚀 Quick Start / Development Setup + +### Monorepo workspace layout (recommended for current development) + +If your workspace root contains sibling repositories (for example `core/` and +`core-runtime/`), run tests from the workspace root: + +```bash +python -m pytest -q core-runtime/tests +python -m pytest -q core/tests/semantics +``` + +Optional editable installs from workspace root: + +```bash +python -m pip install -e core +python -m pip install -e core-runtime +``` + +### Standalone `core-runtime` repo root + +From the `core-runtime` repository root: + +```bash +python -m pip install -e . +python -m pytest -q tests +./scripts/check.sh +``` + +If `trading_framework` is not already available in your environment, install +`core` as a sibling editable package or ensure the pinned dependency resolves. + +`PYTHONPATH=.` can be used as a short-term development shortcut, but editable +installation (`python -m pip install -e .`) is the preferred workflow. + +--- + +## 🗺 Entrypoint Matrix + +| Mode | Entrypoint | Command shape | Notes | +| --- | --- | --- | --- | +| Local backtest | `trading_runtime/local/backtest.py` | `python -m trading_runtime.local.backtest --config trading_runtime/local/local.json` | Main local runner. | +| Argo plan/run orchestration | `trading_runtime/backtest/runtime/entrypoint.py` | `python -m trading_runtime.backtest.runtime.entrypoint --config trading_runtime/argo/argo.json --plan` | Planner and sweep-context emitter for Argo flow. 
| +| Sweep worker | `trading_runtime/backtest/runtime/run_sweep.py` | `python -m trading_runtime.backtest.runtime.run_sweep --context ` | Executes one sweep context (pod-level unit). | +| Examples path | `examples/local/backtest.py` | `python examples/local/backtest.py --config examples/local/local.json` | Alternative example path; useful for reference but duplicates runtime patterns. | + +Use `trading_runtime/local/*` for local runtime development, `trading_runtime/backtest/runtime/*` +for Argo workflow execution, and `examples/*` as a duplicate reference path. + +--- + +## ⚠️ Local Config Path Caveat + +Current shipped local JSON configs are devcontainer-oriented and include +absolute `/workspaces/core-runtime/...` paths. + +On a normal host machine, use one of the following: + +- run inside the devcontainer, or +- copy the config and edit paths to local data/results locations. + +This README update does not change JSON files in-place. + +--- + +## 📌 Current semantic status (transitional) + +`core-runtime` is currently usable as a transitional runtime around `core`: + +- canonical `MarketEvent`, `OrderSubmittedEvent`, and `ControlTimeEvent` paths are in use +- post-submission order/fill progression remains on the snapshot-compatibility path +- `FillEvent` runtime ingress remains deferred + +For adapter boundary context, see: + +- `docs/venue-adapter-abstraction-design-v1.md` + +--- + ## 📌 Dependency Pinning & Reproducibility The `trading-framework` dependency is pinned by commit SHA. 
@@ -103,7 +193,7 @@ These files are used by: Run a deterministic local backtest: ```bash -python trading_runtime/local/backtest.py \ +python -m trading_runtime.local.backtest \ --config trading_runtime/local/local.json ``` @@ -119,6 +209,11 @@ Results are written to: tests/data/results/ ``` +Important: `trading_runtime/local/local.json` and `examples/local/local.json` +currently point to devcontainer absolute paths under +`/workspaces/core-runtime/...`; adjust paths (or use devcontainer) when running +on a regular host. + --- ## ⚙️ Infrastructure Requirements From 4143bf8d74afc0d709ff54486d18fa47b5e41458 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 20:15:11 +0000 Subject: [PATCH 24/36] m2 p8 s2: replace devcontainer-absolute local JSON paths with repo-root-relative paths to improve host local usability while preserving runtime behavior --- README.md | 17 ++++++----------- examples/local/local.json | 10 +++++----- trading_runtime/local/local.json | 10 +++++----- 3 files changed, 16 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 992e55e..ec4a579 100644 --- a/README.md +++ b/README.md @@ -134,15 +134,11 @@ for Argo workflow execution, and `examples/*` as a duplicate reference path. ## ⚠️ Local Config Path Caveat -Current shipped local JSON configs are devcontainer-oriented and include -absolute `/workspaces/core-runtime/...` paths. +Current shipped local JSON configs use cwd-relative paths for +`tests/data/...` inputs and outputs. -On a normal host machine, use one of the following: - -- run inside the devcontainer, or -- copy the config and edit paths to local data/results locations. - -This README update does not change JSON files in-place. +Supported workflow: run local commands from the `core-runtime` repository root. +If you run from a different cwd, adjust config paths accordingly. 
--- @@ -210,9 +206,8 @@ tests/data/results/ ``` Important: `trading_runtime/local/local.json` and `examples/local/local.json` -currently point to devcontainer absolute paths under -`/workspaces/core-runtime/...`; adjust paths (or use devcontainer) when running -on a regular host. +now use cwd-relative `tests/data/...` paths. Run from the `core-runtime` +repository root, or adjust config paths for your current working directory. --- diff --git a/examples/local/local.json b/examples/local/local.json index c34145f..9784014 100644 --- a/examples/local/local.json +++ b/examples/local/local.json @@ -5,9 +5,9 @@ "engine": { "initial_snapshot": null, "data_files": [ - "/workspaces/core-runtime/tests/data/parts/part-000.npz", - "/workspaces/core-runtime/tests/data/parts/part-001.npz", - "/workspaces/core-runtime/tests/data/parts/part-002.npz" + "tests/data/parts/part-000.npz", + "tests/data/parts/part-001.npz", + "tests/data/parts/part-002.npz" ], "instrument": "BTC_USDC-PERPETUAL", @@ -32,8 +32,8 @@ "roi_lb": 40000, "roi_ub": 80000, - "stats_npz_path": "/workspaces/core-runtime/tests/data/results/stats.npz", - "event_bus_path": "/workspaces/core-runtime/tests/data/results/events.json" + "stats_npz_path": "tests/data/results/stats.npz", + "event_bus_path": "tests/data/results/events.json" }, "risk": { diff --git a/trading_runtime/local/local.json b/trading_runtime/local/local.json index 153b81e..96478dd 100644 --- a/trading_runtime/local/local.json +++ b/trading_runtime/local/local.json @@ -5,9 +5,9 @@ "engine": { "initial_snapshot": null, "data_files": [ - "/workspaces/core-runtime/tests/data/parts/part-000.npz", - "/workspaces/core-runtime/tests/data/parts/part-001.npz", - "/workspaces/core-runtime/tests/data/parts/part-002.npz" + "tests/data/parts/part-000.npz", + "tests/data/parts/part-001.npz", + "tests/data/parts/part-002.npz" ], "instrument": "BTC_USDC-PERPETUAL", @@ -32,8 +32,8 @@ "roi_lb": 40000, "roi_ub": 80000, - "stats_npz_path": 
"/workspaces/core-runtime/tests/data/results/stats.npz", - "event_bus_path": "/workspaces/core-runtime/tests/data/results/events.json" + "stats_npz_path": "tests/data/results/stats.npz", + "event_bus_path": "tests/data/results/events.json" }, "risk": { From 607c2bfcf1c5a024a86255a20c1e8335bff83ea7 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 20:33:15 +0000 Subject: [PATCH 25/36] m2 p8 s3: move default local backtest outputs out of tracked tests/data/results and into ignored `.runtime/local/results/`, so local smoke runs are not blocked by tracked/root-owned result files --- .gitignore | 1 + README.md | 14 ++++++++++---- examples/local/local.json | 4 ++-- trading_runtime/local/local.json | 4 ++-- 4 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.gitignore b/.gitignore index ec65e06..bf02f8f 100644 --- a/.gitignore +++ b/.gitignore @@ -101,6 +101,7 @@ site/ local_settings.py db.sqlite3 db.sqlite3-journal +.runtime/ # ============================== # Devcontainer related diff --git a/README.md b/README.md index ec4a579..bfa662f 100644 --- a/README.md +++ b/README.md @@ -202,12 +202,12 @@ tests/data/parts/ Results are written to: ``` -tests/data/results/ +.runtime/local/results/ ``` Important: `trading_runtime/local/local.json` and `examples/local/local.json` -now use cwd-relative `tests/data/...` paths. Run from the `core-runtime` -repository root, or adjust config paths for your current working directory. +use cwd-relative paths. Run from the `core-runtime` repository root, or adjust +config paths for your current working directory. 
--- @@ -336,12 +336,18 @@ Synthetic datasets are provided in: tests/data/parts/ ``` -Result artifacts: +Historical/sample result artifacts may exist in: ``` tests/data/results/ ``` +Default local backtest outputs are now written to: + +``` +.runtime/local/results/ +``` + Helper generation scripts: ``` diff --git a/examples/local/local.json b/examples/local/local.json index 9784014..3fedc31 100644 --- a/examples/local/local.json +++ b/examples/local/local.json @@ -32,8 +32,8 @@ "roi_lb": 40000, "roi_ub": 80000, - "stats_npz_path": "tests/data/results/stats.npz", - "event_bus_path": "tests/data/results/events.json" + "stats_npz_path": ".runtime/local/results/stats.npz", + "event_bus_path": ".runtime/local/results/events.json" }, "risk": { diff --git a/trading_runtime/local/local.json b/trading_runtime/local/local.json index 96478dd..d1c3c6f 100644 --- a/trading_runtime/local/local.json +++ b/trading_runtime/local/local.json @@ -32,8 +32,8 @@ "roi_lb": 40000, "roi_ub": 80000, - "stats_npz_path": "tests/data/results/stats.npz", - "event_bus_path": "tests/data/results/events.json" + "stats_npz_path": ".runtime/local/results/stats.npz", + "event_bus_path": ".runtime/local/results/events.json" }, "risk": { From 45beb8cae8c6d5023a0ec76ab9b417026225d5a5 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 20:45:53 +0000 Subject: [PATCH 26/36] m2 p8 s4: document Core Runtime is now usable for the current local hftbacktest backtest path --- README.md | 41 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index bfa662f..ffa10fd 100644 --- a/README.md +++ b/README.md @@ -135,13 +135,52 @@ for Argo workflow execution, and `examples/*` as a duplicate reference path. ## ⚠️ Local Config Path Caveat Current shipped local JSON configs use cwd-relative paths for -`tests/data/...` inputs and outputs. +`tests/data/...` inputs and `.runtime/...` outputs. 
Supported workflow: run local commands from the `core-runtime` repository root. If you run from a different cwd, adjust config paths accordingly. --- +## ✅ Current usability status (local hftbacktest path) + +The current local backtest path is verified and usable from the `core-runtime` +repository root. + +Verified local workflow: + +```bash +python -m pip install -e . +python -m trading_runtime.local.backtest --config trading_runtime/local/local.json +``` + +Verified output location: + +``` +.runtime/local/results/events.json +.runtime/local/results/stats.npz +``` + +Verified tests: + +```bash +python -m pytest -q tests +python -m pytest -q core-runtime/tests +python -m pytest -q core/tests/semantics +``` + +Current caveats: + +- Paths are cwd-relative; supported workflow is running from `core-runtime` root. +- hftbacktest timestamp-ordering warnings may appear from fixture ordering but do not fail the run. +- `tests/data/results/` may contain historical/sample artifacts and is no longer the default local output location. +- Naming remains transitional (`core-runtime` repo, `trading-runtime` distribution, `trading_runtime` imports, `trading_framework` core imports); rename/structure alignment is separate work. + +This status confirms local usability for the current local hftbacktest path; it +does not imply full canonical Event Stream completion. 
+ +--- + ## 📌 Current semantic status (transitional) `core-runtime` is currently usable as a transitional runtime around `core`: From c5ae2593f5571fbddcb0d90c692d925d8e071c63 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 22:00:14 +0000 Subject: [PATCH 27/36] m2 p9 s3: rename core-runtime import root from trading_runtime to core_runtime --- Dockerfile | 2 +- README.md | 33 ++++++++++--------- {trading_runtime => core_runtime}/__init__.py | 0 .../argo/argo.json | 2 +- .../backtest/adapters/__init__.py | 0 .../backtest/adapters/execution.py | 4 +-- .../backtest/adapters/protocols.py | 2 +- .../backtest/adapters/venue.py | 2 +- .../backtest/engine/__init__.py | 0 .../backtest/engine/engine_base.py | 0 .../backtest/engine/event_stream_cursor.py | 2 +- .../backtest/engine/hft_engine.py | 16 ++++----- .../backtest/engine/strategy_runner.py | 30 ++++++++--------- .../backtest/io/__init__.py | 0 .../backtest/io/s3_adapter.py | 0 .../backtest/orchestrator/__init__.py | 0 .../backtest/orchestrator/manifest.py | 0 .../backtest/orchestrator/planner.py | 8 ++--- .../backtest/orchestrator/planner_models.py | 2 +- .../backtest/orchestrator/s3_manifest.py | 4 +-- .../backtest/orchestrator/segmenter.py | 2 +- .../backtest/orchestrator/summary.py | 2 +- .../backtest/orchestrator/sweeps.py | 0 .../backtest/runtime/__init__.py | 0 .../backtest/runtime/context.py | 0 .../runtime/core_configuration_mapper.py | 2 +- .../backtest/runtime/entrypoint.py | 14 ++++---- .../runtime/experiment_finalize_entrypoint.py | 6 ++-- .../backtest/runtime/mlflow_segment_logger.py | 2 +- .../backtest/runtime/prometheus_metrics.py | 0 .../backtest/runtime/run_sweep.py | 12 +++---- .../runtime/segment_finalize_entrypoint.py | 8 ++--- .../core/__init__.py | 0 .../core/events/__init__.py | 0 .../core/events/sinks/__init__.py | 0 .../core/events/sinks/file_recorder.py | 0 .../local/__init__.py | 0 .../local/backtest.py | 10 +++--- .../local/local.json | 2 +- .../strategies/__init__.py | 0 
.../strategies/debug_strategy.py | 4 +-- examples/local/backtest.py | 10 +++--- examples/strategies/debug_strategy.py | 4 +-- pyproject.toml | 10 +++--- requirements-dev.txt | 2 +- requirements.txt | 2 +- scripts/check.sh | 6 ++-- tests/runtime/test_adapter_boundary_guards.py | 12 ++++--- .../runtime/test_core_configuration_mapper.py | 4 +-- tests/runtime/test_event_stream_cursor.py | 4 +-- ...test_execution_adapter_characterization.py | 6 ++-- ...st_hftbacktest_execution_feedback_probe.py | 6 ++-- .../runtime/test_import_compatibility_shim.py | 29 ++++++++++++++++ ..._runtime_core_configuration_integration.py | 20 +++++------ ...rategy_runner_canonical_market_adoption.py | 24 +++++++------- tests/test_dummy.py | 6 ++-- 56 files changed, 174 insertions(+), 142 deletions(-) rename {trading_runtime => core_runtime}/__init__.py (100%) rename {trading_runtime => core_runtime}/argo/argo.json (96%) rename {trading_runtime => core_runtime}/backtest/adapters/__init__.py (100%) rename {trading_runtime => core_runtime}/backtest/adapters/execution.py (97%) rename {trading_runtime => core_runtime}/backtest/adapters/protocols.py (98%) rename {trading_runtime => core_runtime}/backtest/adapters/venue.py (95%) rename {trading_runtime => core_runtime}/backtest/engine/__init__.py (100%) rename {trading_runtime => core_runtime}/backtest/engine/engine_base.py (100%) rename {trading_runtime => core_runtime}/backtest/engine/event_stream_cursor.py (91%) rename {trading_runtime => core_runtime}/backtest/engine/hft_engine.py (90%) rename {trading_runtime => core_runtime}/backtest/engine/strategy_runner.py (93%) rename {trading_runtime => core_runtime}/backtest/io/__init__.py (100%) rename {trading_runtime => core_runtime}/backtest/io/s3_adapter.py (100%) rename {trading_runtime => core_runtime}/backtest/orchestrator/__init__.py (100%) rename {trading_runtime => core_runtime}/backtest/orchestrator/manifest.py (100%) rename {trading_runtime => core_runtime}/backtest/orchestrator/planner.py 
(93%) rename {trading_runtime => core_runtime}/backtest/orchestrator/planner_models.py (90%) rename {trading_runtime => core_runtime}/backtest/orchestrator/s3_manifest.py (95%) rename {trading_runtime => core_runtime}/backtest/orchestrator/segmenter.py (93%) rename {trading_runtime => core_runtime}/backtest/orchestrator/summary.py (98%) rename {trading_runtime => core_runtime}/backtest/orchestrator/sweeps.py (100%) rename {trading_runtime => core_runtime}/backtest/runtime/__init__.py (100%) rename {trading_runtime => core_runtime}/backtest/runtime/context.py (100%) rename {trading_runtime => core_runtime}/backtest/runtime/core_configuration_mapper.py (98%) rename {trading_runtime => core_runtime}/backtest/runtime/entrypoint.py (93%) rename {trading_runtime => core_runtime}/backtest/runtime/experiment_finalize_entrypoint.py (95%) rename {trading_runtime => core_runtime}/backtest/runtime/mlflow_segment_logger.py (96%) rename {trading_runtime => core_runtime}/backtest/runtime/prometheus_metrics.py (100%) rename {trading_runtime => core_runtime}/backtest/runtime/run_sweep.py (97%) rename {trading_runtime => core_runtime}/backtest/runtime/segment_finalize_entrypoint.py (94%) rename {trading_runtime => core_runtime}/core/__init__.py (100%) rename {trading_runtime => core_runtime}/core/events/__init__.py (100%) rename {trading_runtime => core_runtime}/core/events/sinks/__init__.py (100%) rename {trading_runtime => core_runtime}/core/events/sinks/file_recorder.py (100%) rename {trading_runtime => core_runtime}/local/__init__.py (100%) rename {trading_runtime => core_runtime}/local/backtest.py (86%) rename {trading_runtime => core_runtime}/local/local.json (96%) rename {trading_runtime => core_runtime}/strategies/__init__.py (100%) rename {trading_runtime => core_runtime}/strategies/debug_strategy.py (98%) create mode 100644 tests/runtime/test_import_compatibility_shim.py diff --git a/Dockerfile b/Dockerfile index f761b8e..3a49b77 100644 --- a/Dockerfile +++ b/Dockerfile @@ 
-29,7 +29,7 @@ RUN pip install --upgrade pip \ # Copy project files COPY pyproject.toml . COPY scripts/check.sh . -COPY trading_runtime/ trading_runtime/ +COPY core_runtime/ core_runtime/ COPY tests/ tests/ # Install the package itself diff --git a/README.md b/README.md index ffa10fd..8af9696 100644 --- a/README.md +++ b/README.md @@ -48,9 +48,10 @@ deterministic runtime environments. ## 🏷 Naming Clarification (current transitional state) - Repository/folder name in the monorepo can be `core-runtime`. -- Python import package in this repository is `trading_runtime`. +- Python import package in this repository is `core_runtime`. - Distribution/project name in packaging metadata is `trading-runtime`. -- Core package import remains `trading_framework`. +- Legacy package import `trading_runtime` remains available as a compatibility shim. +- Core package canonical import is `tradingchassis_core` (`trading_framework` is compatibility/deprecated). - Package/directory rename alignment is planned separately and is not part of this phase. 
--- @@ -63,7 +64,7 @@ argo/ Argo workflow templates docs/ Runtime design notes (implementation-facing) examples/ Alternative example runner/config/strategy paths scripts/ environment & build helper scripts -trading_runtime/ Python runtime entrypoints +core_runtime/ Python runtime entrypoints tests/ deterministic test data & validation ``` @@ -74,9 +75,9 @@ Implementation-facing design notes: ### Key runtime modules ``` -trading_runtime/local/ Local execution mode -trading_runtime/argo/ Argo workflow entrypoints -trading_runtime/strategies/ Example strategies +core_runtime/local/ Local execution mode +core_runtime/argo/ Argo workflow entrypoints +core_runtime/strategies/ Example strategies ``` --- @@ -110,7 +111,7 @@ python -m pytest -q tests ./scripts/check.sh ``` -If `trading_framework` is not already available in your environment, install +If `tradingchassis_core` is not already available in your environment, install `core` as a sibling editable package or ensure the pinned dependency resolves. `PYTHONPATH=.` can be used as a short-term development shortcut, but editable @@ -122,12 +123,12 @@ installation (`python -m pip install -e .`) is the preferred workflow. | Mode | Entrypoint | Command shape | Notes | | --- | --- | --- | --- | -| Local backtest | `trading_runtime/local/backtest.py` | `python -m trading_runtime.local.backtest --config trading_runtime/local/local.json` | Main local runner. | -| Argo plan/run orchestration | `trading_runtime/backtest/runtime/entrypoint.py` | `python -m trading_runtime.backtest.runtime.entrypoint --config trading_runtime/argo/argo.json --plan` | Planner and sweep-context emitter for Argo flow. | -| Sweep worker | `trading_runtime/backtest/runtime/run_sweep.py` | `python -m trading_runtime.backtest.runtime.run_sweep --context ` | Executes one sweep context (pod-level unit). | +| Local backtest | `core_runtime/local/backtest.py` | `python -m core_runtime.local.backtest --config core_runtime/local/local.json` | Main local runner. 
| +| Argo plan/run orchestration | `core_runtime/backtest/runtime/entrypoint.py` | `python -m core_runtime.backtest.runtime.entrypoint --config core_runtime/argo/argo.json --plan` | Planner and sweep-context emitter for Argo flow. | +| Sweep worker | `core_runtime/backtest/runtime/run_sweep.py` | `python -m core_runtime.backtest.runtime.run_sweep --context ` | Executes one sweep context (pod-level unit). | | Examples path | `examples/local/backtest.py` | `python examples/local/backtest.py --config examples/local/local.json` | Alternative example path; useful for reference but duplicates runtime patterns. | -Use `trading_runtime/local/*` for local runtime development, `trading_runtime/backtest/runtime/*` +Use `core_runtime/local/*` for local runtime development, `core_runtime/backtest/runtime/*` for Argo workflow execution, and `examples/*` as a duplicate reference path. --- @@ -151,7 +152,7 @@ Verified local workflow: ```bash python -m pip install -e . -python -m trading_runtime.local.backtest --config trading_runtime/local/local.json +python -m core_runtime.local.backtest --config core_runtime/local/local.json ``` Verified output location: @@ -174,7 +175,7 @@ Current caveats: - Paths are cwd-relative; supported workflow is running from `core-runtime` root. - hftbacktest timestamp-ordering warnings may appear from fixture ordering but do not fail the run. - `tests/data/results/` may contain historical/sample artifacts and is no longer the default local output location. -- Naming remains transitional (`core-runtime` repo, `trading-runtime` distribution, `trading_runtime` imports, `trading_framework` core imports); rename/structure alignment is separate work. +- Naming remains transitional (`core-runtime` repo, `trading-runtime` distribution, `core_runtime` canonical imports, `trading_runtime` compatibility shim, `tradingchassis_core` canonical core imports). 
This status confirms local usability for the current local hftbacktest path; it does not imply full canonical Event Stream completion. @@ -228,8 +229,8 @@ These files are used by: Run a deterministic local backtest: ```bash -python -m trading_runtime.local.backtest \ - --config trading_runtime/local/local.json +python -m core_runtime.local.backtest \ + --config core_runtime/local/local.json ``` This uses synthetic deterministic test data located in: @@ -244,7 +245,7 @@ Results are written to: .runtime/local/results/ ``` -Important: `trading_runtime/local/local.json` and `examples/local/local.json` +Important: `core_runtime/local/local.json` and `examples/local/local.json` use cwd-relative paths. Run from the `core-runtime` repository root, or adjust config paths for your current working directory. diff --git a/trading_runtime/__init__.py b/core_runtime/__init__.py similarity index 100% rename from trading_runtime/__init__.py rename to core_runtime/__init__.py diff --git a/trading_runtime/argo/argo.json b/core_runtime/argo/argo.json similarity index 96% rename from trading_runtime/argo/argo.json rename to core_runtime/argo/argo.json index 604c9c5..98c3030 100644 --- a/trading_runtime/argo/argo.json +++ b/core_runtime/argo/argo.json @@ -74,7 +74,7 @@ }, "strategy": { - "class_path": "trading_runtime.strategies.debug_strategy:DebugStrategyV1", + "class_path": "core_runtime.strategies.debug_strategy:DebugStrategyV1", "spread": 5.0, "order_qty": 0.1, "use_price_tick_levels": 3, diff --git a/trading_runtime/backtest/adapters/__init__.py b/core_runtime/backtest/adapters/__init__.py similarity index 100% rename from trading_runtime/backtest/adapters/__init__.py rename to core_runtime/backtest/adapters/__init__.py diff --git a/trading_runtime/backtest/adapters/execution.py b/core_runtime/backtest/adapters/execution.py similarity index 97% rename from trading_runtime/backtest/adapters/execution.py rename to core_runtime/backtest/adapters/execution.py index 1563cdd..fadd7d6 
100644 --- a/trading_runtime/backtest/adapters/execution.py +++ b/core_runtime/backtest/adapters/execution.py @@ -8,9 +8,9 @@ if TYPE_CHECKING: from hftbacktest import ROIVectorMarketDepthBacktest - from trading_framework.core.domain.types import OrderIntent + from tradingchassis_core.core.domain.types import OrderIntent -from trading_framework.core.domain.reject_reasons import RejectReason +from tradingchassis_core.core.domain.reject_reasons import RejectReason class ExecutionAdapter(Protocol): diff --git a/trading_runtime/backtest/adapters/protocols.py b/core_runtime/backtest/adapters/protocols.py similarity index 98% rename from trading_runtime/backtest/adapters/protocols.py rename to core_runtime/backtest/adapters/protocols.py index b7a788c..9a8dee3 100644 --- a/trading_runtime/backtest/adapters/protocols.py +++ b/core_runtime/backtest/adapters/protocols.py @@ -24,7 +24,7 @@ from typing import Any, Protocol -from trading_framework.core.domain.types import OrderIntent +from tradingchassis_core.core.domain.types import OrderIntent class VenueEventWaiter(Protocol): diff --git a/trading_runtime/backtest/adapters/venue.py b/core_runtime/backtest/adapters/venue.py similarity index 95% rename from trading_runtime/backtest/adapters/venue.py rename to core_runtime/backtest/adapters/venue.py index 09c524d..33e1997 100644 --- a/trading_runtime/backtest/adapters/venue.py +++ b/core_runtime/backtest/adapters/venue.py @@ -8,7 +8,7 @@ if TYPE_CHECKING: from hftbacktest import ROIVectorMarketDepthBacktest -from trading_framework.core.ports.venue_adapter import VenueAdapter +from tradingchassis_core.core.ports.venue_adapter import VenueAdapter @dataclass(frozen=True) diff --git a/trading_runtime/backtest/engine/__init__.py b/core_runtime/backtest/engine/__init__.py similarity index 100% rename from trading_runtime/backtest/engine/__init__.py rename to core_runtime/backtest/engine/__init__.py diff --git a/trading_runtime/backtest/engine/engine_base.py 
b/core_runtime/backtest/engine/engine_base.py similarity index 100% rename from trading_runtime/backtest/engine/engine_base.py rename to core_runtime/backtest/engine/engine_base.py diff --git a/trading_runtime/backtest/engine/event_stream_cursor.py b/core_runtime/backtest/engine/event_stream_cursor.py similarity index 91% rename from trading_runtime/backtest/engine/event_stream_cursor.py rename to core_runtime/backtest/engine/event_stream_cursor.py index 2621f8a..a7f3b8e 100644 --- a/trading_runtime/backtest/engine/event_stream_cursor.py +++ b/core_runtime/backtest/engine/event_stream_cursor.py @@ -2,7 +2,7 @@ from __future__ import annotations -from trading_framework.core.domain.processing_order import ProcessingPosition +from tradingchassis_core.core.domain.processing_order import ProcessingPosition class EventStreamCursor: diff --git a/trading_runtime/backtest/engine/hft_engine.py b/core_runtime/backtest/engine/hft_engine.py similarity index 90% rename from trading_runtime/backtest/engine/hft_engine.py rename to core_runtime/backtest/engine/hft_engine.py index c327d5a..62cfd67 100644 --- a/trading_runtime/backtest/engine/hft_engine.py +++ b/core_runtime/backtest/engine/hft_engine.py @@ -13,20 +13,20 @@ ) if TYPE_CHECKING: - from trading_framework.core.domain.configuration import CoreConfiguration - from trading_framework.core.risk.risk_config import RiskConfig + from tradingchassis_core.core.domain.configuration import CoreConfiguration + from tradingchassis_core.core.risk.risk_config import RiskConfig -from trading_framework.strategies.base import Strategy -from trading_framework.strategies.strategy_config import StrategyConfig +from tradingchassis_core.strategies.base import Strategy +from tradingchassis_core.strategies.strategy_config import StrategyConfig -from trading_runtime.backtest.adapters.execution import HftBacktestExecutionAdapter -from trading_runtime.backtest.adapters.venue import HftBacktestVenueAdapter -from 
trading_runtime.backtest.engine.engine_base import ( +from core_runtime.backtest.adapters.execution import HftBacktestExecutionAdapter +from core_runtime.backtest.adapters.venue import HftBacktestVenueAdapter +from core_runtime.backtest.engine.engine_base import ( BacktestConfig, BacktestEngine, BacktestResult, ) -from trading_runtime.backtest.engine.strategy_runner import HftStrategyRunner +from core_runtime.backtest.engine.strategy_runner import HftStrategyRunner # pylint: disable=too-many-instance-attributes diff --git a/trading_runtime/backtest/engine/strategy_runner.py b/core_runtime/backtest/engine/strategy_runner.py similarity index 93% rename from trading_runtime/backtest/engine/strategy_runner.py rename to core_runtime/backtest/engine/strategy_runner.py index 96bda6f..cd02717 100644 --- a/trading_runtime/backtest/engine/strategy_runner.py +++ b/core_runtime/backtest/engine/strategy_runner.py @@ -7,13 +7,13 @@ from pathlib import Path from typing import TYPE_CHECKING, Any -from trading_framework.core.domain.configuration import CoreConfiguration -from trading_framework.core.domain.processing import process_event_entry -from trading_framework.core.domain.processing_order import ( +from tradingchassis_core.core.domain.configuration import CoreConfiguration +from tradingchassis_core.core.domain.processing import process_event_entry +from tradingchassis_core.core.domain.processing_order import ( EventStreamEntry, ) -from trading_framework.core.domain.state import StrategyState -from trading_framework.core.domain.types import ( +from tradingchassis_core.core.domain.state import StrategyState +from tradingchassis_core.core.domain.types import ( BookLevel, BookPayload, ControlTimeEvent, @@ -24,20 +24,20 @@ Price, Quantity, ) -from trading_framework.core.events.event_bus import EventBus -from trading_framework.core.events.sinks.sink_logging import LoggingEventSink -from trading_framework.core.ports.venue_adapter import VenueAdapter -from 
trading_framework.core.risk.risk_config import RiskConfig -from trading_framework.core.risk.risk_engine import RejectedIntent, RiskEngine +from tradingchassis_core.core.events.event_bus import EventBus +from tradingchassis_core.core.events.sinks.sink_logging import LoggingEventSink +from tradingchassis_core.core.ports.venue_adapter import VenueAdapter +from tradingchassis_core.core.risk.risk_config import RiskConfig +from tradingchassis_core.core.risk.risk_engine import RejectedIntent, RiskEngine -from trading_runtime.backtest.adapters.protocols import OrderSubmissionGateway -from trading_runtime.backtest.engine.event_stream_cursor import EventStreamCursor -from trading_runtime.core.events.sinks.file_recorder import FileRecorderSink +from core_runtime.backtest.adapters.protocols import OrderSubmissionGateway +from core_runtime.backtest.engine.event_stream_cursor import EventStreamCursor +from core_runtime.core.events.sinks.file_recorder import FileRecorderSink if TYPE_CHECKING: - from trading_framework.strategies.base import Strategy + from tradingchassis_core.strategies.base import Strategy - from trading_runtime.backtest.engine.hft_engine import HftEngineConfig + from core_runtime.backtest.engine.hft_engine import HftEngineConfig MAX_TIMEOUT_NS = 1 << 62 # Effectively "wait forever" without a heartbeat diff --git a/trading_runtime/backtest/io/__init__.py b/core_runtime/backtest/io/__init__.py similarity index 100% rename from trading_runtime/backtest/io/__init__.py rename to core_runtime/backtest/io/__init__.py diff --git a/trading_runtime/backtest/io/s3_adapter.py b/core_runtime/backtest/io/s3_adapter.py similarity index 100% rename from trading_runtime/backtest/io/s3_adapter.py rename to core_runtime/backtest/io/s3_adapter.py diff --git a/trading_runtime/backtest/orchestrator/__init__.py b/core_runtime/backtest/orchestrator/__init__.py similarity index 100% rename from trading_runtime/backtest/orchestrator/__init__.py rename to 
core_runtime/backtest/orchestrator/__init__.py diff --git a/trading_runtime/backtest/orchestrator/manifest.py b/core_runtime/backtest/orchestrator/manifest.py similarity index 100% rename from trading_runtime/backtest/orchestrator/manifest.py rename to core_runtime/backtest/orchestrator/manifest.py diff --git a/trading_runtime/backtest/orchestrator/planner.py b/core_runtime/backtest/orchestrator/planner.py similarity index 93% rename from trading_runtime/backtest/orchestrator/planner.py rename to core_runtime/backtest/orchestrator/planner.py index 01601e5..53288eb 100644 --- a/trading_runtime/backtest/orchestrator/planner.py +++ b/core_runtime/backtest/orchestrator/planner.py @@ -3,14 +3,14 @@ from typing import TYPE_CHECKING, Any if TYPE_CHECKING: - from trading_runtime.backtest.orchestrator.manifest import DataFileMeta, DatasetManifest + from core_runtime.backtest.orchestrator.manifest import DataFileMeta, DatasetManifest -from trading_runtime.backtest.orchestrator.planner_models import ( +from core_runtime.backtest.orchestrator.planner_models import ( ExperimentPlan, SegmentPlan, ) -from trading_runtime.backtest.orchestrator.segmenter import segment_files -from trading_runtime.backtest.orchestrator.sweeps import ( +from core_runtime.backtest.orchestrator.segmenter import segment_files +from core_runtime.backtest.orchestrator.sweeps import ( expand_parameter_grid, expand_ranges, ) diff --git a/trading_runtime/backtest/orchestrator/planner_models.py b/core_runtime/backtest/orchestrator/planner_models.py similarity index 90% rename from trading_runtime/backtest/orchestrator/planner_models.py rename to core_runtime/backtest/orchestrator/planner_models.py index 506f6a0..d6f36d9 100644 --- a/trading_runtime/backtest/orchestrator/planner_models.py +++ b/core_runtime/backtest/orchestrator/planner_models.py @@ -11,7 +11,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from trading_runtime.backtest.orchestrator.sweeps import SweepPlan + from 
core_runtime.backtest.orchestrator.sweeps import SweepPlan @dataclass(frozen=True, slots=True) diff --git a/trading_runtime/backtest/orchestrator/s3_manifest.py b/core_runtime/backtest/orchestrator/s3_manifest.py similarity index 95% rename from trading_runtime/backtest/orchestrator/s3_manifest.py rename to core_runtime/backtest/orchestrator/s3_manifest.py index cc55cd1..d3557fc 100644 --- a/trading_runtime/backtest/orchestrator/s3_manifest.py +++ b/core_runtime/backtest/orchestrator/s3_manifest.py @@ -2,8 +2,8 @@ import json -from trading_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim -from trading_runtime.backtest.orchestrator.manifest import DataFileMeta, DatasetManifest +from core_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim +from core_runtime.backtest.orchestrator.manifest import DataFileMeta, DatasetManifest class S3DatasetManifest(DatasetManifest): diff --git a/trading_runtime/backtest/orchestrator/segmenter.py b/core_runtime/backtest/orchestrator/segmenter.py similarity index 93% rename from trading_runtime/backtest/orchestrator/segmenter.py rename to core_runtime/backtest/orchestrator/segmenter.py index d06bb27..637b603 100644 --- a/trading_runtime/backtest/orchestrator/segmenter.py +++ b/core_runtime/backtest/orchestrator/segmenter.py @@ -10,7 +10,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from trading_runtime.backtest.orchestrator.manifest import DataFileMeta + from core_runtime.backtest.orchestrator.manifest import DataFileMeta def segment_files( diff --git a/trading_runtime/backtest/orchestrator/summary.py b/core_runtime/backtest/orchestrator/summary.py similarity index 98% rename from trading_runtime/backtest/orchestrator/summary.py rename to core_runtime/backtest/orchestrator/summary.py index a6c9aee..f325a27 100644 --- a/trading_runtime/backtest/orchestrator/summary.py +++ b/core_runtime/backtest/orchestrator/summary.py @@ -4,7 +4,7 @@ from typing import TYPE_CHECKING, List if TYPE_CHECKING: - from 
trading_runtime.backtest.orchestrator.planner_models import ExperimentPlan + from core_runtime.backtest.orchestrator.planner_models import ExperimentPlan # --------------------------------------------------------------------------- diff --git a/trading_runtime/backtest/orchestrator/sweeps.py b/core_runtime/backtest/orchestrator/sweeps.py similarity index 100% rename from trading_runtime/backtest/orchestrator/sweeps.py rename to core_runtime/backtest/orchestrator/sweeps.py diff --git a/trading_runtime/backtest/runtime/__init__.py b/core_runtime/backtest/runtime/__init__.py similarity index 100% rename from trading_runtime/backtest/runtime/__init__.py rename to core_runtime/backtest/runtime/__init__.py diff --git a/trading_runtime/backtest/runtime/context.py b/core_runtime/backtest/runtime/context.py similarity index 100% rename from trading_runtime/backtest/runtime/context.py rename to core_runtime/backtest/runtime/context.py diff --git a/trading_runtime/backtest/runtime/core_configuration_mapper.py b/core_runtime/backtest/runtime/core_configuration_mapper.py similarity index 98% rename from trading_runtime/backtest/runtime/core_configuration_mapper.py rename to core_runtime/backtest/runtime/core_configuration_mapper.py index 51ae3e1..f45c646 100644 --- a/trading_runtime/backtest/runtime/core_configuration_mapper.py +++ b/core_runtime/backtest/runtime/core_configuration_mapper.py @@ -3,7 +3,7 @@ import math from collections.abc import Collection, Mapping -from trading_framework.core.domain.configuration import CoreConfiguration +from tradingchassis_core.core.domain.configuration import CoreConfiguration _REQUIRED_METADATA_FIELDS = ("tick_size", "lot_size", "contract_size") diff --git a/trading_runtime/backtest/runtime/entrypoint.py b/core_runtime/backtest/runtime/entrypoint.py similarity index 93% rename from trading_runtime/backtest/runtime/entrypoint.py rename to core_runtime/backtest/runtime/entrypoint.py index 4a7bcd0..2932156 100644 --- 
a/trading_runtime/backtest/runtime/entrypoint.py +++ b/core_runtime/backtest/runtime/entrypoint.py @@ -8,17 +8,17 @@ from typing import TYPE_CHECKING, Any if TYPE_CHECKING: - from trading_runtime.backtest.orchestrator.planner_models import ExperimentPlan + from core_runtime.backtest.orchestrator.planner_models import ExperimentPlan -from trading_runtime.backtest.orchestrator.planner import plan_experiment -from trading_runtime.backtest.orchestrator.s3_manifest import S3DatasetManifest -from trading_runtime.backtest.orchestrator.summary import ( +from core_runtime.backtest.orchestrator.planner import plan_experiment +from core_runtime.backtest.orchestrator.s3_manifest import S3DatasetManifest +from core_runtime.backtest.orchestrator.summary import ( print_experiment_summary, summarize_experiment, ) -from trading_runtime.backtest.orchestrator.sweeps import RangeSpec -from trading_runtime.backtest.runtime.context import SweepContext -from trading_runtime.backtest.runtime.core_configuration_mapper import ( +from core_runtime.backtest.orchestrator.sweeps import RangeSpec +from core_runtime.backtest.runtime.context import SweepContext +from core_runtime.backtest.runtime.core_configuration_mapper import ( build_core_configuration_from_run_config, ) diff --git a/trading_runtime/backtest/runtime/experiment_finalize_entrypoint.py b/core_runtime/backtest/runtime/experiment_finalize_entrypoint.py similarity index 95% rename from trading_runtime/backtest/runtime/experiment_finalize_entrypoint.py rename to core_runtime/backtest/runtime/experiment_finalize_entrypoint.py index 6f24a43..b16640a 100644 --- a/trading_runtime/backtest/runtime/experiment_finalize_entrypoint.py +++ b/core_runtime/backtest/runtime/experiment_finalize_entrypoint.py @@ -8,9 +8,9 @@ from datetime import datetime, timezone from pathlib import Path -from trading_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim -from trading_runtime.backtest.runtime.context import ExperimentContext -from 
trading_runtime.backtest.runtime.prometheus_metrics import PrometheusMetricsClient +from core_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim +from core_runtime.backtest.runtime.context import ExperimentContext +from core_runtime.backtest.runtime.prometheus_metrics import PrometheusMetricsClient LOGGER = logging.getLogger(__name__) diff --git a/trading_runtime/backtest/runtime/mlflow_segment_logger.py b/core_runtime/backtest/runtime/mlflow_segment_logger.py similarity index 96% rename from trading_runtime/backtest/runtime/mlflow_segment_logger.py rename to core_runtime/backtest/runtime/mlflow_segment_logger.py index 3ad7326..78672fc 100644 --- a/trading_runtime/backtest/runtime/mlflow_segment_logger.py +++ b/core_runtime/backtest/runtime/mlflow_segment_logger.py @@ -7,7 +7,7 @@ import mlflow if TYPE_CHECKING: - from trading_runtime.backtest.runtime.context import SegmentContext + from core_runtime.backtest.runtime.context import SegmentContext LOGGER = logging.getLogger(__name__) diff --git a/trading_runtime/backtest/runtime/prometheus_metrics.py b/core_runtime/backtest/runtime/prometheus_metrics.py similarity index 100% rename from trading_runtime/backtest/runtime/prometheus_metrics.py rename to core_runtime/backtest/runtime/prometheus_metrics.py diff --git a/trading_runtime/backtest/runtime/run_sweep.py b/core_runtime/backtest/runtime/run_sweep.py similarity index 97% rename from trading_runtime/backtest/runtime/run_sweep.py rename to core_runtime/backtest/runtime/run_sweep.py index 2c845ed..b63385a 100644 --- a/trading_runtime/backtest/runtime/run_sweep.py +++ b/core_runtime/backtest/runtime/run_sweep.py @@ -13,17 +13,17 @@ from pathlib import Path from typing import Any -from trading_framework.core.risk.risk_config import RiskConfig -from trading_framework.strategies.strategy_config import StrategyConfig +from tradingchassis_core.core.risk.risk_config import RiskConfig +from tradingchassis_core.strategies.strategy_config import StrategyConfig -from 
trading_runtime.backtest.engine.hft_engine import ( +from core_runtime.backtest.engine.hft_engine import ( HftBacktestConfig, HftBacktestEngine, HftEngineConfig, ) -from trading_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim -from trading_runtime.backtest.runtime.context import SweepContext -from trading_runtime.backtest.runtime.core_configuration_mapper import ( +from core_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim +from core_runtime.backtest.runtime.context import SweepContext +from core_runtime.backtest.runtime.core_configuration_mapper import ( build_core_configuration_from_run_config, ) diff --git a/trading_runtime/backtest/runtime/segment_finalize_entrypoint.py b/core_runtime/backtest/runtime/segment_finalize_entrypoint.py similarity index 94% rename from trading_runtime/backtest/runtime/segment_finalize_entrypoint.py rename to core_runtime/backtest/runtime/segment_finalize_entrypoint.py index 063de9a..a126df5 100644 --- a/trading_runtime/backtest/runtime/segment_finalize_entrypoint.py +++ b/core_runtime/backtest/runtime/segment_finalize_entrypoint.py @@ -6,10 +6,10 @@ from datetime import datetime, timezone from pathlib import Path -from trading_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim -from trading_runtime.backtest.runtime.context import SegmentContext -from trading_runtime.backtest.runtime.mlflow_segment_logger import MlflowSegmentLogger -from trading_runtime.backtest.runtime.prometheus_metrics import PrometheusMetricsClient +from core_runtime.backtest.io.s3_adapter import OCIObjectStorageS3Shim +from core_runtime.backtest.runtime.context import SegmentContext +from core_runtime.backtest.runtime.mlflow_segment_logger import MlflowSegmentLogger +from core_runtime.backtest.runtime.prometheus_metrics import PrometheusMetricsClient LOGGER = logging.getLogger(__name__) diff --git a/trading_runtime/core/__init__.py b/core_runtime/core/__init__.py similarity index 100% rename from 
trading_runtime/core/__init__.py rename to core_runtime/core/__init__.py diff --git a/trading_runtime/core/events/__init__.py b/core_runtime/core/events/__init__.py similarity index 100% rename from trading_runtime/core/events/__init__.py rename to core_runtime/core/events/__init__.py diff --git a/trading_runtime/core/events/sinks/__init__.py b/core_runtime/core/events/sinks/__init__.py similarity index 100% rename from trading_runtime/core/events/sinks/__init__.py rename to core_runtime/core/events/sinks/__init__.py diff --git a/trading_runtime/core/events/sinks/file_recorder.py b/core_runtime/core/events/sinks/file_recorder.py similarity index 100% rename from trading_runtime/core/events/sinks/file_recorder.py rename to core_runtime/core/events/sinks/file_recorder.py diff --git a/trading_runtime/local/__init__.py b/core_runtime/local/__init__.py similarity index 100% rename from trading_runtime/local/__init__.py rename to core_runtime/local/__init__.py diff --git a/trading_runtime/local/backtest.py b/core_runtime/local/backtest.py similarity index 86% rename from trading_runtime/local/backtest.py rename to core_runtime/local/backtest.py index a8495c6..62a58fb 100644 --- a/trading_runtime/local/backtest.py +++ b/core_runtime/local/backtest.py @@ -8,17 +8,17 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from trading_runtime.backtest.engine.engine_base import BacktestResult + from core_runtime.backtest.engine.engine_base import BacktestResult -from trading_framework.core.risk.risk_config import RiskConfig -from trading_framework.strategies.strategy_config import StrategyConfig +from tradingchassis_core.core.risk.risk_config import RiskConfig +from tradingchassis_core.strategies.strategy_config import StrategyConfig -from trading_runtime.backtest.engine.hft_engine import ( +from core_runtime.backtest.engine.hft_engine import ( HftBacktestConfig, HftBacktestEngine, HftEngineConfig, ) -from trading_runtime.backtest.runtime.core_configuration_mapper import ( 
+from core_runtime.backtest.runtime.core_configuration_mapper import ( build_core_configuration_from_run_config, ) diff --git a/trading_runtime/local/local.json b/core_runtime/local/local.json similarity index 96% rename from trading_runtime/local/local.json rename to core_runtime/local/local.json index d1c3c6f..83ef50b 100644 --- a/trading_runtime/local/local.json +++ b/core_runtime/local/local.json @@ -78,7 +78,7 @@ }, "strategy": { - "class_path": "trading_runtime.strategies.debug_strategy:DebugStrategyV1", + "class_path": "core_runtime.strategies.debug_strategy:DebugStrategyV1", "spread": 5.0, "order_qty": 0.1, "use_price_tick_levels": 3, diff --git a/trading_runtime/strategies/__init__.py b/core_runtime/strategies/__init__.py similarity index 100% rename from trading_runtime/strategies/__init__.py rename to core_runtime/strategies/__init__.py diff --git a/trading_runtime/strategies/debug_strategy.py b/core_runtime/strategies/debug_strategy.py similarity index 98% rename from trading_runtime/strategies/debug_strategy.py rename to core_runtime/strategies/debug_strategy.py index 4ce1738..42486d1 100644 --- a/trading_runtime/strategies/debug_strategy.py +++ b/core_runtime/strategies/debug_strategy.py @@ -3,7 +3,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from trading_framework import ( + from tradingchassis_core import ( EngineContext, GateDecision, MarketEvent, @@ -11,7 +11,7 @@ StrategyState, ) -from trading_framework import ( +from tradingchassis_core import ( NewOrderIntent, OrderIntent, Price, diff --git a/examples/local/backtest.py b/examples/local/backtest.py index 1d2d115..9c7db70 100644 --- a/examples/local/backtest.py +++ b/examples/local/backtest.py @@ -14,16 +14,16 @@ sys.path.insert(0, str(PROJECT_ROOT)) if TYPE_CHECKING: - from trading_runtime.backtest.engine.engine_base import BacktestResult + from core_runtime.backtest.engine.engine_base import BacktestResult -from trading_framework.core.risk.risk_config import RiskConfig -from 
trading_framework.strategies.strategy_config import StrategyConfig -from trading_runtime.backtest.engine.hft_engine import ( +from tradingchassis_core.core.risk.risk_config import RiskConfig +from tradingchassis_core.strategies.strategy_config import StrategyConfig +from core_runtime.backtest.engine.hft_engine import ( HftBacktestConfig, HftBacktestEngine, HftEngineConfig, ) -from trading_runtime.backtest.runtime.core_configuration_mapper import ( +from core_runtime.backtest.runtime.core_configuration_mapper import ( build_core_configuration_from_run_config, ) diff --git a/examples/strategies/debug_strategy.py b/examples/strategies/debug_strategy.py index 4789159..09b5e98 100644 --- a/examples/strategies/debug_strategy.py +++ b/examples/strategies/debug_strategy.py @@ -3,7 +3,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from trading_framework import ( + from tradingchassis_core import ( EngineContext, GateDecision, MarketEvent, @@ -11,7 +11,7 @@ StrategyState, ) -from trading_framework import ( +from tradingchassis_core import ( NewOrderIntent, OrderIntent, Price, diff --git a/pyproject.toml b/pyproject.toml index 49b83d8..896c601 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,9 +3,9 @@ requires = ["setuptools>=69", "wheel"] build-backend = "setuptools.build_meta" [project] -name = "trading-runtime" +name = "core-runtime" version = "0.1.0" -description = "Runtime execution and Kubernetes orchestration layer for the trading-framework with reproducible environments and deterministic backtesting." +description = "Runtime execution and Kubernetes orchestration layer for the core with reproducible environments and deterministic backtesting." 
readme = "README.md" requires-python = ">=3.11" authors = [{ name = "tradingeng@protonmail.com" }] @@ -37,13 +37,13 @@ dev = [ # Explicit package discovery # -------------------------------------------------- [tool.setuptools.packages.find] -include = ["trading_runtime*"] +include = ["core_runtime*", "trading_runtime*"] # -------------------------------------------------- # Include runtime assets # -------------------------------------------------- [tool.setuptools.package-data] -trading_runtime = ["**/*"] +core_runtime = ["**/*"] [tool.setuptools] include-package-data = true @@ -82,5 +82,5 @@ ignore_errors = true # Import Linter # -------------------------------------------------- [tool.importlinter] -root_package = "trading_runtime" +root_package = "core_runtime" include_external_packages = true diff --git a/requirements-dev.txt b/requirements-dev.txt index af15fc0..c5c6b54 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -331,7 +331,7 @@ tornado==6.5.4 # via bokeh tqdm==4.67.3 # via panel -trading-framework @ git+https://github.com/TradingChassis/core.git@686dd949e74f6cd482e3e96236669058d9484899 +trading-framework @ git+https://github.com/TradingChassis/core.git@632067b470e18b2c636008d08281551412a8ac3a # via -r _git_deps.in typing-extensions==4.15.0 # via diff --git a/requirements.txt b/requirements.txt index 5df1f58..46e0e95 100644 --- a/requirements.txt +++ b/requirements.txt @@ -307,7 +307,7 @@ tornado==6.5.4 # via bokeh tqdm==4.67.3 # via panel -trading-framework @ git+https://github.com/TradingChassis/core.git@686dd949e74f6cd482e3e96236669058d9484899 +trading-framework @ git+https://github.com/TradingChassis/core.git@632067b470e18b2c636008d08281551412a8ac3a # via -r _git_deps.in typing-extensions==4.15.0 # via diff --git a/scripts/check.sh b/scripts/check.sh index 1ba9c8a..4e560e3 100755 --- a/scripts/check.sh +++ b/scripts/check.sh @@ -5,12 +5,12 @@ echo "🔍 Running import-linter..." 
lint-imports --verbose echo "⚡ Running ruff (check only)..." -ruff check trading_runtime tests +ruff check core_runtime tests echo "🧠 Running mypy..." -mypy trading_runtime tests +mypy core_runtime tests echo "🧪 Running pytest..." -pytest +python -m pytest echo "✅ All checks passed!" diff --git a/tests/runtime/test_adapter_boundary_guards.py b/tests/runtime/test_adapter_boundary_guards.py index 4619b0e..b1dfbe3 100644 --- a/tests/runtime/test_adapter_boundary_guards.py +++ b/tests/runtime/test_adapter_boundary_guards.py @@ -15,9 +15,9 @@ WORKSPACE_ROOT = CORE_RUNTIME_ROOT.parent ADAPTER_FILES = [ - CORE_RUNTIME_ROOT / "trading_runtime/backtest/adapters/venue.py", - CORE_RUNTIME_ROOT / "trading_runtime/backtest/adapters/execution.py", - CORE_RUNTIME_ROOT / "trading_runtime/backtest/adapters/protocols.py", + CORE_RUNTIME_ROOT / "core_runtime/backtest/adapters/venue.py", + CORE_RUNTIME_ROOT / "core_runtime/backtest/adapters/execution.py", + CORE_RUNTIME_ROOT / "core_runtime/backtest/adapters/protocols.py", ] @@ -61,7 +61,7 @@ def test_adapters_do_not_import_or_construct_fill_event() -> None: def test_core_production_package_does_not_import_trading_runtime() -> None: - core_pkg = WORKSPACE_ROOT / "core/trading_framework" + core_pkg = WORKSPACE_ROOT / "core/tradingchassis_core" if not core_pkg.exists(): pytest.skip( f"core package not present in this checkout layout: {core_pkg}" @@ -70,6 +70,8 @@ def test_core_production_package_does_not_import_trading_runtime() -> None: patterns = [ r"^\s*import\s+trading_runtime(\.|$)", r"^\s*from\s+trading_runtime(\.|\s+import\b)", + r"^\s*import\s+core_runtime(\.|$)", + r"^\s*from\s+core_runtime(\.|\s+import\b)", ] for path in core_pkg.rglob("*.py"): @@ -77,7 +79,7 @@ def test_core_production_package_does_not_import_trading_runtime() -> None: def test_protocols_does_not_define_execution_feedback_record_source_yet() -> None: - protocols_py = CORE_RUNTIME_ROOT / "trading_runtime/backtest/adapters/protocols.py" + protocols_py = 
CORE_RUNTIME_ROOT / "core_runtime/backtest/adapters/protocols.py" patterns = [ r"^\s*class\s+ExecutionFeedbackRecordSource\b", r"^\s*ExecutionFeedbackRecordSource\s*=", diff --git a/tests/runtime/test_core_configuration_mapper.py b/tests/runtime/test_core_configuration_mapper.py index ac93136..76c33d2 100644 --- a/tests/runtime/test_core_configuration_mapper.py +++ b/tests/runtime/test_core_configuration_mapper.py @@ -1,9 +1,9 @@ from __future__ import annotations import pytest -from trading_framework.core.domain.configuration import CoreConfiguration +from tradingchassis_core.core.domain.configuration import CoreConfiguration -from trading_runtime.backtest.runtime.core_configuration_mapper import ( +from core_runtime.backtest.runtime.core_configuration_mapper import ( build_core_configuration_from_run_config, ) diff --git a/tests/runtime/test_event_stream_cursor.py b/tests/runtime/test_event_stream_cursor.py index 9ef2d08..b94b9b7 100644 --- a/tests/runtime/test_event_stream_cursor.py +++ b/tests/runtime/test_event_stream_cursor.py @@ -1,9 +1,9 @@ from __future__ import annotations import pytest -from trading_framework.core.domain.processing_order import ProcessingPosition +from tradingchassis_core.core.domain.processing_order import ProcessingPosition -from trading_runtime.backtest.engine.event_stream_cursor import EventStreamCursor +from core_runtime.backtest.engine.event_stream_cursor import EventStreamCursor def test_event_stream_cursor_starts_at_zero() -> None: diff --git a/tests/runtime/test_hftbacktest_execution_adapter_characterization.py b/tests/runtime/test_hftbacktest_execution_adapter_characterization.py index 2b91593..f03634e 100644 --- a/tests/runtime/test_hftbacktest_execution_adapter_characterization.py +++ b/tests/runtime/test_hftbacktest_execution_adapter_characterization.py @@ -13,8 +13,8 @@ from dataclasses import dataclass, field from typing import Any -from trading_framework.core.domain.reject_reasons import RejectReason -from 
trading_framework.core.domain.types import ( +from tradingchassis_core.core.domain.reject_reasons import RejectReason +from tradingchassis_core.core.domain.types import ( CancelOrderIntent, NewOrderIntent, Price, @@ -22,7 +22,7 @@ ReplaceOrderIntent, ) -from trading_runtime.backtest.adapters.execution import ( +from core_runtime.backtest.adapters.execution import ( HftBacktestExecutionAdapter, _to_i64_order_id, ) diff --git a/tests/runtime/test_hftbacktest_execution_feedback_probe.py b/tests/runtime/test_hftbacktest_execution_feedback_probe.py index d975588..bbd7869 100644 --- a/tests/runtime/test_hftbacktest_execution_feedback_probe.py +++ b/tests/runtime/test_hftbacktest_execution_feedback_probe.py @@ -8,12 +8,12 @@ import pytest -from trading_runtime.backtest.adapters.execution import ( +from core_runtime.backtest.adapters.execution import ( HftBacktestExecutionAdapter, _to_i64_order_id, ) -from trading_runtime.backtest.adapters.venue import HftBacktestVenueAdapter -from trading_runtime.backtest.engine.strategy_runner import HftStrategyRunner +from core_runtime.backtest.adapters.venue import HftBacktestVenueAdapter +from core_runtime.backtest.engine.strategy_runner import HftStrategyRunner hftbacktest = pytest.importorskip("hftbacktest") from hftbacktest import types as hbt_types # type: ignore # noqa: E402 diff --git a/tests/runtime/test_import_compatibility_shim.py b/tests/runtime/test_import_compatibility_shim.py new file mode 100644 index 0000000..bcd3193 --- /dev/null +++ b/tests/runtime/test_import_compatibility_shim.py @@ -0,0 +1,29 @@ +from __future__ import annotations + +import warnings + + +def test_legacy_and_new_nested_runtime_modules_share_identity() -> None: + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + import core_runtime.backtest.engine.strategy_runner as old_strategy_runner + import core_runtime.strategies.debug_strategy as old_debug_strategy + + import core_runtime.backtest.engine.strategy_runner as 
new_strategy_runner + import core_runtime.strategies.debug_strategy as new_debug_strategy + + assert old_strategy_runner is new_strategy_runner + assert old_debug_strategy is new_debug_strategy + + +def test_legacy_and_new_runtime_symbols_share_identity() -> None: + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + from core_runtime.backtest.engine.strategy_runner import HftStrategyRunner as OldRunner + from core_runtime.strategies.debug_strategy import DebugStrategyV1 as OldStrategy + + from core_runtime.backtest.engine.strategy_runner import HftStrategyRunner as NewRunner + from core_runtime.strategies.debug_strategy import DebugStrategyV1 as NewStrategy + + assert OldRunner is NewRunner + assert OldStrategy is NewStrategy diff --git a/tests/runtime/test_runtime_core_configuration_integration.py b/tests/runtime/test_runtime_core_configuration_integration.py index 5a9c6b1..f9a4ee2 100644 --- a/tests/runtime/test_runtime_core_configuration_integration.py +++ b/tests/runtime/test_runtime_core_configuration_integration.py @@ -6,9 +6,9 @@ from pathlib import Path import pytest -from trading_framework.core.domain.configuration import CoreConfiguration +from tradingchassis_core.core.domain.configuration import CoreConfiguration -from trading_runtime.local.backtest import load_config +from core_runtime.local.backtest import load_config def _repo_root() -> Path: @@ -54,7 +54,7 @@ def _from_file(*, file_location: str, profile_name: str) -> dict[str, object]: def test_local_loader_fails_early_when_core_missing(tmp_path: Path) -> None: - sample_path = _repo_root() / "trading_runtime/local/local.json" + sample_path = _repo_root() / "core_runtime/local/local.json" config = _load_sample_config(sample_path) config.pop("core", None) @@ -66,7 +66,7 @@ def test_local_loader_fails_early_when_core_missing(tmp_path: Path) -> None: def test_local_loader_succeeds_with_valid_core() -> None: - sample_path = _repo_root() / 
"trading_runtime/local/local.json" + sample_path = _repo_root() / "core_runtime/local/local.json" cfg = load_config(str(sample_path)) assert isinstance(cfg.core_cfg, CoreConfiguration) @@ -79,9 +79,9 @@ def test_argo_entrypoint_rejects_invalid_run_config_before_planning( ) -> None: _install_oci_stubs(monkeypatch) - from trading_runtime.backtest.runtime.entrypoint import main as argo_entrypoint_main + from core_runtime.backtest.runtime.entrypoint import main as argo_entrypoint_main - sample_path = _repo_root() / "trading_runtime/argo/argo.json" + sample_path = _repo_root() / "core_runtime/argo/argo.json" config = _load_sample_config(sample_path) config.pop("core", None) @@ -109,7 +109,7 @@ def test_argo_sweep_worker_rejects_context_missing_core( ) -> None: _install_oci_stubs(monkeypatch) - from trading_runtime.backtest.runtime.run_sweep import main as run_sweep_main + from core_runtime.backtest.runtime.run_sweep import main as run_sweep_main context = { "experiment_id": "exp-1", @@ -156,12 +156,12 @@ def test_argo_emit_includes_core_section_in_sweep_context( ) -> None: _install_oci_stubs(monkeypatch) - from trading_runtime.backtest.orchestrator.planner_models import ( + from core_runtime.backtest.orchestrator.planner_models import ( ExperimentPlan, SegmentPlan, ) - from trading_runtime.backtest.orchestrator.sweeps import SweepPlan - from trading_runtime.backtest.runtime.entrypoint import _emit_sweep_context + from core_runtime.backtest.orchestrator.sweeps import SweepPlan + from core_runtime.backtest.runtime.entrypoint import _emit_sweep_context plan = ExperimentPlan( experiment_id="exp-1", diff --git a/tests/runtime/test_strategy_runner_canonical_market_adoption.py b/tests/runtime/test_strategy_runner_canonical_market_adoption.py index f0d8431..d3b315d 100644 --- a/tests/runtime/test_strategy_runner_canonical_market_adoption.py +++ b/tests/runtime/test_strategy_runner_canonical_market_adoption.py @@ -5,9 +5,9 @@ from typing import Any import pytest -from 
trading_framework.core.domain.configuration import CoreConfiguration -from trading_framework.core.domain.state import StrategyState -from trading_framework.core.domain.types import ( +from tradingchassis_core.core.domain.configuration import CoreConfiguration +from tradingchassis_core.core.domain.state import StrategyState +from tradingchassis_core.core.domain.types import ( BookLevel, BookPayload, CancelOrderIntent, @@ -20,15 +20,15 @@ Quantity, ReplaceOrderIntent, ) -from trading_framework.core.events.event_bus import EventBus -from trading_framework.core.risk.risk_config import RiskConfig -from trading_framework.core.risk.risk_engine import GateDecision -from trading_framework.strategies.base import Strategy - -import trading_runtime.backtest.engine.strategy_runner as strategy_runner_module -from trading_runtime.backtest.engine.event_stream_cursor import EventStreamCursor -from trading_runtime.backtest.engine.hft_engine import HftEngineConfig -from trading_runtime.backtest.engine.strategy_runner import ( +from tradingchassis_core.core.events.event_bus import EventBus +from tradingchassis_core.core.risk.risk_config import RiskConfig +from tradingchassis_core.core.risk.risk_engine import GateDecision +from tradingchassis_core.strategies.base import Strategy + +import core_runtime.backtest.engine.strategy_runner as strategy_runner_module +from core_runtime.backtest.engine.event_stream_cursor import EventStreamCursor +from core_runtime.backtest.engine.hft_engine import HftEngineConfig +from core_runtime.backtest.engine.strategy_runner import ( MAX_TIMEOUT_NS, HftStrategyRunner, ) diff --git a/tests/test_dummy.py b/tests/test_dummy.py index 13a78be..1baea3f 100644 --- a/tests/test_dummy.py +++ b/tests/test_dummy.py @@ -27,10 +27,10 @@ def test_exception(): def test_package_import(): """ - Optional: verify that trading_runtime can be imported. + Optional: verify that core_runtime and trading_runtime can be imported. Remove if not needed. 
""" try: - import trading_runtime # noqa: F401 + import core_runtime # noqa: F401 except ImportError as exc: - pytest.fail(f"Failed to import trading_runtime: {exc}") + pytest.fail(f"Failed to import runtime package: {exc}") From 12c68dfedd09212581ef0057932b1cff2e1e6806 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 22:24:12 +0000 Subject: [PATCH 28/36] m2 p9 s4.2: remove active legacy naming references --- .env.example | 2 +- .github/ISSUE_TEMPLATE/bug-report.md | 2 +- CHANGELOG.md | 4 +- CONTRIBUTING.md | 2 +- Dockerfile | 6 +-- README.md | 39 +++++++++---------- argo/run-backtest.yaml | 4 +- argo/run-build.yaml | 2 +- argo/workflowtemplate-backtest-fanout.yaml | 12 +++--- argo/workflowtemplate-build-push-ghcr.yaml | 10 ++--- core_runtime/backtest/runtime/run_sweep.py | 2 +- docs/venue-adapter-abstraction-design-v1.md | 8 ++-- pyproject.toml | 4 +- scripts/compile-requirements.sh | 6 +-- scripts/post-create.sh | 2 +- .../runtime/test_import_compatibility_shim.py | 4 +- tests/test_dummy.py | 2 +- 17 files changed, 55 insertions(+), 56 deletions(-) diff --git a/.env.example b/.env.example index e06d920..65e653c 100644 --- a/.env.example +++ b/.env.example @@ -1 +1 @@ -TRADING_FRAMEWORK_COMMIT=934d332c21bef5... +TRADINGCHASSIS_CORE_COMMIT=934d332c21bef5... diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index 7428e03..4b5f5ed 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -9,7 +9,7 @@ Clear and concise description of the issue. ## Environment -- Trading Framework version: +- Core (`tradingchassis-core`) version: - Python version: - Execution mode (local / cloud): - Strategy used: diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ccab5b..9f5d799 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ This project adheres to Semantic Versioning. ## [0.1.0] – 2026-02-17 -Initial public release of the trading-runtime execution and orchestration layer. 
+Initial public release of the Core Runtime execution and orchestration layer. ### Added @@ -20,7 +20,7 @@ Initial public release of the trading-runtime execution and orchestration layer. #### Dependency Management -- Commit-pinned `trading-framework` integration +- Commit-pinned `tradingchassis-core` integration - Reproducible dependency compilation via pip-tools - Environment bootstrap scripts diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 678e6a1..cfa8e25 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ Thank you for your interest in contributing! -This repository is a runtime example for the [trading-framework](https://github.com/trading-engineering/trading-framework) framework using +This repository is a Core Runtime example for [Core (`tradingchassis-core`)](https://github.com/TradingChassis/core) using [Kubernetes](https://kubernetes.io) (via e.g. [MicroK8s](https://microk8s.io)) and [Argo Workflows](https://argoproj.github.io/workflows). Contributions should preserve clarity, explicitness and reproducibility. 
diff --git a/Dockerfile b/Dockerfile index 3a49b77..5674585 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,13 +3,13 @@ # ================================================== FROM python:3.11.14-slim-trixie AS build -ARG TRADING_RUNTIME_COMMIT +# ARG TRADING_RUNTIME_COMMIT -ENV TRADING_RUNTIME_COMMIT=${TRADING_RUNTIME_COMMIT} +# ENV TRADING_RUNTIME_COMMIT=${TRADING_RUNTIME_COMMIT} ENV PATH="/install/bin:/install-dev/bin:${PATH}" ENV PYTHONPATH="/install/lib/python3.11/site-packages" -WORKDIR /workspaces/trading-runtime +WORKDIR /workspaces/core-runtime # System dependencies for building Python packages & running tests RUN apt-get update && \ diff --git a/README.md b/README.md index 8af9696..6dba2e4 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,11 @@ -# Trading Runtime +# Core Runtime -![CI](https://github.com/trading-engineering/trading-runtime/actions/workflows/tests.yaml/badge.svg) +![CI](https://github.com/TradingChassis/core-runtime/actions/workflows/tests.yaml/badge.svg) ![Python](https://img.shields.io/badge/python-3.11+-blue) ![License](https://img.shields.io/badge/license-MIT-green) Runtime execution layer and orchestration environment for the -[trading-framework](https://github.com/trading-engineering/trading-framework). +[Core (`tradingchassis-core`)](https://github.com/TradingChassis/core). This repository provides: @@ -19,9 +19,9 @@ This repository provides: ## 🧠 What is this? -`trading-runtime` is the execution and orchestration layer built on top of `trading-framework`. +`core-runtime` is the execution and orchestration layer built on top of `tradingchassis-core`. -While `trading-framework` implements the deterministic trading framework, +While Core (`tradingchassis-core`) implements the deterministic trading core, this repository focuses on: - how strategies are executed @@ -33,11 +33,11 @@ It intentionally contains no domain framework logic. 
--- -## 🧩 Relationship to trading-framework +## 🧩 Relationship to Core ``` -trading-framework → core framework, backtesting engine, domain logic -trading-runtime → executing entrypoints, runtime configs, orchestration +tradingchassis-core → core framework, backtesting engine, domain logic +tradingchassis-core-runtime → executing entrypoints, runtime configs, orchestration ``` The framework is consumed as a pinned Git dependency to guarantee @@ -45,14 +45,13 @@ deterministic runtime environments. --- -## 🏷 Naming Clarification (current transitional state) +## 🏷 Naming Clarification - Repository/folder name in the monorepo can be `core-runtime`. - Python import package in this repository is `core_runtime`. -- Distribution/project name in packaging metadata is `trading-runtime`. -- Legacy package import `trading_runtime` remains available as a compatibility shim. -- Core package canonical import is `tradingchassis_core` (`trading_framework` is compatibility/deprecated). -- Package/directory rename alignment is planned separately and is not part of this phase. +- Distribution/project name in packaging metadata is `tradingchassis-core-runtime`. +- Core package canonical import is `tradingchassis_core`. +- Core distribution/project name is `tradingchassis-core`. --- @@ -175,7 +174,7 @@ Current caveats: - Paths are cwd-relative; supported workflow is running from `core-runtime` root. - hftbacktest timestamp-ordering warnings may appear from fixture ordering but do not fail the run. - `tests/data/results/` may contain historical/sample artifacts and is no longer the default local output location. -- Naming remains transitional (`core-runtime` repo, `trading-runtime` distribution, `core_runtime` canonical imports, `trading_runtime` compatibility shim, `tradingchassis_core` canonical core imports). +- Naming is aligned (`core-runtime` repo, `tradingchassis-core-runtime` distribution, `core_runtime` import package, `tradingchassis_core` core import package). 
This status confirms local usability for the current local hftbacktest path; it does not imply full canonical Event Stream completion. @@ -198,12 +197,12 @@ For adapter boundary context, see: ## 📌 Dependency Pinning & Reproducibility -The `trading-framework` dependency is pinned by commit SHA. +The `tradingchassis-core` dependency is pinned by commit SHA. Create a `.env` file: ```bash -TRADING_FRAMEWORK_COMMIT= +TRADINGCHASSIS_CORE_COMMIT= ``` Generate reproducible environments: @@ -278,13 +277,13 @@ argo/workflowtemplate-backtest.yaml ### 🐳 Runtime Image Build & Push -`workflowtemplate-build-push-ghcr.yaml` builds the trading-runtime Docker image and pushes it to +`workflowtemplate-build-push-ghcr.yaml` builds the Core Runtime Docker image and pushes it to GitHub Container Registry (GHCR). This image contains: - Python dependencies and entrypoints -- trading-framework and trading-runtime commit SHA +- tradingchassis-core and core-runtime commit SHA - strategies and configs It acts as an immutable and deterministic runtime environment for all backtests. 
@@ -362,7 +361,7 @@ Without this secret, the workflow cannot authenticate against GHCR, and Kubernet | Script | Purpose | | ------------------------- | ----------------------------------------------- | -| `compile-requirements.sh` | Pins trading-framework and resolves dependencies | +| `compile-requirements.sh` | Pins tradingchassis-core and resolves dependencies | | `post-create.sh` | Dev container bootstrap | | `check.sh` | Local validation helpers | @@ -430,7 +429,7 @@ This repository includes: It does not include: -- trading framework internals +- Core internals - specific strategy research logic --- diff --git a/argo/run-backtest.yaml b/argo/run-backtest.yaml index faa4121..6ccfd01 100644 --- a/argo/run-backtest.yaml +++ b/argo/run-backtest.yaml @@ -8,10 +8,10 @@ spec: arguments: parameters: - name: image_repo - value: "ghcr.io/trading-engineering/trading-runtime" + value: "ghcr.io/trading-engineering/tradingchassis-core-runtime" - name: image_tag value: "${IMAGE_TAG}" - name: experiment_config - value: "/usr/local/lib/python3.11/site-packages/trading_runtime/argo/argo.json" + value: "/usr/local/lib/python3.11/site-packages/core_runtime/argo/argo.json" - name: scratch_root value: "/mnt/scratch" diff --git a/argo/run-build.yaml b/argo/run-build.yaml index 558c9c4..d87312d 100644 --- a/argo/run-build.yaml +++ b/argo/run-build.yaml @@ -9,5 +9,5 @@ spec: parameters: - name: git_branch value: "${GIT_BRANCH}" - - name: trading_runtime_commit + - name: core_runtime_commit value: "${RUNTIME_COMMIT}" diff --git a/argo/workflowtemplate-backtest-fanout.yaml b/argo/workflowtemplate-backtest-fanout.yaml index 22f1067..6c5deb2 100644 --- a/argo/workflowtemplate-backtest-fanout.yaml +++ b/argo/workflowtemplate-backtest-fanout.yaml @@ -16,7 +16,7 @@ spec: parameters: - name: image_repo description: "Container image repo" - value: ghcr.io/trading-engineering/trading-runtime + value: ghcr.io/trading-engineering/tradingchassis-core-runtime - name: image_tag description: 
"Container image tag to run (recommended: commit SHA)" @@ -24,7 +24,7 @@ spec: - name: experiment_config description: "Path to experiment JSON inside the container" - value: /usr/local/lib/python3.11/site-packages/trading_runtime/argo/argo.json + value: /usr/local/lib/python3.11/site-packages/core_runtime/argo/argo.json - name: scratch_root description: "Scratch root inside the container" @@ -104,7 +104,7 @@ spec: command: [python, -m] args: - - trading_runtime.backtest.runtime.entrypoint + - core_runtime.backtest.runtime.entrypoint - --config - "{{workflow.parameters.experiment_config}}" - --run @@ -156,7 +156,7 @@ spec: command: [python, -m] args: - - trading_runtime.backtest.runtime.run_sweep + - core_runtime.backtest.runtime.run_sweep - --context - "{{inputs.parameters.sweep-path}}" - --scratch-root @@ -204,7 +204,7 @@ spec: command: [python, -m] args: - - trading_runtime.backtest.runtime.segment_finalize_entrypoint + - core_runtime.backtest.runtime.segment_finalize_entrypoint - "--experiment-id" - "{{inputs.parameters.experiment-id}}" @@ -268,7 +268,7 @@ spec: command: [python, -m] args: - - trading_runtime.backtest.runtime.experiment_finalize_entrypoint + - core_runtime.backtest.runtime.experiment_finalize_entrypoint - "--experiment-id" - "{{inputs.parameters.experiment-id}}" diff --git a/argo/workflowtemplate-build-push-ghcr.yaml b/argo/workflowtemplate-build-push-ghcr.yaml index a28e1dd..2439683 100644 --- a/argo/workflowtemplate-build-push-ghcr.yaml +++ b/argo/workflowtemplate-build-push-ghcr.yaml @@ -7,14 +7,14 @@ spec: arguments: parameters: - name: image_repo - value: ghcr.io/trading-engineering/trading-runtime + value: ghcr.io/trading-engineering/tradingchassis-core-runtime - name: git_branch description: "Branch name (for tagging only)" value: main - - name: trading_runtime_commit - description: "Exact commit SHA of trading-runtime" + - name: core_runtime_commit + description: "Exact commit SHA of core-runtime" value: "" templates: @@ -27,10 +27,10 @@ 
spec: IMAGE_REPO="{{workflow.parameters.image_repo}}" GIT_BRANCH="{{workflow.parameters.git_branch}}" - RUNTIME_COMMIT="{{workflow.parameters.trading_runtime_commit}}" + RUNTIME_COMMIT="{{workflow.parameters.core_runtime_commit}}" if [ -z "$RUNTIME_COMMIT" ]; then - echo "> trading_runtime_commit must be set" + echo "> core_runtime_commit must be set" exit 1 fi diff --git a/core_runtime/backtest/runtime/run_sweep.py b/core_runtime/backtest/runtime/run_sweep.py index b63385a..30865d0 100644 --- a/core_runtime/backtest/runtime/run_sweep.py +++ b/core_runtime/backtest/runtime/run_sweep.py @@ -230,7 +230,7 @@ def _resolve_project_metadata(cls) -> dict[str, str | None]: pyproject_path ) - distribution_name = name_from_pyproject or "trading-framework" + distribution_name = name_from_pyproject or "tradingchassis-core" version: str | None source: str diff --git a/docs/venue-adapter-abstraction-design-v1.md b/docs/venue-adapter-abstraction-design-v1.md index 47fa162..e43d623 100644 --- a/docs/venue-adapter-abstraction-design-v1.md +++ b/docs/venue-adapter-abstraction-design-v1.md @@ -45,10 +45,10 @@ This note is implementation-facing and must stay consistent with: Current runtime anchors: -- `core-runtime/trading_runtime/backtest/adapters/venue.py` -- `core-runtime/trading_runtime/backtest/adapters/execution.py` -- `core-runtime/trading_runtime/backtest/engine/strategy_runner.py` -- `core-runtime/trading_runtime/backtest/engine/event_stream_cursor.py` +- `core-runtime/core_runtime/backtest/adapters/venue.py` +- `core-runtime/core_runtime/backtest/adapters/execution.py` +- `core-runtime/core_runtime/backtest/engine/strategy_runner.py` +- `core-runtime/core_runtime/backtest/engine/event_stream_cursor.py` --- diff --git a/pyproject.toml b/pyproject.toml index 896c601..89b4a75 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ requires = ["setuptools>=69", "wheel"] build-backend = "setuptools.build_meta" [project] -name = "core-runtime" +name = 
"tradingchassis-core-runtime" version = "0.1.0" description = "Runtime execution and Kubernetes orchestration layer for the core with reproducible environments and deterministic backtesting." readme = "README.md" @@ -37,7 +37,7 @@ dev = [ # Explicit package discovery # -------------------------------------------------- [tool.setuptools.packages.find] -include = ["core_runtime*", "trading_runtime*"] +include = ["core_runtime*"] # -------------------------------------------------- # Include runtime assets diff --git a/scripts/compile-requirements.sh b/scripts/compile-requirements.sh index 495a7a8..5439f90 100755 --- a/scripts/compile-requirements.sh +++ b/scripts/compile-requirements.sh @@ -6,10 +6,10 @@ set -a source .env set +a -: "${TRADING_FRAMEWORK_COMMIT:?Missing TRADING_FRAMEWORK_COMMIT in .env}" +: "${TRADINGCHASSIS_CORE_COMMIT:?Missing TRADINGCHASSIS_CORE_COMMIT in .env}" echo "🔧 Compiling requirements with pip-tools..." -echo "📌 Pinning core at commit: $TRADING_FRAMEWORK_COMMIT" +echo "📌 Pinning core at commit: $TRADINGCHASSIS_CORE_COMMIT" python -m pip install --upgrade \ "pip>=23.3,<25" \ @@ -19,7 +19,7 @@ python -m pip install --upgrade \ # Temporary requirements input for git dependency cat > _git_deps.in < None: +def test_nested_runtime_modules_share_identity_across_import_sites() -> None: with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) import core_runtime.backtest.engine.strategy_runner as old_strategy_runner @@ -16,7 +16,7 @@ def test_legacy_and_new_nested_runtime_modules_share_identity() -> None: assert old_debug_strategy is new_debug_strategy -def test_legacy_and_new_runtime_symbols_share_identity() -> None: +def test_runtime_symbols_share_identity_across_import_sites() -> None: with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) from core_runtime.backtest.engine.strategy_runner import HftStrategyRunner as OldRunner diff --git a/tests/test_dummy.py b/tests/test_dummy.py index 
1baea3f..cbb29c5 100644 --- a/tests/test_dummy.py +++ b/tests/test_dummy.py @@ -27,7 +27,7 @@ def test_exception(): def test_package_import(): """ - Optional: verify that core_runtime and trading_runtime can be imported. + Optional: verify that core_runtime can be imported. Remove if not needed. """ try: From d5050b9608b0abdaa1ca4edc87e7753da0b4f331 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 22:39:36 +0000 Subject: [PATCH 29/36] m2 p9 s5.2: rewrite README that matches the current architecture and naming --- README.md | 446 +++++++++++++++++------------------------------------- 1 file changed, 139 insertions(+), 307 deletions(-) diff --git a/README.md b/README.md index 6dba2e4..d717dd6 100644 --- a/README.md +++ b/README.md @@ -1,440 +1,272 @@ -# Core Runtime +# TradingChassis — Core Runtime ![CI](https://github.com/TradingChassis/core-runtime/actions/workflows/tests.yaml/badge.svg) ![Python](https://img.shields.io/badge/python-3.11+-blue) ![License](https://img.shields.io/badge/license-MIT-green) -Runtime execution layer and orchestration environment for the -[Core (`tradingchassis-core`)](https://github.com/TradingChassis/core). +Execution and orchestration environment around Core. -This repository provides: - -- Local execution examples -- Reproducible runtime environments -- Dependency pinning -- [Kubernetes](https://kubernetes.io)-native orchestration via [Argo Workflows](https://argoproj.github.io/workflows) -- CI-integrated build pipelines +Core Runtime consumes Core (`tradingchassis_core`) and provides local/cluster entrypoints, +configuration, adapter integration, runtime packaging, and reproducible execution workflows. --- -## 🧠 What is this? - -`core-runtime` is the execution and orchestration layer built on top of `tradingchassis-core`. 
- -While Core (`tradingchassis-core`) implements the deterministic trading core, -this repository focuses on: +## Overview -- how strategies are executed -- how environments are reproduced -- how workloads are orchestrated -- how results are produced and validated +Core Runtime is the runtime layer for executing Core semantics in concrete environments. -It intentionally contains no domain framework logic. +- local hftbacktest-backed backtest execution +- runtime entrypoints for orchestration flows +- reproducible dependency/runtime packaging +- CI and infrastructure wiring for deployment workflows --- -## 🧩 Relationship to Core - -``` -tradingchassis-core → core framework, backtesting engine, domain logic -tradingchassis-core-runtime → executing entrypoints, runtime configs, orchestration -``` - -The framework is consumed as a pinned Git dependency to guarantee -deterministic runtime environments. - ---- +## What Core Runtime is -## 🏷 Naming Clarification +Core Runtime provides: -- Repository/folder name in the monorepo can be `core-runtime`. -- Python import package in this repository is `core_runtime`. -- Distribution/project name in packaging metadata is `tradingchassis-core-runtime`. -- Core package canonical import is `tradingchassis_core`. -- Core distribution/project name is `tradingchassis-core`. 
+- executable runtime entrypoints (`core_runtime/...`) +- runtime configs and environment wiring +- adapter-facing integration layers around Core +- orchestration integration (Argo/Kubernetes) +- runtime validation and smoke/test workflows --- -## 📁 Repository Structure +## What Core Runtime is not -``` -.github/workflows/ CI pipelines (tests, Argo template deploy) -argo/ Argo workflow templates -docs/ Runtime design notes (implementation-facing) -examples/ Alternative example runner/config/strategy paths -scripts/ environment & build helper scripts -core_runtime/ Python runtime entrypoints -tests/ deterministic test data & validation -``` - -Implementation-facing design notes: +Core Runtime is not the semantic source of truth for Core concepts. -- `docs/venue-adapter-abstraction-design-v1.md` - -### Key runtime modules - -``` -core_runtime/local/ Local execution mode -core_runtime/argo/ Argo workflow entrypoints -core_runtime/strategies/ Example strategies -``` +It consumes Core and should not redefine canonical terms such as Event, Event Stream, Processing +Order, State, or Risk Engine. --- -## 🚀 Quick Start / Development Setup +## Current local hftbacktest usability status -### Monorepo workspace layout (recommended for current development) - -If your workspace root contains sibling repositories (for example `core/` and -`core-runtime/`), run tests from the workspace root: +Current local smoke is usable from the `core-runtime` repository root: ```bash -python -m pytest -q core-runtime/tests -python -m pytest -q core/tests/semantics +python -m core_runtime.local.backtest --config core_runtime/local/local.json ``` -Optional editable installs from workspace root: +Default output location: -```bash -python -m pip install -e core -python -m pip install -e core-runtime +```text +.runtime/local/results/ ``` -### Standalone `core-runtime` repo root +This confirms current local usability and does not claim full canonical Event Stream completion. 
+ +--- + +## Quick start From the `core-runtime` repository root: ```bash python -m pip install -e . -python -m pytest -q tests -./scripts/check.sh +python -m core_runtime.local.backtest --config core_runtime/local/local.json ``` -If `tradingchassis_core` is not already available in your environment, install -`core` as a sibling editable package or ensure the pinned dependency resolves. +If `tradingchassis_core` is not already resolvable in your environment, install `core` as a +sibling editable package in a monorepo workspace: -`PYTHONPATH=.` can be used as a short-term development shortcut, but editable -installation (`python -m pip install -e .`) is the preferred workflow. +```bash +python -m pip install -e ../core +``` --- -## 🗺 Entrypoint Matrix +## Entrypoint matrix | Mode | Entrypoint | Command shape | Notes | | --- | --- | --- | --- | | Local backtest | `core_runtime/local/backtest.py` | `python -m core_runtime.local.backtest --config core_runtime/local/local.json` | Main local runner. | | Argo plan/run orchestration | `core_runtime/backtest/runtime/entrypoint.py` | `python -m core_runtime.backtest.runtime.entrypoint --config core_runtime/argo/argo.json --plan` | Planner and sweep-context emitter for Argo flow. | -| Sweep worker | `core_runtime/backtest/runtime/run_sweep.py` | `python -m core_runtime.backtest.runtime.run_sweep --context ` | Executes one sweep context (pod-level unit). | -| Examples path | `examples/local/backtest.py` | `python examples/local/backtest.py --config examples/local/local.json` | Alternative example path; useful for reference but duplicates runtime patterns. | - -Use `core_runtime/local/*` for local runtime development, `core_runtime/backtest/runtime/*` -for Argo workflow execution, and `examples/*` as a duplicate reference path. +| Sweep worker | `core_runtime/backtest/runtime/run_sweep.py` | `python -m core_runtime.backtest.runtime.run_sweep --context ` | Executes one sweep context. 
| +| Examples path | `examples/local/backtest.py` | `python examples/local/backtest.py --config examples/local/local.json` | Reference path; duplicates runtime patterns. | --- -## ⚠️ Local Config Path Caveat +## Adapter capability model -Current shipped local JSON configs use cwd-relative paths for -`tests/data/...` inputs and `.runtime/...` outputs. - -Supported workflow: run local commands from the `core-runtime` repository root. -If you run from a different cwd, adjust config paths accordingly. +| Capability area | Status | Notes | +| --- | --- | --- | +| Canonical runtime paths | Active | `MarketEvent`, `OrderSubmittedEvent`, `ControlTimeEvent` | +| Compatibility paths | Active | Post-submission order/fill progression via snapshots, `OrderStateEvent`, and `DerivedFillEvent` | +| Deferred capabilities | Deferred | Runtime `FillEvent` ingress, `ExecutionFeedbackRecordSource`, replay/storage/Event Stream persistence, `ProcessingContext` | --- -## ✅ Current usability status (local hftbacktest path) - -The current local backtest path is verified and usable from the `core-runtime` -repository root. +## Current hftbacktest capability map -Verified local workflow: - -```bash -python -m pip install -e . -python -m core_runtime.local.backtest --config core_runtime/local/local.json -``` - -Verified output location: - -``` -.runtime/local/results/events.json -.runtime/local/results/stats.npz -``` - -Verified tests: - -```bash -python -m pytest -q tests -python -m pytest -q core-runtime/tests -python -m pytest -q core/tests/semantics -``` - -Current caveats: - -- Paths are cwd-relative; supported workflow is running from `core-runtime` root. -- hftbacktest timestamp-ordering warnings may appear from fixture ordering but do not fail the run. -- `tests/data/results/` may contain historical/sample artifacts and is no longer the default local output location. 
-- Naming is aligned (`core-runtime` repo, `tradingchassis-core-runtime` distribution, `core_runtime` import package, `tradingchassis_core` core import package). - -This status confirms local usability for the current local hftbacktest path; it -does not imply full canonical Event Stream completion. +- Local hftbacktest flow is usable for current transitional runtime paths. +- Compatibility mechanisms remain in place for post-submission progression. +- Deferred capabilities are intentionally not presented as shipped runtime behavior. --- -## 📌 Current semantic status (transitional) - -`core-runtime` is currently usable as a transitional runtime around `core`: - -- canonical `MarketEvent`, `OrderSubmittedEvent`, and `ControlTimeEvent` paths are in use -- post-submission order/fill progression remains on the snapshot-compatibility path -- `FillEvent` runtime ingress remains deferred +## Canonical runtime paths -For adapter boundary context, see: - -- `docs/venue-adapter-abstraction-design-v1.md` +- `MarketEvent` +- `OrderSubmittedEvent` +- `ControlTimeEvent` --- -## 📌 Dependency Pinning & Reproducibility - -The `tradingchassis-core` dependency is pinned by commit SHA. 
+## Compatibility paths -Create a `.env` file: +- snapshot-based post-submission progression +- `OrderStateEvent` +- `DerivedFillEvent` -```bash -TRADINGCHASSIS_CORE_COMMIT= -``` - -Generate reproducible environments: - -```bash -./scripts/compile-requirements.sh -``` - -This produces: - -- `requirements.txt` -- `requirements-dev.txt` +--- -These files are used by: +## Deferred capabilities -- Dev Containers -- Docker images +- runtime `FillEvent` ingress +- `ExecutionFeedbackRecordSource` +- replay/storage/Event Stream persistence +- `ProcessingContext` --- -## ▶️ Local Execution +## Package and import names -Run a deterministic local backtest: +- Human-facing concept name: Core Runtime +- Distribution/project name: `tradingchassis-core-runtime` +- Python import package: `core_runtime` +- Core distribution/project name: `tradingchassis-core` +- Core Python import package: `tradingchassis_core` -```bash -python -m core_runtime.local.backtest \ - --config core_runtime/local/local.json -``` +--- -This uses synthetic deterministic test data located in: +## Repository structure +```text +.github/workflows/ CI and deployment workflows +argo/ Argo workflow templates +core_runtime/ Runtime entrypoints and execution modules +docs/ Runtime implementation notes +examples/ Example runner/config paths +scripts/ Build/validation helper scripts +tests/ Runtime tests and deterministic fixtures ``` -tests/data/parts/ -``` - -Results are written to: - -``` -.runtime/local/results/ -``` - -Important: `core_runtime/local/local.json` and `examples/local/local.json` -use cwd-relative paths. Run from the `core-runtime` repository root, or adjust -config paths for your current working directory. 
--- -## ⚙️ Infrastructure Requirements +## Configuration -The Argo-based workflows require: +Primary local config: -- A self-hosted GitHub Actions runner -- microk8s Kubernetes distribution (with sudo access) -- Argo Workflows installed in the cluster -- GitHub Container Registry access (GHCR_TOKEN secret) +- `core_runtime/local/local.json` -GitHub-hosted runners are only used for unit tests. -All Kubernetes orchestration runs on self-hosted infrastructure. +Note: local JSON configs use cwd-relative paths for `tests/data/...` inputs and `.runtime/...` +outputs. The supported default workflow is to run commands from the `core-runtime` repo root. --- -## ☸ Kubernetes & Argo Workflows - -This runtime is designed for Kubernetes-native execution using Argo Workflows. +## Development setup -Two core workflow templates define the execution pipeline: +### Standalone `core-runtime` root +```bash +python -m pip install -e . +python -m pytest -q tests +./scripts/check.sh ``` -argo/workflowtemplate-build-push-ghcr.yaml -argo/workflowtemplate-backtest.yaml -``` - -### 🐳 Runtime Image Build & Push - -`workflowtemplate-build-push-ghcr.yaml` builds the Core Runtime Docker image and pushes it to -GitHub Container Registry (GHCR). - -This image contains: - -- Python dependencies and entrypoints -- tradingchassis-core and core-runtime commit SHA -- strategies and configs -It acts as an immutable and deterministic runtime environment for all backtests. +### Monorepo workspace root (with `core/` and `core-runtime/`) -### ▶️ Backtest Orchestration - -`workflowtemplate-backtest.yaml` orchestrates backtest workloads using Argo. - -It: - -- pulls the runtime image from GHCR -- executes runtime entrypoints inside Kubernetes pods -- distributes workloads across the cluster -- saves deterministic result artifacts - -All backtests always run inside the runtime image. 
- -### 🔄 End-to-End Flow - -``` -Docker build → Push to GHCR → Argo pulls image → Backtests execute in cluster +```bash +python -m pip install -e core +python -m pip install -e core-runtime +python -m pytest -q core-runtime/tests +python -m pytest -q core/tests ``` -This guarantees: - -- identical runtime environments locally and in Kubernetes -- reproducible research runs - --- -## 🔐 GHCR Registry Access - -To allow Kubernetes to pull runtime images from GitHub Container Registry (GHCR), -the deployment workflow creates a `docker-registry` secret inside the target Kubernetes namespace. - -The secret is created by the GitHub Actions workflow located at: - -``` -.github/workflows/deploy_argo_template.yaml -``` +## Test commands -It runs the equivalent of: +From `core-runtime` root: ```bash -sudo microk8s kubectl -n $K8S_NAMESPACE create secret docker-registry ghcr-secret \ - --docker-server=ghcr.io \ - --docker-username=git \ - --docker-password=$GHCR_TOKEN \ - --dry-run=client -o yaml | sudo microk8s kubectl apply -f - -``` - -### Required Repository Secret - -The workflow requires a GitHub repository secret named: - -``` -GHCR_TOKEN +python -m pytest -q tests +./scripts/check.sh ``` -This token must be a GitHub Personal Access Token with: - -* `read:packages` +From monorepo root: -Add it under: - -``` -Repository → Settings → Secrets and variables → Actions +```bash +python -m pytest -q core-runtime/tests +python -m pytest -q core/tests ``` -Without this secret, the workflow cannot authenticate against GHCR, and Kubernetes will fail to pull the runtime image. - --- -## 🛠 Scripts +## Relationship to Core -| Script | Purpose | -| ------------------------- | ----------------------------------------------- | -| `compile-requirements.sh` | Pins tradingchassis-core and resolves dependencies | -| `post-create.sh` | Dev container bootstrap | -| `check.sh` | Local validation helpers | +Core provides deterministic semantics and domain contracts. 
---- +Core Runtime provides execution environments and orchestration around those semantics. -## 🧪 Test Data +--- -Synthetic datasets are provided in: +## Dependency pinning and reproducibility -``` -tests/data/parts/ -``` +Core dependency can be pinned by commit SHA through environment configuration: -Historical/sample result artifacts may exist in: - -``` -tests/data/results/ +```bash +TRADINGCHASSIS_CORE_COMMIT= ``` -Default local backtest outputs are now written to: +To compile reproducible requirements: -``` -.runtime/local/results/ +```bash +./scripts/compile-requirements.sh ``` -Helper generation scripts: +Artifacts: -``` -tests/data/scripts/ -``` - -These guarantee reproducible runtime validation. +- `requirements.txt` +- `requirements-dev.txt` --- -## 🧪 CI & Automation +## Infrastructure notes -GitHub Actions workflows: +Argo/Kubernetes workflows are defined in: -- `tests.yaml` — runtime validation -- `deploy_argo_template.yaml` — Argo template deployment +- `argo/workflowtemplate-build-push-ghcr.yaml` +- `argo/workflowtemplate-backtest.yaml` -Supports both GitHub-hosted and self-hosted runners respectively. 
+Deployment automation is in: ---- - -## 🎯 Design Principles - -- Determinism over convenience -- Reproducible environments -- Explicit execution entrypoints -- Infrastructure separated from domain logic -- Cloud-native orchestration +- `.github/workflows/deploy_argo_template.yaml` --- -## 📌 Scope +## Scripts -This repository includes: +| Script | Purpose | +| --- | --- | +| `compile-requirements.sh` | Resolves dependencies and pins Core revision inputs | +| `post-create.sh` | Dev container bootstrap | +| `check.sh` | Local validation helpers | -- runtime execution logic -- environment orchestration -- CI pipelines -- container workflows +--- -It does not include: +## Documentation index -- Core internals -- specific strategy research logic +- Runtime adapter design: `docs/venue-adapter-abstraction-design-v1.md` +- Shared terminology source of truth: `docs/docs/00-guides/terminology.md` +- Core library scope: `core/README.md` --- -## 🏷️ Versioning +## License and versioning -This project follows the MIT license and semantic versioning. -Initial public release: `v0.1.0` +MIT licensed. Versioning follows semantic versioning. 
From c79e51bf13c3f495326b14da013789b2cfd92acd Mon Sep 17 00:00:00 2001 From: bxvtr Date: Sun, 3 May 2026 22:46:35 +0000 Subject: [PATCH 30/36] m2 p9 s5.3: recompile requirements --- requirements-dev.txt | 20 ++++++++++---------- requirements.txt | 12 ++++++------ 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index c5c6b54..c771f30 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -95,7 +95,7 @@ gunicorn==23.0.0 h11==0.16.0 # via uvicorn hftbacktest==2.4.4 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) holoviews==1.22.1 # via hftbacktest huey==2.6.0 @@ -105,7 +105,7 @@ idna==3.11 # anyio # requests import-linter==1.12.1 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) importlib-metadata==8.7.1 # via # mlflow-skinny @@ -151,13 +151,13 @@ mdit-py-plugins==0.5.0 mdurl==0.1.2 # via markdown-it-py mlflow==3.9.0 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) mlflow-skinny==3.9.0 # via mlflow mlflow-tracing==3.9.0 # via mlflow mypy==1.19.1 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) mypy-extensions==1.1.0 # via mypy narwhals==2.16.0 @@ -182,7 +182,7 @@ numpy==2.2.6 # scipy # skops oci==2.167.1 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) opentelemetry-api==1.39.1 # via # mlflow-skinny @@ -238,7 +238,7 @@ polars-runtime-32==1.38.1 prettytable==3.17.0 # via skops prometheus-client==0.24.1 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) protobuf==6.33.5 # via # databricks-sdk @@ -260,7 +260,7 @@ pydantic==2.12.5 # fastapi # mlflow-skinny # mlflow-tracing - # trading-framework + # tradingchassis-core pydantic-core==2.41.5 # via pydantic pygments==2.19.2 @@ -270,7 +270,7 @@ pyopenssl==25.1.0 pyparsing==3.3.2 # via matplotlib 
pytest==9.0.2 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) python-dateutil==2.9.0.post0 # via # graphene @@ -301,7 +301,7 @@ requests==2.32.5 rsa==4.9.1 # via google-auth ruff==0.15.1 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) scikit-learn==1.8.0 # via # mlflow @@ -331,7 +331,7 @@ tornado==6.5.4 # via bokeh tqdm==4.67.3 # via panel -trading-framework @ git+https://github.com/TradingChassis/core.git@632067b470e18b2c636008d08281551412a8ac3a +tradingchassis-core @ git+https://github.com/TradingChassis/core.git@09b364cd00c424b1756cd181c3fee6ab33a04827 # via -r _git_deps.in typing-extensions==4.15.0 # via diff --git a/requirements.txt b/requirements.txt index 46e0e95..b8a479e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -92,7 +92,7 @@ gunicorn==23.0.0 h11==0.16.0 # via uvicorn hftbacktest==2.4.4 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) holoviews==1.22.1 # via hftbacktest huey==2.6.0 @@ -142,7 +142,7 @@ mdit-py-plugins==0.5.0 mdurl==0.1.2 # via markdown-it-py mlflow==3.9.0 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) mlflow-skinny==3.9.0 # via mlflow mlflow-tracing==3.9.0 @@ -169,7 +169,7 @@ numpy==2.2.6 # scipy # skops oci==2.167.1 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) opentelemetry-api==1.39.1 # via # mlflow-skinny @@ -220,7 +220,7 @@ polars-runtime-32==1.38.1 prettytable==3.17.0 # via skops prometheus-client==0.24.1 - # via trading-runtime (pyproject.toml) + # via tradingchassis-core-runtime (pyproject.toml) protobuf==6.33.5 # via # databricks-sdk @@ -242,7 +242,7 @@ pydantic==2.12.5 # fastapi # mlflow-skinny # mlflow-tracing - # trading-framework + # tradingchassis-core pydantic-core==2.41.5 # via pydantic pyopenssl==25.1.0 @@ -307,7 +307,7 @@ tornado==6.5.4 # via bokeh tqdm==4.67.3 # via panel 
-trading-framework @ git+https://github.com/TradingChassis/core.git@632067b470e18b2c636008d08281551412a8ac3a +tradingchassis-core @ git+https://github.com/TradingChassis/core.git@09b364cd00c424b1756cd181c3fee6ab33a04827 # via -r _git_deps.in typing-extensions==4.15.0 # via From 126523ae297ee2813b4a162bc5384bb88a5fe491 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Tue, 5 May 2026 10:52:20 +0000 Subject: [PATCH 31/36] m2 p10 s3: Docker validation and build-context ignore rules --- .dockerignore | 8 ++++++++ requirements-dev.txt | 2 +- requirements.txt | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/.dockerignore b/.dockerignore index 2a8ced1..c995f3b 100644 --- a/.dockerignore +++ b/.dockerignore @@ -52,6 +52,9 @@ site/ # ============================== # Test cache / reports # ============================== +.ruff_cache/ +.mypy_cache/ +.import_linter_cache/ pytest_cache/ .tox/ .nox/ @@ -59,6 +62,11 @@ pytest_cache/ .coverage* coverage.xml +# ============================== +# Local runtime outputs +# ============================== +.runtime/ + # ============================== # Random OS files # ============================== diff --git a/requirements-dev.txt b/requirements-dev.txt index c771f30..f98bc2b 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -331,7 +331,7 @@ tornado==6.5.4 # via bokeh tqdm==4.67.3 # via panel -tradingchassis-core @ git+https://github.com/TradingChassis/core.git@09b364cd00c424b1756cd181c3fee6ab33a04827 +tradingchassis-core @ git+https://github.com/TradingChassis/core.git@10b09aac06881b0f8f942e08104a09d86ba2d2ca # via -r _git_deps.in typing-extensions==4.15.0 # via diff --git a/requirements.txt b/requirements.txt index b8a479e..d3c302f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -307,7 +307,7 @@ tornado==6.5.4 # via bokeh tqdm==4.67.3 # via panel -tradingchassis-core @ git+https://github.com/TradingChassis/core.git@09b364cd00c424b1756cd181c3fee6ab33a04827 +tradingchassis-core @ 
git+https://github.com/TradingChassis/core.git@10b09aac06881b0f8f942e08104a09d86ba2d2ca # via -r _git_deps.in typing-extensions==4.15.0 # via From 58cff1ba5456702e4957c340796e36162dab85f1 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Tue, 5 May 2026 11:09:30 +0000 Subject: [PATCH 32/36] m2 p10 s3.1: fix build destination --- argo/run-backtest.yaml | 2 +- argo/workflowtemplate-backtest-fanout.yaml | 2 +- argo/workflowtemplate-build-push-ghcr.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/argo/run-backtest.yaml b/argo/run-backtest.yaml index 6ccfd01..f57894f 100644 --- a/argo/run-backtest.yaml +++ b/argo/run-backtest.yaml @@ -8,7 +8,7 @@ spec: arguments: parameters: - name: image_repo - value: "ghcr.io/trading-engineering/tradingchassis-core-runtime" + value: "ghcr.io/tradingchassis/tradingchassis-core-runtime" - name: image_tag value: "${IMAGE_TAG}" - name: experiment_config diff --git a/argo/workflowtemplate-backtest-fanout.yaml b/argo/workflowtemplate-backtest-fanout.yaml index 6c5deb2..c87b7a5 100644 --- a/argo/workflowtemplate-backtest-fanout.yaml +++ b/argo/workflowtemplate-backtest-fanout.yaml @@ -16,7 +16,7 @@ spec: parameters: - name: image_repo description: "Container image repo" - value: ghcr.io/trading-engineering/tradingchassis-core-runtime + value: ghcr.io/tradingchassis/tradingchassis-core-runtime - name: image_tag description: "Container image tag to run (recommended: commit SHA)" diff --git a/argo/workflowtemplate-build-push-ghcr.yaml b/argo/workflowtemplate-build-push-ghcr.yaml index 2439683..b73c59b 100644 --- a/argo/workflowtemplate-build-push-ghcr.yaml +++ b/argo/workflowtemplate-build-push-ghcr.yaml @@ -7,7 +7,7 @@ spec: arguments: parameters: - name: image_repo - value: ghcr.io/trading-engineering/tradingchassis-core-runtime + value: ghcr.io/tradingchassis/tradingchassis-core-runtime - name: git_branch description: "Branch name (for tagging only)" From 01af55af9527a9d5589f543013f59a5283a715ad Mon Sep 17 00:00:00 2001 
From: bxvtr Date: Tue, 5 May 2026 12:29:25 +0000 Subject: [PATCH 33/36] m2 p10 s4: improve and refactor argo templates --- .../argo-launchers}/run-backtest.yaml | 2 +- .../argo-launchers}/run-build.yaml | 0 .../workflows/argo-build-and-backtest.yaml | 8 +- README.md | 68 ++- .../workflowtemplate-backtest-fanout.yaml | 2 +- .../workflowtemplate-build-push-ghcr.yaml | 23 +- tests/data/results/events.json | 412 ------------------ tests/data/results/stats.npz | Bin 2459 -> 0 bytes 8 files changed, 88 insertions(+), 427 deletions(-) rename {argo => .github/argo-launchers}/run-backtest.yaml (86%) rename {argo => .github/argo-launchers}/run-build.yaml (100%) rename argo/{ => templates}/workflowtemplate-backtest-fanout.yaml (99%) rename argo/{ => templates}/workflowtemplate-build-push-ghcr.yaml (73%) delete mode 100644 tests/data/results/events.json delete mode 100644 tests/data/results/stats.npz diff --git a/argo/run-backtest.yaml b/.github/argo-launchers/run-backtest.yaml similarity index 86% rename from argo/run-backtest.yaml rename to .github/argo-launchers/run-backtest.yaml index f57894f..ac695dd 100644 --- a/argo/run-backtest.yaml +++ b/.github/argo-launchers/run-backtest.yaml @@ -8,7 +8,7 @@ spec: arguments: parameters: - name: image_repo - value: "ghcr.io/tradingchassis/tradingchassis-core-runtime" + value: "ghcr.io/tradingchassis/core-runtime" - name: image_tag value: "${IMAGE_TAG}" - name: experiment_config diff --git a/argo/run-build.yaml b/.github/argo-launchers/run-build.yaml similarity index 100% rename from argo/run-build.yaml rename to .github/argo-launchers/run-build.yaml diff --git a/.github/workflows/argo-build-and-backtest.yaml b/.github/workflows/argo-build-and-backtest.yaml index 79eb6a8..57c6bae 100644 --- a/.github/workflows/argo-build-and-backtest.yaml +++ b/.github/workflows/argo-build-and-backtest.yaml @@ -68,8 +68,8 @@ jobs: - name: Apply Argo WorkflowTemplates run: | sudo microk8s kubectl -n "${{ needs.resolve-context.outputs.namespace }}" 
apply \ - -f argo/workflowtemplate-build-push-ghcr.yaml \ - -f argo/workflowtemplate-backtest-fanout.yaml + -f argo/templates/workflowtemplate-build-push-ghcr.yaml \ + -f argo/templates/workflowtemplate-backtest-fanout.yaml build-and-backtest: runs-on: self-hosted @@ -88,7 +88,7 @@ jobs: export GIT_BRANCH="${{ needs.resolve-context.outputs.branch }}" BUILD_NAME=$( - envsubst < argo/run-build.yaml | \ + envsubst < .github/argo-launchers/run-build.yaml | \ sudo microk8s kubectl -n "${{ needs.resolve-context.outputs.namespace }}" create -f - -o jsonpath='{.metadata.name}' ) @@ -137,7 +137,7 @@ jobs: run: | export IMAGE_TAG="${GITHUB_SHA}" - envsubst < argo/run-backtest.yaml | \ + envsubst < .github/argo-launchers/run-backtest.yaml | \ sudo microk8s kubectl -n "${{ needs.resolve-context.outputs.namespace }}" create -f - echo "Submitted backtest workflow in namespace: ${{ needs.resolve-context.outputs.namespace }}" diff --git a/README.md b/README.md index d717dd6..b7e6707 100644 --- a/README.md +++ b/README.md @@ -147,7 +147,8 @@ python -m pip install -e ../core ```text .github/workflows/ CI and deployment workflows -argo/ Argo workflow templates +.github/argo-launchers/ Argo Workflow submit wrappers used by GitHub Actions +argo/templates/ Argo WorkflowTemplates shown in Argo UI core_runtime/ Runtime entrypoints and execution modules docs/ Runtime implementation notes examples/ Example runner/config paths @@ -238,14 +239,69 @@ Artifacts: ## Infrastructure notes -Argo/Kubernetes workflows are defined in: +Argo WorkflowTemplates (visible in Argo UI) are defined in: -- `argo/workflowtemplate-build-push-ghcr.yaml` -- `argo/workflowtemplate-backtest.yaml` +- `argo/templates/workflowtemplate-build-push-ghcr.yaml` +- `argo/templates/workflowtemplate-backtest-fanout.yaml` -Deployment automation is in: +GitHub-only Argo submit wrappers are in: -- `.github/workflows/deploy_argo_template.yaml` +- `.github/argo-launchers/run-build.yaml` +- `.github/argo-launchers/run-backtest.yaml` 
+ +Automation that applies templates and starts workflows is in: + +- `.github/workflows/argo-build-and-backtest.yaml` + +### Argo UI usage + +Use this model to avoid confusion: + +- `argo/templates/*`: reusable `WorkflowTemplate` definitions that appear in the Argo UI. +- `.github/argo-launchers/*`: one-off `Workflow` manifests used by GitHub Actions with `envsubst`. + +Namespace intent: + +- `dev`: branch and development runs. +- `prod`: main branch and production-like runs. + +#### Build image from Argo UI (`build-push-ghcr`) + +Template: `build-push-ghcr` + +Recommended parameters: + +- `git_repo`: keep default `https://github.com/TradingChassis/core-runtime.git`. +- `image_repo`: keep default `ghcr.io/tradingchassis/core-runtime`. +- `git_branch`: set the branch name for tagging (default `main`). +- `core_runtime_commit`: set to a real commit SHA (required). + +Guardrails: + +- `core_runtime_commit` must be a 7-40 character hex SHA. +- `git_repo` must be an HTTPS URL ending in `.git`. + +Tagging behavior: + +- always pushes `:` +- always pushes `:` +- also pushes `:latest` when `git_branch=main` + +#### Run backtest from Argo UI (`backtest-fanout`) + +Template: `backtest-fanout` + +Recommended parameters: + +- `image_repo`: keep default. +- `image_tag`: set to the exact commit SHA built by `build-push-ghcr` for reproducibility. +- `experiment_config`: keep default unless intentionally testing a different in-image config. +- `scratch_root`: keep default `/mnt/scratch`. + +Guardrails: + +- prefer commit SHA tags for `prod` runs. +- use mutable tags such as `latest` only for quick smoke checks. 
--- diff --git a/argo/workflowtemplate-backtest-fanout.yaml b/argo/templates/workflowtemplate-backtest-fanout.yaml similarity index 99% rename from argo/workflowtemplate-backtest-fanout.yaml rename to argo/templates/workflowtemplate-backtest-fanout.yaml index c87b7a5..a6b16d7 100644 --- a/argo/workflowtemplate-backtest-fanout.yaml +++ b/argo/templates/workflowtemplate-backtest-fanout.yaml @@ -16,7 +16,7 @@ spec: parameters: - name: image_repo description: "Container image repo" - value: ghcr.io/tradingchassis/tradingchassis-core-runtime + value: ghcr.io/tradingchassis/core-runtime - name: image_tag description: "Container image tag to run (recommended: commit SHA)" diff --git a/argo/workflowtemplate-build-push-ghcr.yaml b/argo/templates/workflowtemplate-build-push-ghcr.yaml similarity index 73% rename from argo/workflowtemplate-build-push-ghcr.yaml rename to argo/templates/workflowtemplate-build-push-ghcr.yaml index b73c59b..8ff4b69 100644 --- a/argo/workflowtemplate-build-push-ghcr.yaml +++ b/argo/templates/workflowtemplate-build-push-ghcr.yaml @@ -6,8 +6,12 @@ spec: entrypoint: build arguments: parameters: + - name: git_repo + description: "Git repo used as Kaniko build context" + value: https://github.com/TradingChassis/core-runtime.git + - name: image_repo - value: ghcr.io/tradingchassis/tradingchassis-core-runtime + value: ghcr.io/tradingchassis/core-runtime - name: git_branch description: "Branch name (for tagging only)" @@ -26,6 +30,7 @@ spec: set -euo pipefail IMAGE_REPO="{{workflow.parameters.image_repo}}" + GIT_REPO="{{workflow.parameters.git_repo}}" GIT_BRANCH="{{workflow.parameters.git_branch}}" RUNTIME_COMMIT="{{workflow.parameters.core_runtime_commit}}" @@ -34,7 +39,19 @@ spec: exit 1 fi - GIT_REPO="github.com/${IMAGE_REPO#ghcr.io/}" + if ! 
echo "$RUNTIME_COMMIT" | grep -Eq '^[0-9a-fA-F]{7,40}$'; then + echo "> core_runtime_commit must be a 7-40 character hex SHA" + exit 1 + fi + + case "$GIT_REPO" in + https://*".git") + ;; + *) + echo "> git_repo must be an HTTPS URL ending with .git" + exit 1 + ;; + esac SAFE_BRANCH=$(echo "$GIT_BRANCH" | tr '/' '-' | tr '[:upper:]' '[:lower:]') @@ -50,7 +67,7 @@ spec: done /kaniko/executor \ - --context=git://$GIT_REPO.git#$RUNTIME_COMMIT \ + --context="$GIT_REPO#$RUNTIME_COMMIT" \ --dockerfile=Dockerfile \ --target=runtime \ $DESTINATIONS \ diff --git a/tests/data/results/events.json b/tests/data/results/events.json deleted file mode 100644 index afb305a..0000000 --- a/tests/data/results/events.json +++ /dev/null @@ -1,412 +0,0 @@ -{"event": "RiskDecisionEvent(ts_ns_local=1723161256101000000, accepted=6, queued=0, rejected=0, handled=0, reject_reasons={})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state=None, next_state='working')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='working')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='working')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1078790734324421344', prev_state=None, next_state='working')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='working')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='7581400325422276892', prev_state=None, next_state='working')"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161256201000000, accepted=0, queued=0, rejected=0, handled=6, reject_reasons={})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state='working', next_state='expired')"} -{"event": "DerivedFillEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', side='buy', delta_qty=0.08, cum_qty=0.08, price=59999.9)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state='working', next_state='expired')"} -{"event": "DerivedFillEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', side='sell', delta_qty=0.01, cum_qty=0.01, price=60000.100000000006)"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256301000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161256310000000, instrument='BTC_USDC-PERPETUAL', exposure=1200.0000000000002, delta_exposure=1200.0000000000002)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='expired')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256401000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256501000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256601000000, accepted=0, queued=0, rejected=2, 
handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256701000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='expired')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256801000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "DerivedPnLEvent(ts_ns_local=1723161256810000000, instrument='BTC_USDC-PERPETUAL', delta_pnl=0.0009999999999763531, cum_realized_pnl=0.0009999999999763531)"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161256810000000, instrument='BTC_USDC-PERPETUAL', exposure=599.9999999999997, delta_exposure=-600.0000000000006)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161256901000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257001000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257101000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257201000000, accepted=0, 
queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257301000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257401000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257501000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257601000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257701000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257801000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161257901000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258001000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258101000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258201000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258301000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258401000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161258501000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258601000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258701000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258801000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161258901000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259001000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259101000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259201000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259301000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259401000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "DerivedPnLEvent(ts_ns_local=1723161259410000000, instrument='BTC_USDC-PERPETUAL', delta_pnl=-0.0004999999999881766, cum_realized_pnl=0.0004999999999881766)"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161259410000000, instrument='BTC_USDC-PERPETUAL', exposure=599.9994999999997, delta_exposure=-0.0004999999999881766)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, 
instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state='working', next_state='expired')"} -{"event": "DerivedFillEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', side='buy', delta_qty=0.02, cum_qty=0.02, price=59999.8)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259501000000, accepted=2, queued=0, rejected=3, handled=1, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 3})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='expired')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259601000000, accepted=0, queued=0, rejected=3, handled=3, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 3})"} -{"event": "DerivedPnLEvent(ts_ns_local=1723161259610000000, instrument='BTC_USDC-PERPETUAL', delta_pnl=0.012000000000284672, cum_realized_pnl=0.012500000000272848)"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161259610000000, instrument='BTC_USDC-PERPETUAL', exposure=5399.9955, delta_exposure=4799.996)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', 
prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259701000000, accepted=0, queued=0, rejected=3, handled=3, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 3})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259801000000, accepted=0, queued=0, rejected=3, handled=3, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 3})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state='working', next_state='expired')"} -{"event": "DerivedFillEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', side='sell', delta_qty=0.02, cum_qty=0.02, price=60000.100000000006)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161259901000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state=None, next_state='expired')"} -{"event": 
"OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260001000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260101000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260201000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "DerivedPnLEvent(ts_ns_local=1723161260210000000, instrument='BTC_USDC-PERPETUAL', delta_pnl=0.007500000000163709, cum_realized_pnl=0.020000000000436557)"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161260210000000, instrument='BTC_USDC-PERPETUAL', exposure=2399.9979999999996, delta_exposure=-2999.9975000000004)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} 
-{"event": "RiskDecisionEvent(ts_ns_local=1723161260301000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260401000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260501000000, accepted=4, queued=0, rejected=0, handled=2, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260601000000, accepted=6, queued=0, rejected=0, handled=0, reject_reasons={})"} -{"event": "DerivedPnLEvent(ts_ns_local=1723161260621000000, instrument='BTC_USDC-PERPETUAL', delta_pnl=0.00400000000036016, cum_realized_pnl=0.024000000000796717)"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161260621000000, instrument='BTC_USDC-PERPETUAL', exposure=2400.002, delta_exposure=0.00400000000036016)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260701000000, accepted=2, queued=2, rejected=0, handled=2, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260751000000, accepted=1, queued=1, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260801000000, accepted=1, queued=3, rejected=1, handled=2, reject_reasons={'ORDER_NOT_FOUND': 1})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161260851000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260901000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161260951000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261001000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261051000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261101000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261151000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261201000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261251000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261301000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261351000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261401000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261451000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261501000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261551000000, 
accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261601000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261651000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261701000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261751000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261801000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261851000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261901000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161261951000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262001000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262051000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262101000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262151000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262201000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262251000000, accepted=1, queued=2, rejected=0, handled=0, 
reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262301000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262351000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262401000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262451000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262501000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262551000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262601000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262651000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262701000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262751000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262801000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262851000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262901000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161262951000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161263001000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263051000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263101000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263151000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263201000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263251000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263301000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263351000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263401000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263451000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263501000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263551000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263601000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263651000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263701000000, 
accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263751000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263801000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263851000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263901000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161263951000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264001000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264051000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264101000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264151000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264201000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264251000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264301000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264351000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264401000000, accepted=1, queued=3, rejected=2, handled=2, 
reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264451000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264501000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264551000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264601000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264651000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264701000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264751000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264801000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264851000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264901000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161264951000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265001000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265051000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265101000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161265151000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265201000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265251000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265301000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265351000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265401000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265451000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265501000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265551000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265601000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265651000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265701000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265751000000, accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265801000000, accepted=1, queued=3, rejected=2, handled=2, reject_reasons={'ORDER_NOT_FOUND': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265851000000, 
accepted=1, queued=2, rejected=0, handled=0, reject_reasons={})"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "DerivedPnLEvent(ts_ns_local=1723161265851000000, instrument='BTC_USDC-PERPETUAL', delta_pnl=0.020000000000436557, cum_realized_pnl=0.044000000001233275)"} -{"event": "ExposureDerivedEvent(ts_ns_local=1723161265851000000, instrument='BTC_USDC-PERPETUAL', exposure=7200.006, delta_exposure=4800.004000000001)"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161259501000000, instrument='BTC_USDC-PERPETUAL', client_order_id='4218267878951418420', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='1478678295735644481', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='9220587067321093634', prev_state=None, next_state='filled')"} -{"event": "OrderStateTransitionEvent(ts_ns_local=1723161256101000000, instrument='BTC_USDC-PERPETUAL', client_order_id='3670455356658261962', prev_state=None, next_state='filled')"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, 
reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, 
reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=2, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=2, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, 
reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, 
reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, 
reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, 
reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=2, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=4, handled=2, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 4})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=2, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, 
reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, 
reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": 
"RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} -{"event": "RiskDecisionEvent(ts_ns_local=1723161265860000000, accepted=0, queued=0, rejected=2, handled=4, reject_reasons={'MAX_GROSS_QUOTE_NOTIONAL': 2})"} diff --git a/tests/data/results/stats.npz b/tests/data/results/stats.npz deleted file mode 100644 index da56e6210e9e1cfefbedc6212ec6715ee079611c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2459 zcmb`}eOwIt9>DQUme{h99&l=RbHb5Y6sw|rI!@@N_Ig@{mO3b<4k31Hx93jeHYXij zDx0{%!J$T8{T% zFc?!5pux0$NqTW--tVv(j51rAn{5I#heGZlP+Lw)s znr`=a&7P3_%gZioE$ymn=jP0dJZigo^U`~<-1nZZZc6fZo-5s8IrG-$q6aL;h+~Uy 
zNGhx{EOJ(6&k==BfBm!Uxa};ys8wmI z>YuxROxZ3?oK&sbQ{pcOI5u=rccw+G#yGFJ(8IrxJ!(@+Z?rGcu+bhG+Cj1_C-$F7 z)aK~p8<{)SE625}+{#2_MS^;n(CHTIokzd2B&_CE6d&b-D=r1k(13wYhqI$i+duq0 zyZfYGPA)%}a37z2xvHsdw&x~sjqPDZ-&LqSYt%c=y)|HgXR7ikKS-hvkM(JNylbOq ztT<|;3|Y99HSC6jrH|&Q;-DPuSU zzb}hBEZ>zoB=)Aw4N{BY8iqO;(zz2illRcP{%L+vaXAJtb_ z_4TiHk;N&S>_&0EAgHgm?O2ceQR5|7@>6#I$M=u2)st0q-AA7GgzY5gY zr^|x*7mbIuWDlK85GE_{9_ii#w`r4=JK#l(zK##6C7=>D{<~4=?uFKkb@Y(rnhk5~ z0yN?)9;e@y-87PB^Ye??)sfn$^HpKClc5gl;WuYiU6dowB>Im>IyY--N=ify^Ge%- z1)?$H&G_@W_?mocylVJ60Fmo$u-b-GJZP9dC{p*l+Xg81?PsF}4ucuQo!KD(RQr;F z7VeMdDyoGXOlouxftpNvOz#oELz1>kMg0ob@D%1}t6`3^+!|9=_5#Q?A4|Mmw@IWv z_|^v&>TgWOSZz;5>MD&hS|}HYz)u2>7;G<_gryY-q~LXQCaUs`Ng;5Se_CR|14kgv z6yt{)$@dOuLVaI4Q8l9rKt@ie0L6FxDMWLCLaH`HPzuqNlmtM-cNu6uIO>Akzn#fL z$Y)llsS|iO718gV-^_~Ft<6frclw}gfj<-1SC(_3bB`urh3?uE;xxO6M;>?)4`|SI z7P_00kU<>i9|sFu)=AXp-IV~RsL~0O?^H@aLqjhxxaV_fV%-J>tj(*KhM8=}p$TwkSB z*25XdAmHu=F!Ga;f$I96q!2ruabQrXXJEylR{&ve&pCkX`ll;Ac#XeubgrQ-Tvd2? zlUwta0mZiQxS-HM>hA}so3$JvVlG_cb13NDx)n{q5rZ3h8g-X_C)D4_-Fli?$f|t& zOZU1>y-fVs8U?)DfRv6^EVjU2;MI}B*EJkeS3N-l_Q|*?-^Xb@e)P(5ps@j2X}Vdw z10eT?E<=&k*~!Go?`=?lQz@5>$(e!$mWK0$@~c@yPW>l9`%Rt|-oD)qjSDTDj7Jky zQn2mv0a$+Rxdk47XE7I1hs;6Q%J*r6OLYtky&H8w6{iYVxZ|5|Qi=6JSvRQTKsOB2a4ZfeQ|95UUZx5g1K?Ds=b$W$DF8H-5G*w(K8=ABJ{Gg^D*lr+0(6@U4~C8`N2@(Y+)%;%P9F3+ zgbnkPc1=PJJ`2)_E@6@dwzfsjQ|vamqFLta)37nfk&D!7mY}s{3mYpQoG1eC_}OAv zu}rag@c3~6>2?^0*SFo{LZkbap<2+%0?#-&6K(`;nfM#_K_G6q9SiSUuY}oZp$nE# z`aNu1;4L;2!V3~GqTvu+T_2Q1n4dTUNTru0*74*Nfc)b&8*@K#lLxhqxS)o*Hxl!} zTPXl7!4{(Z_gFk(Ov@tJh{&vQ|CG5vSmY5+xpQ;**tbn9I)u}@%Ew%(;LySQ-(b@_ z9BPN0`u~r3lMp@ALGp~9X7(+^XAQifnv@RQ8sqG5{9puBv6`&Tc#{uB8fjufk!Xr2 zXfl%`jWo%mh!;h)6j57KL_rZmLJ@n4lu?9Cr-&OxI5e@Mh>#{?ir7$OH%(G0Vo(2t zOOf#u;m|}x5hg|E(j=K8O!_Z0VcaqdkSA0FW`8OWI+jxb(cw#lmX2jqXy{lT^%wT_3wTV!kOf+d^RWEuOhu1P^a#cLIz4^4CtJ|VmkCQ2 S$echwSWu@IwXXXzW&H>GfiL|4 From 19f911408376f5f929c6de00089113513dc3012e Mon Sep 17 00:00:00 2001 From: bxvtr Date: Tue, 5 May 2026 12:42:30 +0000 Subject: [PATCH 34/36] m2 
p10 s4.1: template fix --- argo/templates/workflowtemplate-build-push-ghcr.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/argo/templates/workflowtemplate-build-push-ghcr.yaml b/argo/templates/workflowtemplate-build-push-ghcr.yaml index 8ff4b69..8c613b4 100644 --- a/argo/templates/workflowtemplate-build-push-ghcr.yaml +++ b/argo/templates/workflowtemplate-build-push-ghcr.yaml @@ -66,8 +66,10 @@ spec: DESTINATIONS="$DESTINATIONS --destination=$TAG" done + export GIT_PULL_METHOD=https + /kaniko/executor \ - --context="$GIT_REPO#$RUNTIME_COMMIT" \ + --context="git://${GIT_REPO#https://}#$RUNTIME_COMMIT" \ --dockerfile=Dockerfile \ --target=runtime \ $DESTINATIONS \ From d77296f0c667bcb1639b9db4dca89fbf87144800 Mon Sep 17 00:00:00 2001 From: bxvtr Date: Tue, 5 May 2026 14:06:45 +0000 Subject: [PATCH 35/36] m2 p10 s5: inform about removal of unnecessary mlflow S3 configs --- README.md | 25 +++++++++ examples/argo/argo.json | 119 ---------------------------------------- 2 files changed, 25 insertions(+), 119 deletions(-) delete mode 100644 examples/argo/argo.json diff --git a/README.md b/README.md index b7e6707..430b2c6 100644 --- a/README.md +++ b/README.md @@ -303,6 +303,31 @@ Guardrails: - prefer commit SHA tags for `prod` runs. - use mutable tags such as `latest` only for quick smoke checks. +### Backtest storage vs MLflow tracking + +Core Runtime and MLflow serve different purposes in cluster runs: + +- Backtest output artifacts are written by Core Runtime directly to OCI Object Storage. +- MLflow is used for tracking metadata only (params, metrics, tags), not for artifact files. 
+ +Backtest artifact storage path: + +- bucket: `data` +- prefix: `backtests//...` +- auth mode: OCI Instance Principals (IAM policy controlled) + +Code anchors: + +- backtest result download/upload pipeline: `core_runtime/backtest/runtime/run_sweep.py` +- OCI Object Storage adapter + auth behavior: `core_runtime/backtest/io/s3_adapter.py` +- MLflow tracking logger (no artifact logging): `core_runtime/backtest/runtime/mlflow_segment_logger.py` + +Tracking-only policy: + +- MLflow run metadata remains in the backend store. +- MLflow artifact storage is intentionally unsupported in this setup. +- If a client starts calling artifact APIs (for example `mlflow.log_artifact(...)`), treat failures as expected until artifact storage is intentionally added. + --- ## Scripts diff --git a/examples/argo/argo.json b/examples/argo/argo.json deleted file mode 100644 index 1f26301..0000000 --- a/examples/argo/argo.json +++ /dev/null @@ -1,119 +0,0 @@ -{ - "id": "debug_strategy_v0", - "description": "Debug Strategy V0", - - "engine": { - "initial_snapshot": null, - "data_files": null, - - "instrument": "BTC_USDC-PERPETUAL", - "tick_size": 0.1, - "lot_size": 0.01, - "contract_size": 1, - - "maker_fee_rate": 0.0, - "taker_fee_rate": 0.0, - - "entry_latency_ns": 10000000, - "response_latency_ns": 10000000, - - "use_risk_adverse_queue_model": true, - "partial_fill_venue": true, - - "max_steps": 5000000, - - "last_trades_capacity": 10, - "max_price_tick_levels": 20, - - "roi_lb": 40000, - "roi_ub": 80000, - - "stats_npz_path": null, - "event_bus_path": null - }, - - "risk": { - "scope": "debug_strategy_v0", - - "position_limits": { - "currency": "USDC", - "max_position": 10 - }, - - "notional_limits": { - "currency": "USDC", - "max_gross_notional": 200000.0, - "max_single_order_notional": 10000.0 - }, - - "quote_limits": { - "currency": "USDC", - "max_gross_quote_notional": 20000.0, - "max_net_quote_notional": 10000.0, - "max_active_quotes": 20000 - }, - - "order_rate_limits": { - 
"max_orders_per_second": 20, - "max_cancels_per_second": 20 - }, - - "max_loss": { - "currency": "USDC", - "max_drawdown": -2000.0, - "rolling_loss": -200.0, - "rolling_loss_window": 60 - }, - - "extra": { - "venue_policy": { - "min_order_notional": 5.0, - "post_only_mode": "reject" - } - } - }, - - "strategy": { - "class_path": "examples.strategies.debug_strategy:DebugStrategyV0", - "spread": 5.0, - "order_qty": 0.1, - "use_price_tick_levels": 3, - "post_only": true - }, - - "core": { - "version": "v1", - "market": { - "instruments": { - "BTC_USDC-PERPETUAL": { - "tick_size": 0.1, - "lot_size": 0.01, - "contract_size": 1 - } - } - } - }, - - "experiment": { - "start_ts_ns": 1636035200000000000, - "end_ts_ns": 1836121600000000000, - "symbol": "BTC_USDC-PERPETUAL", - - "venue": "deribit", - "datatype": "mixed", - - "segmentation": { - "max_segment_gb": 0.00001 - }, - - "sweeps": { - "strategy.spread": { - "start": 2.0, - "stop": 3.0, - "step": 1.0 - }, - "strategy.order_qty": [0.1, 0.2] - } - } - -} From 223302be435be177654a6b6171e21e2dbb2df59b Mon Sep 17 00:00:00 2001 From: bxvtr Date: Tue, 5 May 2026 14:17:42 +0000 Subject: [PATCH 36/36] m2 p10 s6: remove redundant examples --- README.md | 3 +- .../local/oci.config.example | 0 examples/__init__.py | 0 examples/local/__init__.py | 0 examples/local/backtest.py | 89 --------- examples/local/local.json | 101 ---------- examples/strategies/__init__.py | 0 examples/strategies/debug_strategy.py | 184 ------------------ 8 files changed, 1 insertion(+), 376 deletions(-) rename {examples => core_runtime}/local/oci.config.example (100%) delete mode 100644 examples/__init__.py delete mode 100644 examples/local/__init__.py delete mode 100644 examples/local/backtest.py delete mode 100644 examples/local/local.json delete mode 100644 examples/strategies/__init__.py delete mode 100644 examples/strategies/debug_strategy.py diff --git a/README.md b/README.md index 430b2c6..fd066af 100644 --- a/README.md +++ b/README.md @@ -86,7 
+86,6 @@ python -m pip install -e ../core | Local backtest | `core_runtime/local/backtest.py` | `python -m core_runtime.local.backtest --config core_runtime/local/local.json` | Main local runner. | | Argo plan/run orchestration | `core_runtime/backtest/runtime/entrypoint.py` | `python -m core_runtime.backtest.runtime.entrypoint --config core_runtime/argo/argo.json --plan` | Planner and sweep-context emitter for Argo flow. | | Sweep worker | `core_runtime/backtest/runtime/run_sweep.py` | `python -m core_runtime.backtest.runtime.run_sweep --context ` | Executes one sweep context. | -| Examples path | `examples/local/backtest.py` | `python examples/local/backtest.py --config examples/local/local.json` | Reference path; duplicates runtime patterns. | --- @@ -151,7 +150,6 @@ python -m pip install -e ../core argo/templates/ Argo WorkflowTemplates shown in Argo UI core_runtime/ Runtime entrypoints and execution modules docs/ Runtime implementation notes -examples/ Example runner/config paths scripts/ Build/validation helper scripts tests/ Runtime tests and deterministic fixtures ``` @@ -163,6 +161,7 @@ tests/ Runtime tests and deterministic fixtures Primary local config: - `core_runtime/local/local.json` +- OCI config template (for local object storage auth setups): `core_runtime/local/oci.config.example` Note: local JSON configs use cwd-relative paths for `tests/data/...` inputs and `.runtime/...` outputs. The supported default workflow is to run commands from the `core-runtime` repo root. 
diff --git a/examples/local/oci.config.example b/core_runtime/local/oci.config.example similarity index 100% rename from examples/local/oci.config.example rename to core_runtime/local/oci.config.example diff --git a/examples/__init__.py b/examples/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/examples/local/__init__.py b/examples/local/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/examples/local/backtest.py b/examples/local/backtest.py deleted file mode 100644 index 9c7db70..0000000 --- a/examples/local/backtest.py +++ /dev/null @@ -1,89 +0,0 @@ -"""Command-line interface for running backtests in devcontainer.""" - -from __future__ import annotations - -import argparse -import json -import sys -from pathlib import Path -from typing import TYPE_CHECKING - -# Enable importing plugin-style modules outside the core package (e.g. examples/) -if __name__ == "__main__" or True: - PROJECT_ROOT = Path(__file__).resolve().parents[2] - sys.path.insert(0, str(PROJECT_ROOT)) - -if TYPE_CHECKING: - from core_runtime.backtest.engine.engine_base import BacktestResult - -from tradingchassis_core.core.risk.risk_config import RiskConfig -from tradingchassis_core.strategies.strategy_config import StrategyConfig -from core_runtime.backtest.engine.hft_engine import ( - HftBacktestConfig, - HftBacktestEngine, - HftEngineConfig, -) -from core_runtime.backtest.runtime.core_configuration_mapper import ( - build_core_configuration_from_run_config, -) - - -def load_config(path: str) -> HftBacktestConfig: - """Load a backtest configuration from a JSON file.""" - config_path = Path(path) - raw_json = json.loads(config_path.read_text(encoding="utf-8")) - - try: - engine_raw = raw_json["engine"] - strategy_raw = raw_json["strategy"] - risk_raw = raw_json["risk"] - except KeyError as exc: - raise ValueError( - f"Missing top-level section in {config_path}: {exc}" - ) from exc - - engine_cfg = HftEngineConfig(**engine_raw) - strategy_cfg = 
StrategyConfig(**strategy_raw) - risk_cfg = RiskConfig(**risk_raw) - core_cfg = build_core_configuration_from_run_config(raw_json) - - return HftBacktestConfig( - id=raw_json["id"], - description=raw_json.get("description", ""), - engine_cfg=engine_cfg, - strategy_cfg=strategy_cfg, - risk_cfg=risk_cfg, - core_cfg=core_cfg, - ) - - -def main() -> None: - """Entry point for the backtest command-line interface.""" - parser = argparse.ArgumentParser( - description="Run a strategy-based hftbacktest backtest." - ) - parser.add_argument( - "--config", - type=str, - required=True, - help="Path to JSON config file (HftBacktestConfig).", - ) - args = parser.parse_args() - - cfg = load_config(args.config) - engine = HftBacktestEngine(cfg) - - print("Backtest started.") - result: BacktestResult = engine.run() - - print("Backtest finished.") - print(f" id: {result.id}") - print(f" stats_npz: {result.stats_file}") - if result.extra_metadata is not None: - print(" metadata:") - for key, value in result.extra_metadata.items(): - print(f" {key}: {value}") - - -if __name__ == "__main__": - main() diff --git a/examples/local/local.json b/examples/local/local.json deleted file mode 100644 index 3fedc31..0000000 --- a/examples/local/local.json +++ /dev/null @@ -1,101 +0,0 @@ -{ - "id": "debug_strategy_v0", - "description": "Debug Strategy V0", - - "engine": { - "initial_snapshot": null, - "data_files": [ - "tests/data/parts/part-000.npz", - "tests/data/parts/part-001.npz", - "tests/data/parts/part-002.npz" - ], - - "instrument": "BTC_USDC-PERPETUAL", - "tick_size": 0.1, - "lot_size": 0.01, - "contract_size": 1, - - "maker_fee_rate": 0.0, - "taker_fee_rate": 0.0, - - "entry_latency_ns": 10000000, - "response_latency_ns": 10000000, - - "use_risk_adverse_queue_model": true, - "partial_fill_venue": true, - - "max_steps": 5000000, - - "last_trades_capacity": 10, - "max_price_tick_levels": 20, - - "roi_lb": 40000, - "roi_ub": 80000, - - "stats_npz_path": ".runtime/local/results/stats.npz", - 
"event_bus_path": ".runtime/local/results/events.json" - }, - - "risk": { - "scope": "debug_strategy_v0", - - "position_limits": { - "currency": "USDC", - "max_position": 10 - }, - - "notional_limits": { - "currency": "USDC", - "max_gross_notional": 200000.0, - "max_single_order_notional": 10000.0 - }, - - "quote_limits": { - "currency": "USDC", - "max_gross_quote_notional": 20000.0, - "max_net_quote_notional": 10000.0, - "max_active_quotes": 20000 - }, - - "order_rate_limits": { - "max_orders_per_second": 20, - "max_cancels_per_second": 20 - }, - - "max_loss": { - "currency": "USDC", - "max_drawdown": -2000.0, - "rolling_loss": -200.0, - "rolling_loss_window": 60 - }, - - "extra": { - "venue_policy": { - "min_order_notional": 5.0, - "post_only_mode": "reject" - } - } - }, - - "strategy": { - "class_path": "examples.strategies.debug_strategy:DebugStrategyV0", - "spread": 5.0, - "order_qty": 0.1, - "use_price_tick_levels": 3, - "post_only": true - }, - - "core": { - "version": "v1", - "market": { - "instruments": { - "BTC_USDC-PERPETUAL": { - "tick_size": 0.1, - "lot_size": 0.01, - "contract_size": 1 - } - } - } - } - -} diff --git a/examples/strategies/__init__.py b/examples/strategies/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/examples/strategies/debug_strategy.py b/examples/strategies/debug_strategy.py deleted file mode 100644 index 09b5e98..0000000 --- a/examples/strategies/debug_strategy.py +++ /dev/null @@ -1,184 +0,0 @@ -from __future__ import annotations - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from tradingchassis_core import ( - EngineContext, - GateDecision, - MarketEvent, - RiskConstraints, - StrategyState, - ) - -from tradingchassis_core import ( - NewOrderIntent, - OrderIntent, - Price, - Quantity, - ReplaceOrderIntent, - SlotKey, - Strategy, - stable_slot_order_id, -) - -_SLOT_NAMESPACE = "debug_strategy_v0" - - -class DebugStrategyV0(Strategy): - """Very simple market making example strategy.""" - - def 
__init__( - self, - spread: float, - order_qty: float, - use_price_tick_levels: int, - post_only: bool, - ) -> None: - self.spread = spread - self.order_qty = order_qty - self.use_price_tick_levels = use_price_tick_levels - self.post_only = post_only - - self.intents_on_event: list[OrderIntent] = [] - self.intents_after_risk: list[OrderIntent] = [] - - def round_to_tick(self, price: float, tick: float) -> float: - if tick <= 0: - raise ValueError("tick must be positive") - return round(price / tick) * tick - - def on_feed( - self, - state: StrategyState, - event: MarketEvent, - engine_cfg: EngineContext, - constraints: RiskConstraints, - ) -> list[OrderIntent]: - """Feed-triggered logic (rc=2). Inputs are read-only for Strategy, otherwise considered a bug.""" - - self.intents_on_event = [] - - # NOTE: keep existing logic as-is for now; we will align field names/types later. - # This block is only to satisfy the new interface. - if not constraints.trading_enabled: - return self.intents_on_event - - if not event.is_book() or event.book is None: - return self.intents_on_event - - if not event.book.bids or not event.book.asks: - return self.intents_on_event - - best_bid = float(event.book.bids[0].price.value) - best_ask = float(event.book.asks[0].price.value) - mid = 0.5 * (best_bid + best_ask) - - tick = float(engine_cfg.tick_size) - tif = "POST_ONLY" if self.post_only else "GTC" - - num_levels = int(self.use_price_tick_levels) - if num_levels <= 0: - num_levels = 1 - - instrument = str(event.instrument) - - def is_slot_busy(client_order_id: str) -> bool: - return state.is_order_id_busy(instrument, client_order_id) - - def bid_price_for_level(level_index: int) -> float: - if level_index < len(event.book.bids): - px = float(event.book.bids[level_index].price.value) - else: - px = mid - (self.spread * 0.5) - (float(level_index) * tick) - return self.round_to_tick(px, tick) - - def ask_price_for_level(level_index: int) -> float: - if level_index < len(event.book.asks): - 
px = float(event.book.asks[level_index].price.value) - else: - px = mid + (self.spread * 0.5) + (float(level_index) * tick) - return self.round_to_tick(px, tick) - - intents: list[OrderIntent] = [] - - for level in range(num_levels): - bid_slot = SlotKey(instrument=instrument, side="buy", level_index=level) - ask_slot = SlotKey(instrument=instrument, side="sell", level_index=level) - - bid_id = stable_slot_order_id(bid_slot, namespace=_SLOT_NAMESPACE) - ask_id = stable_slot_order_id(ask_slot, namespace=_SLOT_NAMESPACE) - - bid_px = bid_price_for_level(level) - ask_px = ask_price_for_level(level) - - if is_slot_busy(bid_id): - intents.append( - ReplaceOrderIntent( - ts_ns_local=event.ts_ns_local, - instrument=instrument, - client_order_id=bid_id, - intent_type="replace", - order_type="limit", - side="buy", - intended_price=Price(currency="UNKNOWN", value=bid_px), - intended_qty=Quantity(value=self.order_qty, unit="contracts"), - ) - ) - else: - intents.append( - NewOrderIntent( - ts_ns_local=event.ts_ns_local, - instrument=instrument, - client_order_id=bid_id, - intent_type="new", - order_type="limit", - side="buy", - intended_price=Price(currency="UNKNOWN", value=bid_px), - intended_qty=Quantity(value=self.order_qty, unit="contracts"), - time_in_force=tif, - ) - ) - - if is_slot_busy(ask_id): - intents.append( - ReplaceOrderIntent( - ts_ns_local=event.ts_ns_local, - instrument=instrument, - client_order_id=ask_id, - intent_type="replace", - order_type="limit", - side="sell", - intended_price=Price(currency="UNKNOWN", value=ask_px), - intended_qty=Quantity(value=self.order_qty, unit="contracts"), - ) - ) - else: - intents.append( - NewOrderIntent( - ts_ns_local=event.ts_ns_local, - instrument=instrument, - client_order_id=ask_id, - intent_type="new", - order_type="limit", - side="sell", - intended_price=Price(currency="UNKNOWN", value=ask_px), - intended_qty=Quantity(value=self.order_qty, unit="contracts"), - time_in_force=tif, - ) - ) - - 
self.intents_on_event.extend(intents) - return self.intents_on_event - - def on_order_update( - self, - state: StrategyState, - engine_cfg: EngineContext, - constraints: RiskConstraints, - ) -> list[OrderIntent]: - """Order-update-triggered logic (rc=3). Inputs are read-only for Strategy, otherwise considered a bug.""" - return [] - - def on_risk_decision(self, decision: GateDecision) -> None: - self.intents_after_risk = decision.accepted_now