diff --git a/images/chromium-headful/run-docker.sh b/images/chromium-headful/run-docker.sh index aad41331..75d36b7e 100755 --- a/images/chromium-headful/run-docker.sh +++ b/images/chromium-headful/run-docker.sh @@ -71,6 +71,14 @@ if [[ -n "${PLAYWRIGHT_ENGINE:-}" ]]; then RUN_ARGS+=( -e PLAYWRIGHT_ENGINE="$PLAYWRIGHT_ENGINE" ) fi +# S2 durable event storage +if [[ -n "${S2_BASIN:-}" ]]; then + RUN_ARGS+=( -e S2_BASIN="$S2_BASIN" ) +fi +if [[ -n "${S2_ACCESS_TOKEN:-}" ]]; then + RUN_ARGS+=( -e S2_ACCESS_TOKEN="$S2_ACCESS_TOKEN" ) +fi + # WebRTC port mapping if [[ "${ENABLE_WEBRTC:-}" == "true" ]]; then echo "Running container with WebRTC" diff --git a/images/chromium-headful/supervisor/services/kernel-images-api.conf b/images/chromium-headful/supervisor/services/kernel-images-api.conf index e57d30a8..9385101e 100644 --- a/images/chromium-headful/supervisor/services/kernel-images-api.conf +++ b/images/chromium-headful/supervisor/services/kernel-images-api.conf @@ -1,5 +1,5 @@ [program:kernel-images-api] -command=/bin/bash -lc 'mkdir -p "${KERNEL_IMAGES_API_OUTPUT_DIR:-/recordings}" && PORT="${KERNEL_IMAGES_API_PORT:-10001}" FRAME_RATE="${KERNEL_IMAGES_API_FRAME_RATE:-10}" DISPLAY_NUM="${KERNEL_IMAGES_API_DISPLAY_NUM:-${DISPLAY_NUM:-1}}" MAX_SIZE_MB="${KERNEL_IMAGES_API_MAX_SIZE_MB:-500}" OUTPUT_DIR="${KERNEL_IMAGES_API_OUTPUT_DIR:-/recordings}" LOG_CDP_MESSAGES="${LOG_CDP_MESSAGES:-false}" exec /usr/local/bin/kernel-images-api' +command=/bin/bash -lc 'mkdir -p "${KERNEL_IMAGES_API_OUTPUT_DIR:-/recordings}" && PORT="${KERNEL_IMAGES_API_PORT:-10001}" FRAME_RATE="${KERNEL_IMAGES_API_FRAME_RATE:-10}" DISPLAY_NUM="${KERNEL_IMAGES_API_DISPLAY_NUM:-${DISPLAY_NUM:-1}}" MAX_SIZE_MB="${KERNEL_IMAGES_API_MAX_SIZE_MB:-500}" OUTPUT_DIR="${KERNEL_IMAGES_API_OUTPUT_DIR:-/recordings}" LOG_CDP_MESSAGES="${LOG_CDP_MESSAGES:-false}" S2_BASIN="${S2_BASIN:-}" S2_ACCESS_TOKEN="${S2_ACCESS_TOKEN:-}" exec /usr/local/bin/kernel-images-api' autostart=false autorestart=true startsecs=2 diff 
--git a/images/chromium-headless/image/supervisor/services/kernel-images-api.conf b/images/chromium-headless/image/supervisor/services/kernel-images-api.conf index e57d30a8..9385101e 100644 --- a/images/chromium-headless/image/supervisor/services/kernel-images-api.conf +++ b/images/chromium-headless/image/supervisor/services/kernel-images-api.conf @@ -1,5 +1,5 @@ [program:kernel-images-api] -command=/bin/bash -lc 'mkdir -p "${KERNEL_IMAGES_API_OUTPUT_DIR:-/recordings}" && PORT="${KERNEL_IMAGES_API_PORT:-10001}" FRAME_RATE="${KERNEL_IMAGES_API_FRAME_RATE:-10}" DISPLAY_NUM="${KERNEL_IMAGES_API_DISPLAY_NUM:-${DISPLAY_NUM:-1}}" MAX_SIZE_MB="${KERNEL_IMAGES_API_MAX_SIZE_MB:-500}" OUTPUT_DIR="${KERNEL_IMAGES_API_OUTPUT_DIR:-/recordings}" LOG_CDP_MESSAGES="${LOG_CDP_MESSAGES:-false}" exec /usr/local/bin/kernel-images-api' +command=/bin/bash -lc 'mkdir -p "${KERNEL_IMAGES_API_OUTPUT_DIR:-/recordings}" && PORT="${KERNEL_IMAGES_API_PORT:-10001}" FRAME_RATE="${KERNEL_IMAGES_API_FRAME_RATE:-10}" DISPLAY_NUM="${KERNEL_IMAGES_API_DISPLAY_NUM:-${DISPLAY_NUM:-1}}" MAX_SIZE_MB="${KERNEL_IMAGES_API_MAX_SIZE_MB:-500}" OUTPUT_DIR="${KERNEL_IMAGES_API_OUTPUT_DIR:-/recordings}" LOG_CDP_MESSAGES="${LOG_CDP_MESSAGES:-false}" S2_BASIN="${S2_BASIN:-}" S2_ACCESS_TOKEN="${S2_ACCESS_TOKEN:-}" exec /usr/local/bin/kernel-images-api' autostart=false autorestart=true startsecs=2 diff --git a/images/chromium-headless/run-docker.sh b/images/chromium-headless/run-docker.sh index 56f582bf..677ef780 100755 --- a/images/chromium-headless/run-docker.sh +++ b/images/chromium-headless/run-docker.sh @@ -24,6 +24,14 @@ if [[ -n "${PLAYWRIGHT_ENGINE:-}" ]]; then RUN_ARGS+=( -e PLAYWRIGHT_ENGINE="$PLAYWRIGHT_ENGINE" ) fi +# S2 durable event storage +if [[ -n "${S2_BASIN:-}" ]]; then + RUN_ARGS+=( -e S2_BASIN="$S2_BASIN" ) +fi +if [[ -n "${S2_ACCESS_TOKEN:-}" ]]; then + RUN_ARGS+=( -e S2_ACCESS_TOKEN="$S2_ACCESS_TOKEN" ) +fi + # If a positional argument is given, use it as the entrypoint ENTRYPOINT_ARG=() if [[ 
$# -ge 1 && -n "$1" ]]; then diff --git a/server/cmd/api/api/api.go b/server/cmd/api/api/api.go index 84164b26..e5a0b3d6 100644 --- a/server/cmd/api/api/api.go +++ b/server/cmd/api/api/api.go @@ -10,7 +10,6 @@ import ( "sync" "time" - "github.com/kernel/kernel-images/server/lib/capturesession" "github.com/kernel/kernel-images/server/lib/cdpmonitor" "github.com/kernel/kernel-images/server/lib/devtoolsproxy" "github.com/kernel/kernel-images/server/lib/events" @@ -82,8 +81,7 @@ type ApiService struct { xvfbResizeMu sync.Mutex // CDP event pipeline and cdpMonitor. - eventStream *events.EventStream - captureSession *capturesession.CaptureSession + captureSession *events.CaptureSession cdpMonitor cdpMonitorController monitorMu sync.Mutex lifecycleCtx context.Context @@ -92,14 +90,14 @@ type ApiService struct { var _ oapi.StrictServerInterface = (*ApiService)(nil) +// New constructs an ApiService. func New( recordManager recorder.RecordManager, factory recorder.FFmpegRecorderFactory, upstreamMgr *devtoolsproxy.UpstreamManager, stz scaletozero.Controller, nekoAuthClient *nekoclient.AuthClient, - captureSession *capturesession.CaptureSession, - eventStream *events.EventStream, + captureSession *events.CaptureSession, displayNum int, ) (*ApiService, error) { switch { @@ -113,16 +111,14 @@ func New( return nil, fmt.Errorf("nekoAuthClient cannot be nil") case captureSession == nil: return nil, fmt.Errorf("captureSession cannot be nil") - case eventStream == nil: - return nil, fmt.Errorf("eventStream cannot be nil") } - mon := cdpmonitor.New(upstreamMgr, captureSession.Publish, displayNum, slog.Default()) + mon := cdpmonitor.New(upstreamMgr, func(ev events.Event) { captureSession.Publish(ev) }, displayNum, slog.Default()) ctx, cancel := context.WithCancel(context.Background()) return &ApiService{ - recordManager: recordManager, - factory: factory, + recordManager: recordManager, + factory: factory, defaultRecorderID: "default", watches: make(map[string]*fsWatch), procs: 
make(map[string]*processHandle), @@ -130,7 +126,6 @@ func New( stz: stz, nekoAuthClient: nekoAuthClient, policy: &policy.Policy{}, - eventStream: eventStream, captureSession: captureSession, cdpMonitor: mon, lifecycleCtx: ctx, @@ -358,6 +353,7 @@ func (s *ApiService) Shutdown(ctx context.Context) error { s.lifecycleCancel() s.cdpMonitor.Stop() s.captureSession.Stop() + _ = s.captureSession.Close() s.monitorMu.Unlock() return s.recordManager.StopAll(ctx) } diff --git a/server/cmd/api/api/api_test.go b/server/cmd/api/api/api_test.go index 02835ab1..cb5e9afc 100644 --- a/server/cmd/api/api/api_test.go +++ b/server/cmd/api/api/api_test.go @@ -11,7 +11,6 @@ import ( "log/slog" - "github.com/kernel/kernel-images/server/lib/capturesession" "github.com/kernel/kernel-images/server/lib/devtoolsproxy" "github.com/kernel/kernel-images/server/lib/events" "github.com/kernel/kernel-images/server/lib/nekoclient" @@ -27,7 +26,7 @@ func TestApiService_StartRecording(t *testing.T) { t.Run("success", func(t *testing.T) { mgr := recorder.NewFFmpegManager() - svc, err := newSvc(t, mgr) + svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) resp, err := svc.StartRecording(ctx, oapi.StartRecordingRequestObject{}) @@ -41,7 +40,7 @@ func TestApiService_StartRecording(t *testing.T) { t.Run("already recording", func(t *testing.T) { mgr := recorder.NewFFmpegManager() - svc, err := newSvc(t, mgr) + svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) // First start should succeed @@ -56,7 +55,7 @@ func TestApiService_StartRecording(t *testing.T) { t.Run("custom ids don't collide", func(t *testing.T) { mgr := recorder.NewFFmpegManager() - svc, err := newSvc(t, mgr) + svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), 
newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) for i := 0; i < 5; i++ { @@ -89,7 +88,7 @@ func TestApiService_StopRecording(t *testing.T) { t.Run("no active recording", func(t *testing.T) { mgr := recorder.NewFFmpegManager() - svc, err := newSvc(t, mgr) + svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) resp, err := svc.StopRecording(ctx, oapi.StopRecordingRequestObject{}) @@ -102,7 +101,7 @@ func TestApiService_StopRecording(t *testing.T) { rec := &mockRecorder{id: "default", isRecordingFlag: true} require.NoError(t, mgr.RegisterRecorder(ctx, rec), "failed to register recorder") - svc, err := newSvc(t, mgr) + svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) resp, err := svc.StopRecording(ctx, oapi.StopRecordingRequestObject{}) require.NoError(t, err) @@ -117,7 +116,7 @@ func TestApiService_StopRecording(t *testing.T) { force := true req := oapi.StopRecordingRequestObject{Body: &oapi.StopRecordingJSONRequestBody{ForceStop: &force}} - svc, err := newSvc(t, mgr) + svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) resp, err := svc.StopRecording(ctx, req) require.NoError(t, err) @@ -131,7 +130,7 @@ func TestApiService_DownloadRecording(t *testing.T) { t.Run("not found", func(t *testing.T) { mgr := recorder.NewFFmpegManager() - svc, err := newSvc(t, mgr) + svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) resp, err := svc.DownloadRecording(ctx, oapi.DownloadRecordingRequestObject{}) require.NoError(t, err) @@ -151,7 +150,7 @@ func TestApiService_DownloadRecording(t *testing.T) 
{ rec := &mockRecorder{id: "default", isRecordingFlag: true, recordingData: randomBytes(minRecordingSizeInBytes - 1)} require.NoError(t, mgr.RegisterRecorder(ctx, rec), "failed to register recorder") - svc, err := newSvc(t, mgr) + svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) // will return a 202 when the recording is too small resp, err := svc.DownloadRecording(ctx, oapi.DownloadRecordingRequestObject{}) @@ -181,7 +180,7 @@ func TestApiService_DownloadRecording(t *testing.T) { rec := &mockRecorder{id: "default", recordingData: data} require.NoError(t, mgr.RegisterRecorder(ctx, rec), "failed to register recorder") - svc, err := newSvc(t, mgr) + svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) resp, err := svc.DownloadRecording(ctx, oapi.DownloadRecordingRequestObject{}) require.NoError(t, err) @@ -201,7 +200,7 @@ func TestApiService_Shutdown(t *testing.T) { rec := &mockRecorder{id: "default", isRecordingFlag: true} require.NoError(t, mgr.RegisterRecorder(ctx, rec), "failed to register recorder") - svc, err := newSvc(t, mgr) + svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) require.NoError(t, svc.Shutdown(ctx)) @@ -305,26 +304,23 @@ func newMockNekoClient(t *testing.T) *nekoclient.AuthClient { return client } -func newCaptureSession(t *testing.T) (*capturesession.CaptureSession, *events.EventStream) { +func newCaptureSession(t *testing.T) *events.CaptureSession { t.Helper() - es, err := events.NewEventStream(events.EventStreamConfig{RingCapacity: 64}) + cs, err := events.NewCaptureSession(events.CaptureSessionConfig{ + LogDir: t.TempDir(), + RingCapacity: 64, + }) if err != nil { t.Fatal(err) } - return 
capturesession.NewCaptureSession(es), es -} - -// newSvc constructs an ApiService with a fresh capture session and event stream. -func newSvc(t *testing.T, mgr recorder.RecordManager) (*ApiService, error) { - t.Helper() - cs, es := newCaptureSession(t) - return New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), cs, es, 0) + t.Cleanup(func() { cs.Close() }) + return cs } func TestApiService_PatchChromiumFlags(t *testing.T) { ctx := context.Background() mgr := recorder.NewFFmpegManager() - svc, err := newSvc(t, mgr) + svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) // Test with valid flags diff --git a/server/cmd/api/api/capture_session.go b/server/cmd/api/api/capture_session.go index b69ed84a..f29379f1 100644 --- a/server/cmd/api/api/capture_session.go +++ b/server/cmd/api/api/capture_session.go @@ -8,7 +8,6 @@ import ( "github.com/nrednav/cuid2" oapi "github.com/kernel/kernel-images/server/lib/oapi" - "github.com/kernel/kernel-images/server/lib/capturesession" "github.com/kernel/kernel-images/server/lib/events" "github.com/kernel/kernel-images/server/lib/logger" ) @@ -88,6 +87,9 @@ func (s *ApiService) StopCaptureSession(_ context.Context, _ oapi.StopCaptureSes // tear down asynchronously, leaving IsRunning briefly true. resp := s.buildSessionResponse() resp.Status = oapi.CaptureSessionStatusStopped + // Session cleanup (Remove on the S2 producer) happens automatically in + // EventsStorageWriter.Run when it processes the SessionEnded event, ensuring + // all pending writes are flushed before the producer is torn down. 
s.captureSession.Stop() return oapi.StopCaptureSession200JSONResponse(resp), nil @@ -120,26 +122,26 @@ func (s *ApiService) buildSessionResponse() oapi.CaptureSession { } // captureConfigFrom converts the optional StartCaptureSessionRequest body -// into a capturesession.CaptureConfig. -func captureConfigFrom(body *oapi.StartCaptureSessionRequest) (capturesession.CaptureConfig, error) { +// into an events.CaptureConfig. +func captureConfigFrom(body *oapi.StartCaptureSessionRequest) (events.CaptureConfig, error) { if body == nil { - return capturesession.CaptureConfig{}, nil + return events.CaptureConfig{}, nil } return captureConfigFromOAPI(body.Config) } -// captureConfigFromOAPI converts an oapi.CaptureConfig to capturesession.CaptureConfig. -func captureConfigFromOAPI(cfg *oapi.CaptureConfig) (capturesession.CaptureConfig, error) { +// captureConfigFromOAPI converts an oapi.CaptureConfig to events.CaptureConfig. +func captureConfigFromOAPI(cfg *oapi.CaptureConfig) (events.CaptureConfig, error) { if cfg == nil || cfg.Categories == nil { - return capturesession.CaptureConfig{}, nil + return events.CaptureConfig{}, nil } - out := capturesession.CaptureConfig{ + out := events.CaptureConfig{ Categories: make([]events.EventCategory, 0, len(*cfg.Categories)), } for _, c := range *cfg.Categories { cat := events.EventCategory(c) if !events.ValidCategory(cat) { - return capturesession.CaptureConfig{}, fmt.Errorf("unknown category: %q", c) + return events.CaptureConfig{}, fmt.Errorf("unknown category: %q", c) } out.Categories = append(out.Categories, cat) } diff --git a/server/cmd/api/api/capture_session_test.go b/server/cmd/api/api/capture_session_test.go index 032cbb77..769972d5 100644 --- a/server/cmd/api/api/capture_session_test.go +++ b/server/cmd/api/api/capture_session_test.go @@ -244,10 +244,11 @@ func (m *mockRecordManager) ListActiveRecorders(_ context.Context) []recorder.Re func (m *mockRecordManager) StopAll(_ context.Context) error { return nil } // 
newTestService builds an ApiService with minimal dependencies for capture session tests. +// The RemoveSession path (triggered by SessionEnded events via the writer's Run loop) is +// not exercised here — it lives in eventsstorage_writer_test.go. func newTestService(t *testing.T, mgr recorder.RecordManager) *ApiService { t.Helper() - cs, es := newCaptureSession(t) - svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), cs, es, 0) + svc, err := New(mgr, newMockFactory(), newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) svc.cdpMonitor = &stubCdpMonitor{} return svc @@ -258,3 +259,4 @@ type stubCdpMonitor struct{} func (s *stubCdpMonitor) Start(_ context.Context) error { return nil } func (s *stubCdpMonitor) Stop() {} func (s *stubCdpMonitor) IsRunning() bool { return false } + diff --git a/server/cmd/api/api/display_test.go b/server/cmd/api/api/display_test.go index 1ecefa3c..905a8d72 100644 --- a/server/cmd/api/api/display_test.go +++ b/server/cmd/api/api/display_test.go @@ -34,8 +34,7 @@ func testFFmpegFactory(t *testing.T, tempDir string) recorder.FFmpegRecorderFact func newTestServiceWithFactory(t *testing.T, mgr recorder.RecordManager, factory recorder.FFmpegRecorderFactory) *ApiService { t.Helper() - cs, es := newCaptureSession(t) - svc, err := New(mgr, factory, newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), cs, es, 0) + svc, err := New(mgr, factory, newTestUpstreamManager(), scaletozero.NewNoopController(), newMockNekoClient(t), newCaptureSession(t), 0) require.NoError(t, err) return svc } diff --git a/server/cmd/api/api/events.go b/server/cmd/api/api/events.go index c72ba756..ca534654 100644 --- a/server/cmd/api/api/events.go +++ b/server/cmd/api/api/events.go @@ -16,13 +16,21 @@ import ( ) // PublishEvent handles POST /events/publish. 
-// Injects a caller-supplied event into the event bus. Returns 400 if the event -// fails validation. +// Injects a caller-supplied event into the active capture session. Returns 400 +// if no session is active or the event fails validation. func (s *ApiService) PublishEvent(_ context.Context, req oapi.PublishEventRequestObject) (oapi.PublishEventResponseObject, error) { + if !s.captureSession.Active() { + return oapi.PublishEvent400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "no active capture session"}}, nil + } + body := req.Body if body == nil || body.Type == "" { return oapi.PublishEvent400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "type is required"}}, nil } + if body.Type == events.SessionEnded || body.Type == events.EventsDropped || body.Type == events.EventsStorageError { + return oapi.PublishEvent400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "type is reserved"}}, nil + } + ev := events.Event{Type: body.Type} ev.Ts = time.Now().UnixMicro() @@ -36,13 +44,9 @@ func (s *ApiService) PublishEvent(_ context.Context, req oapi.PublishEventReques ev.Category = events.CategorySystem } + // Enforce source.kind = KindKernelAPI so callers can't spoof the origin. 
+ ev.Source.Kind = events.KindKernelAPI if body.Source != nil { - if body.Source.Kind != nil { - if *body.Source.Kind == oapi.KernelApi { - return oapi.PublishEvent400JSONResponse{BadRequestErrorJSONResponse: oapi.BadRequestErrorJSONResponse{Message: "source.kind kernel_api is reserved for server-generated events"}}, nil - } - ev.Source.Kind = events.SourceKind(*body.Source.Kind) - } if body.Source.Event != nil { ev.Source.Event = *body.Source.Event } @@ -60,25 +64,27 @@ func (s *ApiService) PublishEvent(_ context.Context, req oapi.PublishEventReques ev.Data = json.RawMessage(data) } - env := s.eventStream.Publish(events.Envelope{Event: ev}) + env := s.captureSession.PublishUnfiltered(ev) return publishEventOKResponse{env}, nil } // StreamEvents handles GET /events/stream. -// Opens an SSE stream of envelopes from the event bus ring buffer. +// Opens an SSE stream of envelopes from the active capture session's ring buffer. // Supports reconnection via the Last-Event-ID header. Emits a keepalive comment -// frame every 15 s when no event arrives. +// frame every 15 s when no event arrives, and exits cleanly on session_ended. func (s *ApiService) StreamEvents(ctx context.Context, req oapi.StreamEventsRequestObject) (oapi.StreamEventsResponseObject, error) { - // Default to the current seq so fresh connections only see new events. - // Seqs are process-monotonic; a Last-Event-ID from any prior session resumes correctly. - afterSeq := s.eventStream.Seq() + afterSeq := uint64(0) if id := req.Params.LastEventID; id != nil && *id != "" { - if n, err := strconv.ParseUint(*id, 10, 64); err == nil && n > 0 { + // Invalid/non-numeric values fall back to 0, replaying all events from the start. + // Note: seq is per capture session and resets on each Start(). A Last-Event-ID + // from a previous session may silently overlap with the current session's seqs. 
+ if n, err := strconv.ParseUint(*id, 10, 64); err == nil { afterSeq = n } } - reader := s.eventStream.NewReader(afterSeq) + sessionID := s.captureSession.ID() + reader := s.captureSession.NewReader(afterSeq) pr, pw := io.Pipe() go func() { @@ -104,6 +110,21 @@ func (s *ApiService) StreamEvents(ctx context.Context, req oapi.StreamEventsRequ } if result.Dropped > 0 { + env := events.Envelope{ + CaptureSessionID: sessionID, + Seq: 0, + Event: events.Event{ + Ts: time.Now().UnixMicro(), + Type: events.EventsDropped, + Category: events.CategorySystem, + Source: events.Source{Kind: events.KindKernelAPI}, + Data: json.RawMessage(fmt.Sprintf(`{"dropped":%d}`, result.Dropped)), + }, + } + // Omit the id: field so the client's Last-Event-ID is not overwritten. + if err := writeEnvelopeFrame(pw, nil, env); err != nil { + return + } continue } @@ -111,6 +132,9 @@ func (s *ApiService) StreamEvents(ctx context.Context, req oapi.StreamEventsRequ if err := writeEnvelopeFrame(pw, &env.Seq, *env); err != nil { return } + if env.Event.Type == events.SessionEnded { + return + } } }() @@ -118,16 +142,6 @@ func (s *ApiService) StreamEvents(ctx context.Context, req oapi.StreamEventsRequ return oapi.StreamEvents200TexteventStreamResponse{Body: pr, Headers: headers}, nil } -// publishEventOKResponse serializes events.Envelope directly so the response -// is identical in shape to the SSE stream frames. -type publishEventOKResponse struct{ env events.Envelope } - -func (r publishEventOKResponse) VisitPublishEventResponse(w http.ResponseWriter) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(200) - return json.NewEncoder(w).Encode(r.env) -} - // writeEnvelopeFrame writes a single SSE frame. If seq is non-nil it is // emitted as the id: field, updating the client's Last-Event-ID. 
func writeEnvelopeFrame(w io.Writer, seq *uint64, env events.Envelope) error { @@ -145,3 +159,13 @@ func writeEnvelopeFrame(w io.Writer, seq *uint64, env events.Envelope) error { _, err = w.Write(buf.Bytes()) return err } + +// publishEventOKResponse serializes events.Envelope directly so the response +// is identical in shape to the SSE stream frames. +type publishEventOKResponse struct{ env events.Envelope } + +func (r publishEventOKResponse) VisitPublishEventResponse(w http.ResponseWriter) error { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(200) + return json.NewEncoder(w).Encode(r.env) +} diff --git a/server/cmd/api/api/events_test.go b/server/cmd/api/api/events_test.go index b47cdffc..9fab0deb 100644 --- a/server/cmd/api/api/events_test.go +++ b/server/cmd/api/api/events_test.go @@ -77,4 +77,21 @@ func TestEventLifecycle(t *testing.T) { stopResp, err := svc.StopCaptureSession(ctx, oapi.StopCaptureSessionRequestObject{}) require.NoError(t, err) assert.IsType(t, oapi.StopCaptureSession200JSONResponse{}, stopResp) + + // Verify session_ended arrives on the stream. + select { + case env := <-received: + assert.Equal(t, events.SessionEnded, env.Event.Type) + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for session_ended") + } + + // Verify the stream closes after session_ended. 
+ select { + case _, open := <-received: + assert.False(t, open, "stream should be closed after session_ended") + case <-time.After(2 * time.Second): + t.Fatal("timed out waiting for stream to close") + } } + diff --git a/server/cmd/api/main.go b/server/cmd/api/main.go index 48d17351..46c06388 100644 --- a/server/cmd/api/main.go +++ b/server/cmd/api/main.go @@ -22,7 +22,6 @@ import ( serverpkg "github.com/kernel/kernel-images/server" "github.com/kernel/kernel-images/server/cmd/api/api" "github.com/kernel/kernel-images/server/cmd/config" - "github.com/kernel/kernel-images/server/lib/capturesession" "github.com/kernel/kernel-images/server/lib/chromedriverproxy" "github.com/kernel/kernel-images/server/lib/devtoolsproxy" "github.com/kernel/kernel-images/server/lib/events" @@ -51,7 +50,7 @@ func main() { // ensure ffmpeg is available mustFFmpeg() - stz := scaletozero.NewDebouncedControllerWithCooldown(scaletozero.NewUnikraftCloudController(), config.ScaleToZeroCooldown) + stz := scaletozero.NewDebouncedController(scaletozero.NewUnikraftCloudController()) r := chi.NewRouter() r.Use( chiMiddleware.Logger, @@ -93,14 +92,27 @@ func main() { } // Construct events pipeline - eventStream, err := events.NewEventStream(events.EventStreamConfig{ + captureSession, err := events.NewCaptureSession(events.CaptureSessionConfig{ + LogDir: "/var/log/kernel", RingCapacity: 1024, }) if err != nil { - slogger.Error("failed to create event stream", "err", err) + slogger.Error("failed to create capture session", "err", err) os.Exit(1) } - captureSession := capturesession.NewCaptureSession(eventStream) + + // Optionally connect S2 durable storage sink (requires S2_BASIN + S2_TOKEN). 
+ var storageWriter *events.EventsStorageWriter + if config.S2Basin != "" && config.S2Token != "" { + s2backend, err := events.NewS2Storage(config.S2Basin, config.S2Token, + config.S2BatcherLingerMs, config.S2BatcherMaxRecs) + if err != nil { + slogger.Warn("s2 storage unavailable, running without durable event storage", "err", err) + } else { + storageWriter = events.NewEventsStorageWriter(captureSession, s2backend) + slogger.Info("s2 durable event storage enabled", "basin", config.S2Basin) + } + } apiService, err := api.New( recorder.NewFFmpegManager(), @@ -109,7 +121,6 @@ func main() { stz, nekoAuthClient, captureSession, - eventStream, config.DisplayNum, ) if err != nil { @@ -244,20 +255,63 @@ func main() { } }() + // Give the storage writer its own cancellable context so we can stop it + // independently of the signal context. This lets us call captureSession.Stop() + // (which publishes SessionEnded to the ring) before cancelling the writer, + // ensuring the writer can process and flush SessionEnded to S2. + writerCtx, writerCancel := context.WithCancel(context.Background()) + defer writerCancel() + + // Start the S2 storage writer goroutine (no-op if S2 not configured). + storageDone := make(chan struct{}) + if storageWriter != nil { + go func() { + defer close(storageDone) + storageWriter.Run(writerCtx) + }() + } else { + close(storageDone) + } + // graceful shutdown <-ctx.Done() slogger.Info("shutdown signal received") shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 10*time.Second) defer shutdownCancel() + + // Stop the active capture session now, while the storage writer is still + // alive (writerCtx not yet cancelled), so the writer can process the + // SessionEnded event and flush it to S2 before tearing down. + captureSession.Stop() + + // Cancel the writer so it exits after draining remaining events. 
+ writerCancel() + + // Drain storage writer, close it (bounded by shutdownCtx), and shut down + // all HTTP servers in parallel so the full 10s budget is available to each. g, _ := errgroup.WithContext(shutdownCtx) + if storageWriter != nil { + g.Go(func() error { + <-storageDone + closeDone := make(chan error, 1) + go func() { closeDone <- storageWriter.Close() }() + select { + case err := <-closeDone: + if err != nil { + slogger.Error("storage writer close failed", "err", err) + } + case <-shutdownCtx.Done(): + slogger.Error("storage writer close timed out, forcing shutdown") + } + return nil + }) + } + g.Go(func() error { return srv.Shutdown(shutdownCtx) }) - g.Go(func() error { - return apiService.Shutdown(shutdownCtx) - }) g.Go(func() error { upstreamMgr.Stop() return srvDevtools.Shutdown(shutdownCtx) @@ -265,10 +319,16 @@ func main() { g.Go(func() error { return srvChromeDriver.Shutdown(shutdownCtx) }) - if err := g.Wait(); err != nil { slogger.Error("server failed to shutdown", "err", err) } + + // Close CaptureSession last, after storage is fully drained. + // apiService.Shutdown calls captureSession.Close, which must come after + // the storage writer has finished consuming events. + if err := apiService.Shutdown(shutdownCtx); err != nil { + slogger.Error("api service failed to shutdown", "err", err) + } } func mustFFmpeg() { diff --git a/server/cmd/config/config.go b/server/cmd/config/config.go index 2fdd4bdb..0fdd57f6 100644 --- a/server/cmd/config/config.go +++ b/server/cmd/config/config.go @@ -35,6 +35,15 @@ type Config struct { // DevTools proxy address passed to ChromeDriver as goog:chromeOptions.debuggerAddress. // If empty, it is derived from DevToolsProxyPort as 127.0.0.1:. DevToolsProxyAddr string `envconfig:"DEVTOOLS_PROXY_ADDR" default:""` + + // S2 durable event storage. Both fields must be non-empty to enable the sink. 
+ S2Basin string `envconfig:"S2_BASIN" default:""` + S2Token string `envconfig:"S2_ACCESS_TOKEN" default:""` + // S2BatcherLingerMs and S2BatcherMaxRecs control the batcher's flush triggers. + // 100ms linger keeps event latency low for near-real-time replay; 50 records + // per batch keeps individual S2 append payloads small. + S2BatcherLingerMs int `envconfig:"S2_BATCHER_LINGER_MS" default:"100"` + S2BatcherMaxRecs int `envconfig:"S2_BATCHER_MAX_RECORDS" default:"50"` } // Load loads configuration from environment variables diff --git a/server/cmd/config/config_test.go b/server/cmd/config/config_test.go index d2b50291..03b35a4f 100644 --- a/server/cmd/config/config_test.go +++ b/server/cmd/config/config_test.go @@ -29,6 +29,8 @@ func TestLoad(t *testing.T) { ChromeDriverProxyPort: 9224, ChromeDriverUpstreamAddr: "127.0.0.1:9225", DevToolsProxyAddr: "127.0.0.1:9222", + S2BatcherLingerMs: 100, + S2BatcherMaxRecs: 50, }, }, { @@ -57,6 +59,8 @@ func TestLoad(t *testing.T) { ChromeDriverProxyPort: 5432, ChromeDriverUpstreamAddr: "127.0.0.1:9999", DevToolsProxyAddr: "127.0.0.1:9876", + S2BatcherLingerMs: 100, + S2BatcherMaxRecs: 50, }, }, { @@ -77,6 +81,8 @@ func TestLoad(t *testing.T) { ChromeDriverProxyPort: 9224, ChromeDriverUpstreamAddr: "127.0.0.1:9225", DevToolsProxyAddr: "10.0.0.1:1234", + S2BatcherLingerMs: 100, + S2BatcherMaxRecs: 50, }, }, { diff --git a/server/go.mod b/server/go.mod index 5b2d7ed8..1e85b105 100644 --- a/server/go.mod +++ b/server/go.mod @@ -21,6 +21,7 @@ require ( github.com/m1k1o/neko/server v0.0.0-20251008185748-46e2fc7d3866 github.com/nrednav/cuid2 v1.1.0 github.com/oapi-codegen/runtime v1.2.0 + github.com/s2-streamstore/s2-sdk-go v0.14.0 github.com/samber/lo v1.52.0 github.com/stretchr/testify v1.11.1 github.com/testcontainers/testcontainers-go v0.40.0 @@ -99,6 +100,7 @@ require ( go.opentelemetry.io/proto/otlp v1.9.0 // indirect golang.org/x/crypto v0.43.0 // indirect golang.org/x/mod v0.28.0 // indirect + golang.org/x/net v0.45.0 // 
indirect golang.org/x/text v0.30.0 // indirect golang.org/x/tools v0.37.0 // indirect google.golang.org/protobuf v1.36.10 // indirect diff --git a/server/go.sum b/server/go.sum index 0f5296e8..aee1b710 100644 --- a/server/go.sum +++ b/server/go.sum @@ -198,6 +198,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/s2-streamstore/s2-sdk-go v0.14.0 h1:YqAqUpyeaf6XBA2gPjIyHhVMOnClhFAd8etckUbmSE4= +github.com/s2-streamstore/s2-sdk-go v0.14.0/go.mod h1:1a+v2sGqU+s5neI8XwqRJz78ktStkR+mZH/JEi9HNSo= github.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw= github.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= diff --git a/server/lib/capturesession/capturesession.go b/server/lib/capturesession/capturesession.go index 9a7fa106..bd27ee47 100644 --- a/server/lib/capturesession/capturesession.go +++ b/server/lib/capturesession/capturesession.go @@ -29,8 +29,8 @@ type CaptureSession struct { } func NewCaptureSession(es *events.EventStream) *CaptureSession { - cats := make(map[events.EventCategory]struct{}, len(events.AllCategories)) - for _, c := range events.AllCategories { + cats := make(map[events.EventCategory]struct{}, len(events.AllCategories())) + for _, c := range events.AllCategories() { cats[c] = struct{}{} } return &CaptureSession{es: es, categories: cats} @@ -47,7 +47,7 @@ func (s *CaptureSession) Start(captureSessionID string, cfg CaptureConfig) { s.createdAt = time.Now() cats := cfg.Categories if len(cats) == 0 { - cats = events.AllCategories + cats = events.AllCategories() } s.categories = 
make(map[events.EventCategory]struct{}, len(cats)) for _, c := range cats { @@ -130,7 +130,7 @@ func (s *CaptureSession) UpdateConfig(cfg CaptureConfig) { defer s.mu.Unlock() cats := cfg.Categories if len(cats) == 0 { - cats = events.AllCategories + cats = events.AllCategories() } s.categories = make(map[events.EventCategory]struct{}, len(cats)) for _, c := range cats { diff --git a/server/lib/cdpmonitor/README.md b/server/lib/cdpmonitor/README.md index 9bf3a86f..817581c6 100644 --- a/server/lib/cdpmonitor/README.md +++ b/server/lib/cdpmonitor/README.md @@ -10,13 +10,13 @@ Chrome can restart independently of the monitor. When that happens, `UpstreamPro ## Event taxonomy -**CDP-derived** (1-to-1 with a CDP notification): `console_log`, `console_error`, `network_request`, `network_response`, `network_loading_failed`, `page_tab_opened`, `page_navigation`, `page_dom_content_loaded`, `page_load`, `page_layout_shift` +**CDP-derived** (1-to-1 with a CDP notification): `console_log`, `console_error`, `network_request`, `network_response`, `network_loading_failed`, `navigation`, `dom_content_loaded`, `page_load`, `layout_shift` -**Computed** (inferred from sequences of CDP events): `network_idle` (fires when in-flight requests drop to zero), `page_layout_settled` (1 s after `page_load` with no intervening layout shifts), `page_navigation_settled` (fires once `page_dom_content_loaded` and `page_layout_settled` have both fired for the same navigation; intentionally independent of `network_idle` so that a single hung request cannot stall the event). +**Computed** (inferred from sequences of CDP events): `network_idle` (fires when in-flight requests drop to zero), `layout_settled` (1 s after `page_load` with no intervening layout shifts), `navigation_settled` (fires once `dom_content_loaded`, `network_idle`, and `layout_settled` have all fired for the same navigation). 
-**Interaction** (fired by `interaction.js` via `Runtime.bindingCalled`): `interaction_click`, `interaction_key`, `interaction_scroll_settled` +**Interaction** (fired by `interaction.js` via `Runtime.bindingCalled`): `interaction_click`, `interaction_key`, `scroll_settled` -**Monitor lifecycle** (emitted by the monitor itself, not by Chrome): `monitor_screenshot`, `monitor_disconnected`, `monitor_reconnected`, `monitor_reconnect_failed`, `monitor_init_failed` +**Monitor lifecycle** (emitted by the monitor itself, not by Chrome): `screenshot`, `monitor_disconnected`, `monitor_reconnected`, `monitor_reconnect_failed`, `monitor_init_failed` ## Responsibilities @@ -57,12 +57,9 @@ Chrome can restart independently of the monitor. When that happens, `UpstreamPro Locks must be acquired left to right. Never hold a lock on the left while acquiring one further right. ``` -restartMu -> lifeMu -> pendReqMu -> computed.mu -> pendMu -restartMu -> lifeMu -> sessionsMu +restartMu -> lifeMu -> pendReqMu -> computed.mu -> pendMu -> sessionsMu ``` -`computed.mu` and `sessionsMu` are never held simultaneously; `cs.stop()` and `cs.resetOnNavigation()` are called only after the relevant `sessionsMu` critical section is complete. - `bindingRateMu` is independent of this ordering and is always acquired alone. 
| Lock | Protects | @@ -70,7 +67,7 @@ restartMu -> lifeMu -> sessionsMu | `restartMu` | Serializes `handleUpstreamRestart` to prevent overlapping reconnects from rapid Chrome restarts | | `lifeMu` | `conn`, `lifecycleCtx`, `cancel`, `done`, `readReady` -- all fields that change during Start / Stop / reconnect | | `pendReqMu` | `pendingRequests` (requestId -> `networkReqState`): in-flight network requests accumulating request/response metadata until `loadingFinished` | -| `computed.mu` | All `computedState` fields: counters and timers for the `network_idle`, `page_layout_settled`, and `page_navigation_settled` state machines | +| `computed.mu` | All `computedState` fields: counters and timers for the `network_idle`, `layout_settled`, and `navigation_settled` state machines | | `pendMu` | `pending` (id -> reply channel): in-flight CDP commands waiting for a response from Chrome | | `sessionsMu` | `sessions` (sessionID -> `targetInfo`): the set of currently attached CDP targets (tabs, iframes, workers) | | `bindingRateMu` | `bindingLastSeen` (sessionID:eventType -> time): rate-limit state for `__kernelEvent` binding calls | @@ -79,143 +76,4 @@ Fields that need no mutex use `sync/atomic`: `nextID`, `mainSessionID`, `running ### WebSocket concurrency -`coder/websocket` guarantees one concurrent `Read` and one concurrent `Write` are safe on the same connection. `readLoop` is the sole reader. All writes go through `send`, which calls `conn.Write` directly -- `conn.Write` is internally serialized by the library, so no external write mutex is needed. - -## Event data model - -### Envelope and top-level fields - -Every event arrives as an `Envelope`: - -```json -{ - "capture_session_id": "cs_abc123", - "seq": 42, - "event": { - "ts": 1746123456789000, - "type": "network_request", - "category": "network", - "source": { ... }, - "data": { ... 
}, - "truncated": false - } -} -``` - -| Field | Type | Description | -| --- | --- | --- | -| `capture_session_id` | string | Pipeline-assigned ID for the capture session (not a CDP concept). | -| `seq` | uint64 | Monotonically increasing per-capture-session sequence number. | -| `event.ts` | int64 | Wall-clock time the monitor emitted the event, as **Unix microseconds** (µs since epoch). | -| `event.type` | string | See [Event taxonomy](#event-taxonomy). | -| `event.category` | string | One of: `console`, `network`, `page`, `interaction`, `system`. | -| `event.truncated` | bool | `true` if `data` was nulled to fit the 1 MB pipeline limit. | - -### Source object - -```json -"source": { - "kind": "cdp", - "event": "Network.requestWillBeSent", - "metadata": { - "cdp_session_id": "...", - "target_id": "...", - "target_type": "page" - } -} -``` - -| Field | Description | -| --- | --- | -| `event` | The raw CDP method that triggered the event (e.g. `Network.requestWillBeSent`). Empty for computed events. | -| `metadata.cdp_session_id` | The CDP WebSocket session multiplexer ID for this target. Changes if Chrome restarts. | -| `metadata.target_id` | Stable identifier for the browser target (tab/window). Survives navigations within the same tab. | -| `metadata.target_type` | Target type as reported by Chrome: `page`, `iframe`, `worker`, etc. | - -### CDP identity primer - -Five IDs appear across events. 
Understanding how they nest prevents confusion: - -``` -target_id <- one per tab/window; stable across navigations -└── cdp_session_id <- WebSocket multiplexer channel to that target; resets on Chrome restart - └── frame_id <- one per frame (top-level or iframe); changes on navigation - └── loader_id <- one per document load; links a navigation to its network requests - └── request_id <- one per request (stable across redirects in a chain) -``` - -| ID | Where it appears | What it identifies | -| --- | --- | --- | -| `target_id` | `source.metadata`, most `data` objects | The browser tab. Use this to group all events from one tab session. | -| `cdp_session_id` | `source.metadata` | The WebSocket sub-channel. Not stable across reconnects. | -| `frame_id` | `page_navigation`, `network_request`, `network_response`, `network_loading_failed` | The frame the request or navigation belongs to. Top-level frame has no `parent_frame_id`. | -| `source_frame_id` | `page_layout_shift` | The frame where the layout shift occurred. Distinct from the nav context `frame_id`, which is always the top-level navigated frame. | -| `loader_id` | `page_navigation`, `network_request`, `network_response` | The document load that owns a request. Join `network_request.loader_id` to `page_navigation.loader_id` to correlate requests with the navigation that triggered them. | -| `request_id` | `network_request`, `network_response`, `network_loading_failed` | A single request chain (including redirects). Links request to its eventual response or failure. | - -### Navigation context fields - -Most event `data` objects include a nav context block stamped at the last `page_navigation`. These fields reflect the top-level frame most recently navigated in the session: - -| Field | Description | -| --- | --- | -| `session_id` | Same as `source.metadata.cdp_session_id`. Repeated for data-only consumers. | -| `frame_id` | Frame ID of the navigated top-level frame. 
| -| `loader_id` | Loader ID of the current document. | -| `url` | URL of the current page at the time of the last navigation. | -| `nav_seq` | Monotonically increasing counter, incremented on each `page_navigation`. Use it to detect that the page has navigated between two events in the same session. | - -### Per-event data fields - -Fields below are the unique additions per event type. Unless otherwise noted, events also include the nav context fields described above. Network events are the exception: they carry their own `loader_id` and `frame_id` directly and do not include nav context. - -#### Console events - -| Event | Unique fields | -| --- | --- | -| `console_log` | `level` (CDP type string), `text` (first arg), `args` (all args as strings), `stack_trace` | -| `console_error` | Same as `console_log` when `source.event` is `Runtime.consoleAPICalled`. When `source.event` is `Runtime.exceptionThrown`: `text`, `line`, `column`, `source_url` (script file URL, not page URL), `stack_trace`. | - -#### Network events - -| Event | Fields | -| --- | --- | -| `network_request` | `request_id`, `loader_id`, `frame_id`, `document_url`, `method`, `url`, `headers`, `initiator_type`. Optional: `post_data`, `resource_type`, `is_redirect` + `redirect_url`. | -| `network_response` | `request_id`, `loader_id`, `frame_id`, `method`, `url`, `status`, `headers`. Optional: `status_text`, `mime_type`, `resource_type`, `body` (truncated text body for textual MIME types). | -| `network_loading_failed` | `request_id`, `error_text`, `canceled`. Optional (absent when the request record was not found): `url`, `loader_id`, `frame_id`, `resource_type`. | - -#### Page events - -| Event | Unique fields | -| --- | --- | -| `page_tab_opened` | `target_id`, `target_type`, `url`, `opener_id`, `title`. Emitted before the first navigation; no nav context. 
| -| `page_navigation` | `session_id`, `target_id`, `target_type`, `url`, `frame_id`, `parent_frame_id` (absent for top-level frames), `loader_id`. This event establishes the nav context stamped on all subsequent events for the session. | -| `page_dom_content_loaded` | Nav context + `cdp_timestamp` (CDP monotonic seconds; not a wall-clock timestamp -- use `event.ts` for ordering). | -| `page_load` | Nav context + `cdp_timestamp` (CDP monotonic seconds). | -| `page_layout_shift` | Nav context + `source_frame_id`, `time`, `duration`. Optional `layout_shift_details` object: `value`, `had_recent_input`. Optional `lcp_details` object: `render_time`, `load_time`, `size`, `element_id`, `url`, `node_id`. Chrome multiplexes LCP candidate data through the same `PerformanceTimeline.timelineEventAdded` notification, so both may appear on a single event. | - -#### Computed events - -`network_idle`, `page_layout_settled`, and `page_navigation_settled` carry nav context fields only. - -#### Interaction events - -All interaction events include nav context plus the fields below. - -| Event | Unique fields | -| --- | --- | -| `interaction_click` | `x`, `y` (viewport coords), `selector` (CSS selector of clicked element), `tag`, `text` (element text; empty for sensitive inputs). | -| `interaction_key` | `key` (key name), `selector`, `tag`. Not emitted for sensitive input fields. | -| `interaction_scroll_settled` | `from_x`, `from_y`, `to_x`, `to_y` (scroll positions in px), `target_selector`. | - -#### Monitor lifecycle events - -Lifecycle events use `source.kind = "local_process"` and carry no nav context, except `monitor_screenshot` which includes nav context alongside the image payload. - -| Event | Fields | -| --- | --- | -| `monitor_screenshot` | Nav context + `png` (base64-encoded PNG). | -| `monitor_disconnected` | `reason: "chrome_restarted"`. | -| `monitor_reconnected` | `reconnect_duration_ms`. | -| `monitor_reconnect_failed` | `reason: "reconnect_exhausted"`. 
| -| `monitor_init_failed` | `step` (name of the init step that failed, e.g. `"Target.setAutoAttach"`). | \ No newline at end of file +`coder/websocket` guarantees one concurrent `Read` and one concurrent `Write` are safe on the same connection. `readLoop` is the sole reader. All writes go through `send`, which calls `conn.Write` directly -- `conn.Write` is internally serialized by the library, so no external write mutex is needed. \ No newline at end of file diff --git a/server/lib/cdpmonitor/cdp_proto.go b/server/lib/cdpmonitor/cdp_proto.go index 4afe57c5..eb208617 100644 --- a/server/lib/cdpmonitor/cdp_proto.go +++ b/server/lib/cdpmonitor/cdp_proto.go @@ -239,22 +239,6 @@ type cdpPerformanceTimelineEventAddedParams struct { Event cdpPerformanceTimelineEvent `json:"event"` } -// cdpLayoutShiftDetails mirrors PerformanceTimeline.LayoutShiftDetails (PDL wire format). -type cdpLayoutShiftDetails struct { - Value float64 `json:"value"` - HadRecentInput bool `json:"hadRecentInput"` -} - -// cdpLcpDetails mirrors PerformanceTimeline.LargestContentfulPaintDetails (PDL wire format). -type cdpLcpDetails struct { - RenderTime float64 `json:"renderTime"` - LoadTime float64 `json:"loadTime"` - Size float64 `json:"size"` - ElementID string `json:"elementId,omitempty"` - URL string `json:"url,omitempty"` - NodeID int `json:"nodeId,omitempty"` -} - // --- Target domain --- // cdpTargetTargetInfo mirrors Target.TargetInfo. diff --git a/server/lib/cdpmonitor/cdp_test.go b/server/lib/cdpmonitor/cdp_test.go index 91ade37e..a22a1c85 100644 --- a/server/lib/cdpmonitor/cdp_test.go +++ b/server/lib/cdpmonitor/cdp_test.go @@ -345,15 +345,7 @@ func newComputedMonitor(t *testing.T) (*Monitor, *eventCollector) { } // navigateMonitor sends a Page.frameNavigated to reset computed state. -// It ensures session "s1" has a page-like computedState before navigating, -// mirroring what handleAttachedToTarget would do in production. 
func navigateMonitor(m *Monitor, url string) { - m.sessionsMu.Lock() - if _, ok := m.sessions["s1"]; !ok { - m.sessions["s1"] = targetInfo{targetID: "test-target", targetType: targetTypePage} - m.computedStates["s1"] = newComputedState(m.publish) - } - m.sessionsMu.Unlock() m.handleFrameNavigated(cdpPageFrameNavigatedParams{ Frame: cdpPageFrame{ID: "f1", URL: url}, }, "s1") @@ -374,7 +366,7 @@ func simulateRequest(m *Monitor, id string) { // simulateFinished stores minimal state and sends Network.loadingFinished. func simulateFinished(m *Monitor, id string) { m.pendReqMu.Lock() - m.pendingRequests[id] = networkReqState{sessionID: "s1", method: "GET", url: "https://example.com/" + id} + m.pendingRequests[id] = networkReqState{method: "GET", url: "https://example.com/" + id} m.pendReqMu.Unlock() m.handleLoadingFinished(context.Background(), cdpNetworkLoadingFinishedParams{RequestID: id}, "s1") } diff --git a/server/lib/cdpmonitor/computed.go b/server/lib/cdpmonitor/computed.go index f18b8bb6..ea2714fc 100644 --- a/server/lib/cdpmonitor/computed.go +++ b/server/lib/cdpmonitor/computed.go @@ -2,7 +2,6 @@ package cdpmonitor import ( "encoding/json" - "maps" "sync" "time" @@ -24,23 +23,11 @@ type computedState struct { mu sync.Mutex publish PublishFunc - // dead is set by stop(). Timer callbacks check it under mu and bail, - // preventing orphaned events after a session is detached or cleared. - dead bool - // navSeq is incremented on every resetOnNavigation. AfterFunc callbacks // capture their navSeq at creation and bail if it has changed, preventing // stale timers from publishing events for a previous navigation. navSeq int - // navCtx is the navigation identity stamped at the last Page.frameNavigated. - // navData and navMeta are its precomputed JSON payload and Source.Metadata. - // Maps are replaced (not mutated) on each reset, so in-flight events holding - // a pointer to old navMeta are safe. 
- navCtx navContext - navData json.RawMessage - navMeta map[string]string - // network_idle: 500 ms debounce after all pending requests finish. netPending int netTimer *time.Timer @@ -51,45 +38,17 @@ type computedState struct { layoutFired bool pageLoadSeen bool - // navigation_settled: fires once dom_content_loaded and layout_settled have - // both fired after the same Page.frameNavigated. Decoupled from network_idle + // navigation_settled: fires once dom_content_loaded, network_idle, and + // layout_settled have all fired after the same Page.frameNavigated. navDOMLoaded bool + navNetIdle bool navLayoutSettled bool navFired bool } // newComputedState creates a fresh computedState backed by the given publish func. -// navData is initialized to {} and navMeta to an empty map so events emitted -// before the first frameNavigated carry consistent empty payloads rather than null. func newComputedState(publish PublishFunc) *computedState { - return &computedState{ - publish: publish, - navData: json.RawMessage(`{}`), - navMeta: make(map[string]string), - } -} - -// navSnapshot returns the precomputed nav payload and metadata under mu. -func (s *computedState) navSnapshot() (json.RawMessage, map[string]string) { - s.mu.Lock() - defer s.mu.Unlock() - return s.navData, s.navMeta -} - -// navDataWith merges extra fields into the current nav payload. -// Nav context fields (session_id, target_id, etc.) always take precedence over -// caller-supplied extra so a page-controlled payload cannot forge nav identity. 
-func (s *computedState) navDataWith(extra map[string]any) json.RawMessage { - result := make(map[string]any) - maps.Copy(result, extra) - if s != nil { - d, _ := s.navSnapshot() - base := make(map[string]any) - _ = json.Unmarshal(d, &base) - maps.Copy(result, base) - } - out, _ := json.Marshal(result) - return out + return &computedState{publish: publish} } func stopTimer(t *time.Timer) { @@ -104,45 +63,17 @@ func stopTimer(t *time.Timer) { } } -// stop marks the state machine dead and cancels pending timers. Called when the -// owning session detaches or the monitor reconnects. Any AfterFunc goroutine -// already running will check dead under mu and discard its result. -func (s *computedState) stop() { - s.mu.Lock() - s.dead = true - stopTimer(s.netTimer) - stopTimer(s.layoutTimer) - s.mu.Unlock() -} - // resetOnNavigation resets all state machines. Called on Page.frameNavigated. // Increments navSeq so any AfterFunc callbacks already running will discard their results. -// inflight seeds netPending; callers pass 0 because each session only tracks its -// own requests and starts fresh on navigation. -func (s *computedState) resetOnNavigation(inflight int, ctx navContext) error { +// inflight is the number of in-flight requests from other sessions +// (e.g. subframes) that were not cleared by the navigation; netPending is set +// to this value instead of zero so that their eventual loadingFinished events +// decrement correctly. 
+func (s *computedState) resetOnNavigation(inflight int) { s.mu.Lock() defer s.mu.Unlock() s.navSeq++ - s.navCtx = ctx - navData, err := json.Marshal(map[string]any{ - "session_id": ctx.sessionID, - "target_id": ctx.targetID, - "target_type": ctx.targetType, - "frame_id": ctx.frameID, - "loader_id": ctx.loaderID, - "url": ctx.url, - "nav_seq": s.navSeq, - }) - if err != nil { - return err - } - s.navData = navData - s.navMeta = map[string]string{ - MetadataKeyCDPSessionID: ctx.sessionID, - MetadataKeyTargetID: ctx.targetID, - MetadataKeyTargetType: ctx.targetType, - } stopTimer(s.netTimer) s.netTimer = nil @@ -158,17 +89,14 @@ func (s *computedState) resetOnNavigation(inflight int, ctx navContext) error { s.pageLoadSeen = false s.navDOMLoaded = false + s.navNetIdle = false s.navLayoutSettled = false s.navFired = false - return nil } func (s *computedState) onRequest() { s.mu.Lock() defer s.mu.Unlock() - if s.dead { - return - } s.netPending++ // A new request invalidates any pending network_idle timer stopTimer(s.netTimer) @@ -179,9 +107,6 @@ func (s *computedState) onRequest() { func (s *computedState) onLoadingFinished() { s.mu.Lock() defer s.mu.Unlock() - if s.dead { - return - } s.netPending-- if s.netPending < 0 { @@ -198,31 +123,28 @@ func (s *computedState) onLoadingFinished() { // startNetIdleTimer arms the network_idle debounce timer. Must be called with s.mu held. 
func (s *computedState) startNetIdleTimer() { - if s.dead { - return - } stopTimer(s.netTimer) navSeq := s.navSeq - navData := s.navData - navMeta := s.navMeta s.netTimer = time.AfterFunc(networkIdleDebounce, func() { s.mu.Lock() - if s.dead || s.navSeq != navSeq || s.netFired || s.netPending > 0 { + if s.navSeq != navSeq || s.netFired || s.netPending > 0 { s.mu.Unlock() return } s.netFired = true - s.mu.Unlock() - s.publish(events.Event{ + s.navNetIdle = true + evs := []events.Event{{ Ts: time.Now().UnixMicro(), Type: EventNetworkIdle, Category: events.CategoryNetwork, - Source: events.Source{ - Kind: events.KindCDP, - Metadata: navMeta, - }, - Data: navData, - }) + Source: events.Source{Kind: events.KindCDP}, + Data: json.RawMessage(`{}`), + }} + evs = append(evs, s.pendingNavigationSettled()...) + s.mu.Unlock() + for _, ev := range evs { + s.publish(ev) + } }) } @@ -230,9 +152,6 @@ func (s *computedState) startNetIdleTimer() { func (s *computedState) onPageLoad() { s.mu.Lock() defer s.mu.Unlock() - if s.dead { - return - } s.pageLoadSeen = true if s.layoutFired { return @@ -247,7 +166,7 @@ func (s *computedState) onPageLoad() { func (s *computedState) onLayoutShift() { s.mu.Lock() defer s.mu.Unlock() - if s.dead || s.layoutFired || !s.pageLoadSeen { + if s.layoutFired || !s.pageLoadSeen { return } // Reset the timer to 1s from now. @@ -259,23 +178,18 @@ func (s *computedState) onLayoutShift() { // emitLayoutSettled is called from the layout timer's AfterFunc goroutine. 
func (s *computedState) emitLayoutSettled(navSeq int) { s.mu.Lock() - if s.dead || s.navSeq != navSeq || s.layoutFired || !s.pageLoadSeen { + if s.navSeq != navSeq || s.layoutFired || !s.pageLoadSeen { s.mu.Unlock() return } s.layoutFired = true s.navLayoutSettled = true - navData := s.navData - navMeta := s.navMeta evs := []events.Event{{ Ts: time.Now().UnixMicro(), Type: EventLayoutSettled, Category: events.CategoryPage, - Source: events.Source{ - Kind: events.KindCDP, - Metadata: navMeta, - }, - Data: navData, + Source: events.Source{Kind: events.KindCDP}, + Data: json.RawMessage(`{}`), }} evs = append(evs, s.pendingNavigationSettled()...) s.mu.Unlock() @@ -295,23 +209,17 @@ func (s *computedState) onDOMContentLoaded() { } } -// pendingNavigationSettled returns a navigation_settled event if both +// pendingNavigationSettled returns a navigation_settled event if all three // conditions are met. Must be called with s.mu held. func (s *computedState) pendingNavigationSettled() []events.Event { - if s.dead { - return nil - } - if s.navDOMLoaded && s.navLayoutSettled && !s.navFired { + if s.navDOMLoaded && s.navNetIdle && s.navLayoutSettled && !s.navFired { s.navFired = true return []events.Event{{ Ts: time.Now().UnixMicro(), Type: EventNavigationSettled, Category: events.CategoryPage, - Source: events.Source{ - Kind: events.KindCDP, - Metadata: s.navMeta, - }, - Data: s.navData, + Source: events.Source{Kind: events.KindCDP}, + Data: json.RawMessage(`{}`), }} } return nil diff --git a/server/lib/cdpmonitor/computed_test.go b/server/lib/cdpmonitor/computed_test.go index e2ec519e..64295a83 100644 --- a/server/lib/cdpmonitor/computed_test.go +++ b/server/lib/cdpmonitor/computed_test.go @@ -1,13 +1,11 @@ package cdpmonitor import ( - "encoding/json" "testing" "time" "github.com/kernel/kernel-images/server/lib/events" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // newTestComputed creates a computedState with an eventCollector for 
testing. @@ -21,7 +19,7 @@ func newTestComputed(t *testing.T) (*computedState, *eventCollector) { func TestNetworkIdle(t *testing.T) { t.Run("debounce_500ms", func(t *testing.T) { cs, ec := newTestComputed(t) - require.NoError(t, cs.resetOnNavigation(0, navContext{})) + cs.resetOnNavigation(0) cs.onRequest() cs.onRequest() @@ -39,7 +37,7 @@ func TestNetworkIdle(t *testing.T) { t.Run("timer_reset_on_new_request", func(t *testing.T) { cs, ec := newTestComputed(t) - require.NoError(t, cs.resetOnNavigation(0, navContext{})) + cs.resetOnNavigation(0) cs.onRequest() cs.onLoadingFinished() @@ -57,19 +55,19 @@ func TestNetworkIdle(t *testing.T) { func TestLayoutSettled(t *testing.T) { t.Run("debounce_1s_after_page_load", func(t *testing.T) { cs, ec := newTestComputed(t) - require.NoError(t, cs.resetOnNavigation(0, navContext{})) + cs.resetOnNavigation(0) t0 := time.Now() cs.onPageLoad() - ev := ec.waitFor(t, "page_layout_settled", 3*time.Second) + ev := ec.waitFor(t, "layout_settled", 3*time.Second) assert.GreaterOrEqual(t, time.Since(t0).Milliseconds(), int64(900), "fired too early") assert.Equal(t, events.CategoryPage, ev.Category) }) t.Run("layout_shift_before_page_load_ignored", func(t *testing.T) { cs, ec := newTestComputed(t) - require.NoError(t, cs.resetOnNavigation(0, navContext{})) + cs.resetOnNavigation(0) // layout_shift before page_load should be ignored; layout_settled must // still fire after page_load's 1s debounce. 
@@ -77,121 +75,49 @@ func TestLayoutSettled(t *testing.T) { t0 := time.Now() cs.onPageLoad() - ec.waitFor(t, "page_layout_settled", 3*time.Second) + ec.waitFor(t, "layout_settled", 3*time.Second) assert.GreaterOrEqual(t, time.Since(t0).Milliseconds(), int64(900), "should fire 1s after page_load, not layout_shift") }) t.Run("layout_shift_resets_timer", func(t *testing.T) { cs, ec := newTestComputed(t) - require.NoError(t, cs.resetOnNavigation(0, navContext{})) + cs.resetOnNavigation(0) cs.onPageLoad() time.Sleep(600 * time.Millisecond) cs.onLayoutShift() t1 := time.Now() - ec.waitFor(t, "page_layout_settled", 3*time.Second) + ec.waitFor(t, "layout_settled", 3*time.Second) assert.GreaterOrEqual(t, time.Since(t1).Milliseconds(), int64(900), "should reset after layout_shift") }) } func TestNavigationSettled(t *testing.T) { - t.Run("fires_after_dom_content_loaded_and_layout_settled", func(t *testing.T) { + t.Run("fires_when_all_three_flags_set", func(t *testing.T) { cs, ec := newTestComputed(t) - require.NoError(t, cs.resetOnNavigation(0, navContext{})) + cs.resetOnNavigation(0) cs.onDOMContentLoaded() + cs.onRequest() + cs.onLoadingFinished() cs.onPageLoad() - ev := ec.waitFor(t, "page_navigation_settled", 3*time.Second) + ev := ec.waitFor(t, "navigation_settled", 3*time.Second) assert.Equal(t, events.CategoryPage, ev.Category) }) - t.Run("not_blocked_by_pending_network_request", func(t *testing.T) { - cs, ec := newTestComputed(t) - require.NoError(t, cs.resetOnNavigation(0, navContext{})) - - cs.onRequest() // never finishes - cs.onDOMContentLoaded() - cs.onPageLoad() - - ec.waitFor(t, "page_navigation_settled", 3*time.Second) - ec.assertNone(t, "network_idle", 100*time.Millisecond) - }) - t.Run("interrupted_by_new_navigation", func(t *testing.T) { cs, ec := newTestComputed(t) - require.NoError(t, cs.resetOnNavigation(0, navContext{})) - - cs.onDOMContentLoaded() - // page_load not yet fired so layout_settled is still pending. 
- - require.NoError(t, cs.resetOnNavigation(0, navContext{})) - - ec.assertNone(t, "page_navigation_settled", 1500*time.Millisecond) - }) -} - -func TestNavDataMetadata(t *testing.T) { - ctx := navContext{ - sessionID: "s1", - targetID: "t1", - targetType: "page", - frameID: "f1", - loaderID: "l1", - url: "https://example.com", - } - - t.Run("layout_settled_carries_navData_and_navMeta", func(t *testing.T) { - cs, ec := newTestComputed(t) - require.NoError(t, cs.resetOnNavigation(0, ctx)) - cs.onPageLoad() - - ev := ec.waitFor(t, "page_layout_settled", 3*time.Second) - assert.Equal(t, events.CategoryPage, ev.Category) - assert.Equal(t, "s1", ev.Source.Metadata[MetadataKeyCDPSessionID]) - assert.Equal(t, "t1", ev.Source.Metadata[MetadataKeyTargetID]) - assert.Equal(t, "page", ev.Source.Metadata[MetadataKeyTargetType]) - var data map[string]any - require.NoError(t, json.Unmarshal(ev.Data, &data)) - assert.Equal(t, "s1", data["session_id"]) - assert.Equal(t, "l1", data["loader_id"]) - assert.Equal(t, "https://example.com", data["url"]) - }) - - t.Run("navigation_settled_carries_navData_and_navMeta", func(t *testing.T) { - cs, ec := newTestComputed(t) - require.NoError(t, cs.resetOnNavigation(0, ctx)) + cs.resetOnNavigation(0) cs.onDOMContentLoaded() - cs.onPageLoad() - - ev := ec.waitFor(t, "page_navigation_settled", 3*time.Second) - assert.Equal(t, events.CategoryPage, ev.Category) - assert.Equal(t, "s1", ev.Source.Metadata[MetadataKeyCDPSessionID]) - assert.Equal(t, "t1", ev.Source.Metadata[MetadataKeyTargetID]) - var data map[string]any - require.NoError(t, json.Unmarshal(ev.Data, &data)) - assert.Equal(t, "s1", data["session_id"]) - assert.Equal(t, "l1", data["loader_id"]) - }) -} - -func TestStopSuppressesTimers(t *testing.T) { - t.Run("stop_suppresses_network_idle", func(t *testing.T) { - cs, ec := newTestComputed(t) - require.NoError(t, cs.resetOnNavigation(0, navContext{})) cs.onRequest() - cs.onLoadingFinished() // arms 500ms network_idle timer - cs.stop() - 
ec.assertNone(t, "network_idle", 1200*time.Millisecond) - }) + cs.onLoadingFinished() - t.Run("stop_suppresses_layout_settled", func(t *testing.T) { - cs, ec := newTestComputed(t) - require.NoError(t, cs.resetOnNavigation(0, navContext{})) - cs.onPageLoad() // arms 1s layout_settled timer - cs.stop() - ec.assertNone(t, "page_layout_settled", 1500*time.Millisecond) + // Interrupt before layout_settled fires. + cs.resetOnNavigation(0) + + ec.assertNone(t, "navigation_settled", 1500*time.Millisecond) }) } diff --git a/server/lib/cdpmonitor/domains.go b/server/lib/cdpmonitor/domains.go index 8502c59e..aef74f63 100644 --- a/server/lib/cdpmonitor/domains.go +++ b/server/lib/cdpmonitor/domains.go @@ -45,7 +45,7 @@ func (m *Monitor) enableDomains(ctx context.Context, sessionID string, targetTyp } if _, err := m.send(ctx, "PerformanceTimeline.enable", map[string]any{ - "eventTypes": []string{timelineEventLayoutShift, timelineEventLCP}, + "eventTypes": []string{timelineEventLayoutShift}, }, sessionID); err != nil && ctx.Err() == nil { m.log.Warn("cdpmonitor: failed to enable PerformanceTimeline", "session", sessionID, "err", err) } diff --git a/server/lib/cdpmonitor/handlers.go b/server/lib/cdpmonitor/handlers.go index 15bca908..0f64e950 100644 --- a/server/lib/cdpmonitor/handlers.go +++ b/server/lib/cdpmonitor/handlers.go @@ -25,11 +25,6 @@ func (m *Monitor) publishEvent(eventType string, category events.EventCategory, src.Metadata = make(map[string]string) } src.Metadata[MetadataKeyCDPSessionID] = sessionID - m.sessionsMu.RLock() - info := m.sessions[sessionID] - m.sessionsMu.RUnlock() - src.Metadata[MetadataKeyTargetID] = info.targetID - src.Metadata[MetadataKeyTargetType] = info.targetType } m.publish(events.Event{ Ts: time.Now().UnixMicro(), @@ -134,32 +129,29 @@ func (m *Monitor) handleConsole(p cdpRuntimeConsoleAPICalledParams, sessionID st for _, a := range p.Args { argValues = append(argValues, consoleArgString(a)) } - eventType := EventConsoleLog - if p.Type == 
"error" { - eventType = EventConsoleError - } - cs := m.computedFor(sessionID) - data := cs.navDataWith(map[string]any{ + data, _ := json.Marshal(map[string]any{ "level": p.Type, "text": text, "args": argValues, "stack_trace": p.StackTrace, }) + eventType := EventConsoleLog + if p.Type == "error" { + eventType = EventConsoleError + } m.publishEvent(eventType, events.CategoryConsole, events.Source{Kind: events.KindCDP}, "Runtime.consoleAPICalled", data, sessionID) } func (m *Monitor) handleExceptionThrown(ctx context.Context, p cdpRuntimeExceptionThrownParams, sessionID string) { - cs := m.computedFor(sessionID) - // source_url is the script file URL; distinct from nav context's url (the page URL). - data := cs.navDataWith(map[string]any{ + data, _ := json.Marshal(map[string]any{ "text": p.ExceptionDetails.Text, "line": p.ExceptionDetails.LineNumber, "column": p.ExceptionDetails.ColumnNumber, - "source_url": p.ExceptionDetails.URL, + "url": p.ExceptionDetails.URL, "stack_trace": p.ExceptionDetails.StackTrace, }) m.publishEvent(EventConsoleError, events.CategoryConsole, events.Source{Kind: events.KindCDP}, "Runtime.exceptionThrown", data, sessionID) - m.tryScreenshot(ctx, "Runtime.exceptionThrown", sessionID) + m.tryScreenshot(ctx) } // bindingMinInterval is the minimum time between accepted __kernelEvent binding @@ -202,57 +194,17 @@ func (m *Monitor) handleBindingCalled(p cdpRuntimeBindingCalledParams, sessionID m.bindingLastSeen[rateKey] = now m.bindingRateMu.Unlock() - var payloadMap map[string]any - _ = json.Unmarshal(payload, &payloadMap) - cs := m.computedFor(sessionID) - m.publishEvent(header.Type, events.CategoryInteraction, events.Source{Kind: events.KindCDP}, "Runtime.bindingCalled", cs.navDataWith(payloadMap), sessionID) + m.publishEvent(header.Type, events.CategoryInteraction, events.Source{Kind: events.KindCDP}, "Runtime.bindingCalled", payload, sessionID) } -// handleTimelineEvent processes PerformanceTimeline layout-shift and LCP events. 
+// handleTimelineEvent processes PerformanceTimeline layout-shift events. func (m *Monitor) handleTimelineEvent(p cdpPerformanceTimelineEventAddedParams, sessionID string) { - switch p.Event.Type { - case timelineEventLayoutShift: - // source_frame_id is the frame where the shift occurred; distinct from nav - // context's frame_id (the top-level navigated frame). - ev := map[string]any{ - "source_frame_id": p.Event.FrameID, - "time": p.Event.Time, - "duration": p.Event.Duration, - } - var shift cdpLayoutShiftDetails - if p.Event.LayoutShiftDetails != nil && json.Unmarshal(p.Event.LayoutShiftDetails, &shift) == nil { - ev["layout_shift_details"] = map[string]any{ - "value": shift.Value, - "had_recent_input": shift.HadRecentInput, - } - } - cs := m.computedFor(sessionID) - data := cs.navDataWith(ev) - m.publishEvent(EventLayoutShift, events.CategoryPage, events.Source{Kind: events.KindCDP}, "PerformanceTimeline.timelineEventAdded", data, sessionID) - if cs != nil { - cs.onLayoutShift() - } - - case timelineEventLCP: - ev := map[string]any{ - "source_frame_id": p.Event.FrameID, - "time": p.Event.Time, - } - var lcp cdpLcpDetails - if p.Event.LcpDetails != nil && json.Unmarshal(p.Event.LcpDetails, &lcp) == nil { - ev["lcp_details"] = map[string]any{ - "render_time": lcp.RenderTime, - "load_time": lcp.LoadTime, - "size": lcp.Size, - "element_id": lcp.ElementID, - "url": lcp.URL, - "node_id": lcp.NodeID, - } - } - cs := m.computedFor(sessionID) - data := cs.navDataWith(ev) - m.publishEvent(EventLCP, events.CategoryPage, events.Source{Kind: events.KindCDP}, "PerformanceTimeline.timelineEventAdded", data, sessionID) + if p.Event.Type != timelineEventLayoutShift { + return } + data, _ := json.Marshal(p) + m.publishEvent(EventLayoutShift, events.CategoryPage, events.Source{Kind: events.KindCDP}, "PerformanceTimeline.timelineEventAdded", data, sessionID) + m.computed.onLayoutShift() } // handleNetworkRequest publishes network_request events. 
@@ -287,16 +239,10 @@ func (m *Monitor) handleNetworkRequest(p cdpNetworkRequestWillBeSentParams, sess headers: p.Request.Headers, postData: p.Request.PostData, resourceType: p.Type, - loaderID: p.LoaderID, - frameID: p.FrameID, addedAt: addedAt, } m.pendReqMu.Unlock() ev := map[string]any{ - "request_id": p.RequestID, - "loader_id": p.LoaderID, - "frame_id": p.FrameID, - "document_url": p.DocumentURL, "method": p.Request.Method, "url": p.Request.URL, "headers": p.Request.Headers, @@ -308,16 +254,10 @@ func (m *Monitor) handleNetworkRequest(p cdpNetworkRequestWillBeSentParams, sess if p.Type != "" { ev["resource_type"] = p.Type } - if isRedirect { - ev["is_redirect"] = true - ev["redirect_url"] = existing.url - } data, _ := json.Marshal(ev) m.publishEvent(EventNetworkRequest, events.CategoryNetwork, events.Source{Kind: events.KindCDP}, "Network.requestWillBeSent", data, sessionID) if !isRedirect { - if cs := m.computedFor(sessionID); cs != nil { - cs.onRequest() - } + m.computed.onRequest() } } @@ -343,20 +283,15 @@ func (m *Monitor) handleLoadingFinished(ctx context.Context, p cdpNetworkLoading if !ok { return } - if cs := m.computedFor(state.sessionID); cs != nil { - cs.onLoadingFinished() - } + m.computed.onLoadingFinished() // Fetch response body async to avoid blocking readLoop; binary types are skipped. 
m.asyncWg.Go(func() { body := m.fetchResponseBody(ctx, p.RequestID, sessionID, state) ev := map[string]any{ - "request_id": p.RequestID, - "loader_id": state.loaderID, - "frame_id": state.frameID, - "method": state.method, - "url": state.url, - "status": state.status, - "headers": state.resHeaders, + "method": state.method, + "url": state.url, + "status": state.status, + "headers": state.resHeaders, } if state.statusText != "" { ev["status_text"] = state.statusText @@ -413,41 +348,24 @@ func (m *Monitor) handleLoadingFailed(p cdpNetworkLoadingFailedParams, sessionID m.pendReqMu.Unlock() ev := map[string]any{ - "request_id": p.RequestID, "error_text": p.ErrorText, "canceled": p.Canceled, } if ok { ev["url"] = state.url - ev["loader_id"] = state.loaderID - ev["frame_id"] = state.frameID - ev["resource_type"] = state.resourceType } data, _ := json.Marshal(ev) m.publishEvent(EventNetworkLoadingFailed, events.CategoryNetwork, events.Source{Kind: events.KindCDP}, "Network.loadingFailed", data, sessionID) if ok { - if cs := m.computedFor(state.sessionID); cs != nil { - cs.onLoadingFinished() - } + m.computed.onLoadingFinished() } } func (m *Monitor) handleFrameNavigated(p cdpPageFrameNavigatedParams, sessionID string) { - // Pre-fetch target info and computedState before acquiring pendReqMu to - // avoid a pendReqMu → sessionsMu ordering cycle. - m.sessionsMu.RLock() - info := m.sessions[sessionID] - cs := m.computedStates[sessionID] - m.sessionsMu.RUnlock() - data, _ := json.Marshal(map[string]any{ - "session_id": sessionID, - "target_id": info.targetID, - "target_type": info.targetType, "url": p.Frame.URL, "frame_id": p.Frame.ID, "parent_frame_id": p.Frame.ParentID, - "loader_id": p.Frame.LoaderID, }) m.publishEvent(EventNavigation, events.CategoryPage, events.Source{Kind: events.KindCDP}, "Page.frameNavigated", data, sessionID) @@ -455,54 +373,36 @@ func (m *Monitor) handleFrameNavigated(p cdpPageFrameNavigatedParams, sessionID // should not disrupt main-page tracking. 
if p.Frame.ParentID == "" { m.mainSessionID.Store(sessionID) - - navCtx := navContext{ - sessionID: sessionID, - targetID: info.targetID, - targetType: info.targetType, - frameID: p.Frame.ID, - loaderID: p.Frame.LoaderID, - url: p.Frame.URL, - } - m.pendReqMu.Lock() for id, req := range m.pendingRequests { if req.sessionID == sessionID { delete(m.pendingRequests, id) } } - // Reset while holding pendReqMu so new requests arriving concurrently - // can't increment netPending before the reset completes. - // inflight=0: remaining pendingRequests belong to other target sessions; - // their loadingFinished events decrement those sessions' own state machines, - // not this one, so we start fresh. - if cs != nil { - if err := cs.resetOnNavigation(0, navCtx); err != nil { - m.log.Error("cdpmonitor: failed to build nav event payload", "err", err) - } - } + inflight := len(m.pendingRequests) + // Reset computed state while still holding pendReqMu so new requests + // arriving concurrently can't increment netPending before the reset. + m.computed.resetOnNavigation(inflight) m.pendReqMu.Unlock() } } func (m *Monitor) handleDOMContentLoaded(p cdpPageDomContentEventFiredParams, sessionID string) { - cs := m.computedFor(sessionID) - data := cs.navDataWith(map[string]any{"cdp_timestamp": p.Timestamp}) + data, _ := json.Marshal(p) m.publishEvent(EventDOMContentLoaded, events.CategoryPage, events.Source{Kind: events.KindCDP}, "Page.domContentEventFired", data, sessionID) - if cs != nil { - cs.onDOMContentLoaded() + // Only advance the state machine for the main frame; subframe events arrive + // on their own sessionId and would trigger navigation_settled prematurely. 
+ if m.mainSessionID.Load() == sessionID { + m.computed.onDOMContentLoaded() } } func (m *Monitor) handleLoadEventFired(ctx context.Context, p cdpPageLoadEventFiredParams, sessionID string) { - cs := m.computedFor(sessionID) - data := cs.navDataWith(map[string]any{"cdp_timestamp": p.Timestamp}) + data, _ := json.Marshal(p) m.publishEvent(EventPageLoad, events.CategoryPage, events.Source{Kind: events.KindCDP}, "Page.loadEventFired", data, sessionID) - if cs != nil { - cs.onPageLoad() - } if m.mainSessionID.Load() == sessionID { - m.tryScreenshot(ctx, "Page.loadEventFired", sessionID) + m.computed.onPageLoad() + m.tryScreenshot(ctx) } } @@ -516,22 +416,8 @@ func (m *Monitor) handleAttachedToTarget(ctx context.Context, p cdpTargetAttache url: p.TargetInfo.URL, targetType: p.TargetInfo.Type, } - if p.TargetInfo.Type == targetTypePage { - m.computedStates[p.SessionID] = newComputedState(m.publish) - } m.sessionsMu.Unlock() - if p.TargetInfo.Type == targetTypePage { - data, _ := json.Marshal(map[string]any{ - "target_id": p.TargetInfo.TargetID, - "target_type": p.TargetInfo.Type, - "url": p.TargetInfo.URL, - "opener_id": p.TargetInfo.OpenerID, - "title": p.TargetInfo.Title, - }) - m.publishEvent(EventTabOpened, events.CategoryPage, events.Source{Kind: events.KindCDP}, "Target.attachedToTarget", data, p.SessionID) - } - targetType := p.TargetInfo.Type // Async to avoid blocking the readLoop. 
m.asyncWg.Go(func() { @@ -547,11 +433,6 @@ func (m *Monitor) handleDetachedFromTarget(p cdpTargetDetachedFromTargetParams) return } m.sessionsMu.Lock() - cs := m.computedStates[p.SessionID] delete(m.sessions, p.SessionID) - delete(m.computedStates, p.SessionID) m.sessionsMu.Unlock() - if cs != nil { - cs.stop() - } } diff --git a/server/lib/cdpmonitor/handlers_test.go b/server/lib/cdpmonitor/handlers_test.go index cac89b16..3dd13a07 100644 --- a/server/lib/cdpmonitor/handlers_test.go +++ b/server/lib/cdpmonitor/handlers_test.go @@ -208,30 +208,13 @@ func TestPageEvents(t *testing.T) { _, ec, cleanup := startMonitor(t, srv, nil) defer cleanup() - // Attach a page target first so computedState exists for nav context. srv.sendToMonitor(t, map[string]any{ - "method": "Target.attachedToTarget", + "method": "Page.frameNavigated", "params": map[string]any{ - "sessionId": "sess-page", - "targetInfo": map[string]any{ - "targetId": "target-page", "type": "page", - "url": "about:blank", "attached": true, - }, - "waitingForDebugger": false, + "frame": map[string]any{"id": "frame-1", "url": "https://example.com/page"}, }, }) - ec.waitFor(t, "page_tab_opened", 2*time.Second) - - srv.sendToMonitor(t, map[string]any{ - "method": "Page.frameNavigated", "sessionId": "sess-page", - "params": map[string]any{ - "frame": map[string]any{ - "id": "frame-1", "url": "https://example.com/page", - "loaderId": "loader-1", - }, - }, - }) - ev := ec.waitFor(t, "page_navigation", 2*time.Second) + ev := ec.waitFor(t, "navigation", 2*time.Second) assert.Equal(t, events.CategoryPage, ev.Category) assert.Equal(t, "Page.frameNavigated", ev.Source.Event) var data map[string]any @@ -239,74 +222,17 @@ func TestPageEvents(t *testing.T) { assert.Equal(t, "https://example.com/page", data["url"]) srv.sendToMonitor(t, map[string]any{ - "method": "Page.domContentEventFired", "sessionId": "sess-page", + "method": "Page.domContentEventFired", "params": map[string]any{"timestamp": 1000.0}, }) - ev2 := 
ec.waitFor(t, "page_dom_content_loaded", 2*time.Second) + ev2 := ec.waitFor(t, "dom_content_loaded", 2*time.Second) assert.Equal(t, events.CategoryPage, ev2.Category) - var data2 map[string]any - require.NoError(t, json.Unmarshal(ev2.Data, &data2)) - assert.Equal(t, float64(1000.0), data2["cdp_timestamp"]) - assert.Equal(t, "loader-1", data2["loader_id"]) - assert.Equal(t, "https://example.com/page", data2["url"]) - srv.sendToMonitor(t, map[string]any{ - "method": "Page.loadEventFired", "sessionId": "sess-page", + "method": "Page.loadEventFired", "params": map[string]any{"timestamp": 1001.0}, }) ev3 := ec.waitFor(t, "page_load", 2*time.Second) assert.Equal(t, events.CategoryPage, ev3.Category) - var data3 map[string]any - require.NoError(t, json.Unmarshal(ev3.Data, &data3)) - assert.Equal(t, float64(1001.0), data3["cdp_timestamp"]) - assert.Equal(t, "loader-1", data3["loader_id"]) -} - -func TestTabOpened(t *testing.T) { - srv := newTestServer(t) - defer srv.close() - - _, ec, cleanup := startMonitor(t, srv, nil) - defer cleanup() - - t.Run("page_target_emits_tab_opened", func(t *testing.T) { - srv.sendToMonitor(t, map[string]any{ - "method": "Target.attachedToTarget", - "params": map[string]any{ - "sessionId": "sess-tab", - "targetInfo": map[string]any{ - "targetId": "target-tab", "type": "page", - "url": "https://example.com", "attached": true, - "title": "Example", - }, - "waitingForDebugger": false, - }, - }) - ev := ec.waitFor(t, "page_tab_opened", 2*time.Second) - assert.Equal(t, events.CategoryPage, ev.Category) - assert.Equal(t, "Target.attachedToTarget", ev.Source.Event) - var data map[string]any - require.NoError(t, json.Unmarshal(ev.Data, &data)) - assert.Equal(t, "target-tab", data["target_id"]) - assert.Equal(t, "page", data["target_type"]) - assert.Equal(t, "https://example.com", data["url"]) - assert.Equal(t, "Example", data["title"]) - }) - - t.Run("iframe_target_no_tab_opened", func(t *testing.T) { - srv.sendToMonitor(t, map[string]any{ - "method": 
"Target.attachedToTarget", - "params": map[string]any{ - "sessionId": "sess-iframe", - "targetInfo": map[string]any{ - "targetId": "target-iframe", "type": "iframe", - "url": "https://iframe.example.com", "attached": true, - }, - "waitingForDebugger": false, - }, - }) - ec.assertNone(t, "page_tab_opened", 200*time.Millisecond) - }) } func TestBindingAndTimeline(t *testing.T) { @@ -329,15 +255,15 @@ func TestBindingAndTimeline(t *testing.T) { assert.Equal(t, "Runtime.bindingCalled", ev.Source.Event) }) - t.Run("interaction_scroll_settled", func(t *testing.T) { + t.Run("scroll_settled", func(t *testing.T) { srv.sendToMonitor(t, map[string]any{ "method": "Runtime.bindingCalled", "params": map[string]any{ "name": "__kernelEvent", - "payload": `{"type":"interaction_scroll_settled","from_x":0,"from_y":0,"to_x":0,"to_y":500,"target_selector":"body"}`, + "payload": `{"type":"scroll_settled","from_x":0,"from_y":0,"to_x":0,"to_y":500,"target_selector":"body"}`, }, }) - ev := ec.waitFor(t, "interaction_scroll_settled", 2*time.Second) + ev := ec.waitFor(t, "scroll_settled", 2*time.Second) assert.Equal(t, events.CategoryInteraction, ev.Category) var data map[string]any require.NoError(t, json.Unmarshal(ev.Data, &data)) @@ -348,30 +274,12 @@ func TestBindingAndTimeline(t *testing.T) { srv.sendToMonitor(t, map[string]any{ "method": "PerformanceTimeline.timelineEventAdded", "params": map[string]any{ - "event": map[string]any{ - "type": "layout-shift", - "frameId": "frame-ls", - "time": 1.5, - "duration": 0.0, - "layoutShiftDetails": map[string]any{ - "value": 0.12, - "hadRecentInput": true, - }, - }, + "event": map[string]any{"type": "layout-shift"}, }, }) - ev := ec.waitFor(t, "page_layout_shift", 2*time.Second) + ev := ec.waitFor(t, "layout_shift", 2*time.Second) assert.Equal(t, events.KindCDP, ev.Source.Kind) assert.Equal(t, "PerformanceTimeline.timelineEventAdded", ev.Source.Event) - var data map[string]any - require.NoError(t, json.Unmarshal(ev.Data, &data)) - assert.Equal(t, 
"frame-ls", data["source_frame_id"]) - assert.Equal(t, float64(1.5), data["time"]) - shift := data["layout_shift_details"].(map[string]any) - assert.Equal(t, 0.12, shift["value"]) - assert.Equal(t, true, shift["had_recent_input"]) - _, hasEvent := data["event"] - assert.False(t, hasEvent, "raw CDP event wrapper must not appear in payload") }) t.Run("unknown_binding_ignored", func(t *testing.T) { @@ -424,127 +332,3 @@ func TestBindingAndTimeline(t *testing.T) { assert.Equal(t, countBefore+1, countAfter, "rate limiter should have dropped the 2nd and 3rd events") }) } - -func TestPerTargetStateMachines(t *testing.T) { - // attachTarget sends a Target.attachedToTarget message for a page session. - attachTarget := func(srv *testServer, t *testing.T, sessionID, targetID string) { - t.Helper() - srv.sendToMonitor(t, map[string]any{ - "method": "Target.attachedToTarget", - "params": map[string]any{ - "sessionId": sessionID, - "targetInfo": map[string]any{ - "targetId": targetID, "type": "page", - "url": "about:blank", "attached": true, - }, - "waitingForDebugger": false, - }, - }) - } - - t.Run("two_tabs_independent", func(t *testing.T) { - srv := newTestServer(t) - defer srv.close() - _, ec, cleanup := startMonitor(t, srv, nil) - defer cleanup() - - attachTarget(srv, t, "sess-a", "target-a") - attachTarget(srv, t, "sess-b", "target-b") - - // Navigate sess-a and start a request. 
- srv.sendToMonitor(t, map[string]any{ - "method": "Page.frameNavigated", "sessionId": "sess-a", - "params": map[string]any{"frame": map[string]any{ - "id": "f-a", "url": "https://a.example.com", "loaderId": "l-a", - }}, - }) - ec.waitFor(t, "page_navigation", 2*time.Second) - - srv.sendToMonitor(t, map[string]any{ - "method": "Network.requestWillBeSent", "sessionId": "sess-a", - "params": map[string]any{ - "requestId": "req-a", "type": "Document", "loaderId": "l-a", - "documentURL": "https://a.example.com/", - "request": map[string]any{"method": "GET", "url": "https://a.example.com/"}, - "initiator": map[string]any{"type": "other"}, - }, - }) - ec.waitFor(t, "network_request", 2*time.Second) - - // Navigate sess-b — must not reset sess-a's state machine. - // With per-session state machines, sess-b starts fresh (netPending=0) and - // fires its own network_idle after the 500 ms debounce, independently. - srv.sendToMonitor(t, map[string]any{ - "method": "Page.frameNavigated", "sessionId": "sess-b", - "params": map[string]any{"frame": map[string]any{ - "id": "f-b", "url": "https://b.example.com", "loaderId": "l-b", - }}, - }) - - // Wait past sess-b's 500 ms debounce so its network_idle fires before we - // set our checkpoint. The next new network_idle will then come from sess-a. - time.Sleep(700 * time.Millisecond) - - // Finish sess-a's request; waitForNew captures the current event count so - // sess-b's already-fired network_idle is excluded from the result. 
- srv.sendToMonitor(t, map[string]any{ - "method": "Network.responseReceived", "sessionId": "sess-a", - "params": map[string]any{ - "requestId": "req-a", "type": "Document", - "response": map[string]any{"status": 200, "statusText": "OK", "headers": map[string]any{}, "mimeType": "text/html"}, - }, - }) - srv.sendToMonitor(t, map[string]any{ - "method": "Network.loadingFinished", "sessionId": "sess-a", - "params": map[string]any{"requestId": "req-a"}, - }) - - ev := ec.waitForNew(t, "network_idle", 2*time.Second) - var data map[string]any - require.NoError(t, json.Unmarshal(ev.Data, &data)) - assert.Equal(t, "sess-a", data["session_id"], "network_idle must be attributed to sess-a") - assert.Equal(t, "l-a", data["loader_id"]) - }) - - t.Run("detach_stops_timer", func(t *testing.T) { - srv := newTestServer(t) - defer srv.close() - _, ec, cleanup := startMonitor(t, srv, nil) - defer cleanup() - - attachTarget(srv, t, "sess-c", "target-c") - - srv.sendToMonitor(t, map[string]any{ - "method": "Page.frameNavigated", "sessionId": "sess-c", - "params": map[string]any{"frame": map[string]any{ - "id": "f-c", "url": "https://c.example.com", "loaderId": "l-c", - }}, - }) - ec.waitFor(t, "page_navigation", 2*time.Second) - - // Start a request, then finish it (arms the 500 ms network_idle timer). 
- srv.sendToMonitor(t, map[string]any{ - "method": "Network.requestWillBeSent", "sessionId": "sess-c", - "params": map[string]any{ - "requestId": "req-c", "type": "Document", "loaderId": "l-c", - "documentURL": "https://c.example.com/", - "request": map[string]any{"method": "GET", "url": "https://c.example.com/"}, - "initiator": map[string]any{"type": "other"}, - }, - }) - ec.waitFor(t, "network_request", 2*time.Second) - srv.sendToMonitor(t, map[string]any{ - "method": "Network.loadingFinished", "sessionId": "sess-c", - "params": map[string]any{"requestId": "req-c"}, - }) - - // Detach before the 500 ms timer fires; readLoop processes messages in - // order so the stop() call lands well within the debounce window. - srv.sendToMonitor(t, map[string]any{ - "method": "Target.detachedFromTarget", - "params": map[string]any{"sessionId": "sess-c"}, - }) - - ec.assertNone(t, "network_idle", 1200*time.Millisecond) - }) -} diff --git a/server/lib/cdpmonitor/interaction.js b/server/lib/cdpmonitor/interaction.js index 14c34045..1ca221fe 100644 --- a/server/lib/cdpmonitor/interaction.js +++ b/server/lib/cdpmonitor/interaction.js @@ -113,7 +113,7 @@ var pos = scrollPos(target); if (Math.abs(pos.x - fromX) > 5 || Math.abs(pos.y - fromY) > 5) { send(JSON.stringify({ - type: 'interaction_scroll_settled', + type: 'scroll_settled', from_x: fromX, from_y: fromY, to_x: pos.x, to_y: pos.y, target_selector: s diff --git a/server/lib/cdpmonitor/monitor.go b/server/lib/cdpmonitor/monitor.go index c25dfac0..39de3ac0 100644 --- a/server/lib/cdpmonitor/monitor.go +++ b/server/lib/cdpmonitor/monitor.go @@ -51,7 +51,7 @@ type Monitor struct { pendReqMu sync.Mutex pendingRequests map[string]networkReqState // requestId → networkReqState - computedStates map[string]*computedState // sessionID → state machine; guarded by sessionsMu + computed *computedState lastScreenshotAt atomic.Int64 // unix millis of last capture screenshotInFlight atomic.Bool // true while a captureScreenshot goroutine is 
running @@ -83,11 +83,11 @@ func New(upstreamMgr UpstreamProvider, publish PublishFunc, displayNum int, log displayNum: displayNum, log: log, sessions: make(map[string]targetInfo), - computedStates: make(map[string]*computedState), pending: make(map[int64]chan cdpMessage), pendingRequests: make(map[string]networkReqState), bindingLastSeen: make(map[string]time.Time), } + m.computed = newComputedState(publish) m.lifecycleCtx = context.Background() m.mainSessionID.Store(mainSessionUnset) return m @@ -180,13 +180,8 @@ func (m *Monitor) Stop() { // It also fails all in-flight send() calls so their goroutines are unblocked. func (m *Monitor) clearState() { m.sessionsMu.Lock() - prev := m.computedStates m.sessions = make(map[string]targetInfo) - m.computedStates = make(map[string]*computedState) m.sessionsMu.Unlock() - for _, cs := range prev { - cs.stop() - } m.mainSessionID.Store(mainSessionUnset) m.pendReqMu.Lock() @@ -198,6 +193,9 @@ func (m *Monitor) clearState() { m.bindingRateMu.Unlock() m.failPendingCommands() + + // pendingRequests is already empty above, so inflight=0 is correct. + m.computed.resetOnNavigation(0) } const pendingRequestTTL = 5 * time.Minute @@ -215,32 +213,18 @@ func (m *Monitor) sweepPendingRequests(ctx context.Context) { case <-ctx.Done(): return case now := <-ticker.C: - var toSweep []networkReqState m.pendReqMu.Lock() for id, state := range m.pendingRequests { if now.Sub(state.addedAt) > pendingRequestTTL { delete(m.pendingRequests, id) - toSweep = append(toSweep, state) + m.computed.onLoadingFinished() // keep netPending consistent } } m.pendReqMu.Unlock() - for _, state := range toSweep { - if cs := m.computedFor(state.sessionID); cs != nil { - cs.onLoadingFinished() - } - } } } } -// computedFor returns the computedState for the given sessionID, or nil if none exists. 
-func (m *Monitor) computedFor(sessionID string) *computedState { - m.sessionsMu.RLock() - cs := m.computedStates[sessionID] - m.sessionsMu.RUnlock() - return cs -} - // failPendingCommands unblocks all in-flight send() calls by delivering an // error response. This prevents goroutine leaks when the connection is torn // down during reconnect. diff --git a/server/lib/cdpmonitor/monitor_test.go b/server/lib/cdpmonitor/monitor_test.go index 07267078..721554f8 100644 --- a/server/lib/cdpmonitor/monitor_test.go +++ b/server/lib/cdpmonitor/monitor_test.go @@ -83,13 +83,12 @@ func TestScreenshot(t *testing.T) { } t.Run("capture_and_publish", func(t *testing.T) { - m.tryScreenshot(context.Background(), "Page.loadEventFired", "") + m.tryScreenshot(context.Background()) require.Eventually(t, func() bool { return captureCount.Load() == 1 }, 2*time.Second, 20*time.Millisecond) - ev := ec.waitFor(t, "monitor_screenshot", 2*time.Second) + ev := ec.waitFor(t, "screenshot", 2*time.Second) assert.Equal(t, events.CategorySystem, ev.Category) assert.Equal(t, events.KindLocalProcess, ev.Source.Kind) - assert.Equal(t, "Page.loadEventFired", ev.Source.Event) var data map[string]any require.NoError(t, json.Unmarshal(ev.Data, &data)) assert.NotEmpty(t, data["png"]) @@ -97,7 +96,7 @@ func TestScreenshot(t *testing.T) { t.Run("rate_limited", func(t *testing.T) { before := captureCount.Load() - m.tryScreenshot(context.Background(), "Page.loadEventFired", "") + m.tryScreenshot(context.Background()) time.Sleep(100 * time.Millisecond) assert.Equal(t, before, captureCount.Load(), "should be rate-limited within 2s") }) @@ -105,7 +104,7 @@ func TestScreenshot(t *testing.T) { t.Run("captures_after_cooldown", func(t *testing.T) { m.lastScreenshotAt.Store(time.Now().Add(-3 * time.Second).UnixMilli()) before := captureCount.Load() - m.tryScreenshot(context.Background(), "Page.loadEventFired", "") + m.tryScreenshot(context.Background()) require.Eventually(t, func() bool { return captureCount.Load() > 
before }, 2*time.Second, 20*time.Millisecond) }) } @@ -320,19 +319,36 @@ func TestSubframeNavigationNoReset(t *testing.T) { ec.waitFor(t, "network_idle", 2*time.Second) } -// TestIframeTargetNoStateMachine verifies that attaching an iframe target does -// not create a computedState. Only page targets get state machines; iframes share -// the CDP page domains but must not generate computed events like navigation_settled. -func TestIframeTargetNoStateMachine(t *testing.T) { - m, _ := newComputedMonitor(t) - m.sessionsMu.Lock() - m.sessions["iframe-session"] = targetInfo{targetID: "iframe-target", targetType: "iframe"} - // Intentionally do NOT create a computedState — mirrors handleAttachedToTarget behaviour. - m.sessionsMu.Unlock() +func TestSubframeLifecycleIgnored(t *testing.T) { + t.Run("subframe_dom_content_loaded_does_not_advance_state", func(t *testing.T) { + m, ec := newComputedMonitor(t) + navigateMonitor(m, "https://example.com") // sets mainSessionID = "s1" + + // Fire domContentLoaded from an iframe session, not the main frame. + m.handleDOMContentLoaded(cdpPageDomContentEventFiredParams{}, "iframe-session") + + // Now fire the real main-frame domContentLoaded + the rest of the conditions. + simulateRequest(m, "r1") + simulateFinished(m, "r1") + m.handleLoadEventFired(context.Background(), cdpPageLoadEventFiredParams{}, "s1") + // navigation_settled requires navDOMLoaded; if the iframe event had set it, + // the event might fire without the main-frame DOMContentLoaded arriving. + // Assert it does NOT fire yet (iframe set navDOMLoaded but main frame hasn't). 
+ ec.assertNone(t, "navigation_settled", 1500*time.Millisecond) + }) - m.sessionsMu.RLock() - cs := m.computedStates["iframe-session"] - m.sessionsMu.RUnlock() + t.Run("subframe_load_event_does_not_start_layout_timer", func(t *testing.T) { + m, ec := newComputedMonitor(t) + navigateMonitor(m, "https://example.com") - assert.Nil(t, cs, "iframe target must not have a computedState") + // Subframe fires loadEventFired — should not start the layout_settled timer. + m.handleLoadEventFired(context.Background(), cdpPageLoadEventFiredParams{}, "iframe-session") + ec.assertNone(t, "layout_settled", 1500*time.Millisecond) + + // Main frame fires — timer should start now. + t0 := time.Now() + m.handleLoadEventFired(context.Background(), cdpPageLoadEventFiredParams{}, "s1") + ec.waitFor(t, "layout_settled", 3*time.Second) + assert.GreaterOrEqual(t, time.Since(t0).Milliseconds(), int64(900), "fired too early") + }) } diff --git a/server/lib/cdpmonitor/screenshot.go b/server/lib/cdpmonitor/screenshot.go index b8f17f4a..e367a0a4 100644 --- a/server/lib/cdpmonitor/screenshot.go +++ b/server/lib/cdpmonitor/screenshot.go @@ -6,7 +6,6 @@ import ( "encoding/base64" "encoding/json" "fmt" - "maps" "os/exec" "time" @@ -18,9 +17,7 @@ import ( // consumes the rate-limit window without starting a capture. lastScreenshotAt // is only advanced after the in-flight slot is claimed; if that CAS then loses // to a concurrent goroutine the slot is released and we return cleanly. -// sourceEvent is the CDP event that triggered the capture; sessionID is used -// to snapshot nav context before the async goroutine fires. 
-func (m *Monitor) tryScreenshot(ctx context.Context, sourceEvent, sessionID string) { +func (m *Monitor) tryScreenshot(ctx context.Context) { now := time.Now().UnixMilli() last := m.lastScreenshotAt.Load() if now-last < 2000 { @@ -33,14 +30,9 @@ func (m *Monitor) tryScreenshot(ctx context.Context, sourceEvent, sessionID stri m.screenshotInFlight.Store(false) return } - var navData json.RawMessage - var navMeta map[string]string - if cs := m.computedFor(sessionID); cs != nil { - navData, navMeta = cs.navSnapshot() - } m.asyncWg.Go(func() { defer m.screenshotInFlight.Store(false) - m.captureScreenshot(ctx, sourceEvent, navData, navMeta) + m.captureScreenshot(ctx) }) } @@ -48,9 +40,7 @@ const screenshotTimeout = 10 * time.Second // captureScreenshot takes a screenshot via ffmpeg x11grab (or the screenshotFn // seam in tests), optionally downscales it, and publishes a screenshot event. -// navData and navMeta are pre-snapped from the owning session's computedState; -// they may be nil if no state machine exists for the session. 
-func (m *Monitor) captureScreenshot(parentCtx context.Context, sourceEvent string, navData json.RawMessage, navMeta map[string]string) { +func (m *Monitor) captureScreenshot(parentCtx context.Context) { ctx, cancel := context.WithTimeout(parentCtx, screenshotTimeout) defer cancel() var pngBytes []byte @@ -77,20 +67,13 @@ func (m *Monitor) captureScreenshot(parentCtx context.Context, sourceEvent strin } encoded := base64.StdEncoding.EncodeToString(pngBytes) - payload := map[string]any{screenshotDataKey: encoded} - if navData != nil { - var nav map[string]any - if json.Unmarshal(navData, &nav) == nil { - maps.Copy(payload, nav) - } - } - data, _ := json.Marshal(payload) + data, _ := json.Marshal(map[string]string{screenshotDataKey: encoded}) m.publish(events.Event{ Ts: time.Now().UnixMicro(), Type: EventScreenshot, Category: events.CategorySystem, - Source: events.Source{Kind: events.KindLocalProcess, Event: sourceEvent, Metadata: navMeta}, + Source: events.Source{Kind: events.KindLocalProcess}, Data: data, }) } diff --git a/server/lib/cdpmonitor/types.go b/server/lib/cdpmonitor/types.go index 76769180..c9cec495 100644 --- a/server/lib/cdpmonitor/types.go +++ b/server/lib/cdpmonitor/types.go @@ -16,57 +16,49 @@ const mainSessionUnset = "\x00unset" // Each maps 1-to-1 with a specific CDP domain event (Runtime.*, Network.*, // Page.*, PerformanceTimeline.*) received from Chrome. 
const ( - EventConsoleLog = "console_log" // Runtime.consoleAPICalled (non-error types) - EventConsoleError = "console_error" // Runtime.consoleAPICalled (type=error) or Runtime.exceptionThrown - EventNetworkRequest = "network_request" // Network.requestWillBeSent - EventNetworkResponse = "network_response" // Network.loadingFinished (with prior responseReceived) - EventNetworkLoadingFailed = "network_loading_failed" // Network.loadingFailed - EventNavigation = "page_navigation" // Page.frameNavigated - EventDOMContentLoaded = "page_dom_content_loaded" // Page.domContentEventFired + EventConsoleLog = "console_log" // Runtime.consoleAPICalled (non-error types) + EventConsoleError = "console_error" // Runtime.consoleAPICalled (type=error) or Runtime.exceptionThrown + EventNetworkRequest = "network_request" // Network.requestWillBeSent + EventNetworkResponse = "network_response" // Network.loadingFinished (with prior responseReceived) + EventNetworkLoadingFailed = "network_loading_failed" // Network.loadingFailed + EventNavigation = "navigation" // Page.frameNavigated + EventDOMContentLoaded = "dom_content_loaded" // Page.domContentEventFired EventPageLoad = "page_load" // Page.loadEventFired - EventLayoutShift = "page_layout_shift" // PerformanceTimeline event of type "layout-shift" - EventLCP = "page_lcp" // PerformanceTimeline event of type "largest-contentful-paint" - EventTabOpened = "page_tab_opened" // Target.attachedToTarget for type=page + EventLayoutShift = "layout_shift" // PerformanceTimeline event of type "layout-shift" ) // Computed events — synthetic events derived by computed.go state machines. // None of these correspond to a single CDP notification; they are inferred from // sequences of CDP events and debounce timers. 
const ( - EventNetworkIdle = "network_idle" // 500 ms after all in-flight requests finish - EventLayoutSettled = "page_layout_settled" // 1 s after page_load with no intervening layout shifts - EventNavigationSettled = "page_navigation_settled" // fires once page_dom_content_loaded and page_layout_settled both hold + EventNetworkIdle = "network_idle" // 500 ms after all in-flight requests finish + EventLayoutSettled = "layout_settled" // 1 s after page_load with no intervening layout shifts + EventNavigationSettled = "navigation_settled" // fires once dom_content_loaded + network_idle + layout_settled all hold ) // Interaction events — fired by injected page-side JS (interaction.js) via the // Runtime.bindingCalled mechanism. They originate in the browser's renderer // process, not from Chrome's network or page domains. const ( - EventInteractionClick = "interaction_click" // document click (target selector, coords, text) - EventInteractionKey = "interaction_key" // keydown (key name, target selector) - EventScrollSettled = "interaction_scroll_settled" // 300 ms after the last scroll event on a target + EventInteractionClick = "interaction_click" // document click (target selector, coords, text) + EventInteractionKey = "interaction_key" // keydown (key name, target selector) + EventScrollSettled = "scroll_settled" // 300 ms after the last scroll event on a target ) // Monitor lifecycle and internal events — emitted by the monitor itself, not by Chrome. 
const ( - EventScreenshot = "monitor_screenshot" // ffmpeg frame capture on page load or JS exception + EventScreenshot = "screenshot" // ffmpeg frame capture on page load or JS exception EventMonitorDisconnected = "monitor_disconnected" // WebSocket to Chrome closed unexpectedly EventMonitorReconnected = "monitor_reconnected" // successfully reconnected after a disconnect EventMonitorReconnectFailed = "monitor_reconnect_failed" // reconnect attempts exhausted EventMonitorInitFailed = "monitor_init_failed" // could not initialise the CDP session ) -// Metadata keys written into events.Source.Metadata for CDP-sourced events. -const ( - MetadataKeyCDPSessionID = "cdp_session_id" - MetadataKeyTargetID = "target_id" - MetadataKeyTargetType = "target_type" -) +// Metadata key written into events.Source.Metadata for CDP-sourced events. +const MetadataKeyCDPSessionID = "cdp_session_id" -const ( - timelineEventLayoutShift = "layout-shift" - timelineEventLCP = "largest-contentful-paint" -) +// CDP PerformanceTimeline event type for layout shifts. +const timelineEventLayoutShift = "layout-shift" // CDP target type for browser pages (as opposed to workers, iframes, etc.). const targetTypePage = "page" @@ -117,22 +109,9 @@ type networkReqState struct { headers json.RawMessage postData string resourceType string - loaderID string - frameID string status int statusText string resHeaders json.RawMessage mimeType string addedAt time.Time // for TTL eviction } - -// navContext carries the identity of the navigation that owns a computedState. -// Stamped at Page.frameNavigated and precomputed into event payloads/metadata. 
-type navContext struct { - sessionID string - targetID string - targetType string - frameID string - loaderID string - url string -} diff --git a/server/lib/events/capturesession.go b/server/lib/events/capturesession.go new file mode 100644 index 00000000..80bbc5a6 --- /dev/null +++ b/server/lib/events/capturesession.go @@ -0,0 +1,222 @@ +package events + +import ( + "fmt" + "log/slog" + "sync" + "time" +) + +// CaptureConfig holds caller-supplied capture preferences. All fields are +// optional; zero values mean "use server defaults" (all categories). +type CaptureConfig struct { + // Categories limits which event categories are captured + // nil represents all categories. + Categories []EventCategory +} + +// CaptureSession wraps events in envelopes and fans them out to a fileWriter +// Reusable: call Start with a new ID to begin a new session; call Stop to end +// the current session without closing the underlying writers. Close tears down +// file descriptors and should only be called during server shutdown. +type CaptureSession struct { + mu sync.Mutex + ring *ringBuffer + files *fileWriter + seq uint64 + captureSessionID string + categories map[EventCategory]struct{} + createdAt time.Time +} + +// CaptureSessionConfig holds the parameters for creating a CaptureSession. +type CaptureSessionConfig struct { + LogDir string + // RingCapacity is the number of envelopes the in-memory ring buffer holds. 
+ RingCapacity int +} + +func NewCaptureSession(cfg CaptureSessionConfig) (*CaptureSession, error) { + rb, err := newRingBuffer(cfg.RingCapacity) + if err != nil { + return nil, fmt.Errorf("capture session: %w", err) + } + fw, err := newFileWriter(cfg.LogDir) + if err != nil { + return nil, fmt.Errorf("capture session: %w", err) + } + cats := make(map[EventCategory]struct{}, len(allCategories)) + for _, c := range allCategories { + cats[c] = struct{}{} + } + return &CaptureSession{ + ring: rb, + files: fw, + categories: cats, + }, nil +} + +// Start sets the capture session ID and applies the given config. It resets +// the sequence counter so each session starts at seq 1. Sequence numbers are +// scoped to the active session; Last-Event-ID values from a previous session +// are not valid for reconnecting to a new one. +// The fileWriter is intentionally not rotated: events from different sessions +// are interleaved in the same per-category JSONL files and distinguished by +// their envelope's capture_session_id. +func (s *CaptureSession) Start(captureSessionID string, cfg CaptureConfig) { + s.mu.Lock() + defer s.mu.Unlock() + s.captureSessionID = captureSessionID + s.seq = 0 + s.createdAt = time.Now() + s.ring.reset() + cats := cfg.Categories + if len(cats) == 0 { + cats = allCategories + } + s.categories = make(map[EventCategory]struct{}, len(cats)) + for _, c := range cats { + s.categories[c] = struct{}{} + } +} + +// publishLocked is the core publish path. 
Requires s.mu held and a captureSessionID +func (s *CaptureSession) publishLocked(ev Event) Envelope { + if ev.Ts == 0 { + ev.Ts = time.Now().UnixMicro() + } + s.seq++ + env := Envelope{ + CaptureSessionID: s.captureSessionID, + Seq: s.seq, + Event: ev, + } + env, data := truncateIfNeeded(env) + if data == nil { + slog.Error("capture_session: marshal failed, skipping file write", "seq", env.Seq, "category", env.Event.Category) + } else { + filename := string(env.Event.Category) + ".log" + if err := s.files.Write(filename, data); err != nil { + slog.Error("capture_session: file write failed", "seq", env.Seq, "category", env.Event.Category, "err", err) + } + } + s.ring.publish(env) + return env +} + +// Publish wraps ev in an Envelope, truncates if needed, then writes to +// fileWriter (durable) before RingBuffer (in-memory fan-out). +func (s *CaptureSession) Publish(ev Event) { + s.mu.Lock() + defer s.mu.Unlock() + + // No active session, drop silently. This can happen when events + // arrive between Stop() and producers noticing, or before Start(). + if s.captureSessionID == "" { + return + } + + // Drop events whose category is outside the configured set. + if _, ok := s.categories[ev.Category]; !ok { + return + } + + s.publishLocked(ev) +} + +// PublishUnfiltered publishes ev without applying the category filter. Use for +// externally-initiated events (e.g. API callers) that must not be silently +// dropped by capture preferences set by the session owner. +func (s *CaptureSession) PublishUnfiltered(ev Event) Envelope { + s.mu.Lock() + defer s.mu.Unlock() + if s.captureSessionID == "" { + return Envelope{} + } + return s.publishLocked(ev) +} + +// NewReader returns a Reader positioned at the start of the ring buffer. +func (s *CaptureSession) NewReader(afterSeq uint64) *Reader { + return s.ring.newReader(afterSeq) +} + +// ID returns the current capture session ID, or "" if no session is active. 
+func (s *CaptureSession) ID() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.captureSessionID +} + +// Seq returns the current sequence number (last published). +func (s *CaptureSession) Seq() uint64 { + s.mu.Lock() + defer s.mu.Unlock() + return s.seq +} + +// Config returns the current capture configuration. +func (s *CaptureSession) Config() CaptureConfig { + s.mu.Lock() + defer s.mu.Unlock() + cats := make([]EventCategory, 0, len(s.categories)) + for c := range s.categories { + cats = append(cats, c) + } + return CaptureConfig{ + Categories: cats, + } +} + +// CreatedAt returns when the current session was started. +func (s *CaptureSession) CreatedAt() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.createdAt +} + +// UpdateConfig applies a new CaptureConfig to the running session without +// resetting the sequence counter or ring buffer. +func (s *CaptureSession) UpdateConfig(cfg CaptureConfig) { + s.mu.Lock() + defer s.mu.Unlock() + cats := cfg.Categories + if len(cats) == 0 { + cats = allCategories + } + s.categories = make(map[EventCategory]struct{}, len(cats)) + for _, c := range cats { + s.categories[c] = struct{}{} + } +} + +// Active reports whether a capture session is currently running. +func (s *CaptureSession) Active() bool { + s.mu.Lock() + defer s.mu.Unlock() + return s.captureSessionID != "" +} + +// Stop ends the current session. It publishes a synthetic session_ended +// envelope so open SSE stream connections receive a terminal frame and can +// close cleanly, then clears the session ID. The ring buffer is intentionally +// left intact so existing readers can finish draining. A new session can be +// started by calling Start again. 
+func (s *CaptureSession) Stop() { + s.mu.Lock() + defer s.mu.Unlock() + if s.captureSessionID == "" { + return + } + _ = s.publishLocked(Event{ + Type: SessionEnded, + Category: CategorySystem, + Source: Source{Kind: KindKernelAPI}, + }) + s.captureSessionID = "" +} + +// Close flushes and releases all open file descriptors. +func (s *CaptureSession) Close() error { + return s.files.Close() +} diff --git a/server/lib/events/event.go b/server/lib/events/event.go index aa0e6b77..837d2c7e 100644 --- a/server/lib/events/event.go +++ b/server/lib/events/event.go @@ -21,15 +21,16 @@ const ( CategorySystem EventCategory = "system" ) -// AllCategories is the canonical list of all known event categories. -var AllCategories = []EventCategory{ +// allCategories is the canonical list of all known event categories. +// Package-internal; treat as read-only. +var allCategories = []EventCategory{ CategoryConsole, CategoryNetwork, CategoryPage, CategoryInteraction, CategoryLiveview, CategoryCaptcha, CategorySystem, } var validCategories = func() map[EventCategory]struct{} { - m := make(map[EventCategory]struct{}, len(AllCategories)) - for _, c := range AllCategories { + m := make(map[EventCategory]struct{}, len(allCategories)) + for _, c := range allCategories { m[c] = struct{}{} } return m @@ -41,6 +42,13 @@ func ValidCategory(c EventCategory) bool { return ok } +// System event types emitted by the pipeline itself. +const ( + SessionEnded = "session_ended" + EventsDropped = "events_dropped" + EventsStorageError = "storage_error" +) + type SourceKind string const ( @@ -59,7 +67,7 @@ type Source struct { } // Event is the portable event schema. It contains only producer-emitted content; -// pipeline metadata (seq) lives on the Envelope. +// pipeline metadata (seq, capture session) lives on the Envelope. 
type Event struct { Ts int64 `json:"ts"` // Unix microseconds (µs since epoch) Type string `json:"type"` @@ -71,8 +79,9 @@ type Event struct { // Envelope wraps an Event with pipeline-assigned metadata. type Envelope struct { - Seq uint64 `json:"seq"` - Event Event `json:"event"` + CaptureSessionID string `json:"capture_session_id"` + Seq uint64 `json:"seq"` + Event Event `json:"event"` } // truncateIfNeeded marshals env and returns the (possibly truncated) envelope. diff --git a/server/lib/events/events_test.go b/server/lib/events/events_test.go index a7eea87b..5827c9bb 100644 --- a/server/lib/events/events_test.go +++ b/server/lib/events/events_test.go @@ -1,8 +1,12 @@ package events import ( + "bytes" "context" "encoding/json" + "os" + "path/filepath" + "strings" "sync" "testing" "time" @@ -59,7 +63,8 @@ func TestEventSerialization(t *testing.T) { func TestEnvelopeSerialization(t *testing.T) { env := Envelope{ - Seq: 1, + CaptureSessionID: "test-session-id", + Seq: 1, Event: Event{ Ts: 1000, Type: "console.log", @@ -74,8 +79,8 @@ func TestEnvelopeSerialization(t *testing.T) { var decoded map[string]any require.NoError(t, json.Unmarshal(b, &decoded)) + assert.Equal(t, "test-session-id", decoded["capture_session_id"]) assert.Equal(t, float64(1), decoded["seq"]) - assert.NotContains(t, decoded, "capture_session_id") inner, ok := decoded["event"].(map[string]any) require.True(t, ok) assert.Equal(t, "console.log", inner["type"]) @@ -325,6 +330,275 @@ func TestConcurrentReaders(t *testing.T) { } } +// TestFileWriter: per-category JSONL appender tests. 
+func TestFileWriter(t *testing.T) { + t.Run("category_routing", func(t *testing.T) { + dir := t.TempDir() + fw, err := newFileWriter(dir) + require.NoError(t, err) + defer fw.Close() + + envsToFile := []struct { + env Envelope + file string + category string + }{ + {Envelope{Seq: 1, Event: Event{Type: "console.log", Category: CategoryConsole, Source: Source{Kind: KindCDP}, Ts: 1}}, "console.log", "console"}, + {Envelope{Seq: 2, Event: Event{Type: "network.request", Category: CategoryNetwork, Source: Source{Kind: KindCDP}, Ts: 1}}, "network.log", "network"}, + {Envelope{Seq: 3, Event: Event{Type: "liveview.click", Category: CategoryLiveview, Source: Source{Kind: KindKernelAPI}, Ts: 1}}, "liveview.log", "liveview"}, + {Envelope{Seq: 4, Event: Event{Type: "captcha.solve", Category: CategoryCaptcha, Source: Source{Kind: KindExtension}, Ts: 1}}, "captcha.log", "captcha"}, + {Envelope{Seq: 5, Event: Event{Type: "page.navigation", Category: CategoryPage, Source: Source{Kind: KindCDP}, Ts: 1}}, "page.log", "page"}, + {Envelope{Seq: 6, Event: Event{Type: "input.click", Category: CategoryInteraction, Source: Source{Kind: KindCDP}, Ts: 1}}, "interaction.log", "interaction"}, + {Envelope{Seq: 7, Event: Event{Type: "monitor.connected", Category: CategorySystem, Source: Source{Kind: KindKernelAPI}, Ts: 1}}, "system.log", "system"}, + } + + for _, e := range envsToFile { + data, err := json.Marshal(e.env) + require.NoError(t, err) + require.NoError(t, fw.Write(e.file, data)) + } + + for _, e := range envsToFile { + data, err := os.ReadFile(filepath.Join(dir, e.file)) + require.NoError(t, err, "missing file %s for type %s", e.file, e.env.Event.Type) + + line := bytes.TrimRight(data, "\n") + require.True(t, json.Valid(line), "invalid JSON in %s", e.file) + + var decoded map[string]any + require.NoError(t, json.Unmarshal(line, &decoded)) + inner, ok := decoded["event"].(map[string]any) + require.True(t, ok) + assert.Equal(t, e.category, inner["category"], "wrong category in %s", 
e.file) + srcMap, ok := inner["source"].(map[string]any) + require.True(t, ok, "source should be an object in %s", e.file) + assert.Equal(t, string(e.env.Event.Source.Kind), srcMap["kind"], "wrong source kind in %s", e.file) + } + }) + + t.Run("empty_filename_rejected", func(t *testing.T) { + dir := t.TempDir() + fw, err := newFileWriter(dir) + require.NoError(t, err) + defer fw.Close() + + err = fw.Write("", []byte(`{"seq":1}`)) + require.Error(t, err) + assert.Contains(t, err.Error(), "empty filename") + }) + + t.Run("concurrent_writes", func(t *testing.T) { + dir := t.TempDir() + fw, err := newFileWriter(dir) + require.NoError(t, err) + defer fw.Close() + + const goroutines = 10 + const eventsPerGoroutine = 100 + + var wg sync.WaitGroup + for i := 0; i < goroutines; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + for j := 0; j < eventsPerGoroutine; j++ { + env := Envelope{ + Seq: uint64(i*eventsPerGoroutine + j), + Event: Event{Type: "console.log", Category: CategoryConsole, Source: Source{Kind: KindCDP}, Ts: 1}, + } + envData, err := json.Marshal(env) + require.NoError(t, err) + require.NoError(t, fw.Write("console.log", envData)) + } + }(i) + } + wg.Wait() + + data, err := os.ReadFile(filepath.Join(dir, "console.log")) + require.NoError(t, err) + + lines := strings.Split(strings.TrimRight(string(data), "\n"), "\n") + assert.Len(t, lines, goroutines*eventsPerGoroutine) + for _, line := range lines { + assert.True(t, json.Valid([]byte(line)), "invalid JSON line: %s", line) + } + }) + + t.Run("lazy_open", func(t *testing.T) { + dir := t.TempDir() + fw, err := newFileWriter(dir) + require.NoError(t, err) + defer fw.Close() + + entries, err := os.ReadDir(dir) + require.NoError(t, err) + assert.Empty(t, entries, "files opened before first Write") + + env := Envelope{Seq: 1, Event: Event{Type: "console.log", Category: CategoryConsole, Source: Source{Kind: KindCDP}, Ts: 1}} + envData, err := json.Marshal(env) + require.NoError(t, err) + require.NoError(t, 
fw.Write("console.log", envData)) + + entries, err = os.ReadDir(dir) + require.NoError(t, err) + assert.Len(t, entries, 1, "expected exactly one file after first Write") + assert.Equal(t, "console.log", entries[0].Name()) + }) +} + +func TestCaptureSession(t *testing.T) { + newCaptureSession := func(t *testing.T) (*CaptureSession, string) { + t.Helper() + dir := t.TempDir() + p, err := NewCaptureSession(CaptureSessionConfig{LogDir: dir, RingCapacity: 100}) + require.NoError(t, err) + p.Start("test-session", CaptureConfig{}) + t.Cleanup(func() { p.Close() }) + return p, dir + } + + t.Run("concurrent_publish_seq_order", func(t *testing.T) { + const goroutines = 8 + const eventsEach = 50 + const total = goroutines * eventsEach + + p, err := NewCaptureSession(CaptureSessionConfig{LogDir: t.TempDir(), RingCapacity: total}) + require.NoError(t, err) + p.Start("test-concurrent", CaptureConfig{}) + t.Cleanup(func() { p.Close() }) + reader := p.NewReader(0) + + var wg sync.WaitGroup + for i := 0; i < goroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for j := 0; j < eventsEach; j++ { + p.Publish(cdpEvent("console.log", CategoryConsole)) + } + }() + } + wg.Wait() + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + for want := uint64(1); want <= total; want++ { + env := readEnvelope(t, reader, ctx) + assert.Equal(t, want, env.Seq, "events must arrive in seq order") + } + }) + + t.Run("publish_increments_seq", func(t *testing.T) { + p, _ := newCaptureSession(t) + reader := p.NewReader(0) + + for i := 0; i < 3; i++ { + p.Publish(Event{Type: "page.navigation", Category: CategoryPage, Source: Source{Kind: KindCDP}, Ts: 1}) + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + for want := uint64(1); want <= 3; want++ { + env := readEnvelope(t, reader, ctx) + assert.Equal(t, want, env.Seq, "expected seq %d got %d", want, env.Seq) + } + }) + + t.Run("publish_sets_ts", func(t 
*testing.T) { + p, _ := newCaptureSession(t) + reader := p.NewReader(0) + + before := time.Now().UnixMicro() + p.Publish(Event{Type: "page.navigation", Category: CategoryPage, Source: Source{Kind: KindCDP}}) + after := time.Now().UnixMicro() + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + env := readEnvelope(t, reader, ctx) + assert.GreaterOrEqual(t, env.Event.Ts, before) + assert.LessOrEqual(t, env.Event.Ts, after) + }) + + t.Run("publish_writes_file", func(t *testing.T) { + p, dir := newCaptureSession(t) + + p.Publish(Event{Type: "console.log", Category: CategoryConsole, Source: Source{Kind: KindCDP}, Ts: 1}) + + data, err := os.ReadFile(filepath.Join(dir, "console.log")) + require.NoError(t, err) + + lines := strings.Split(strings.TrimRight(string(data), "\n"), "\n") + require.Len(t, lines, 1) + assert.True(t, json.Valid([]byte(lines[0]))) + assert.Contains(t, lines[0], `"console.log"`) + }) + + t.Run("publish_writes_ring", func(t *testing.T) { + p, _ := newCaptureSession(t) + + reader := p.NewReader(0) + p.Publish(Event{Type: "page.navigation", Category: CategoryPage, Source: Source{Kind: KindCDP}, Ts: 1}) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + env := readEnvelope(t, reader, ctx) + assert.Equal(t, "page.navigation", env.Event.Type) + assert.Equal(t, CategoryPage, env.Event.Category) + }) + + t.Run("start_sets_capture_session_id", func(t *testing.T) { + p, _ := newCaptureSession(t) + p.Start("test-uuid", CaptureConfig{}) + + reader := p.NewReader(0) + p.Publish(Event{Type: "page.navigation", Category: CategoryPage, Source: Source{Kind: KindCDP}, Ts: 1}) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + env := readEnvelope(t, reader, ctx) + assert.Equal(t, "test-uuid", env.CaptureSessionID) + }) + + t.Run("truncation_applied", func(t *testing.T) { + p, dir := newCaptureSession(t) + reader := p.NewReader(0) + + 
largeData := strings.Repeat("x", 1_100_000) + rawData, err := json.Marshal(map[string]string{"payload": largeData}) + require.NoError(t, err) + + p.Publish(Event{ + Type: "page.navigation", + Category: CategoryPage, + Source: Source{Kind: KindCDP}, + Ts: 1, + Data: json.RawMessage(rawData), + }) + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + env := readEnvelope(t, reader, ctx) + assert.True(t, env.Event.Truncated) + assert.True(t, json.Valid(env.Event.Data)) + + marshaled, err := json.Marshal(env) + require.NoError(t, err) + assert.LessOrEqual(t, len(marshaled), maxS2RecordBytes) + + data, err := os.ReadFile(filepath.Join(dir, "page.log")) + require.NoError(t, err) + lines := strings.Split(strings.TrimRight(string(data), "\n"), "\n") + require.Len(t, lines, 1) + assert.Contains(t, lines[0], `"truncated":true`) + }) + +} func TestRingBufferResetWithActiveReader(t *testing.T) { rb := newTestRingBuffer(t,10) diff --git a/server/lib/events/eventsstorage.go b/server/lib/events/eventsstorage.go new file mode 100644 index 00000000..94217857 --- /dev/null +++ b/server/lib/events/eventsstorage.go @@ -0,0 +1,93 @@ +package events + +import ( + "context" + "encoding/json" + "log/slog" +) + +// EventsStorage is the durable storage interface for the storage writer. +type EventsStorage interface { + Append(ctx context.Context, streamName string, data []byte) error + Close() error +} + +// sessionRemover is implemented by storage implementations that support per-session cleanup. +type sessionRemover interface { + Remove(streamName string) +} + +// EventsStorageWriter reads envelopes from the ring buffer and writes them to +// an EventsStorage. A single goroutine drives the write loop; call +// Run to start it and Close to drain in-flight writes after Run returns. +type EventsStorageWriter struct { + session *CaptureSession + eventsStorage EventsStorage +} + +// NewEventsStorageWriter constructs a writer for the given storage. 
+func NewEventsStorageWriter(session *CaptureSession, eventsStorage EventsStorage) *EventsStorageWriter { + return &EventsStorageWriter{session: session, eventsStorage: eventsStorage} +} + + +// Run reads from the ring buffer until ctx is cancelled. It returns nil on +// clean shutdown (ctx.Err()) and must not be called concurrently. +func (w *EventsStorageWriter) Run(ctx context.Context) { + reader := w.session.NewReader(0) + for { + result, err := reader.Read(ctx) + if err != nil { + return + } + if result.Dropped > 0 { + slog.Warn("events_storage_writer: ring buffer overflow, events dropped", + "count", result.Dropped) + continue + } + env := result.Envelope + if env.Event.Type == EventsStorageError { + // Skip system error events to prevent feedback loops. + continue + } + data, err := json.Marshal(env) + if err != nil { + slog.Error("events_storage_writer: marshal failed, skipping", + "seq", env.Seq, "err", err) + continue + } + if err := w.eventsStorage.Append(ctx, env.CaptureSessionID, data); err != nil { + if ctx.Err() != nil { + return + } + slog.Error("events_storage_writer: append failed", + "seq", env.Seq, "stream", env.CaptureSessionID, "err", err) + errData, _ := json.Marshal(map[string]string{"error": err.Error()}) + w.session.PublishUnfiltered(Event{ + Type: EventsStorageError, + Category: CategorySystem, + Source: Source{Kind: KindLocalProcess}, + Data: errData, + }) + } else if env.Event.Type == SessionEnded { + if r, ok := w.eventsStorage.(sessionRemover); ok { + r.Remove(env.CaptureSessionID) + } + } + } +} + +// Close drains in-flight writes and tears down the storage. Call after Run +// returns to ensure all pending records are flushed. +func (w *EventsStorageWriter) Close() error { + return w.eventsStorage.Close() +} + +// RemoveSession evicts the storage's producer for the given session ID, +// allowing it to drain and release resources. No-op if the storage does not +// implement sessionRemover. 
+func (w *EventsStorageWriter) RemoveSession(id string) { + if r, ok := w.eventsStorage.(sessionRemover); ok { + r.Remove(id) + } +} diff --git a/server/lib/events/eventsstorage_writer_test.go b/server/lib/events/eventsstorage_writer_test.go new file mode 100644 index 00000000..d3544981 --- /dev/null +++ b/server/lib/events/eventsstorage_writer_test.go @@ -0,0 +1,353 @@ +package events + +import ( + "context" + "encoding/json" + "errors" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// mockStorage is an in-memory EventsStorage for writer tests. No S2 dependency. +// It also implements sessionRemover so that RemoveSession paths can be exercised. +type mockStorage struct { + mu sync.Mutex + calls []appendCall + removed []string + err error +} + +type appendCall struct { + streamName string + data []byte +} + +func (m *mockStorage) Append(_ context.Context, streamName string, data []byte) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.err != nil { + return m.err + } + cp := make([]byte, len(data)) + copy(cp, data) + m.calls = append(m.calls, appendCall{streamName: streamName, data: cp}) + return nil +} + +func (m *mockStorage) Remove(streamName string) { + m.mu.Lock() + defer m.mu.Unlock() + m.removed = append(m.removed, streamName) +} + +func (m *mockStorage) Close() error { return nil } + +func (m *mockStorage) recorded() []appendCall { + m.mu.Lock() + defer m.mu.Unlock() + out := make([]appendCall, len(m.calls)) + copy(out, m.calls) + return out +} + +func (m *mockStorage) removedSessions() []string { + m.mu.Lock() + defer m.mu.Unlock() + out := make([]string, len(m.removed)) + copy(out, m.removed) + return out +} + +// blockingStorage is an EventsStorage whose Append blocks until release is closed. +// appendCalled is closed (once) when Append is first entered so callers can +// synchronise on "Append is in progress". 
+type blockingStorage struct { + appendCalled chan struct{} + release chan struct{} + once sync.Once +} + +func newBlockingStorage() *blockingStorage { + return &blockingStorage{ + appendCalled: make(chan struct{}), + release: make(chan struct{}), + } +} + +func (b *blockingStorage) Append(ctx context.Context, _ string, _ []byte) error { + b.once.Do(func() { close(b.appendCalled) }) + select { + case <-b.release: + return ctx.Err() // return non-nil so the writer sees a failed append + case <-ctx.Done(): + return ctx.Err() + } +} + +func (b *blockingStorage) Close() error { return nil } + +// Compile-time interface guards. +// mockStorage must implement sessionRemover (so RemoveSession paths are exercised). +// blockingStorage must NOT implement sessionRemover (so the no-op path is exercised). +var _ sessionRemover = (*mockStorage)(nil) + +// waitForN polls until at least n appends are recorded or the timeout elapses. +func waitForN(t *testing.T, b *mockStorage, n int, timeout time.Duration) []appendCall { + t.Helper() + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + if calls := b.recorded(); len(calls) >= n { + return calls + } + time.Sleep(time.Millisecond) + } + t.Fatalf("timed out waiting for %d appends after %v, got %d; calls: %v", n, timeout, len(b.recorded()), b.recorded()) + return nil +} + +func newWriterTest(t *testing.T, ringCap int) (*CaptureSession, *EventsStorageWriter, *mockStorage) { + t.Helper() + session, err := NewCaptureSession(CaptureSessionConfig{ + LogDir: t.TempDir(), + RingCapacity: ringCap, + }) + require.NoError(t, err) + backend := &mockStorage{} + writer := NewEventsStorageWriter(session, backend) + return session, writer, backend +} + +func startWriter(writer *EventsStorageWriter) (context.CancelFunc, <-chan struct{}) { + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + go func() { + defer close(done) + writer.Run(ctx) + }() + return cancel, done +} + +func 
TestEventsStorageWriter_NormalAppend(t *testing.T) { + session, writer, backend := newWriterTest(t, 64) + cancel, done := startWriter(writer) + defer func() { cancel(); <-done }() + + session.Start("session-abc", CaptureConfig{}) + session.Publish(Event{Type: "test.event", Category: CategoryConsole}) + session.Publish(Event{Type: "test.event2", Category: CategoryNetwork}) + + calls := waitForN(t, backend, 2, 200*time.Millisecond) + for _, c := range calls { + assert.Equal(t, "session-abc", c.streamName) + var env Envelope + require.NoError(t, json.Unmarshal(c.data, &env)) + assert.Equal(t, "session-abc", env.CaptureSessionID) + } + assert.Equal(t, "test.event", unmarshalEnv(t, calls[0].data).Event.Type) + assert.Equal(t, "test.event2", unmarshalEnv(t, calls[1].data).Event.Type) +} + +func TestEventsStorageWriter_ContextCancel(t *testing.T) { + _, writer, _ := newWriterTest(t, 16) + cancel, done := startWriter(writer) + + cancel() + select { + case <-done: + case <-time.After(500 * time.Millisecond): + t.Fatal("Run did not return after context cancellation") + } +} + +func TestEventsStorageWriter_AppendError(t *testing.T) { + session, writer, backend := newWriterTest(t, 64) + backend.err = errors.New("s2 unavailable") + cancel, done := startWriter(writer) + defer func() { cancel(); <-done }() + + session.Start("session-err", CaptureConfig{}) + session.Publish(Event{Type: "test.event", Category: CategoryConsole}) + + // Wait for the error event to appear in the ring (published via PublishUnfiltered). + // We can detect it by checking the ring: stop the session and look at what was published. + // Use a small reader to observe the ring directly. 
+	reader := session.NewReader(0) +	ctx, timeoutCancel := context.WithTimeout(context.Background(), 500*time.Millisecond) +	defer timeoutCancel() + +	var found bool +	for !found { +		res, err := reader.Read(ctx) +		require.NoError(t, err) +		if res.Dropped > 0 { +			continue +		} +		if res.Envelope.Event.Type == EventsStorageError { +			found = true +		} +	} +	assert.True(t, found, "expected system_durable_error event in ring") +	// The storage should have received no successful appends. +	assert.Empty(t, backend.recorded()) +} + +func TestEventsStorageWriter_EventsStorageErrorSkipped(t *testing.T) { +	session, writer, backend := newWriterTest(t, 64) +	cancel, done := startWriter(writer) +	defer func() { cancel(); <-done }() + +	session.Start("session-skip", CaptureConfig{}) +	// Inject a storage_error (EventsStorageError) event directly. The writer must not re-append it. +	session.PublishUnfiltered(Event{ +		Type:     EventsStorageError, +		Category: CategorySystem, +		Source:   Source{Kind: KindLocalProcess}, +	}) +	// Also publish a normal event so we have a signal the writer processed the above.
+ session.Publish(Event{Type: "after.error", Category: CategoryConsole}) + + calls := waitForN(t, backend, 1, 200*time.Millisecond) + for _, c := range calls { + env := unmarshalEnv(t, c.data) + assert.NotEqual(t, EventsStorageError, env.Event.Type, + "system_durable_error must not be forwarded to storage") + } +} + +func TestEventsStorageWriter_SessionEndedForwarded(t *testing.T) { + session, writer, backend := newWriterTest(t, 64) + cancel, done := startWriter(writer) + + session.Start("session-x", CaptureConfig{}) + session.Stop() // emits session_ended before clearing the session ID + + calls := waitForN(t, backend, 1, 500*time.Millisecond) + cancel() + <-done + + require.NotEmpty(t, calls, "session_ended event should be forwarded to storage") + for _, c := range calls { + env := unmarshalEnv(t, c.data) + assert.Equal(t, "session-x", env.CaptureSessionID) + } +} + +func TestEventsStorageWriter_RingOverflow(t *testing.T) { + // Publish twice the ring capacity before starting the writer so the writer + // encounters result.Dropped > 0 on its first Read. After Fix 4 the writer + // only logs a warning and continues — no PublishUnfiltered feedback loop. + // + // Verifiable invariants: + // (a) Run exits cleanly after context cancellation (no goroutine leak). + // (b) backend received at most ringCap appends — the events that survived + // the overflow and were available for the writer to drain. 
+ const ringCap = 64 + session, writer, backend := newWriterTest(t, ringCap) + + session.Start("session-overflow", CaptureConfig{}) + const published = ringCap * 2 + for i := 0; i < published; i++ { + session.Publish(Event{Type: "ev", Category: CategoryConsole}) + } + + cancel, done := startWriter(writer) + cancel() + + select { + case <-done: + case <-time.After(500 * time.Millisecond): + t.Fatal("Run did not return after context cancellation") + } + + assert.LessOrEqual(t, len(backend.recorded()), ringCap, + "writer should not see more events than ring capacity") +} + +func TestEventsStorageWriter_ShutdownDuringAppend(t *testing.T) { + // Verify that cancelling the writer context while Append is blocked causes + // Run to return cleanly without panicking. + session, err := NewCaptureSession(CaptureSessionConfig{ + LogDir: t.TempDir(), + RingCapacity: 16, + }) + require.NoError(t, err) + + bs := newBlockingStorage() + writer := NewEventsStorageWriter(session, bs) + + session.Start("session-shutdown", CaptureConfig{}) + session.Publish(Event{Type: "test.event", Category: CategoryConsole}) + + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + go func() { + defer close(done) + writer.Run(ctx) + }() + + // Wait until Append has been entered — the writer is now blocked inside bs.Append. + select { + case <-bs.appendCalled: + case <-time.After(500 * time.Millisecond): + t.Fatal("timed out waiting for Append to be called") + } + + // Cancel the context while Append is still blocking. + cancel() + + // Unblock Append so it can return ctx.Err() and the writer can exit. + close(bs.release) + + // Writer must exit cleanly. 
+ select { + case <-done: + case <-time.After(500 * time.Millisecond): + t.Fatal("Run did not return after context cancellation during Append") + } +} + +func TestEventsStorageWriter_RemoveSession(t *testing.T) { + t.Run("storage implements sessionRemover", func(t *testing.T) { + session, err := NewCaptureSession(CaptureSessionConfig{ + LogDir: t.TempDir(), + RingCapacity: 16, + }) + require.NoError(t, err) + + backend := &mockStorage{} // mockStorage implements sessionRemover via Remove() + writer := NewEventsStorageWriter(session, backend) + + writer.RemoveSession("test-session-id") + + removed := backend.removedSessions() + require.Len(t, removed, 1, "Remove should have been called once") + assert.Equal(t, "test-session-id", removed[0]) + }) + + t.Run("storage does not implement sessionRemover — no panic", func(t *testing.T) { + session, err := NewCaptureSession(CaptureSessionConfig{ + LogDir: t.TempDir(), + RingCapacity: 16, + }) + require.NoError(t, err) + + // blockingStorage satisfies EventsStorage but intentionally does NOT + // implement sessionRemover (no Remove method). RemoveSession must be a no-op. + writer := NewEventsStorageWriter(session, newBlockingStorage()) + + assert.NotPanics(t, func() { + writer.RemoveSession("test-session-id") + }) + }) +} + +func unmarshalEnv(t *testing.T, data []byte) Envelope { + t.Helper() + var env Envelope + require.NoError(t, json.Unmarshal(data, &env)) + return env +} diff --git a/server/lib/events/filewriter.go b/server/lib/events/filewriter.go new file mode 100644 index 00000000..d57002f1 --- /dev/null +++ b/server/lib/events/filewriter.go @@ -0,0 +1,70 @@ +package events + +import ( + "fmt" + "os" + "path/filepath" + "sync" +) + +// fileWriter is a JSONL appender keyed by filename. It opens each file lazily +// on first write (O_APPEND|O_CREATE|O_WRONLY) and serialises all concurrent +// writes with a single mutex. 
+type fileWriter struct { + mu sync.Mutex + files map[string]*os.File + dir string +} + +// newFileWriter returns a fileWriter that writes to dir, creating it if needed. +func newFileWriter(dir string) (*fileWriter, error) { + if err := os.MkdirAll(dir, 0o755); err != nil { + return nil, fmt.Errorf("filewriter: create dir %s: %w", dir, err) + } + return &fileWriter{dir: dir, files: make(map[string]*os.File)}, nil +} + +// Write appends data as a single JSONL line to the named file under the +// writer's directory. +func (fw *fileWriter) Write(filename string, data []byte) error { + if filename == "" { + return fmt.Errorf("filewriter: empty filename") + } + + fw.mu.Lock() + defer fw.mu.Unlock() + + f, ok := fw.files[filename] + if !ok { + path := filepath.Join(fw.dir, filename) + var err error + f, err = os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + return fmt.Errorf("filewriter: open %s: %w", path, err) + } + fw.files[filename] = f + } + + if _, err := f.Write(data); err != nil { + return fmt.Errorf("filewriter: write: %w", err) + } + if _, err := f.Write([]byte{'\n'}); err != nil { + return fmt.Errorf("filewriter: write newline: %w", err) + } + + return nil +} + +// Close closes all open log file descriptors +func (fw *fileWriter) Close() error { + fw.mu.Lock() + defer fw.mu.Unlock() + + var firstErr error + for _, f := range fw.files { + if err := f.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} diff --git a/server/lib/events/ringbuffer.go b/server/lib/events/ringbuffer.go index e9733309..25962827 100644 --- a/server/lib/events/ringbuffer.go +++ b/server/lib/events/ringbuffer.go @@ -12,8 +12,8 @@ type ringBuffer struct { mu sync.RWMutex buf []Envelope cap uint64 - latestSeq uint64 // highest envelope.Seq published - readerWake chan struct{} // closed-and-replaced on each Publish to wake blocked readers + latestSeq uint64 // highest envelope.Seq published + readerWake chan struct{} // 
closed-and-replaced on each Publish to wake blocked readers } func newRingBuffer(capacity int) (*ringBuffer, error) { @@ -46,7 +46,9 @@ func (rb *ringBuffer) reset() { func (rb *ringBuffer) publish(env Envelope) { rb.mu.Lock() rb.buf[env.Seq%rb.cap] = env - rb.latestSeq = env.Seq + if env.Seq > rb.latestSeq { + rb.latestSeq = env.Seq + } old := rb.readerWake rb.readerWake = make(chan struct{}) rb.mu.Unlock() diff --git a/server/lib/events/s2storage.go b/server/lib/events/s2storage.go new file mode 100644 index 00000000..b0a9c418 --- /dev/null +++ b/server/lib/events/s2storage.go @@ -0,0 +1,134 @@ +package events + +import ( + "context" + "fmt" + "sync" + "time" + + s2 "github.com/s2-streamstore/s2-sdk-go/s2" +) + +// s2Producer bundles a Producer with its supporting session. +type s2Producer struct { + cancel context.CancelFunc + ctx context.Context + producer *s2.Producer + session *s2.AppendSession +} + +func (p *s2Producer) close() { + _ = p.producer.Close() + _ = p.session.Close() + p.cancel() +} + +// S2Storage is an EventsStorage backed by S2 append-only streams. One +// producer is lazily created per capture session and evicted on session end. +type S2Storage struct { + basin *s2.BasinClient + lingerMs int + maxRecs int + mu sync.Mutex + producers map[string]*s2Producer +} + +// NewS2Storage creates an S2Storage for the given basin. Connectivity errors +// are surfaced lazily on the first Append call, so this constructor succeeds +// even with append-only tokens that lack streams:list permission. +func NewS2Storage(basinName, token string, lingerMs, maxRecs int) (*S2Storage, error) { + client := s2.New(token, nil) + basin := client.Basin(basinName) + + return &S2Storage{ + basin: basin, + lingerMs: lingerMs, + maxRecs: maxRecs, + producers: make(map[string]*s2Producer), + }, nil +} + +// getOrCreate returns the existing producer for streamName or lazily creates +// one. Must be called with s.mu held. 
+func (s *S2Storage) getOrCreate(streamName string) (*s2Producer, error) { + if p, ok := s.producers[streamName]; ok { + return p, nil + } + + ctx, cancel := context.WithCancel(context.Background()) + stream := s.basin.Stream(s2.StreamName(streamName)) + + session, err := stream.AppendSession(ctx, nil) + if err != nil { + cancel() + return nil, fmt.Errorf("s2: open append session for %q: %w", streamName, err) + } + + batcher := s2.NewBatcher(ctx, &s2.BatchingOptions{ + Linger: time.Duration(s.lingerMs) * time.Millisecond, + MaxRecords: s.maxRecs, + }) + + producer := s2.NewProducer(ctx, batcher, session) + p := &s2Producer{ + cancel: cancel, + ctx: ctx, + producer: producer, + session: session, + } + s.producers[streamName] = p + return p, nil +} + +// Append submits data to the named stream and waits for the S2 ack before +// returning. The batcher coalesces records before flushing; Append blocks +// until the batch is confirmed durable so the caller can rely on the nil +// return value as a true durability signal. +func (s *S2Storage) Append(ctx context.Context, streamName string, data []byte) error { + s.mu.Lock() + p, err := s.getOrCreate(streamName) + s.mu.Unlock() + if err != nil { + return err + } + + fut, err := p.producer.Submit(s2.AppendRecord{Body: data}) + if err != nil { + return fmt.Errorf("s2: submit to %q: %w", streamName, err) + } + + pendingAck, err := fut.Wait(ctx) + if err != nil { + return fmt.Errorf("s2: ack wait for %q: %w", streamName, err) + } + if _, err := pendingAck.Ack(ctx); err != nil { + return fmt.Errorf("s2: ack for %q: %w", streamName, err) + } + return nil +} + +// Remove drains and closes the producer for streamName, preventing unbounded +// producer accumulation on long-running servers that cycle through sessions. 
+func (s *S2Storage) Remove(streamName string) { + s.mu.Lock() + p, ok := s.producers[streamName] + if ok { + delete(s.producers, streamName) + } + s.mu.Unlock() + if ok { + p.close() + } +} + +// Close drains all in-flight producers and releases resources. +func (s *S2Storage) Close() error { + s.mu.Lock() + producers := s.producers + s.producers = make(map[string]*s2Producer) + s.mu.Unlock() + for _, p := range producers { + p.close() + } + return nil +} diff --git a/server/lib/oapi/oapi.go b/server/lib/oapi/oapi.go index 28ae96f3..7f220074 100644 --- a/server/lib/oapi/oapi.go +++ b/server/lib/oapi/oapi.go @@ -459,7 +459,7 @@ type CaptureSession struct { CreatedAt time.Time `json:"created_at"` Id string `json:"id"` - // Seq Process-monotonic sequence number of the last published event. Does not reset between sessions. + // Seq Monotonically increasing sequence number (last published). Resets to 0 at the start of each capture session. Seq int64 `json:"seq"` Status CaptureSessionStatus `json:"status"` } @@ -14840,184 +14840,184 @@ func (sh *strictHandler) StopRecording(w http.ResponseWriter, r *http.Request) { // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+x9+XMbN9bgv4LqnSpbOyRFX5mNp74fFFtOtLFjlWVvvknoZcDuRxKfuoEOgKZEuzx/", - "+xYegD7RvCT5yH5VqRmZ3Y3jXXh458coFlkuOHCtoqcfIwkqF1wB/uMHmryBPwtQ+lRKIc1PseAauDZ/", - "0jxPWUw1E/z4v5Tg5jcVLyGj5q+/SZhHT6P/cVyNf2yfqmM72qdPnwZRAiqWLDeDRE/NhMTNGH0aRM8E", - "n6cs/lyz++nM1Gdcg+Q0/UxT++nIBcgVSOJeHES/CP1CFDz5TOv4RWiC80XmmXvdkoKOl89Elhca5Els", - "XveIMitJEmZ+oum5FDlIzQwBzWmqoD3DCZmZoYiYk9gNRyiOp4gWBK4hLjQQZQbnmtE0XY+iQZTXxv0Y", - "uQ/Mn83RX8sEJCQkZUqbKbojj8gp/sEEJ0qLXBHBiV4CmTOpNAEDGTMh05CpbXBsAsTgK2P8zH75YBDp", - "dQ7R04hKSdcIUAl/FkxCEj39vdzD+/I9MfsvsNT3jOa6kGAIki32BLD7lsxZqkEyviC5hDlI4DGoLihj", - "qmEhpPtXc6jTFXBNqjcMGGM7/Ij8ugRORMa0hoQISSDL9XpAaJrWv6AS/CfJaMLrgAVeZAYQseBKpBAN", - "Ig76SshLs0a6MD8wwxYWUNEgStkKVgyuokFkhoyXNBpEaq00ZDUoKm02baDYAX8fnC9AKWb5Zy9Kdhsj", - 
"yn5PJChRyBgCUC4xuZGcGmj/NIhiCVRDMqXIZXMhM/NXlFANQ80yA6LOrlli3u38rODPLoLPpYhBqWEm", - "uNCCs9jxXQyEF9kMpOEhwxwpVZrkxSxlagkJAUMYI/JcgCJcaLNx0GQG+gqAe3AgsZVrZlx/9zhCBmGZ", - "Qfy4XLvB8gJQ3ClNddGgDllwbrZgnok8hySA6hZnsSQqRxp40FsINEAa5LyUxZevRKFgV/HWRPSs0NpS", - "UhPSOCSxTw0becomV0wvo0G53RTmOhpEki2WGqGVJMgaMxpfWnBeUZkEyT02S5/an9vTv13ngCLXvENK", - "jvKzJuLK/LPIIzdMcIKlSJPpJaxVaHsJmzOQxDw2+zPvkqRA+WMIyI5a4/4t3DqIeJFN8Ss33ZwWqUax", - "2jqyKkJlmZVREnKgujFvl9Suu7v4TxILIRPGqQZP+RZiuVDMwaw70ro70r8OGalFxteRGbqHSPOZoDJ5", - "VlMGdqdRDde6u+RnhZQo7v3gxLxHvL6xjelw0OBim2fkvjJWMb5Ioa0r1FUFqkhOpT3urXIxIm+XQP4w", - "S/mDzBmkCVGQQqwVuVqyeDnh1Sg5SCOjBoTyxKJJSKsEJ4Z27dcGCJQZPWIJfgU5lTQDDVKNJvz0msY6", - "XRPBy+f2y8ysxzOBWRDJCmVEJcmlWLHEn4qt4wJZOTMyY+uZ0RFYRqmTdLHb588lXbS/zsQKdvv6lVhB", - "++tcglJGTGz7+Ny8+DOsa9+qWIo03fbhBb5V/wz0NC6kshryxk9BP8MX61+nAPnWD81LlZrXI2U9jkvN", - "s0Zho5q8reO3AW878hSZqQ7KEjQN3DZ27jfSpwlNPdtv2qY5J97CtS7B0+ZyM3KQy/FYfc4kxFrI9WGH", - "ZyaSAFRf5/ZzkvjRiXmR3BexpimxuxwQGC1G5B9PnhyNyHN7WOBZ8I8nT1Ado9rcsKKn0f/9fTz8x/uP", - "jwaPP/0tpD/lVC+7iziZKZEaaVMtwryIGjFuvTXJ8eh/bhWZOFMImM8hBQ3nVC8Pg+OWLfiFJzjN7S/8", - "DcR49i0OW71VYFv348RcBlHDcKep9JPUdkJO0nxJeZGBZLG5kyzX+RJ4G/90+OFk+Nt4+P3w/d//Ftxs", - "d2NM5Sld73gha+5nCajM9R64iR2b2PcI4yRn15CqoK4hYS5BLaeSatg+pHubmLfNwD99IPczujbHDy/S", - "lLA5qu8JaIg1naVwFJz0iiUhgmrPhq9tXH8QtO0T6G4UbiM2e5TtUsm2WndIgCaQ0nVDDx23VZXn5hWz", - "+4ylKVMQC56o8k7kFmIUbdQ0lKZSO+o18p/QVDgtwXDXaOtNKSkkWn6mWUAdf0vlAjTRwghI/2ZnbXMh", - "cULDWhIshMxaMoPUK3O9V5kQevkfWhYwIq8zpvEbWmiRUc1io3GbPcyoggTtKDghypcU+MLtg17bfTwY", - "j8fj2r6eBDd2k1uG2cJel4ywpGxbkX6/HpD1+7pKn1MmVYk7vZSiWCyNcpnaRSwYX4zIK6PqOd2RUE1S", - "MNfohyQXjGvVsDK1l1wDSEavnUnpYd2+9LC7m40PLS4bNGzw2ibjdwrIssgoH6bsEsgP8MEAPC7kCipq", - "Rgxf0bXdCGFcaaCJAVXKOFBpr7e5SJHwnK0IZyNKQ66mOcipggVSmmUHyKfIZNPMGo3YggsJyaiSIjMh", - "UqDcmglqrze29GRPvpRg1rgCu64OBs/sKrrcsIMlo7XP5i123H+NLZeEtGXXlYMkHl6MV2Kif4HklV0e", - "edBY64Ot187ew700QbeUNlCKLiDAbq2B/YvBsVclhMImNmtx6rNerrfYLtcN5fuu7I0J1TSwBzljWlK5", - 
"tnsgOV2ngiYjJBI0F2611pvvLuyrRobJgpt9BdSkC9BktkbyUNaRgHSBopFq6u6kV1SRchAjxObMGksU", - "+wAkZRnTYbbTAZn8jrNra3nRNMstn8VSOD5r6uJ4vXZKg/nErs7ZkIPWwi5fhW9eFtt492KlqjjabrTo", - "u87UQb6fxeJcihVwyuPS7tNDuuBJvkNHl4yHcIurIebhiEyiS5Ac0inN2SQiTKEJVq4gwRPaIn+4AA4S", - "kYyTKWtqKPkgyaNBbRzz6FoDV476RUzTaW4txEF6z0BTT/NhCPWfwB7UIY3QOmjgPKXrK1TJDvM0ua/q", - "pqJqSBKbu2bXTJ/0GE8v8N/H/5uuqP0TB2j4ld6i8SgBsqSK0NiAzTy/Z2TLvQG5h5a0a33PmpruzaS4", - "UiDvkRWVzOjezo6U5Sk8JZOIXlFmpMUCRguhxf17S61z9fT4GOw7o1hk947+SSToQnJSe10zncL9o39O", - "ogkP3XAN64lCTxXEjUPpu86h9Mqqbm6PaM8wbMs4afO3IcHvxg1171FD2dt+8CDw3+9GDwoXvCc5mI8M", - "U7aooNpdl0f9kdeSNuZn4s4zc4mq4DOnLIUkBHVZLrprNFrRtACHSUi8CMfF3WdzQvn6yOoUCcjAei40", - "5QmVifVekrkUmZU8tY11HUI6EYXeMJgodF7oXUcrkOC7w/26BL0EWW3I8UtC3CfzIk3XgeOmRR1+ghCB", - "vGApnPG56ConTE0TJjevCpVZpgitbDvh4y8TyRS9bp3hXhq9PsPbifXHI580TrSNPruw4ctsy5q6Zkwr", - "ch9Gi9GATKJEXl3LoflvEhEhySQayquhHJr/JtHRKDQDp6F1/0AVEPPIn1VzM6WQQUjsbCLzF9gukbAP", - "MJ2tdcjXfGF0D8YJPh6RMZ5jfhnMeq63CBDco1tdY7KBp4MaDh3Q+8jpAtW8HrXUvGD1QBIvKV/0nvG7", - "kB+dzyE2/LAzHR6Ky3KqQ5G6H5Vs09TqOvmzN6cnb0+jQfTrmzP8/+enL0/xjzenv5y8Ot3u9MWng/7b", - "y0umNOItpKFLujZ760KMccvAhqWBa0+IOwWHlFIpYHd4KRa9V55ULHCudSV6a5E+XSKrXcBaUkksykPK", - "aB6jPmUAVffAyWTOevT+lysyV4dciqSIWwr7BvHWcw2sTx1CGBrwzp239I0LS+tK+F3duN5Jcrj7tm+E", - "nd22HW/ZfpbOW7T4ofvohra+hCltrjkNne/JXVv4zJr3svDd3OzlBHNl4zJ/Uq5bUAzL6m3kWZkQPYUR", - "LQ4i011H2otcD/dBJaD0dJsvDZQ2i7fudKs0bHNFDSIl420DW7vKzmO2VU0/waC2ixCEXl/W5dIed5Ef", - "zcWcxeT1z8QH3HblurjcSrVnPDHHAiivTI+2K9LiMriXc6rjpXNzHYbxPj/X837/VikoHj4e7+/tet7r", - "5RqRs7m3Kw1IocBGbizZYglKE7qiLDVXbvuJl4oSkHzcIetUk+/Gg0fjwcMngwfj9+ElIminLElhO77m", - "zgouYW5kB8YqodUNRXDKVkBWDK6MElI6OI8l4DaNahhrtoKwpJGAPqVpvJQiY2btH/tnx1fJM/cqoXMN", - "srZ/r9ZqQYCrQgJhmtCE5taOx+EKbYWN2z/SBMJyCTSZF+kAZyt/SXvIs9e9+LzXrViSzaOH492cjO1Y", - "k8NO3i0OQH/q+mPL0BSeY+j1a53FdRI16B4P7LtUAtE0z61+tdnHsOEgLYMmsm0n6iWsCQaalLGfo70O", - "2PD8L53rzIyu1tlMpDg5TjQipzReEjMFUUtRpAmZAaG1d4kq8lxIbW0h14nQQqQTfl8BkP988AD3ss5I", - 
"AnPGEYnqaESc7UwRxuO0SIBMojdoUZlE5tZ8sWRzbf98pmVq/zpJ3U8vnkyi0cS6z6yHhSnr/4txgTRV", - "wqwyFtnMHVnKxZzY8f6u/WUc/4Wz/f0tneGwewC0Ja0RukF5bQ2zp9cQ35p5lJrtZeiPW3MjR7goVDD+", - "Xi6abrff33eTKexIVC6KDNruzq1URdVUCtF0moW3UTh3mIUHuviJ+ZTkkq1YCgvoETtUTQsFgdt5e0iq", - "LDmYt81QvEjx9PAyvhuJa/ceuPwioPHkEZKoJaRpCXJzFhQ8eEeLrwJj/SrkpeHh6rJ6n9Yv60duRGd5", - "s5MwHtrAdp0L+GovK3+Js4+dFJNTvmJScLx4lKZvs1YFujyKHehr0Kgov2O+3s9i3Y/AfsO0RedWNryR", - "VZrWma5EWLmPLhNuvA9WSS59l8FR8JYB10xPw24Qt1ViXkFTbngEa6Sezr57HLZRffd4CNx8nhD7KpkV", - "83nQW+eN1LsOJgrdP9infuz9zKpw0v3Qd8EW5pBF6rU83KLeJsoUvt4QatHb0zevos3j1i1l7vWfz16+", - "jAbR2S9vo0H007vz7QYyN/cGIn6DquihpwmqsZScv/3XcEbjS0j6wRCLNECyv8AV0SAzZnYei7TIbArJ", - "JhfSIJLiattY5pU9gyBw1IFd6AaIXeT0qpEHl6av59HT37cFPneO7k+Dtl2LpqkwV7up1uvtp+CJe5tQ", - "kisoEjEsd3///O2/jtqC1Wr2eBCVMQ8rsCdSz3EZRtqZ0b8MpbYQZy809U2YO0IndGYPlHZmMq8dPk1X", - "HLzv4PUAeX5WMxjTmRFIlCgz2iZ+yEMhr68vSmSdPQ+LWvd8yoKhIBgCQJXhe0hqYRGhQ7a04xYFS8KC", - "mMoqE61rJ7bRH2W0iV+5+2wPU3Evq5WZYfvkQrpgE5sMZk/ZfqmUF9M8DuzvVGmWYRjFs/N3pEB7eg4y", - "Bq7pon4K2py5LcfoqT8+CZs3YLWk9my14NqmowyiDLI+Z1q1YgkKMU8yyIyOaFdf+tmiviS8Dec/Pq4f", - "SVWKnl1++CzqR2zCDswlfk41NZLsSjJrAG2RnvVjM54XAd9cQjXdSbFI6rNsjykqx32/dc830hfNclwA", - "sTLDdXdo3tDA+4ikijjEF4h7fRTtalJxW5FAK0fpPrrTxakPhiMScgnKSCjMV7YYdAEIQpKUzSFexyn4", - "QKYbYrN0rFXEYnYRVEEh7Kd72VxSx6NpWCEYNbWTaCgFqR2cKTLBDydRH8ua9fcGjdnH3pOFIIiXBb+s", - "L9jFg5RRJjsysc0JRvzfzA4xE8kajyaXZmwogXIPAO642/5zVqhtsaBevXbxmoOvNTzUp+u7HHRiF3Zu", - "+eLwGNHPEiV57hPCT/kKUpHv6wZ5i8kH9lNSaipaGJ2pFhzUSTzvj6XcCqIbpMKXC6SxFMp6FIxgQgOD", - "Yy0beDki7xRYS9RLqvQQZx6ePXf2/sK51Y0AdJzpBBJTNjfAmgz7c+e3X2BssruFSwh1NkcLZDhsas44", - "wnsXda9KxPJf9Sl7W+1mPaULmCozymrPG+kAOyun1WrdRwcuNlRyoL7OEMwvYgnA1VLoN7DYJRd6N//a", - "T9avVubFLZyxZ0MWWY/H5Vf0tOwz0I7RF3ase+bamQ9TmJtTTnK4UTzGHmMGXd4eCgMP2G0oO8RzJEtE", - "b0lobhJG8Khtpj3v641PNZ1eb3Zg/SQk+yA4JtXiXIRmouB6RGwYzgrc74pg9OyAcFjQxu8GD2ENxa5g", - "Sw7d/zErjneYPxFXPDB9kYcnv0nESZl4vbvzYhtXUG3rENSyw5tT7c8Uew+5cxhIJ2V+T6nFkgT4lrhg", - 
"G65S+QLdR1tjGdx7Pct+wVI4B5kxW47msPUvpCjysIERH7mQS0l+bFhp9o3tDeSyf/f48dF+qeviiof8", - "WWat+Ag9WH6973rWu0sc6NVSKLSBeNhat7X1kGLoQHJoWvmGuNx6DYY9k3JooaAepS8k2uUgNryflD6S", - "PZ0sdY8/Fl8I+Vjq+RCN4LjxVqasTx4EiFFhmjWsDruDldECPt3OFk3yUCEnaWpzxxRxwttbW1ymEXri", - "rYkTg2ivWsXB8N9c2EseU8Rc9YOFTw6pk/WpDzQv1K9Ux7daRKGscIEWISw2E072MDKNrWC76b4UhG48", - "Un6brncI5+oNTkMI3LAUw1zSDMLBV28qtd+/ZKh/nhthtgIpWQLKZyE6CBzV2eHheJsfIGgV95QasGfX", - "dHvLDLdUEAIX7Xn9jF9Y3u73PVfrqPtefQzuZuhsBEhGrzE3gX2AM/7qh/4VIA8ql1Hx6ocdMdLOz3+w", - "Y3DVhRb5TQlNyBjMONv55SzLIGFUQ7rGAo54zxaFJgtJY5gXKVHLQhsFcUTemht1hiGCaDZlHGNcpCxy", - "I5hWLAGBwAq7vPapRGI52CzoDsuQtMvz7H0JuFkRC6MiaykuQQUTz4Out3By/EFB2T5apFqHD0qvBWdT", - "MmfX5kg3OxlNeCP/WBZA7iujAFGF0dIYjn+c+BIkRyNygVH0VTTjhLvwM6LXuZkLzTqUE+ElUW2+BqTI", - "ffztP8YGLi5m/Gg04bViCFhhzUBtnUNiwH4lZDI0jJtYA62LZyp3zriWdGjeshOqCac8IZzqQhqhyDVI", - "+zg3Ko+yWal2bTb526xlA+omPJz5HSwZZ0gR4Yo1r6zBeikwZs5Wa+tJCxJToyTGsJkWz0EO4yWVNNaG", - "uda5IIwbTsBSm1TDP0nGlKaXvsyokNJmUiHMZjS+VDmNoSICMh6R1zxd23waUCEIkPuKpcB1um7AacKr", - "15A2jiyoSuE5Hj0IUr33Ce5aLu9dnlANt6HUvbAKmxakwDE9hlpVS0d3p4T9KpmGsljhYUJrM+U1PH8+", - "Jc9PeGjNQvMac3ZRzKWOnkY/Y5o8OcvoAhQ5OT+LBtEKpC0bG41HD0ZjvIHlwGnOoqfRo9F49MglpOFG", - "jn1g9vE8pQuvYsYBHfMVyAVgkDW+ackZrplC75jgoAYepa1BA6HdK0aJKnKQK6aETAZWYGCyeME1SxFy", - "5dvPYfVWiFSRSZQypYEzvphEmACWMg6G+sXMFRqYwVxIn7WMWpfLQUDGMDi0ClOCFzAdL/0sL3D/FhWg", - "9A8iWe9V2bqlOnhotvwqfksWhlqQDMHqsmh/n0TD4SUT6tLG/w6HCVNGxA4XeTGJ3h8dHrJrFxQmq+o9", - "I2ts1H5Vb/3heBywFOD6Lb4TLB1Qbs0hu51L/WkQPbYjhfi3nPG4Xd790yB6sst3zdroWCi8yDIq1+bI", - "tnRZLjGlBY+XDglm8W7N+FlFvblIWVxdvPq5olAgh750ZjUNYL0hyRQQHGpNKo239B3OaPl4ZKhqMOFb", - "2YXszy0Tvi+7PAOJJaI8FEhGOV3Y4Hdbn4MwPpdUaVnEKLuRismpL9dxAdrIBjWY8FyK6/UQawiZ+7gb", - "0e6jHN+TIV6dnj0/P/ZpfoIf4Vk6S0V8CcmEo2PKw3IrZ597NB7O3OGjIaQd7oL8EfnZJ1W4R5xmoCb8", - "vgvdd5rBMyEuGSgHx0l0hPDCsgzOtrUsR7C/jib8AoD4ohxIyVCtZLQQYpFCSdjH1uZUJh75311JFpu6", - "YAvtKxafFHr5egXyJ63zUwzTSzwMggvG+6J5Wb3LF5ImoMqv3KH6il4/E5xb7Umdgzw3dBI9ffRwEJ2L", - 
"vMjVSZqKK0heCPlOpgqtq92CI9H7T7cl1zytfLOirU12WO6+V8IVeSpoMiwr7Kgh5cnQv2vEnlABRecd", - "foZ1hYUkmZEg5RDkA8sJlfGSrQyHw7XGiuJ6CRkpuLmRHi9FBsdWhBxXUx9PivH4UWxYAf+CwYQr0EQa", - "GZfVZ7Bym/EDFI1Sck74Z1Q0LLxKwahOePLGwXiTTMqKVLOcSn08FzIb+lCNPp2jAmV/5lP1jlXBEY+Y", - "ERNrtqK6kcbcHD5c4OGFSA1O0X6vBclTGoMrzOLRtR/WW/aIk+FvdPhhPPx+NB2+//hg8PDJk7Cb4QPL", - "p3OWBpb4W0WQvu6hC+MpeG6Dwiv2KVd9H0ti+6ytjHI2B6XxiD6qu+dnjBtO3KbVl8tzlTJCt6yNClwN", - "u4dpcQ9CoV0lNVhSgGQQkHaWa0rmwLpeNPnScq8jgkps1oj8PlVGIKmjuhAst+ikobMLHM+8jheWeqc+", - "IY0T0arF2WkagzY9VyX+5PyMxDRNR+TEPcWT3/pDjTpTbyvjij0uRZr4WLPrOC2UIV6j/gyIEoQLItA8", - "j1GkpBQ2isSUW3tLCnQFWLtrW1+Zssa8BzxhZQK39d762vFYRWo04WjAtKln8yJFHSJeOq5KwIbCm3th", - "Fa2EUc62MoGZ7RLWtpi/A9eEe3NpTtdmFBfkRqQoeDLUkuXEqI48tsF4gJmaPGErlhQ0dcOEJG+gQ9AN", - "1MBNlocNvYgOVUZwyJ7SVF+S90pG2NA1qU7TLTZr9RHwzNZEXNVB4I7wFWhRcCCabFFn34DBs/UXxdAF", - "y4rUZt5Yrqu3WAkbRTs4suaqYyPq+9H0BmjyrGbaCkHrttDV7C4SapVWNglxU+I51eGbG0PXbNpaycuQ", - "7Y6Vrw+caBvsh2fTOHlHpB+2gB5K/mj1dGH62HmgxMJXI7B+tQZZ7xjYAV9l344wmsrwozvCULcjyM7I", - "uZX5azVkQnxmI6NWTLEZS5lel7flrwbjP7HEZbOLq3qhrCaamx1pwlofFulArQVj8LxAtaXzB6XDzWhu", - "1JenMtNKbT1cAzM9b5fTX7CVr1huFdMUqALUreq1H7fUeg9pPGXngjsizW5vngPlhhnoKzkucSlVCTKL", - "Jop4aFHMArQlmGnZMqtXSPwIulEu7i6Px3BdujDvYvKh3Wm5iduA4o+gG8W0neZhhYWfaRflo9nqKQzc", - "smzdHZF5t4nUjbRDBwWzsy9L6q98NbYGdvypWMYeVpJG7YKxRnutDXLUlbyq5sGQBJSZtdiFMvDR2smr", - "CNxa3Z4JD1XjGZEXKH/NwiQsgdt7c7fsz4AogAk3iwmX7iFUV2b0BdOjuQRIQF1qkY+EXBxfm//JpdDi", - "+PrBA/tHnlLGj+1gCcxHSyvPXXTTUnAhVT2IZZjCCqr9mhu1i12LHSgwgFM5E5rFgkiCHg9XS+qO2KHT", - "Fu1AbkCEIrV8TdqCPePrtiSkyx0IX5VJEv2i6i29hCqZ4q40xk5OyCeHo40nDsvoAo5zm8NUzbTdutk5", - "WKoFEBz0iyLU5y9SUiHIR8ZtQadr9RcWYjbbhaxcRki6NtrbsTC87bNUzG+6puPVJGlTW2zY+RoF0Zwa", - "2Eg3cf1nOEnFApNRNIsvFbnPhXapUNbEWaMgMoMlXTFD0nRNVlSu/0l0gVY6127LM7CP/5oJvaxtxbob", - "ffYL5so426VzdQ/qTT18+BJ6ehomzfvlGKgKVxMc2bgPtCLZwCdIXTK1E4V/+Dg3a8AYDl0X1V/IcGgD", - "yMbEehCsQm59CH+EJOSFTzq5I/ard388UDo68vpKbEh2MZWuYNFDtdGM99DmfNZvj3B0waN3hJdu68gb", - 
"GDlsQORXc2ph+2Q0avRjwXXBa0SwBEIlXFXLu1IeAlVcP7NBo9kqMXB8vXMWDN82sJFrchM0Px5/v/07", - "s66UxbcfF9CzHUMaVs4eu8DLqarazdvGlaEmHCJXoWhNPCiYVuTZ83OSCc60kIOaa9x6nFCfdR/Yci3E", - "lsJU5PH4sW37WL5Q1ZUNiXIt8laX/Ls0PTdnCuk+5a5sM3hE++PtCPxF6Bei4ElQ/GqRh2BtBl+ADuW+", - "WFjWr+AI5rJWfCvE9lDo/wj66wQ+1XAroC8NGV3I94QBWm6rOnGvbg/aodDrO5LTm6K8P7O83h3tzvR8", - "MxF9Q4JxwraPZsJXCU2lVmHGPNEkM2e5uZF6IjEqO4ZbWw1Mswx8my1DU98bmqINkkoxSsOTlu0ERtMU", - "pO32bpuZuFgdF8btP3fBTWU/WIqB34L3SONOwuddadn9qaXB2+6DLyOG5I3p8YuoDAjdMBHX9AVX9qb/", - "jnyG6TAqUCZpkYoZTWvVkpAmy2JSVSkbUta7wfsj47EEqiyBtsrf8ITMKTdfiQK9ezRNiciB+yI2cRVg", - "GrSb1UpF3ZX6G6hG9ZnFabcmUoCCbSEmGseQ+8jXssbR4eTcNLTZ8TaV0GoQW1VBLKjyvM6BWzfZqlG4", - "SMy9FaF00HdpD8u9Y5owFjlEHVXIjPzBkqfko4I/P00mPKGaPiUffT2ooQG7+X0y4X+MyEWTGkvjSKtm", - "k4FkIrCpuQQFukzIc/yl/kloqyYTrptyLIK/YqJQdcm+oimzUfJYt6msCWLLxZHn0qjpZim2uq61kC9o", - "rnyX3j9Y8ofNvHvq6z1KiIGtILHPmLJmUL2knDwgdOnyGLG6lFmoMss3rw48pK8AG+IyzHUrwe4KH5Nn", - "KcO3nC1fSxpfBkazbfM1xBrXOyIvsIdGjYdt3hEXLXjZuL5y2lL/9QgyKBA8XRMFNomp3nu0fZyVtQMV", - "BqMaEtEgFZbQ7RY+zMB1qsBKWS0B5ZoWYfdTnpCxTccNrtXbVHYlK4wEpK5Lo6WXLrXYwl7KpyGmZfsu", - "qutFvZjZjEEyZkfaSN+oAeFoUBM1bWPw+62iS8O1tkw9rHj6FmXXyz4JMMJSSmZjuLL/HF5cnA5dDNHw", - "bbAg3StIGHXpo3McFGsTOjq/35bCRw3Q+IJ9HVkdqFz4qX3+4tLdPFQRVy33wlCHo0cUjnN1bA5EDdOy", - "zD4ex0Uojg5fLAtE3FUwXXOWvU65B5vqWdh9fkVGN7tTpxBX4Pd4saaTHfDyHF+8a7zYWer9sg6O1ihR", - "Yrf42S9ctxHmgSuvt7Js480nEGxA2QsbxP91YwsLOf0FEIX4KHEkrngqaGK4a/qB5b1qobe0UPLb2bkt", - "O1LL+7CdRxBdqqyQWRVdqncPbeHfzf+cyd9Yvk01qPrJlZyDcV3miuKSUaxeaAcd+VP4zwJQHLhD2JWf", - "atJA/cDZWs7q/V73CgfXG7mCDdT9HstyJEhYdQB/i3TpkFUXIeagtoTmttxDr0onOxCspnL0QWlyX1NZ", - "S1rKfMgEar9mrKONdD3hGwib/Ka00ebnRrU0VwRsEI3FJeZUGU3WT+hM+xOeQP0n8zeVtijuB5Y7VzaN", - "lwxW2I0TdHsUZKNwvGKNqwyMvhW2Gnzs9pYqt4txPSPyE1ssQdp/lS1qicqsFc4nSZJZoYmml0BSwRcg", - "RxM+tJhQ+in5t8G2HYI8GBBX2sMgFhJy/9+PxuPhk/GYvPrhWB2ZD13pkuaHjwZkRlPKY6NKmS+PEQPk", - "/r8fPKl9axHX/PQfA49P/8mT8fB/NT7qLPPBAH8tv3g4Hj4uv+jBSI1apjhMQ62uSmf7v6ra2A5U0aD2", - 
"zC4Z/1Chiuf7SkXHvTcSi28db/9/Jhp1c9uleDTya+ormjix2BQNZa/qXWXC1nbgX8MJu59OWPXr7hIU", - "anm1ZuDfINn8CLrRztx3p+lgrySblCmNerrqpZuqq/phh8m3SSnVrgOkUl3fUuvq+QZpBXPYEfM2vbZL", - "G9iHu+/65jtH32HA+G1c3TBAuzJ3fIN4wh2gcRqrAmxiZgk0KS/dQV5+AzRxV+7dWBkn8yqhGf9r4WYR", - "awhbPw/QJVD0B7MbvzFiwVzK8irTsHEqsIJ+Wivp3Mvd3crad5ea11PC++CaM7WK1V8omuE2vMegu4xe", - "r8Z9jNW+1ZLlJYZt0Yl+VzJW//G1KdD9aysqCElsbZQU3IHgvIgSMuFkgM3wHPXUYvHqwa0VXyk1kp7q", - "KVXP/v4q5uYd18a5lGCulqBTaHepXz6IvEDdt0aJq09SLXXvIiUWCrdWnwSxVJYm+dZFXaBkydzpa3V2", - "8KbNjaWXKBpekN9sQ15bZYlpVdk2O0ldbfrqYw5r3bw11tiX9JN6NfNa/ajy4qzFbnxQLwl0g3o9m/jh", - "QML+jeUVWdcQ+JchclovA9Yi0Q69O+PKFoLf1zTaxxcTvp0xtptIGxbRCW+ZRPuLgDkb560xl7eqdDMW", - "ltA2vZRHyFZmGHw5pjV/5dOK7jaXY67aRaZgVQQ8OKvPbcCFZLnvnuPWhiW+sIC3IafhEN8ZVt8dbasX", - "3pIXHg93Ii5OHAz/4iKjTa49YuOqXaYrEI7qmmzcZRxqq4/H7rg9sKQwbjvYVfkdZ38WEGo+UXHllQPH", - "1nr+3bsmbpPcduXLL0RsdjN1I7UrX8YXNU0MoXX80YP8UzMrppuMUpFby0iBhgdnaXB2hxKPm2wP200N", - "gaayHlGYhPKtIwrzXxBWNgC9azxqI8kFjfaakmx81QvVF9p3d7i6xWg4bLWCV1vXOTQQid4Ig6vuwi6y", - "7KsOiPsGydS2Sm1D2RUCsmK3pFhzmd8cZISlqnYxeD6vKV+0Y/z8jH7v11XLEd89rrdxHKk3Pvnu8eO+", - "ZWK3tZ5lbWw3Z5lvlxP/hubYA60ZZaG0b/0YRbOUOTl9PGQVqpWKRSByv+WiEwvXpL1HDrcIwvWb3kS5", - "XtA4Eq+qPgebhoenmYs0FVfhyINGx91a47M2mjHMvKxlz+bErp0wRdzSNjBm/6myzzy1vYdnq16Yurjy", - "Lxff/VIsdjzKDGF9c+HcZtFY+t9MbRkkT+n6CpvVHrvirjsUHZYzpiWVa3Jefm278qMvdI55ElUvSUTN", - "tSZ0QRlX9iY+k+JKgSSy4FjYnAtOUhHTdCmUfvr9w4cPXZKgGXVJFaYCKRTV93K6gHsDcs+Ne8+WhL7n", - "hrxX9oLytUtcazOfeWxGrBaHBaR1IbltL1WvPRwynDgQVPt+Zk+Hu7jZdeb6QglbgXUYgAYrulXA/RqL", - "BFdbwGIcF7hySxEB4nQMYmUSckf/Rd81xzcT3VnVq3KGL5W4V19BHwVUNb6le+erKA4diywzUkKtebyU", - "gotC+VrQHsEqp1d8K4Yv8K07RTFO8WVx7JbQh2R8/IVLAnVxSzcg96P7A+/ml6xZVyuI6J8ZFmjafi+v", - "Rt6oEpaafFGw5CaXhYMQanbzVdbvff3zNxlfYEQJJrRioxGntvZTnATFPsBWmntjX/vLUJ3dz3/T3e0F", - "KGHLYkrO3/5rOLMNRrYTn9JUF/2mSC/y7Vufm/bu+ByzmwodYe7JNxml7BBAlN9eP+oTtoNOg2/9ZaQO", - "bucL6092CX360w9rbGhjzW/frMWtOvmIpbONdCgKvc0QVwFPFHqjRe4LyaObVA7weysLOmy3MXnoikLn", - 
"hUYrR8rmEK/jFP7bgXJ3DpQaVYtCtwxm0vfxP66csGHpajOHy77/d5qoXc6yveJyO93TffjlUrS/UImp", - "MrHbFypBE7aBBiRkxRIQNT9CDesuuaxXivnsszriN3rPSqeVm13WoidGBIshi8wcFc0ax4WvYO+8AuXn", - "fY4sFHphNxYdfjgZ/jYefj98//e/HSQaEWDHWf74xukEFUW6mMeGgCufDl8wjqVYhieh9ucsA6Vplhsh", - "h03sXWGhcmj78Yj8WFBJuQYbLzcD8ubFs0ePHn0/2uwBaSzlwsajHLQSF8ty6ELMUh6OH25ibGYkGUtT", - "wrgRbQsJSg1Ijl1eiJZra/vEpnayCe43oOV6eDI3D7pFA4vFwuaKYrMZ7IvKOLHdBFStJ6lcWyaoNlHG", - "sj0IxLJ9+oYTTm2BaoW8CBiiuYNESZk9PXrzB984xlY3LW5a5gNsOlD8bDbTsxNkHyh3ZDtayHKVt5Zg", - "R9O0PmwTbJ2+wIHQu7s+fJuTbK//2Mei33qhRt/boJJrI/Kap2tMMKhkXQ6SnD3HxqBY8X/BlMbepWUZ", - "0VEXyyLfhGSR3z2Oa3Mcrl416jF/qTL6vo5zCV/cyP8LAAD//9rwG2/j3gAA", + "H4sIAAAAAAAC/+x9+XMbN9bgv4LqnSpLOyRF+chsPPX9oNhyoo0dqyx7801CrwJ2P5L41A10ADQl2uX5", + "27fwAPSJ5iXJR/arSs3I7G4c78LDOz9GschywYFrFT39GElQueAK8B8/0OQN/FmA0qdSCml+igXXwLX5", + "k+Z5ymKqmeBH/6UEN7+peAEZNX/9TcIsehr9j6Nq/CP7VB3Z0T59+jSIElCxZLkZJHpqJiRuxujTIHom", + "+Cxl8eea3U9npj7jGiSn6Wea2k9HLkAuQRL34iD6RegXouDJZ1rHL0ITnC8yz9zrlhR0vHgmsrzQIE9i", + "87pHlFlJkjDzE03PpchBamYIaEZTBe0ZTsjUDEXEjMRuOEJxPEW0IHADcaGBKDM414ym6WoUDaK8Nu7H", + "yH1g/myO/lomICEhKVPaTNEdeURO8Q8mOFFa5IoITvQCyIxJpQkYyJgJmYZMbYJjEyAGXxnjZ/bL40Gk", + "VzlETyMqJV0hQCX8WTAJSfT093IP78v3xPS/wFLfM5rrQoIhSDbfEcDuWzJjqQbJ+JzkEmYggceguqCM", + "qYa5kO5fzaFOl8A1qd4wYIzt8CPy6wI4ERnTGhIiJIEs16sBoWla/4JK8J8kowmvAxZ4kRlAxIIrkUI0", + "iDjoayGvzBrp3PzADFtYQEWDKGVLWDK4jgaRGTJe0GgQqZXSkNWgqLTZtIFiB/x9cL4ApZjln50o2W2M", + "KPs9kaBEIWMIQLnE5FpyaqD90yCKJVANySVFLpsJmZm/ooRqGGqWGRB1ds0S827nZwV/dhH8SnChBWex", + "YTPCuJlPGZKx7BcD4UU2BUkOUqo0yYtpytQCksMReQMKNFLEmFCNHKQ0lch1QONFGzgGJuUGGNffPY6Q", + "W1hmqGBcbsSgfA4o+5SmumiQiiw4N/sxz0SeQxLAe4vNWBKVIw08Hiw4GvANsmHK4qtXolCwraxrYn1a", + "aG3Jqg32QgGxTw0EPZmTa6YX0aDcbgozHQ0iyeYLjdBKEuSTKY2vLDivqUyCtB+bpV/an9vTv13lgPLX", + "vENK9vKzJuLa/LPIIzdMcIKFSJPLK1ip0PYSNmMgiXls9mfeJUmBwsgQih21Jgo2sO4g4kV2iV+56Wa0", + "SDXK2Nb5ZclVzIhhD5xcQg6OQP28XVK76e7iP0kshEwYpxqhVQ5AcqGYg1l3pFV3pH/tM1KLjG8iM3QP", + 
"keZTQWXyrKYZbE+jGm50d8nPCilR9vvBiXmPeOVjE9PhoMHFNg/MXQWukU0ptBWHut5AFcmdFKJW0xiR", + "twsgf5il/EFmDNKEKEgh1opcL1i8mPBqlBykkVEDQnli0SSk1YgTQ7v2awMEyoxSsQC/gpxKmoEGqUYT", + "fnpDY52uiODlc/tlZtbjmcAsiGSF0mQKJJdiyRJ/RLbODmTlzMiMjQdIR2AZDU/S+XafP5d03v46E0vY", + "7utXYgntr3MJShkxsenjc/Piz7CqfatiKdJ004cX+Fb9M9CXcSGVVZfXfgr6Gb5Y/zoFyDd+aF6qdL4e", + "KetxXKqhNQob1eRtHb8NeNuRL5GZ6qAsQdPAbWPnfiN9atGlZ/t12zTnxFu40SV42lxuRg5yOR6rz5mE", + "WAu52u/wzEQSgOrr3H5OEj86MS+SAxFrmhK7ywGB0XxE/vHkyeGIPLeHBZ4F/3jyBHUzqs11K3oa/d/f", + "x8N/vP/4aPD4099CylRO9aK7iJOpEqmRNtUizIuoHuPWW5Mcjf7nRpGJM4WA+RxS0HBO9WI/OG7Ygl94", + "gtPc/cLfQIxn33y/1VtttnVZTszNEDUMd5pKP0ltJ+QkzReUFxlIFpsLymKVL4C38U+HH06Gv42H3w/f", + "//1vwc12N8ZUntLVlrez5n4WgMpc74Gb2LGJfY8wTnJ2A6kK6hoSZhLU4lJSDZuHdG8T87YZ+KcP5CCj", + "K3P88CJNCZsRLjRJQEOs6TSFw+Ck1ywJEVR7Nnxt7fqDoG2fQPejcBux2aNsl0q21bpDAjSBlK4aeui4", + "rao8N6+Y3WcsTZmCWPBEkSnoawDuF2IUbdQ08OLkqNfIf0JT4bQEw12jjTelpJBoBrrMAur4WyrnoIkW", + "RkD6NztrmwmJExrWkmAhZNaSGaRem7u+yoTQi//QsoAReZ0xjd/QQouMahYbjdvsYUoVJGhUwQlRvqTA", + "524f9Mbu43g8Ho9r+3oS3NhtbhlmCztdMsKSsm1S+v1mQFbv6yp9TplUJe70QopivjDKZWoXMWd8PiKv", + "jKrndEdzY07BXKkfklwwrlXD5NRecg0gGb1x9qWHdWPTw+5u1j60uGzQsMFrm4zfKSCLIqN8mLIrID/A", + "BwPwuJBLqKgZMXxNV3YjhHGlgSYGVCnjQKW93uYiRcJzhiOcjSgNubrMQV4qmCOlWXaA/BKZ7DKzFiQ2", + "50JCMqqkyFSIFCi3ZoLa640tPdmRLyWYNS7BrquDwTO7ii43bGHJaO2zeYsd919jyyUhbdl15SCJhxfj", + "lZjoXyB5ZZdHjhtrPd547ew93Et7dEtpA6XoHALs1hrYvxgce1lCKGxvA/NCrylztcGQuWoo3/dlfEyo", + "poE9yCnTksqV3QPJ6SoVNBkhkaDtcKPp3nx3YV81MkwW3OwroCZdgCbTlbXMWa8C0gWKRqqpu5NeU0XK", + "QYwQmzFnzWMfgKQsYzrMdjogk99xdmMtL5pmueWzWArHZ01dHK/XTmkwn9jVOYNy0FrY5avwzctiG+9e", + "rFQVR5uNFn3XmTrId7NYnEuxBE55XNp9ekgXPMl36OiK8RBucTXEPByRSXQFkkN6SXM2iQhTRAJiPMET", + "2iJ/OAcOEpGMkylraij5IMmjQW0c8+hGA1eO+kVM08tcihiUCtJ7Bpp6mg9DqP8E9qAOaYTWWwPnKV1d", + "o0q2n9vJfVU3FVVDktjcNbs2+6THeHqB/z7633RJ7Z84QMPJ9BaNRwmQBVWExgZs5vkDI1seDMgDtKTd", + "6AfW1PRgKsW1AvmALKlkRvd2dqQsT+EpmUT0mjIjLeYwmgstDh4stM7V06MjsO+MYpE9OPwnkaALyUnt", + 
"dc10CgeH/5xEEx664RrWE4W+VBA3DqXvOofSK6u6uT2iPcOwLeOkzd+GBL8bN9S9Rw1lb/PBg8B/vx09", + "KFzwjuRgPjJM2aKCanddHvVHXkvamJ+JO8/MJaqCz4yyFJIQ1GW56K7RaEnTAhwmIfEiHBd3wGaE8tWh", + "1SkSkIH1XGjKEyoT68okMykyK3lqG+t6h3QiCr1mMFHovNDbjlYgwXeH+3UBegGy2pDjl4S4T2ZFmq4C", + "x02LOvwEIQJ5wVI44zPRVU6YukyYXL8qVGaZIrSy7YSPv0wkl+iC6wz30uj1Gd5OrHMe+aRxoq114IUN", + "X2Zb1tQ1ZVqRAxjNRwMyiRJ5fSOH5r9JRIQkk2gor4dyaP6bRIej0Aychtb9A1VAzCN/Vs3MlEIGIbG1", + "icxfYLtEwj7A5XSlQ47nC6N7ME7w8YiM8Rzzy2DWjb1BgOAe3eoakw08HdRw6IDeR04XqOb1qKXmBasH", + "knhB+bz3jN+G/OhsBrHhh63pcF9cllPti9TdqGSTplbXyZ+9OT15exoNol/fnOH/Pz99eYp/vDn95eTV", + "6WanLz4d9N9eXjKlEW8hDV3SldlbF2KMWwY2LA1ce0LcKlKklEoBu8NLMe+98qRijnOtKtFbC/vpElnt", + "AtaSSmJeHlJG8xj1KQOougdOJnPWm+mrFZmrQy5FUsQthX2NeOu5BtanDiEMDXjnzlv6xsWodSX8tm5c", + "7yTZ333bN8LWbtuOt2w3S+cdWvzQfXRLW1/ClDbXnIbO9+S+LXxmzTtZ+G5v9nKCubJxmT8p1y0ohmX1", + "JvKsTIiewogWe5HptiPtRK77+6ASUPpyky8NlDaLt+50qzRsckUNIiXjTQNbu8rWY7ZVTT/BoLaLEIRe", + "X9Xl0g53kR/NxZzF5PXPxEffduW6uNpItWc8MccCKK9MjzYr0uIquJdzquOFc3Pth/E+P9fzfv9WKSge", + "Ph7v7u163uvlGpGzmbcrDUihwEZuLNh8AUoTuqQsNVdu+4mXihKQfNwh61ST78aDR+PBwyeD4/H78BIR", + "tJcsSWEzvmbOCi5hZmQHxiqh1Q1FcMqWQJYMro0SUjo4jyTgNo1qGGu2hLCkkYA+pct4IUXGzNo/9s9u", + "4/aeuVcJnWmQtf17tVYLAlwVEgjThCY0t3Y8DtdoK2zc/pEmEJYLoMmsSAc4W/lL2kOeve7F571uxZJs", + "Hj0cb+dkbMea7HfybnAA+lPXH1uGpvAcQ69f6yyuk6hB93hg36USiKZ5bvWr9T6GNQdpGTSRbTpRr2BF", + "MNCkjAAd7XTAhud/6VxnZnS1yqYixclxohE5pfGCmCmIWogiTcgUCK29S1SR50Jqawu5SYQWIp3wAwVA", + "/vP4GPeyykgCM8YRiepwRJztTBHG47RIgEyiN2hRmUTm1nyxYDNt/3ymZWr/OkndTy+eTKLRxLrPrIeF", + "Kev/i3GBNFXCrDIW2dQdWcrFnNjx/q79ZRz/hbP9/S2d4rA7ALQlrRG6QXltDbOnNxDfmXmUmu1l6I9b", + "cSNHuChUMBhfzptut9/fdzMr7EhUzosM2u7OjVRF1aUUouk0C2+jcO4wCw908RPzKcklW7IU5tAjdqi6", + "LBQEbuftIamy5GDeNkPxIsXTw8v4biSu3Xvg8ouAxpNHSKIWkKYlyM1ZUPDgHS2+Doz1q5BXhoery+oB", + "rV/WD92IzvJmJ2E8tIHNOhfw5U5W/hJnHzv5Jqd8yaTgePEoTd9mrQp0eRQ70NegUVF+x3y9m8W6H4H9", + "hmmLzo1seCurNK0zXYmwch9dJlx7H6wyXvoug6PgLQNumL4Mu0HcVol5BU254RGskfpy+t3jsI3qu8dD", + 
"4ObzhNhXybSYzYLeOm+k3nYwUej+wT71Y+9nVoWT7oa+CzY3hyxSr+XhFvU2Uabw9YZQi96evnkVrR+3", + "bilzr/989vJlNIjOfnkbDaKf3p1vNpC5udcQ8RtURfc9TVCNpeT87b+GUxpfQdIPhlikAZL9Ba6JBpkx", + "s/NYpEXG1abYhUEkxfWmscwrOwZB4KgDu9A1ELvI6XUjKS5NX8+ip79vCnzuHN2fBm27Fk1TYa52l1qv", + "Np+CJ+5tQkmuoEjEsNz9wfnbfx22BavV7PEgKmMelmBPpJ7jMoy0M6N/GUptIc5eaOqbMHeETujMDijt", + "zGRe23+arjh438HrHvL8rGYwplMjkChRZrR1/JCHQl5fX5TIOnseFrXu+SULhoJgCABVhu8hqYVFhA7Z", + "0o5bFCwJC2Iqq7S0rp3YRn+U0SZ+5e6zHUzFvaxWZobtkhjpgk1sMpg9ZfulUl5c5nFgf6dKswzDKJ6d", + "vyMF2tNzkDFwTef1U9Bmzm04Rk/98UnYrAGrBbVnqwXXJh1lEGWQ9TnTqhVLUIh5kkFmdES7+tLPFvUl", + "4a05//Fx/UiqUvTs8sNnUT9iE7ZnYvFzqqmRZNeSWQNoi/SsH5vxvAj45hKq6VaKRVKfZXNMUTnu+417", + "vpW+aJbjAoiVGa67Q/OGBt5HJFXEIb5A3OujaFuTituKBFo5SnfRnS5OfTAckZBLUEZCYfKyxaALQBCS", + "pGwG8SpOwQcy3RKbpWOtIhazi6AKCmE/3cvmkjoeTcMKwaiprURDKUjt4EyRCX44ifpY1qy/N2jMPvae", + "LARBvCj4VX3BLh6kjDLZkoltfjDi/3Z2iKlIVng0uZRjQwmUewBwx932n9NCbYoF9eq1i9ccfK3hoT53", + "3yWkE7uwc8sX+8eIfpYoyXOfHH7Kl5CKfFc3yFtMPrCfklJT0cLoTLXgoCoJfWMs5UYQ9ebFO3E2zHx+", + "fCchvlwgjaVQ1qNgBBMaGBxr2cDLEXmnwFqiXlKlhzjz8Oy5s/cXzq1uBKDjTCeQmLK5AdZk2J87v/kC", + "Y5PdLVxCqLM5WiDDYVMzxhHe26h7VSKW/6pP2dtoN+upY8BUmVFWe95IB9haOa1W6z7ac7GhkgP1dYZg", + "fhFLAK4WQr+B+Ta50Nv5136yfrUyL27ujD1rssh6PC6/oqdll4G2jL6wYz0w1858mMLMnHKSw63iMXYY", + "M+jy9lAYeMBuQtk+niNZInpDQnOTMIJHbTPteVdvfKrp5c16B9ZPQrIPgmNSLc5FaCYKrkfEhuEswf2u", + "CEbPDgiHOW38bvAQ1lDsCjbk0P0fs+J4i/kTcc0D0xd5ePLbRJyUidfbOy82cQXVtg5BLTu8OdXuTLHz", + "kFuHgXRS5neUWixJgG+IC7bhKpUv0H20MZbBvdez7BcshXOQGUPNSu23/rkURR42MOIjF3IpyY8NK82u", + "sb2BXPbvHj8+3C11XVzzkD/LrBUfoQfLr/ddz3q3iQO9XgiFNhAPW+u2th5SDB1I9k0rXxOXW6/BsGNS", + "Di0U1KP0hUS7HMSG95PSR7Kjk6Xu8cfiCyEfSz0fohEcN97IlPXJgwAxKkyzoNV+d7AyWsCn29miSR4q", + "5CRNbe6YIk54e2uLyzRCT7w1cWIQ7XWrUhj+mwt7yWOKmKt+sPDJPkWzPvWB5oX6ler4TosolBUu0CKE", + "xWbCyR5GprElbDbdl4LQjUfKb9PVFuFcvcFpCIFblmKYSZpBOPjqTaX2+5cM9c9yI8yWICVLQPksRAeB", + "wzo7PBxv8gMEreKeUgP27Jpu7wqQ3U1BCFy05/UzfmF5u9/3XK2j7nv1MbjrobMWIBm9wdwE9gHO+Ksf", + 
"+leAPKhcRsWrH7bESDs//3jL4KoLLfLbEpqQMZhxNvPLWZZBwqiGdIXVHPGeLQpN5pLGMCtSohaFNgri", + "iLw1N+oMQwTRbMo4xrhIWeRGMC1ZAgKBFXZ57VKJxHKwWdA9liFpl+fZ+RJwuyIWRkXWUlyBCiaeB11v", + "4eT4vYKyfbRItQ4flF4LzqZkxm7MkW52MprwRv6xLIAcKKMAUYXR0hiOf5T4EiSHI3KBUfRVNOOEu/Az", + "ole5mQvNOpQT4SVRbb4GpMgB/vYfYwMXFzN+OJrwWjEErLBmoLbKITFgvxYyGRrGTayB1sUzlTtnXEs6", + "NG/ZCdWEU54QTnUhjVDkGqR9nBuVR9msVLs2m/xt1rIGdRMezvwOlowzpIhwxZpX1mC9EBgzZ6u19aQF", + "iUujJMawnhbPQQ7jBZU01oa5VrkgjBtOwLqbVMM/ScaUple+5qiQ0mZSIcymNL5SOY2hIgIyHpHXPF3Z", + "fBpQIQiQA8VS4DpdNeA04dVrSBuHFlSl8ByPjoNU732C25bLe5cnVMNdKHUvrMKmBSlwTI+hQJXOe1LC", + "fpVMQ1mscD+htZ7yGp4/n5LnJ9y3ZqF5jTm7KOZSR0+jnzFNnpxldA6KnJyfRYNoCdLWkI3Go+PRGG9g", + "OXCas+hp9Gg0Hj1yCWm4kSMfmH00S+ncq5hxQMd8BXIOGGSNb1pyhhum0DsmOKiBR2lr0EBo95JRoooc", + "5JIpIZOBFRiYLF5wzVKEXPn2c1i+FSJVZBKlTGngjM8nESaApYyDoX4xdYUGpjAT0mcto9blchCQMQwO", + "rcKU4AVMxws/ywvcv0UFKP2DSFY7lbluqQ4emi2/it+ShaEWJEOwuiza3yfRcHjFhLqy8b/DYcKUEbHD", + "eV5MoveH+4fs2gWFyap6z8gaG7VfFV9/OB4HLAW4fovvBEsHlFtzyG7nUn8aRI/tSCH+LWc8atd6/zSI", + "nmzzXbNQOlYNL7KMypU5si1dlktMacHjhUOCWbxbM35WUW8uUhZXF69+rigUyKEvnVlNA1hvSDIFBIda", + "kUrjLX2HU1o+HhmqGkz4RnYhu3PLhO/KLs9AYokoDwWSUU7nNvjd1ucgjM8kVVoWMcpupGJy6st1XIA2", + "skENJjyX4mY1xBpC5j7uRrT7KMf3ZIhXp2fPz498mp/gh3iWTlMRX0Ey4eiY8rDcyNnnHo37M3f4aAhp", + "h9sgf0R+9kkV7hGnGagJP3Ch+04zeCbEFQPl4DiJDhFeWJbB2bYW5Qj219GEXwAQX5QDKRmqlYzmQsxT", + "KAn7yNqcysQj/7sryWJTF2zVfcXik0IvXi9B/qR1fopheomHQXDBeF80L6t3+VzSBFT5lTtUX9GbZ4Jz", + "qz2pc5Dnhk6ip48eDqJzkRe5OklTcQ3JCyHfyVShdbVbcCR6/+mu5JqnlW9WtLXJDmvf90q4Ik8FTYZl", + "hR01pDwZ+neN2BMqoOi8w8+wrrCQJDMSpByCfGA5oTJesKXhcLjRWFFcLyAjBTc30qOFyODIipCjauqj", + "STEeP4oNK+BfMJhwBZpII+Oy+gxWbjO+h6JRSs4J/4yKhoVXKRjVCU/eOBivk0lZkWqWU6mPZkJmQx+q", + "0adzVKDsz3yq3rEqOOIRM2JizZZUN9KYm8OHCzy8EKnBKdrvtSB5SmNwhVk8unbDessecTL8jQ4/jIff", + "jy6H7z8eDx4+eRJ2M3xg+eWMpYEl/lYRpK976MJ4Cp7boPCKfcpVH2BJbJ+1lVHOZqA0HtGHdff8lHHD", + "iZu0+nJ5rlJG6Ja1VoGrYXc/Le44FNpVUoMlBUgGAWlnuaZkDqzrRZMvLfc6IqjEZo3ID6gyAkkd1oVg", + 
"uUUnDZ1d4Gjqdbyw1Dv1CWmciFYtzk4HGbTpuSrxJ+dnJKZpOiIn7ime/NYfatSZeo8ZV+xxIdLEx5rd", + "xGmhDPEa9WdAlCBcEIHmeYwiJaWwUSSm3NpbUqBLwNpdm5rMlDXmPeAJKxO4rffW147HKlKjCUcDpk09", + "mxUp6hDxwnFVAjYU3twLq2gljHK2lQnMbFewssX8Hbgm3JtLc7oyo7ggNyJFwZOhliwnRnXksQ3Gw74e", + "ZpVLlhQ0dcOEJG+gXdAt1MB1loc1jYn2VUZwyJ7SVF+S90pGWNNCqU7TLTZr9RHwzNZEXNVB4J7wFWhR", + "sCeabFFn34DBs/UXxdAFy4rUZt5Yrqu3WAkbRTs4suaqIyPq+9H0BmjyrGbaCkHrrtDV7C4S6ptWNglx", + "U+I51eGbW0PXbNpaycuQ7Y6Vrw+caBvsh2fTOHlPpB+2gO5L/mj1dGH62HmgxMJXI7B+tQZZ7xjYAl9l", + "344wmsrwo3vCULcjyNbIuZP5azVkQnxmI6OWTLEpS5lelbflrwbjP7HEZbOL63qhrCaamx1pwlofFulA", + "rQVj8LxAtaXzB6XDzWhu1JenMtNKbT1cAzM9b5fTn7Olr1huFdMUqALUreq1HzfUeg9pPGXngnsizW5v", + "nj3lhhnoKzkucSlVCTKLJop4aFHMHLQlmMuyZVavkPgRdKNc3H0ej+G6dGHexeRDu9NyE3cBxR9BN4pp", + "O83DCgs/0zbKR7PVUxi4Zdm6eyLzbhOpW2mHDgpmZ1+W1F/5amwN7PhTsYw9rCSN2gZjjfZaa+SoK3lV", + "zYMhCSgza7ELZeCjtZNXEbi1uj0THqrGMyIvUP6ahUlYALf35m7ZnwFRABNuFhMu3UOorszoc6ZHMwmQ", + "gLrSIh8JOT+6Mf+TS6HF0c3xsf0jTynjR3awBGajhZXnLrppIbiQqh7EMkxhCdV+zY3axa7FDhQYwKmc", + "Cc1iQSRBj4erJXVP7NBpi7YnNyBCkVq+Jm3BnvF1WxLS5RaEr8okiX5R9ZZeQZVMcV8aYycn5JPD0doT", + "h2V0Dke5zWGqZtps3ewcLNUCCA76RRHq8xcpqRDkI+M2oNO1+gsLMZvtQpYuIyRdGe3tSBje9lkq5jdd", + "0/FqkrSpLTbsfI2CaE4NbKSbuP4znKRijskomsVXihxwoV0qlDVx1iiITGFBl8yQNF2RJZWrfxJdoJXO", + "tdvyDOzjv6ZCL2pbse5Gn/2CuTLOdulc3YN6Uw8fvoSenoZJ86AcA1XhaoJDG/eBViQb+ASpS6Z2ovAP", + "H+dmDRjDoeui+gsZDm0A2ZhYD4JVyK0P4Y+QhLzwSSf3xH717o97SkdHXl+JDckuptIVLHqoNprxDtqc", + "z/rtEY4uePSe8NJtHXkLI4cNiPxqTi1sn4xGjX4suC54jQiWQKiEq2p5X8pDoIrrZzZoNFslBo6vd86C", + "4dsGNnJNboPmx+PvN39n1pWy+O7jAnq2Y0jDytkjF3h5qare87ZxZagJh8hVKFoTDwqmFXn2/JxkgjMt", + "5KDmGrceJ9Rn3Qe2XAuxpTAVeTx+bNs+li9UdWVDolyLvNUy/z5Nz82ZQrpPuSvbDB7R/ngzAn8R+oUo", + "eBIUv1rkIVibweegQ7kvFpb1KziCuawV3wqx3Rf6P4L+OoFPNdwJ6EtDRhfyPWGAltuqTtzLu4N2KPT6", + "nuT0uijvzyyvt0e7Mz3fTkTfkmCcsO2jmfBVQlOpVZgxTzTJzFlubqSeSIzKjuHWVgPTLAPfZsvQ1PeG", + "pmiDpFKM0vCkZTuB0TQFabu922YmLlbHhXH7z11wU9kPlmLgt+A90riT8HlfWnZ/amnwtnv8ZcSQvDU9", + 
"fhGVAaEbJuKavuDK3vTfkc8wHUYFyiTNUzGlaa1aEtJkWUyqKmVDyno3eH9kPJZAlSXQVvkbnpAZ5eYr", + "UaB3j6YpETlwX8QmrgJMg3azWqmo+1J/A9WoPrM47dZEClCwLcRE4xhyH/la1jjan5ybhjY73roSWg1i", + "qyqIBVWe1zlw6yZbNgoXiZm3IpQO+i7tYbl3TBPGIoeoowqZkT9Y8pR8VPDnp8mEJ1TTp+Sjrwc1NGA3", + "v08m/I8RuWhSY2kcadVsMpBMBDY1l6BAlwl5jr/UPwlt1WTCdVOORfCXTBSqLtmXNGU2Sh7rNpU1QWy5", + "OPJcGjXdLMVW17UW8jnNle/S+wdL/rCZd099vUcJMbAlJPYZU9YMqheUk2NCFy6PEatLmYUqs3zz6sBD", + "+hqwIS7DXLcS7K7wMXmWMnzL2fK1pPFVYDTbNl9DrHG9I/ICe2jUeNjmHXHRgpeN6yunLfVfjyCDAsHT", + "FVFgk5jqvUfbx1lZO1BhMKohEQ1SYQndbuHDDFynCqyU1RJQrmkRdj/lCRnbdNzgWr1NZVuywkhA6ro0", + "WnrpUost7KV8GmJatu+iul7Ui5nNGCRjdqSN9I0aEI4GNVHTNga/3yi6NNxoy9TDiqfvUHa97JMAIyyl", + "ZDaGK/vP4cXF6dDFEA3fBgvSvYKEUZc+OsNBsTaho/ODthQ+bIDGF+zryOpA5cJP7fMXl+7moYq4arkX", + "hjocPaJwnKkjcyBquCzL7ONxXITi6PDFskDEfQXTNWfZ6ZQ7XlfPwu7zKzK62Z06hbgCv8eLNZ1sgZfn", + "+OJ948XOUu+XtXe0RokSu8XPfuG6izAPXHm9lWUbbz6BYA3KXtgg/q8bW1jI6S+AKMRHiSNxzVNBE8Nd", + "lx9Y3qsWeksLJb+dnduyI7W8D9t5BNGlygqZVdGlevfQFv7d/M+Z/I3lm1SDqp9cyTkY12WuKC4ZxeqF", + "dtCRP4X/LADFgTuEXfmpJg3UD5yN5aze73SvcHC9lSvYQN3vsSxHgoRVB/C3SJcOWXURYg5qS2huyz30", + "qnSyBcFqKkcflCYHmspa0lLmQyZQ+zVjHa6l6wlfQ9jkN6WNNj8zqqW5ImCDaCwuMaPKaLJ+Qmfan/AE", + "6j+Zv6m0RXE/sNy5smm8YLDEbpyg26MgG4XjFWtcZWD0rbDV4GO3t1S5XYzrGZGf2HwB0v6rbFFLVGat", + "cD5JkkwLTTS9ApIKPgc5mvChxYTST8m/DbbtEOR4QFxpD4NYSMjBvx+Nx8Mn4zF59cOROjQfutIlzQ8f", + "DciUppTHRpUyXx4hBsjBv4+f1L61iGt++o+Bx6f/5Ml4+L8aH3WWeTzAX8svHo6Hj8svejBSo5ZLHKah", + "Vlels/1fVW1sB6poUHtml4x/qFDF812louPeW4nFt463/z8Tjbq57VI8Gvl16SuaOLHYFA1lr+ptZcLG", + "duBfwwm7m05Y9evuEhRqebVm4N8g2fwIutHO3Hen6WCvJJuUKY16uuqlm6qr+n6HybdJKdWuA6RSXd9S", + "6+r5BmkFc9gR8za9tksb2Ie77/rmO0ffY8D4XVzdMEC7Mnd8g3jCHaBxGqsCrGNmCTQpL91BXn4DNHFX", + "7u1YGSfzKqEZ/2vhZhFrCFs/99AlUPQHsxu/MWLBXMryKtOwcSqwgv6yVtK5l7u7lbXvLzWvp4T33jVn", + "ahWrv1A0w114j0F3Gb1ejfsIq32rBctLDNuiE/2uZKz+42tToPvXVlQQktjaKCm4A8F5ESVkwskAm+E5", + "6qnF4tWDOyu+UmokPdVTqp79/VXMzTuujXMpwVwtQafQblO/fBB5gbprjRJXn6Ra6s5FSiwU7qw+CWKp", + 
"LE3yrYu6QMmSmdPX6uzgTZtrSy9RNLwgv9mGvLbKEtOqsm12krra9NXHHNa6eWessSvpJ/Vq5rX6UeXF", + "WYvt+KBeEugW9XrW8cOehP0byyuyriHwL0PktF4GrEWiHXp3xpUNBL+rabSPLyZ8M2NsNpE2LKIT3jKJ", + "9hcBczbOO2Mub1XpZiwsoG16KY+Qjcww+HJMa/7KLyu6W1+OuWoXmYJVEfDgrD63AReS5b57jlsblvjC", + "At6GnIZDfGdYfXe4qV54S154PNyLuDhxMPyLi4w2ufaIjet2ma5AOKprsnGfcaitPh7b43bPksK47WBX", + "5Xec/VlAqPlExZXXDhwb6/l375q4TXLXlS+/ELHZzdSN1K58GZ/XNDGE1tFHD/JPzayYbjJKRW4tIwUa", + "HpylwdkdSjyusz1sNjUEmsp6RGESyreOKMx/QVjZAPSu8aiNJBc02mtKsvFVL1RfaN/94eoOo+Gw1Qpe", + "bV3n0EAkeiMMrroLu8iyrzog7hskU9sqtQ1lVwjIit2SYs1lfn2QEZaq2sbg+bymfNGO8fMz+r1fVy1H", + "fPe43sZxpN745LvHj/uWid3Wepa1tt2cZb5tTvxbmmP3tGaUhdK+9WMUzVLm5PTxkFWoVirmgcj9lotO", + "zF2T9h453CII1296HeV6QeNIvKr6HGwaHp5mJtJUXIcjDxodd2uNz9poxjDzspY9mxG7dsIUcUtbw5j9", + "p8ou89T2Hp6teuHSxZV/ufjul2K+5VFmCOubC+c2i8bS/2ZqyyB5SlfX2Kz2yBV33aLosJwyLalckfPy", + "a9uVH32hM8yTqHpJImpuNKFzyriyN/GpFNcKJJEFx8LmXHCSipimC6H00+8fPnzokgTNqAuqMBVIoah+", + "kNM5PBiQB27cB7Yk9AM35IOyF5SvXeJam/nMYzNitTgsIK0LyW17qXrt4ZDhxIGg2vczezrcx82uM9cX", + "StgKrMMANFjRrQLu11gkuNoCFuO4wJVbiggQp2MQK5OQO/ov+q45vpno3qpelTN8qcS9+gr6KKCq8S3d", + "O19FcehYZJmREmrF44UUXBTK14L2CFY5veYbMXyBb90rinGKL4tjt4Q+JOPjL1wSqItbuga5H90feDe/", + "Ys26WkFE/8ywQNPme3k18lqVsNTki4Ilt7ks7IVQs5uvsn7v65+/yfgCI0owoRUbjTi1tZ/iJCj2ATbS", + "3Bv72l+G6ux+/pvu7i5ACVsWU3L+9l/DqW0wspn4lKa66DdFepFv3/rctHfP55jdVOgIc0++yShlhwCi", + "/Pb6UZ+wLXQafOsvI3VwO19Yf7JL6NOfflhhQxtrfvtmLW7VyUcsna2lQ1HoTYa4Cnii0Gstcl9IHt2m", + "coDfW1nQYbONyUNXFDovNFo5UjaDeBWn8N8OlPtzoNSoWhS6ZTCTvo//UeWEDUtXmzlc9v2/10TtcpbN", + "FZfb6Z7uwy+Xov2FSkyVid2+UAmasA00ICFLloCo+RFqWHfJZb1SzGef1RG/1ntWOq3c7LIWPTEiWAxZ", + "ZOaoaNY4LnwFe+cVKD/vc2Sh0Au7sejww8nwt/Hw++H7v/9tL9GIADvK8se3TieoKNLFPDYEXPl0+IJx", + "LMUyPAm1P2cZKE2z3Ag5bGLvCguVQ9uPR+THgkrKNdh4uSmQNy+ePXr06PvReg9IYykXNh5lr5W4WJZ9", + "F2KW8nD8cB1jMyPJWJoSxo1om0tQakBy7PJCtFxZ2yc2tZNNcL8BLVfDk5l50C0aWMznNlcUm81gX1TG", + "ie0moGo9SeXKMkG1iTKW7TgQy/bpG044tQWqFfIiYIjmFhIlZfb06M0ffOMYW922uGmZD7DuQPGz2UzP", + 
"TpB9oNyR7Wghy1XeWYIdTdP6sE2wdfoCB0Lv7vvwbU6yuf5jH4t+64UafW+DSq6NyGuerjDBoJJ1OUhy", + "9hwbg2LF/zlTGnuXlmVER10si3wdkkV+/ziuzbG/etWox/ylyuj7Os4lfHEj/y8AAP//9FbNPPDeAAA=", } // GetSwagger returns the content of the embedded swagger specification file diff --git a/server/openapi.yaml b/server/openapi.yaml index 112334b9..44c262c7 100644 --- a/server/openapi.yaml +++ b/server/openapi.yaml @@ -1474,8 +1474,8 @@ components: type: integer format: int64 description: >- - Process-monotonic sequence number of the last published event. - Does not reset between sessions. + Monotonically increasing sequence number (last published). + Resets to 0 at the start of each capture session. minimum: 0 created_at: type: string