Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file added .coverage
Binary file not shown.
31 changes: 31 additions & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# Pre-commit hooks: formatting, repo hygiene, and local type/lint checks.
repos:
  - repo: https://github.com/psf/black
    rev: 23.11.0
    hooks:
      - id: black
        language_version: python3.10

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.5.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files

  # Local hooks: run tools from the developer's environment (language: system).
  - repo: local
    hooks:
      - id: mypy
        name: mypy
        entry: mypy
        language: system
        types: [python]
        args: ["--strict", "shellscope/backend"]
        # Always check the whole backend package, not just staged files.
        pass_filenames: false

      - id: flutter-analyze
        name: flutter analyze
        entry: flutter analyze
        language: system
        types: [dart]
        pass_filenames: false
484 changes: 275 additions & 209 deletions index.html

Large diffs are not rendered by default.

28 changes: 28 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# Black code formatter settings.
[tool.black]
line-length = 88
target-version = ['py310']
include = '\.pyi?$'

# Mypy static type checking (near-strict: untyped defs are rejected).
[tool.mypy]
python_version = "3.10"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true
disallow_incomplete_defs = true
check_untyped_defs = true
disallow_untyped_decorators = true
no_implicit_optional = true
warn_redundant_casts = true
warn_unused_ignores = true
warn_no_return = true
warn_unreachable = true

# Pytest: quiet run with coverage over the backend package.
[tool.pytest.ini_options]
minversion = "6.0"
addopts = "-ra -q --cov=shellscope/backend --cov-report=term-missing"
testpaths = [
"shellscope/backend/tests",
]
python_files = "test_*.py"
python_classes = "Test*"
python_functions = "test_*"
8 changes: 8 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Runtime dependencies (Windows process monitoring — presumably used by the
# backend watcher; confirm against shellscope/backend imports).
wmi==1.5.1
pywin32==306
# Development / QA tooling (matches .pre-commit-config.yaml and pyproject.toml).
pytest==7.4.3
mypy==1.8.0
black==23.11.0
flake8==6.1.0
coverage==7.3.2
pytest-cov==4.1.0
235 changes: 146 additions & 89 deletions shellscope/backend/db.py
Original file line number Diff line number Diff line change
@@ -1,124 +1,181 @@
import sqlite3
import os
import sys
import time
import threading
from datetime import datetime, timedelta
from typing import Any, List, Optional

class DatabaseHandler:
def __init__(self, db_name: str = "shellscope.db") -> None:
    """Initialize the handler: resolve the DB path, prepare the lazy
    persistent connection slot, and create/migrate the schema."""
    self.db_path = self._get_db_path(db_name)
    # Persistent connection; created lazily by _get_connection().
    self.conn: Optional[sqlite3.Connection] = None
    # Serializes all access to the shared connection across threads.
    self.lock = threading.Lock()
    self.setup()

def _get_db_path(self, db_name: str) -> str:
    """Resolve db_name to an absolute path next to the executable/module.

    ":memory:" is passed through untouched so tests can use an in-memory DB.
    """
    if db_name == ":memory:":
        return ":memory:"
    if getattr(sys, "frozen", False):
        # Frozen (e.g. PyInstaller) build: keep the DB beside the executable.
        base_path = os.path.dirname(sys.executable)
    else:
        base_path = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_path, db_name)

def _get_connection(self) -> sqlite3.Connection:
    """Return the persistent connection, creating it on first use.

    The connection is created with check_same_thread=False so multiple
    threads may use it; callers are expected to hold self.lock to
    serialize access. Raises sqlite3.Error if the connect fails.
    """
    if self.conn is None:
        try:
            self.conn = sqlite3.connect(self.db_path, check_same_thread=False)
            # WAL mode lets a UI reader run concurrently with our writes.
            self.conn.execute("PRAGMA journal_mode=WAL;")
            # synchronous=NORMAL is faster than FULL and safe under WAL.
            self.conn.execute("PRAGMA synchronous=NORMAL;")
        except sqlite3.Error as e:
            sys.stderr.write(f"DB CONNECT ERROR: {e}\n")
            raise
    return self.conn

def setup(self) -> None:
"""Initialize DB with Lifecycle columns"""
try:
conn = sqlite3.connect(self.db_path)
cursor = conn.cursor()

cursor.execute("PRAGMA journal_mode=WAL;")

# Check if table exists to see if we need to migrate (drop/recreate for dev)
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='logs';")
table_exists = cursor.fetchone()
"""Initialize DB with Lifecycle columns."""
with self.lock:
try:
conn = self._get_connection()
cursor = conn.cursor()

# Check for migration
cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='logs';"
)
table_exists = cursor.fetchone()

# For simplicity in this dev phase, if we are changing schema, we might need to recreate.
# But let's check columns or just try to create with IF NOT EXISTS and hope for best or ALTER.
# Given the prompt instruction: "drop the table if it exists or create a new one"
# We will DROP to ensure schema match.
# WARNING: This wipes history on update. Acceptable for this "dev -> prod" transition step.

# Simple migration flag/check: check for 'duration' column.
needs_migration = False
if table_exists:
cursor.execute("PRAGMA table_info(logs)")
columns = [info[1] for info in cursor.fetchall()]
if 'duration' not in columns:
needs_migration = True

if needs_migration:
sys.stderr.write("MIGRATION: Dropping old table to update schema.\n")
cursor.execute("DROP TABLE logs")
needs_migration = False
if table_exists:
cursor.execute("PRAGMA table_info(logs)")
columns = [info[1] for info in cursor.fetchall()]
if "duration" not in columns:
needs_migration = True

cursor.execute("""
CREATE TABLE IF NOT EXISTS logs (
id INTEGER PRIMARY KEY AUTOINCREMENT,
pid INTEGER,
date TEXT,
time TEXT,
child TEXT,
parent TEXT,
args TEXT,
suspicious INTEGER,
status TEXT,
start_time_epoch REAL,
end_time TEXT,
duration REAL,
is_running INTEGER DEFAULT 1
if needs_migration:
sys.stderr.write("MIGRATION: Dropping old table to update schema.\n")
cursor.execute("DROP TABLE logs")

cursor.execute(
"""
CREATE TABLE IF NOT EXISTS logs (
id INTEGER PRIMARY KEY AUTOINCREMENT,
pid INTEGER,
date TEXT,
time TEXT,
child TEXT,
parent TEXT,
args TEXT,
suspicious INTEGER,
status TEXT,
start_time_epoch REAL,
end_time TEXT,
duration REAL,
is_running INTEGER DEFAULT 1
)
"""
)
""")
conn.commit()
conn.close()
except sqlite3.Error as e:
sys.stderr.write(f"DB SETUP ERROR: {e}\n")
conn.commit()
# Do not close the persistent connection here
except sqlite3.Error as e:
sys.stderr.write(f"DB SETUP ERROR: {e}\n")

def insert_log(self, log_obj: Any) -> None:
    """Insert a single log entry.

    log_obj must provide to_tuple() returning the 10 insert values in
    column order. Errors are logged to stderr, not raised (best-effort).
    """
    try:
        with self.lock:
            conn = self._get_connection()
            # `with conn:` wraps the insert in a committed transaction.
            with conn:
                conn.execute(
                    """
                    INSERT INTO logs (pid, date, time, child, parent, args, suspicious, status, start_time_epoch, is_running)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                    """,
                    log_obj.to_tuple(),
                )
    except Exception as e:
        sys.stderr.write(f"DB INSERT ERROR: {e}\n")

def insert_logs_batch(self, log_objs: List[Any]) -> None:
    """Insert multiple log entries in a single transaction.

    No-op for an empty list. Errors are logged to stderr, not raised.
    """
    if not log_objs:
        return
    try:
        # Build the tuples outside the lock to keep the critical section short.
        data = [log.to_tuple() for log in log_objs]
        with self.lock:
            conn = self._get_connection()
            with conn:
                conn.executemany(
                    """
                    INSERT INTO logs (pid, date, time, child, parent, args, suspicious, status, start_time_epoch, is_running)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                    """,
                    data,
                )
    except Exception as e:
        sys.stderr.write(f"DB BATCH INSERT ERROR: {e}\n")

def get_process_start_time(self, pid: int) -> float:
    """Return start_time_epoch of the latest running entry for pid.

    Targets rows with is_running = 1 and takes the newest (highest id) to
    cope with PID reuse. Returns 0.0 if no running entry exists or on error.
    """
    try:
        with self.lock:
            conn = self._get_connection()
            cursor = conn.execute(
                "SELECT start_time_epoch FROM logs WHERE pid = ? AND is_running = 1 ORDER BY id DESC LIMIT 1",
                (pid,),
            )
            row = cursor.fetchone()
            if row:
                return float(row[0])
    except Exception as e:
        sys.stderr.write(f"DB GET START TIME ERROR: {e}\n")
    return 0.0

def update_log_duration(self, pid: int, end_time_str: str, duration: float) -> None:
    """Mark the most recent running entry for pid as finished.

    Sets is_running = 0 and records end_time/duration. If no running row
    matches (start event missed or already closed) this is a silent no-op.
    Errors are logged to stderr, not raised.
    """
    try:
        with self.lock:
            conn = self._get_connection()
            with conn:
                cursor = conn.execute(
                    """
                    UPDATE logs
                    SET is_running = 0, end_time = ?, duration = ?
                    WHERE pid = ? AND is_running = 1
                    """,
                    (end_time_str, duration, pid),
                )
                if cursor.rowcount == 0:
                    # Missed start event or already closed — ignore.
                    pass
    except Exception as e:
        sys.stderr.write(f"DB UPDATE ERROR: {e}\n")

def prune_old_logs(self, days_to_keep: int = 7) -> None:
    """Delete log rows whose `date` is older than days_to_keep days.

    Comparison is lexicographic on the "%Y-%m-%d" string, which orders
    correctly for ISO dates. Errors are logged to stderr, not raised.
    """
    try:
        cutoff_date = (datetime.now() - timedelta(days=days_to_keep)).strftime(
            "%Y-%m-%d"
        )
        with self.lock:
            conn = self._get_connection()
            with conn:
                cursor = conn.execute(
                    "DELETE FROM logs WHERE date < ?", (cutoff_date,)
                )
                count = cursor.rowcount
        if count > 0:
            sys.stderr.write(f"MAINTENANCE: Pruned {count} old logs.\n")
    except Exception as e:
        sys.stderr.write(f"DB PRUNE ERROR: {e}\n")

def close(self) -> None:
    """Close the persistent connection, if any.

    Close errors are swallowed deliberately (best-effort shutdown); the
    slot is reset to None so a later call can reconnect. Idempotent.
    """
    with self.lock:
        if self.conn:
            try:
                self.conn.close()
            except Exception:
                pass
            self.conn = None
4 changes: 2 additions & 2 deletions shellscope/backend/models.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import time
from typing import Tuple, Any
from typing import Tuple, Any, List, Optional

class ProcessLog:
def __init__(self, pid: int, child: str, parent: str, args: str, suspicious: bool, status: str = "NEW", is_running: bool = True):
Expand All @@ -21,7 +21,7 @@ def __str__(self) -> str:
return f"[{self.timestamp}] {self.parent} -> {self.child} (PID: {self.pid})"

@classmethod
def from_wmi_process(cls, process: Any, parent_name: str, status: str = "NEW", suspicious_keywords: list = None) -> 'ProcessLog':
def from_wmi_process(cls, process: Any, parent_name: str, status: str = "NEW", suspicious_keywords: Optional[List[str]] = None) -> 'ProcessLog':
if suspicious_keywords is None:
suspicious_keywords = []

Expand Down
Loading