From e7b271d86e5fb3a4f4d68a04c9c43919ea952795 Mon Sep 17 00:00:00 2001
From: tyagian
Date: Sat, 2 May 2026 20:12:55 -0400
Subject: [PATCH] fix pii errors

---
 pyproject.toml                                     | 4 ++--
 sentinelguard/__init__.py                          | 2 +-
 sentinelguard/scanners/output/bias.py              | 5 ++---
 sentinelguard/scanners/prompt/jailbreak.py         | 5 ++---
 sentinelguard/scanners/prompt/prompt_injection.py  | 5 ++---
 sentinelguard/scanners/prompt/toxicity.py          | 5 ++---
 6 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 22de028..1f6d230 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "sentinelguard"
-version = "0.0.3"
+version = "0.0.4"
 description = "A comprehensive, production-ready LLM security and guardrails framework"
 readme = "README.md"
 license = {text = "MIT"}
@@ -36,7 +36,7 @@ dependencies = [
     "presidio-analyzer>=2.2.0",
     "presidio-anonymizer>=2.2.0",
     "spacy>=3.6.0",
-    "en-core-web-lg>=3.6.0",
+    "en-core-web-lg>=3.7.1",
     # Model-based detection (HuggingFace)
     "transformers>=4.30.0",
     "torch>=2.0.0",
diff --git a/sentinelguard/__init__.py b/sentinelguard/__init__.py
index e7d4281..c234ab3 100644
--- a/sentinelguard/__init__.py
+++ b/sentinelguard/__init__.py
@@ -36,7 +36,7 @@ guard = SentinelGuard(config=config)
 
 """
 
-__version__ = "0.0.3"
+__version__ = "0.0.4"
 __author__ = "SentinelGuard Contributors"
 
 from sentinelguard.core.guard import SentinelGuard
diff --git a/sentinelguard/scanners/output/bias.py b/sentinelguard/scanners/output/bias.py
index 758c036..8755071 100644
--- a/sentinelguard/scanners/output/bias.py
+++ b/sentinelguard/scanners/output/bias.py
@@ -16,8 +16,6 @@ import re
 
 from typing import Any, ClassVar, Dict, List, Optional, Tuple
 
-from transformers import pipeline
-
 from sentinelguard.core.scanner import OutputScanner, RiskLevel, ScanResult, register_scanner
 
 logger = logging.getLogger(__name__)
@@ -112,8 +110,9 @@ def __init__(
     def _load_model(self) -> None:
         if self._model is None:
             try:
+                from transformers import pipeline as hf_pipeline
                 logger.info("Loading bias detection model: %s", _BIAS_MODEL_ID)
-                self._model = pipeline("text-classification", model=_BIAS_MODEL_ID)
+                self._model = hf_pipeline("text-classification", model=_BIAS_MODEL_ID)
             except Exception as exc:
                 logger.warning("Failed to load bias model, falling back to regex only: %s", exc)
                 self._model = False  # sentinel: tried and failed
diff --git a/sentinelguard/scanners/prompt/jailbreak.py b/sentinelguard/scanners/prompt/jailbreak.py
index 8068dfd..31a0524 100644
--- a/sentinelguard/scanners/prompt/jailbreak.py
+++ b/sentinelguard/scanners/prompt/jailbreak.py
@@ -19,8 +19,6 @@ import re
 
 from typing import Any, ClassVar, Dict, List, Optional, Tuple
 
-from transformers import pipeline
-
 from sentinelguard.core.scanner import PromptScanner, RiskLevel, ScanResult, register_scanner
 
 logger = logging.getLogger(__name__)
@@ -179,8 +177,9 @@ def __init__(
     def _load_model(self) -> None:
         if self._model is None:
             try:
+                from transformers import pipeline as hf_pipeline
                 logger.info("Loading jailbreak detection model: %s", _JAILBREAK_MODEL_ID)
-                self._model = pipeline("text-classification", model=_JAILBREAK_MODEL_ID)
+                self._model = hf_pipeline("text-classification", model=_JAILBREAK_MODEL_ID)
             except Exception as exc:
                 logger.warning("Failed to load jailbreak model, falling back to patterns only: %s", exc)
                 self._model = False
diff --git a/sentinelguard/scanners/prompt/prompt_injection.py b/sentinelguard/scanners/prompt/prompt_injection.py
index 4e0f14f..26de01d 100644
--- a/sentinelguard/scanners/prompt/prompt_injection.py
+++ b/sentinelguard/scanners/prompt/prompt_injection.py
@@ -12,8 +12,6 @@ import re
 
 from typing import Any, ClassVar, List, Optional
 
-from transformers import pipeline
-
 from sentinelguard.core.scanner import PromptScanner, RiskLevel, ScanResult, register_scanner
 
 logger = logging.getLogger(__name__)
@@ -99,8 +97,9 @@ def __init__(
     def _load_model(self) -> None:
         if self._model is None:
             try:
+                from transformers import pipeline as hf_pipeline
                 logger.info("Loading prompt injection model: %s", _INJECTION_MODEL_ID)
-                self._model = pipeline("text-classification", model=_INJECTION_MODEL_ID)
+                self._model = hf_pipeline("text-classification", model=_INJECTION_MODEL_ID)
             except Exception as exc:
                 logger.warning("Failed to load injection model, falling back to patterns+heuristics: %s", exc)
                 self._model = False
diff --git a/sentinelguard/scanners/prompt/toxicity.py b/sentinelguard/scanners/prompt/toxicity.py
index 21077c1..2db1385 100644
--- a/sentinelguard/scanners/prompt/toxicity.py
+++ b/sentinelguard/scanners/prompt/toxicity.py
@@ -11,8 +11,6 @@ import re
 
 from typing import Any, ClassVar, Dict, List, Optional
 
-from transformers import pipeline
-
 from sentinelguard.core.scanner import PromptScanner, RiskLevel, ScanResult, register_scanner
 
 logger = logging.getLogger(__name__)
@@ -86,8 +84,9 @@ def __init__(
     def _load_model(self) -> None:
         if self._model is None:
             try:
+                from transformers import pipeline as hf_pipeline
                 logger.info("Loading toxicity model: %s", _TOXICITY_MODEL_ID)
-                self._model = pipeline("text-classification", model=_TOXICITY_MODEL_ID, top_k=None)
+                self._model = hf_pipeline("text-classification", model=_TOXICITY_MODEL_ID, top_k=None)
             except Exception as exc:
                 logger.warning("Failed to load toxicity model, falling back to patterns: %s", exc)
                 self._model = False
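
All four scanners receive the same _load_model change: the transformers import moves from module scope into the loader, so importing sentinelguard no longer requires transformers/torch to import successfully, and a failed load degrades to the pattern-based checks. In isolation the pattern looks roughly like the sketch below; the class name, model id, and the is_flagged keyword fallback are illustrative placeholders, not SentinelGuard's actual API.

    import logging

    logger = logging.getLogger(__name__)

    # Illustrative placeholder; each real scanner defines its own *_MODEL_ID constant.
    _MODEL_ID = "example-org/example-text-classifier"


    class LazyModelScanner:
        """Minimal stand-in for the shared lazy-load-with-fallback pattern."""

        def __init__(self) -> None:
            # None = not tried yet, False = tried and failed, otherwise a pipeline.
            self._model = None

        def _load_model(self) -> None:
            if self._model is None:
                try:
                    # Deferred import: transformers is only needed once a scan runs.
                    from transformers import pipeline as hf_pipeline

                    logger.info("Loading model: %s", _MODEL_ID)
                    self._model = hf_pipeline("text-classification", model=_MODEL_ID)
                except Exception as exc:
                    logger.warning("Failed to load model, falling back: %s", exc)
                    self._model = False  # sentinel: tried and failed

        def is_flagged(self, text: str) -> bool:
            self._load_model()
            if not self._model:
                # Illustrative keyword fallback when no model is available.
                return "ignore previous instructions" in text.lower()
            top = self._model(text)[0]
            return top["score"] >= 0.5 and top["label"].upper() not in {"SAFE", "BENIGN"}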