src/pqc_lint/findings.py
| 1 | """Finding dataclass and severity levels.""" |
| 2 | |
| 3 | from __future__ import annotations |
| 4 | |
| 5 | import json |
| 6 | from dataclasses import asdict, dataclass, field |
| 7 | from enum import Enum |
| 8 | |
| 9 | |
# Numeric rank per severity for threshold comparisons; higher means more severe.
# Hoisted to module level so Severity.order does not rebuild the dict per call.
_SEVERITY_ORDER = {"critical": 4, "high": 3, "medium": 2, "low": 1, "info": 0}


class Severity(str, Enum):
    """Severity level of a lint finding, ordered from CRITICAL down to INFO."""

    CRITICAL = "critical"  # actively broken by known quantum attacks (RSA, ECDSA, DH, ECDH)
    HIGH = "high"          # vulnerable to future quantum attack, currently in use (DSA, old sig algos)
    MEDIUM = "medium"      # weak classical crypto (MD5, SHA-1)
    LOW = "low"            # style / best-practice (missing explicit algorithm, hard-coded key size)
    INFO = "info"          # informational (PQC-safe patterns detected)

    @property
    def order(self) -> int:
        """Numeric rank for comparisons: critical=4 ... info=0."""
        return _SEVERITY_ORDER[self.value]

    @classmethod
    def from_str(cls, value: str) -> "Severity":
        """Parse a case-insensitive severity name into a Severity member.

        Raises:
            ValueError: if *value* is not one of the known severity names.
        """
        try:
            return cls(value.lower())
        except ValueError:
            # `from None` suppresses the internal enum-lookup ValueError so the
            # caller sees a single, clean error with the helpful message.
            raise ValueError(
                f"Invalid severity '{value}'. Must be one of: "
                + ", ".join(s.value for s in cls)
            ) from None
| 30 | |
| 31 | |
@dataclass
class Finding:
    """One lint hit: a rule match at a specific location in a scanned file."""

    rule_id: str                    # rule identifier, e.g. "PQC001"
    severity: Severity
    message: str                    # short human-readable description
    file: str                       # path relative to the scan root
    line: int                       # 1-based line number
    column: int = 1                 # 1-based column number
    end_line: int | None = None
    end_column: int | None = None
    snippet: str = ""               # the exact matching source text
    suggestion: str = ""            # hint for a PQC-safe replacement
    cwe: str | None = None          # CWE identifier, e.g. "CWE-327"
    language: str = ""              # python | javascript | go | rust | java | c

    def to_dict(self) -> dict:
        """Serialize to a plain dict, with severity flattened to its string value."""
        return dict(asdict(self), severity=self.severity.value)

    @classmethod
    def from_dict(cls, data: dict) -> "Finding":
        """Rebuild a Finding from a dict shaped like :meth:`to_dict` output.

        Required keys: rule_id, severity, message, file, line. All other
        fields fall back to the dataclass defaults when absent.
        """
        defaults: dict = {
            "column": 1,
            "end_line": None,
            "end_column": None,
            "snippet": "",
            "suggestion": "",
            "cwe": None,
            "language": "",
        }
        merged = {**defaults, **data}
        return cls(
            rule_id=merged["rule_id"],
            severity=Severity.from_str(merged["severity"]),
            message=merged["message"],
            file=merged["file"],
            line=merged["line"],
            column=merged["column"],
            end_line=merged["end_line"],
            end_column=merged["end_column"],
            snippet=merged["snippet"],
            suggestion=merged["suggestion"],
            cwe=merged["cwe"],
            language=merged["language"],
        )
| 69 | |
| 70 | |
@dataclass
class ScanReport:
    """Aggregate report of a scan session."""

    findings: list[Finding] = field(default_factory=list)
    files_scanned: int = 0
    files_skipped: int = 0
    scan_root: str = "."
    started_at: str = ""
    duration_ms: int = 0

    def counts_by_severity(self) -> dict[str, int]:
        """Finding count per severity; every level is present, zero if unused."""
        tally = dict.fromkeys((level.value for level in Severity), 0)
        for finding in self.findings:
            tally[finding.severity.value] += 1
        return tally

    def counts_by_rule(self) -> dict[str, int]:
        """Finding count per rule id; only rules that actually fired appear."""
        tally: dict[str, int] = {}
        for finding in self.findings:
            rid = finding.rule_id
            tally[rid] = tally.setdefault(rid, 0) + 1
        return tally

    def has_failing(self, fail_on: Severity) -> bool:
        """True if any finding meets or exceeds the fail-on threshold."""
        # fail_on.order is read inside the loop (not hoisted) so it is never
        # touched when there are no findings, matching the lazy any() form.
        for finding in self.findings:
            if finding.severity.order >= fail_on.order:
                return True
        return False

    def to_json(self) -> str:
        """Render the report as pretty-printed JSON (schema version 1.0)."""
        payload = {
            "schema_version": "1.0",
            "scan_root": self.scan_root,
            "started_at": self.started_at,
            "duration_ms": self.duration_ms,
            "files_scanned": self.files_scanned,
            "files_skipped": self.files_skipped,
            "counts_by_severity": self.counts_by_severity(),
            "counts_by_rule": self.counts_by_rule(),
            "findings": [finding.to_dict() for finding in self.findings],
        }
        return json.dumps(payload, indent=2)
| 112 | |