tests/test_integration.py
| 1 | """End-to-end integration tests.""" |
| 2 | |
| 3 | from __future__ import annotations |
| 4 | |
| 5 | from quantumshield.identity.agent import AgentIdentity |
| 6 | |
| 7 | from pqc_content_provenance import ( |
| 8 | AIGeneratedAssertion, |
| 9 | ContentManifest, |
| 10 | GenerationContext, |
| 11 | ManifestSigner, |
| 12 | ModelAttribution, |
| 13 | ProvenanceChain, |
| 14 | UsageAssertion, |
| 15 | embed_manifest, |
| 16 | extract_manifest, |
| 17 | ) |
| 18 | |
| 19 | |
def test_full_lifecycle_text_content() -> None:
    """Round-trip a signed text manifest: create, sign, embed, extract, verify."""
    author = AgentIdentity.create("lifecycle-signer")
    author_signer = ManifestSigner(author)

    payload = b"An AI-generated article about post-quantum cryptography."

    # Describe the producing model and the generation run.
    attribution = ModelAttribution(
        model_did=author.did,
        model_name="Llama-3-8B-Instruct",
        model_version="1.0",
    )
    context = GenerationContext(
        prompt_hash="a" * 64,
        parameters={"temperature": 0.7},
        generated_at="2026-04-20T10:00:00Z",
    )
    claims = [
        AIGeneratedAssertion(
            model_name="Llama-3-8B-Instruct",
            model_version="1.0",
            generator_type="text",
        ),
        UsageAssertion(
            license="cc-by-4.0",
            commercial_use=True,
            attribution_required=True,
            attribution_text="Generated by Llama-3",
        ),
    ]

    manifest = ContentManifest.create(
        content=payload,
        content_type="text/plain",
        model_attribution=attribution,
        generation_context=context,
        assertions=claims,
    )

    signed_manifest = author_signer.sign(manifest)
    envelope = embed_manifest(payload, signed_manifest, mode="sidecar")

    # Consumer side: recover the manifest/content pair and verify both checks.
    manifest_out, content_out = extract_manifest(envelope, mode="sidecar")
    outcome = ManifestSigner.verify(manifest_out, content_out)
    assert outcome.valid is True
    assert outcome.content_hash_match is True
    assert outcome.signature_match is True
| 62 | |
| 63 | |
def test_derived_content_chain_verifies() -> None:
    """A two-link provenance chain (model output, then human edit) verifies."""
    model_identity = AgentIdentity.create("model-signer")
    editor_identity = AgentIdentity.create("editor-signer")
    model_signer = ManifestSigner(model_identity)
    editor_signer = ManifestSigner(editor_identity)

    # Shared attribution/context reused by both manifests in the chain.
    shared_attribution = ModelAttribution(
        model_did=model_identity.did,
        model_name="Llama-3-8B-Instruct",
        model_version="1.0",
    )
    shared_context = GenerationContext(
        prompt_hash="a" * 64,
        parameters={"temperature": 0.6},
        generated_at="2026-04-20T10:00:00Z",
    )

    # Link 1: the raw model output, signed by the model's identity.
    draft_bytes = b"Draft press release."
    draft_signed = model_signer.sign(
        ContentManifest.create(
            content=draft_bytes,
            content_type="text/plain",
            model_attribution=shared_attribution,
            generation_context=shared_context,
            assertions=[AIGeneratedAssertion(model_name="Llama-3-8B-Instruct")],
        )
    )

    # Link 2: the human-edited derivative, pointing back at link 1 and
    # signed by the editor's identity.
    final_bytes = b"Final press release (human-edited)."
    final_signed = editor_signer.sign(
        ContentManifest.create(
            content=final_bytes,
            content_type="text/plain",
            model_attribution=shared_attribution,
            generation_context=shared_context,
            assertions=[
                AIGeneratedAssertion(model_name="Llama-3-8B-Instruct", human_edited=True)
            ],
            previous_manifest_id=draft_signed.manifest_id,
        )
    )

    chain = ProvenanceChain()
    chain.add(draft_signed)
    chain.add(final_signed)

    valid, problems = chain.verify_chain()
    assert valid is True, problems
    assert len(chain.links) == 2
| 111 | |
| 112 | |
def test_tampered_content_detected() -> None:
    """Altering the content bytes must fail verification via the hash check."""
    identity = AgentIdentity.create("tamper-signer")
    signer = ManifestSigner(identity)

    genuine_bytes = b"The patient has low risk."
    signed = signer.sign(
        ContentManifest.create(
            content=genuine_bytes,
            content_type="text/plain",
            model_attribution=ModelAttribution(
                model_did=identity.did,
                model_name="Medical-AI",
                model_version="1.0",
            ),
            generation_context=GenerationContext(
                prompt_hash="c" * 64,
                generated_at="2026-04-20T10:00:00Z",
            ),
        )
    )

    # Present different bytes than the ones the manifest was created over.
    forged_bytes = b"The patient has high risk."
    outcome = ManifestSigner.verify(signed, forged_bytes)
    assert outcome.valid is False
    assert outcome.content_hash_match is False
    # The manifest itself was not touched, so its signature still checks out.
    assert outcome.signature_match is True
| 140 | |