examples/provenance_chain.py
| 1 | """Chain of provenance: original AI output -> human-edited derivation. |
| 2 | |
| 3 | Run: python examples/provenance_chain.py |
| 4 | """ |
| 5 | |
| 6 | from __future__ import annotations |
| 7 | |
| 8 | from quantumshield import AgentIdentity |
| 9 | |
| 10 | from pqc_content_provenance import ( |
| 11 | AIGeneratedAssertion, |
| 12 | ContentManifest, |
| 13 | GenerationContext, |
| 14 | ManifestSigner, |
| 15 | ModelAttribution, |
| 16 | ProvenanceChain, |
| 17 | ) |
| 18 | |
| 19 | |
def main() -> None:
    """Build and verify a two-link provenance chain: AI draft -> human revision."""
    # One identity/signer pair for the generating model, one for the editor.
    llama_identity = AgentIdentity.create("llama-3")
    alice_identity = AgentIdentity.create("human-editor-alice")
    llama_signer = ManifestSigner(llama_identity)
    alice_signer = ManifestSigner(alice_identity)

    # Step 1: AI generates original
    draft = b"Draft press release: QuantaMrkt ships tool #4."
    attribution = ModelAttribution(
        model_did=llama_identity.did,
        model_name="Llama-3-8B-Instruct",
        model_version="1.0",
    )
    generation = GenerationContext(
        prompt_hash="d" * 64,
        parameters={"temperature": 0.6},
        generated_at="2026-04-20T12:00:00Z",
    )
    draft_manifest = ContentManifest.create(
        content=draft,
        content_type="text/plain",
        model_attribution=attribution,
        generation_context=generation,
        assertions=[
            AIGeneratedAssertion(model_name="Llama-3-8B-Instruct", model_version="1.0")
        ],
    )
    signed_draft = llama_signer.sign(draft_manifest)

    # Step 2: Human edits content and re-signs with reference to previous manifest
    # (the derived manifest links back via previous_manifest_id).
    revision = b"Press release: QuantaMrkt ships tool #4 (Signed AI Content Provenance)."
    revision_manifest = ContentManifest.create(
        content=revision,
        content_type="text/plain",
        model_attribution=attribution,  # still based on Llama-3 originally
        generation_context=generation,
        assertions=[
            AIGeneratedAssertion(
                model_name="Llama-3-8B-Instruct",
                model_version="1.0",
                human_edited=True,
            ),
        ],
        previous_manifest_id=signed_draft.manifest_id,
    )
    signed_revision = alice_signer.sign(revision_manifest)

    # Assemble the chain in generation order and verify it end-to-end.
    chain = ProvenanceChain()
    chain.add(signed_draft)
    chain.add(signed_revision)

    valid, problems = chain.verify_chain()
    print(f"chain length: {len(chain.links)}")
    print(f"chain valid: {valid}")
    for problem in problems or []:
        print(f" error: {problem}")

    # Dump each link so the reader can see the hash-linked structure.
    for link in chain.links:
        m = link.manifest
        print(f"\n - manifest_id: {m.manifest_id}")
        print(f" signer: {m.signer_did}")
        print(f" content_hash: {m.content_hash[:16]}...")
        print(f" prev: {m.previous_manifest_id}")
| 84 | |
| 85 | |
# Script entry point: only run the demo when executed directly, not on import.
if __name__ == "__main__":
    main()
| 88 | |