Minimal, deterministic authority interpreter for LLM governance.
A pre-inference gate that classifies request intent and enforces mode-based access control. Runs before any LLM call. Produces machine-readable receipts.

What it is not:

- Not an agent
- Not a memory system
- Not a prompt template
- Not a filter for LLM outputs (see truth_state.py for post-inference validation)

Basic usage:

```python
from ami_gate.gate import AMIGate, check, set_mode
from ami_gate.modes import REPORT_ONLY, ADVISE_ALLOWED

# Create gate (loads config once)
gate = AMIGate()

# Check the request before calling the LLM
request = "What is the system status?"
result = gate.check(request)
if result.allowed:
    response = call_your_llm(request)
else:
    response = f"Blocked: {result.reason_text}"
```

Combining the pre-inference gate with post-inference validation:

```python
from ami_gate.gate import check, set_mode
from ami_gate.truth_state import validate_output

def governed_llm_call(request: str, llm_fn, model: str | None = None):
    # Pre-inference gate
    result = check(request, model=model)
    if not result.allowed:
        return {"blocked": True, "reason": result.reason_code}

    # Call LLM
    response = llm_fn(request)

    # Post-inference validation
    validation = validate_output(response, has_grounding=False)
    return {"response": validation.validated, "truth_state": validation.truth_state}
```

In the default REPORT_ONLY mode, an execute-intent request is blocked:

```python
>>> from ami_gate.gate import AMIGate
>>> gate = AMIGate()  # Default: REPORT_ONLY
>>> result = gate.check("Delete all files in /tmp")
>>> result.allowed
False
>>> result.reason_code
'INTENT_BLOCKED'
>>> result.intent
'EXECUTE'
>>> result.mode
'REPORT_ONLY'
```

Package contents:

| File | Purpose |
|---|---|
| gate.py | Request classification and mode enforcement |
| modes.py | Mode and intent constants |
| truth_state.py | Post-inference output validation |
| receipt.py | Append-only audit trail |
| config.json | Patterns and settings (immutable at runtime) |
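
Every check result is machine-readable, and receipt.py keeps the append-only audit trail. Its on-disk format is not shown in this README, so the sketch below only illustrates the shape of a decision record using the confirmed result fields; the JSON-lines file name and layout are assumptions, not receipt.py's actual format:

```python
import json
import time

def log_decision(result, request: str, path: str = "gate_audit.jsonl") -> None:
    """Append one gate decision as a JSON line.

    Illustrative only: receipt.py writes its own receipts; this just shows
    the kind of record a gate decision carries.
    """
    record = {
        "ts": time.time(),
        "request": request,
        "allowed": result.allowed,
        "intent": result.intent,
        "mode": result.mode,
        "reason_code": result.reason_code,
    }
    with open(path, "a", encoding="utf-8") as f:
        f.write(json.dumps(record) + "\n")
```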

Each mode allows a fixed set of request intents:

| Mode | Allows |
|---|---|
| REPORT_ONLY | REPORT |
| ADVISE_ALLOWED | REPORT, ADVISE |
| EXECUTE_ALLOWED | REPORT, ADVISE, EXECUTE |
| RESEARCH_ALLOWED | REPORT, RESEARCH |
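
A minimal sketch of raising the gate from the default REPORT_ONLY to ADVISE_ALLOWED. `set_mode` and the mode constants appear in the imports above, but the assumption that `set_mode` takes a mode constant directly is exactly that, an assumption:

```python
# Sketch: widen the gate from the default REPORT_ONLY to ADVISE_ALLOWED.
# Assumption: set_mode accepts a mode constant from ami_gate.modes.
from ami_gate.gate import check, set_mode
from ami_gate.modes import ADVISE_ALLOWED

set_mode(ADVISE_ALLOWED)

# REPORT and ADVISE intents now pass; EXECUTE intents are still blocked.
result = check("Should we restart the ingest service?")
print(result.intent, result.allowed)
```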

Dependencies: none beyond the Python standard library.