This page covers programmatic usage of Agent Artifacts without the CLI. The APIs are modular, so you can use just one subpackage (skills, memory, or traces) if you want.
# Create and initialize a SQLite-backed storage backend.
from agent_artifacts.storage.registry import create_backend

storage = create_backend("sqlite", db_path="agent-artifacts.db")
storage.initialize()

# Log a single decision trace, then read back the most recent entries.
from agent_artifacts.api import trace_log, trace_query

trace_log(
    storage,
    decision="execute_tool",
    reason="User asked to summarize",
    confidence=0.86,
    result="success",
    metadata={"tool": "summarizer"},
)
recent = list(trace_query(storage, limit=10))
print(recent)

# Full API surface used by the transactional-memory workflow, sorted
# alphabetically per PEP 8 (the original listed staged_memory_query
# before stage).
from agent_artifacts.api import (
    artifact_publish,
    begin,
    commit,
    journal_query,
    stage,
    staged_memory_query,
    trace_log,
    trace_query,
    tx_status,
    validate,
)
# Publish a skill artifact so it can later be referenced from traces.
artifact = artifact_publish(
    storage,
    path="./skills/deploy_fastapi.yaml",
    name="deploy_fastapi",
    version="1.0.0",
)

# Open a transaction and stage a memory entry inside it.
tx_id = begin(storage, actor="agent", reason="user requested deploy")
stage(
    storage,
    tx_id=tx_id,
    key="user.preference.deploy_mode",
    value="docker_compose",
    entry_type="preference",
    source="user",
    confidence=0.9,
)

# Inspect what is currently staged in this transaction.
list(staged_memory_query(storage, tx_id=tx_id, limit=10))

# Record a validation verdict before committing.
validate(
    storage,
    tx_id=tx_id,
    status="approved",
    confidence=0.95,
    evidence="user confirmed",
    validator="human",
)

# Optional: mark prior entries with the same key/type as superseded.
commit(storage, tx_id=tx_id, supersede=True)

# Check the transaction's status and read its journal of events.
status = tx_status(storage, tx_id=tx_id)
events = list(journal_query(storage, tx_id=tx_id, limit=50))

# Link an execution trace to both the published artifact and the transaction.
trace_log(
    storage,
    decision="execute_skill",
    skill_ref=artifact.ref(),
    reason="deploy requested",
    confidence=0.9,
    result="success",
    tx_id=tx_id,
)
recent = list(trace_query(storage, decision="execute_skill", limit=10))

# LangGraph adapter plus the metric-name constants it emits.
from agent_artifacts.adapters.langgraph import (
    LangGraphAdapter,
    LANGGRAPH_METRIC_MEMORY_STAGED,
    LANGGRAPH_METRIC_TX_COMMITTED,
    LANGGRAPH_METRIC_TX_ROLLED_BACK,
)
class Metrics:
def increment(self, name: str, value: int = 1, *, tags: dict | None = None) -> None:
print("metric", name, value, tags)
# Wire the Metrics instance into the adapter as its metrics hook
# (presumably invoked with the LANGGRAPH_METRIC_* names — confirm in adapter docs).
adapter = LangGraphAdapter(storage, metrics_hook=Metrics())