Docs
Total: All decisions logged by your agents in this view. Click any card below to see the full evidence trail.
Approved: Decisions with a positive outcome — APPROVED, ADVANCE, SCALE_UP, CLEAR, etc.
Rejected: Decisions with a negative outcome — REJECTED, DENIED, DECLINE, PAUSE, etc.
Approval Rate: Percentage of decisions with a positive outcome. Baseline drift detection will alert you if this shifts unexpectedly.
🎉 Your account is ready!
Here's your API key — save it now, it won't be shown again.
You're set up. Here's what to do next.
SealVera is watching. Connect an agent and every decision it makes will appear below — explained, signed, and monitored.
1
Account created
You're in. Your org, API key, and dashboard are ready.
2
Connect your first agent
Install the SealVera skill or set NODE_OPTIONS — decisions appear here automatically.
3
Set up your first alert
Get notified when your agent behaves unexpectedly.
4
Generate a compliance report
One-click SOC 2 / EU AI Act formatted audit trail.
// Zero-friction: set env vars, no code changes needed
// export NODE_OPTIONS="--require node_modules/sealvera/scripts/autoload.js"
// export SEALVERA_API_KEY=<YOUR_KEY>

// Or wrap explicitly:
// npm install sealvera

const SealVera = require('sealvera');
const { OpenAI } = require('openai');

SealVera.init({ endpoint: 'https://app.sealvera.com', apiKey: process.env.SEALVERA_API_KEY });
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
const agent = SealVera.createClient(openai, { agent: 'my-agent-name' });
const response = await agent.chat.completions.create({ model: 'gpt-4o', messages: [...] });
OpenAI, Anthropic, and OpenRouter all detected automatically
pip install sealvera

import os, openai, sealvera

sealvera.init(endpoint="https://app.sealvera.com", api_key="")

# Pass your configured client — SDK is detected automatically
openai_client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])
agent = sealvera.create_client(openai_client, agent="my-agent-name")

# Use exactly like the original — every call is now logged
response = agent.chat.completions.create(model="gpt-4o", messages=[...])

# Works for Anthropic too — SDK detected, thinking blocks extracted natively
import anthropic
anthropic_client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
uw_agent = sealvera.create_client(anthropic_client, agent="underwriter")
OpenAI, Anthropic, and OpenRouter all detected automatically
go get github.com/sealvera/sealvera-go

import sealvera "github.com/sealvera/sealvera-go"

sealvera.Init(sealvera.Config{
    Endpoint: "https://app.sealvera.com",
    APIKey:   "",
})

// One Agent per logical agent in your application
fraudAgent := sealvera.NewAgent("fraud-screener")
uwAgent    := sealvera.NewAgent("loan-underwriter")

// Include session_id in input — decisions link into traces automatically
input := map[string]any{"applicant_id": "APP-001", "session_id": caseID}

// Wrap your LLM call — provider declared per call
result, err := fraudAgent.WrapOpenAI(ctx, "screen_application", input,
    func() (any, error) {
        return openaiClient.Chat.Completions.New(ctx, params)
    },
)
WrapOpenAI, WrapAnthropic, WrapOpenRouter — one Agent per logical agent
npm install sealvera @anthropic-ai/sdk

const SealVera = require('sealvera');
const Anthropic = require('@anthropic-ai/sdk');

SealVera.init({ endpoint: 'https://app.sealvera.com', apiKey: '' });

// Pass your configured Anthropic client — SDK detected automatically
const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
const agent = SealVera.createClient(anthropic, { agent: 'my-claude-agent' });

// Use exactly like the original client
// Extended thinking chains are captured as native evidence automatically
const result = await agent.messages.create({
  model: 'claude-3-7-sonnet-20250219',
  max_tokens: 16000,
  thinking: { type: 'enabled', budget_tokens: 10000 },
  messages: [{ role: 'user', content: '...' }]
});
Extended thinking extracted as native evidence — green "Agent-provided" badge
npm install sealvera

const SealVera = require('sealvera');
const { OpenAI } = require('openai');

SealVera.init({ endpoint: 'https://app.sealvera.com', apiKey: '' });

// OpenRouter baseURL is detected automatically — no extra config
const openrouter = new OpenAI({
  baseURL: 'https://openrouter.ai/api/v1',
  apiKey: process.env.OPENROUTER_API_KEY,
});
const agent = SealVera.createClient(openrouter, { agent: 'my-agent' });

// Any model — GPT-4o, Claude, Llama — all logged, model name tracked per entry
const result = await agent.chat.completions.create({
  model: 'anthropic/claude-3-7-sonnet',
  messages: [...]
});
Any model through OpenRouter — Claude thinking blocks auto-detected
# No SDK needed — works with any language or framework
# Just point your existing OTel exporter at SealVera

OTEL_EXPORTER_OTLP_ENDPOINT=https://app.sealvera.com/api/otel
OTEL_EXPORTER_OTLP_HEADERS="X-SealVera-Key="

# Add these attributes to your AI decision spans:
# ai.agent      = "my-agent-name"
# ai.action     = "evaluate"
# ai.decision   = "APPROVED"
# ai.model      = "gpt-4o"
# ai.input      = '{"amount": 25000}'
# ai.output     = '{"decision": "APPROVED", "confidence": 0.94}'
# ai.reasoning  = "Optional — plain text reasoning"

# Or send a span directly:
curl -X POST https://app.sealvera.com/api/otel/v1/spans \
  -H "X-SealVera-Key: " \
  -H "Content-Type: application/json" \
  -d '{"resourceSpans":[{"scopeSpans":[{"spans":[{
    "name":"ai.decision",
    "attributes":[
      {"key":"ai.agent","value":{"stringValue":"my-agent"}},
      {"key":"ai.decision","value":{"stringValue":"APPROVED"}}
    ]
  }]}]}]}'
Works with any OTel-compatible framework — zero SDK changes
# Option 1 — OpenClaw skill (recommended)
clawhub install sealvera
export SEALVERA_API_KEY=<YOUR_KEY>

# Option 2 — Any Node.js agent
npm install sealvera
export NODE_OPTIONS="--require node_modules/sealvera/scripts/autoload.js"
export SEALVERA_API_KEY=<YOUR_KEY>

# That's it — run your agent as normal
Every LLM call your OpenClaw agent makes is now audited
# Log any decision directly — no SDK needed
# Include session_id to auto-link decisions into traces

curl -X POST https://app.sealvera.com/api/ingest \
  -H "Content-Type: application/json" \
  -H "X-SealVera-Key: " \
  -d '{
    "agent": "loan-underwriter",
    "action": "evaluate_application",
    "decision": "APPROVED",
    "input": {
      "applicant_id": "A-4421",
      "amount": 25000,
      "session_id": "req_abc123"
    },
    "output": { "decision": "APPROVED", "confidence": 0.92 }
  }'
Any language, any agent
Ready to connect your agent?
Copy the snippet above, drop it in, and decisions appear here automatically.
View docs →

No anomalies detected. Your agents are behaving normally.

↻ New trace detected — click to refresh ⚡ auto-trace active

No traces yet.

SealVera links related decisions automatically when it detects shared session IDs, request IDs, or temporal patterns across your agents.

You can also create traces explicitly:

traceId: 'claim-C9182'   // in any ingest call

Once your agents are running, traces appear here automatically.

Trace
Organization & API Keys
Your agent is connected
SealVera just logged its first decision. Every call is now being captured, explained, and cryptographically signed.
Connect an Agent
Add SealVera to any AI agent in minutes. Every decision is automatically logged, explained, and signed.
Your API Key
Loading...
npm install sealvera

const SealVera = require('sealvera');
const { OpenAI } = require('openai');

SealVera.init({ endpoint: 'https://app.sealvera.com', apiKey: 'YOUR_KEY' });

// Pass your configured client — SealVera detects the SDK automatically
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
const agent = SealVera.createClient(openai, { agent: 'my-agent-name' });

// Use exactly like the original client — every call is now logged
const response = await agent.chat.completions.create({ model: 'gpt-4o', messages: [...] });

// Multi-agent? One client per agent + session_id in inputs = automatic trace
const fraudAgent = SealVera.createClient(new OpenAI(), { agent: 'fraud-screener' });
const uwAgent    = SealVera.createClient(new OpenAI(), { agent: 'loan-underwriter' });
// Include session_id in your message content — decisions link automatically
OpenAI, Anthropic, and OpenRouter all detected automatically
pip install sealvera

import os, openai, sealvera

sealvera.init(endpoint="https://app.sealvera.com", api_key="YOUR_KEY")

# Pass your configured client — SDK is detected automatically
openai_client = openai.OpenAI(api_key=os.environ["OPENAI_API_KEY"])
agent = sealvera.create_client(openai_client, agent="my-agent-name")

# Use exactly like the original — every call is now logged
response = agent.chat.completions.create(model="gpt-4o", messages=[...])

# Works for Anthropic too — SDK detected, thinking blocks extracted natively
import anthropic
uw_agent = sealvera.create_client(
    anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"]),
    agent="underwriter"
)
OpenAI, Anthropic, and OpenRouter all detected automatically
go get github.com/sealvera/sealvera-go

import sealvera "github.com/sealvera/sealvera-go"

sealvera.Init(sealvera.Config{
    Endpoint: "https://app.sealvera.com",
    APIKey:   "YOUR_KEY",
})

// One Agent per logical agent — equivalent to createClient() in JS/Python
fraudAgent := sealvera.NewAgent("fraud-screener")
uwAgent    := sealvera.NewAgent("loan-underwriter")

// Include session_id in input — decisions link into traces automatically
input := map[string]any{"applicant_id": "APP-001", "session_id": caseID}

// Wrap your LLM call — provider declared per call
result, err := fraudAgent.WrapOpenAI(ctx, "screen_application", input,
    func() (any, error) {
        return openaiClient.Chat.Completions.New(ctx, params)
    },
)
WrapOpenAI, WrapAnthropic, WrapOpenRouter — one Agent per logical agent
# Include session_id in inputs — SealVera links decisions
# from different agents into traces automatically

curl -X POST https://app.sealvera.com/api/ingest \
  -H "Content-Type: application/json" \
  -H "X-SealVera-Key: YOUR_KEY" \
  -d '{
    "agent": "loan-underwriter",
    "action": "evaluate_application",
    "decision": "APPROVED",
    "input": {
      "applicant_id": "A-4421",
      "amount": 25000,
      "session_id": "req_abc123"
    },
    "output": { "decision": "APPROVED", "confidence": 0.92 }
  }'
Any language, any agent — no SDK required
npm install sealvera @anthropic-ai/sdk

const SealVera = require('sealvera');
const Anthropic = require('@anthropic-ai/sdk');

SealVera.init({ endpoint: 'https://app.sealvera.com', apiKey: 'YOUR_KEY' });

// Pass your configured Anthropic client — SDK detected automatically
const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
const agent = SealVera.createClient(anthropic, { agent: 'my-claude-agent' });

// Extended thinking chains are captured as native evidence automatically
const result = await agent.messages.create({
  model: 'claude-3-7-sonnet-20250219',
  max_tokens: 16000,
  thinking: { type: 'enabled', budget_tokens: 10000 },
  messages: [{ role: 'user', content: '...' }]
});
Extended thinking extracted as native evidence — green "Agent-provided" badge
npm install sealvera

const SealVera = require('sealvera');
const { OpenAI } = require('openai');

SealVera.init({ endpoint: 'https://app.sealvera.com', apiKey: 'YOUR_KEY' });

// OpenRouter baseURL is detected automatically — no extra config
const openrouter = new OpenAI({
  baseURL: 'https://openrouter.ai/api/v1',
  apiKey: process.env.OPENROUTER_API_KEY,
});
const agent = SealVera.createClient(openrouter, { agent: 'my-agent' });

// Any model — GPT-4o, Claude, Llama — all logged, model name tracked per entry
const result = await agent.chat.completions.create({
  model: 'anthropic/claude-3-7-sonnet',
  messages: [...]
});
Any model through OpenRouter — Claude thinking blocks auto-detected
const SealVeraOpenClaw = require('./skills/sealvera/sealvera-openclaw');

const sealvera = new SealVeraOpenClaw({
  endpoint: 'https://app.sealvera.com',
  apiKey: 'YOUR_KEY',
  agent: 'my-openclaw-agent'
});

// Log every agent turn
await sealvera.captureAgentTurn({
  action: 'respond_to_user',
  decision: 'RESPONDED',
  input: { message: userMessage },
  output: { response: agentResponse },
  reasoning: 'User asked for status update. Queried DB and summarized.'
});

// Log tool calls
await sealvera.captureToolCall({
  toolName: 'web_search',
  params: { query: 'EU AI Act requirements' },
  result: { results: [...] }
});

// All turns are auto-linked into one trace per session
Every agent turn and tool call logged automatically
# ── OpenTelemetry Integration ──────────────────────────────
# No SDK needed — works with any language that supports OTel.
# Point your OTel exporter at SealVera:

OTEL_EXPORTER_OTLP_ENDPOINT=https://app.sealvera.com/api/otel
OTEL_EXPORTER_OTLP_HEADERS="X-SealVera-Key=YOUR_KEY"

# Required span attributes (add to your AI decision spans):
#   ai.agent    = "my-agent-name"          (agent identifier)
#   ai.action   = "evaluate_transaction"   (what the agent did)
#   ai.decision = "APPROVED"               (the decision made)
#   ai.input    = '{"amount": 1000}'       (JSON string — input data)
#   ai.output   = '{"decision":"APPROVED", "confidence": 0.94}'

# Optional span attributes:
#   ai.model     = "gpt-4o"               (model used)
#   ai.reasoning = "High score, low risk"  (reasoning text)
#   ai.reasoning_steps = '[{...}]'         (structured evidence, JSON)
#   ai.evidence_source = "native"          (evidence origin)

# Or send directly with curl:
curl -X POST https://app.sealvera.com/api/otel/v1/spans \
  -H "Content-Type: application/json" \
  -H "X-SealVera-Key: YOUR_KEY" \
  -d '{
    "resourceSpans": [{
      "resource": {"attributes": [
        {"key":"service.name","value":{"stringValue":"my-service"}}
      ]},
      "scopeSpans": [{"spans": [{
        "name": "ai.decision",
        "traceId": "optional-trace-id",
        "startTimeUnixNano": "1772000000000000000",
        "attributes": [
          {"key":"ai.agent","value":{"stringValue":"fraud-detector"}},
          {"key":"ai.action","value":{"stringValue":"evaluate"}},
          {"key":"ai.decision","value":{"stringValue":"FLAGGED"}},
          {"key":"ai.input","value":{"stringValue":"{\"amount\":50000}"}},
          {"key":"ai.output","value":{"stringValue":"{\"decision\":\"FLAGGED\",\"confidence\":0.91}"}}
        ]
      }]}]
    }]
  }'
Works with any OTel-compatible language or framework