Skip to main content

Installation

npm install @sentrial/sdk

Quick Start

import { SentrialClient } from '@sentrial/sdk';

// Initialize client
const client = new SentrialClient({
  apiKey: process.env.SENTRIAL_API_KEY
});

// Create a session (returns string | null — null if failSilently is true and request fails)
const sessionId = await client.createSession({
  name: "Customer Support Request",
  agentName: "support-agent",
  userId: "user_123",
  metadata: { channel: "web_chat" }
});

// Track events...

// Complete session
await client.completeSession({
  sessionId,
  success: true,
  estimatedCost: 0.023
});

SentrialClient

constructor(config)

/** Options accepted by the `SentrialClient` constructor. Every field is optional. */
interface SentrialClientConfig {
  apiKey?: string;              // Your Sentrial API key (or SENTRIAL_API_KEY env var)
  apiUrl?: string;              // Custom API URL (optional)
  failSilently?: boolean;       // If true (default), SDK errors are logged but won't crash your app
  pii?: PiiConfig | boolean;    // PII redaction config (pass true to fetch from server)
  batching?: BatcherConfig;     // Event batching config for fire-and-forget tracking
}

const client = new SentrialClient({
  apiKey: "sentrial_live_xxx",
  apiUrl: "https://api.sentrial.com"  // Optional
});

createSession(params)

Create a new tracking session. Returns string | null (null when failSilently is true and the request fails).
/** Parameters for `createSession`. `name`, `agentName`, and `userId` are required. */
interface CreateSessionParams {
  name: string;                        // Descriptive name
  agentName: string;                   // Agent identifier
  userId: string;                      // External user ID
  parentSessionId?: string;            // Parent session ID for multi-agent hierarchies
  convoId?: string;                    // Conversation ID to group related sessions
  metadata?: Record<string, unknown>;  // Custom metadata
}

const sessionId = await client.createSession({
  name: "Support Request #123",
  agentName: "support-agent",
  userId: "user_12345",
  metadata: {
    ticketId: "TKT-789",
    priority: "high"
  }
});

trackToolCall(params)

Track a tool call event.
/** Parameters for `trackToolCall`. Ties one tool invocation (input, output, optional error/cost) to a session. */
interface TrackToolCallParams {
  sessionId: string;
  toolName: string;
  toolInput: Record<string, unknown>;
  toolOutput: Record<string, unknown>;
  reasoning?: string;
  estimatedCost?: number;
  toolError?: Record<string, unknown>;
  tokenCount?: number;
  traceId?: string;               // OpenTelemetry trace ID
  spanId?: string;                // OpenTelemetry span ID
  metadata?: Record<string, unknown>;
}

await client.trackToolCall({
  sessionId,
  toolName: "search_knowledge_base",
  toolInput: { query: "password reset" },
  toolOutput: { results: ["KB-001", "KB-002"] },
  reasoning: "User asked about password reset"
});

trackDecision(params)

Track an agent decision.
/** Parameters for `trackDecision`. Records an agent's reasoning, considered alternatives, and confidence. */
interface TrackDecisionParams {
  sessionId: string;
  reasoning: string;
  alternatives?: string[];        // Other options the agent considered
  confidence?: number;            // Typically 0–1; see example below — TODO confirm range is enforced
  estimatedCost?: number;
  tokenCount?: number;
  traceId?: string;               // OpenTelemetry trace ID
  spanId?: string;                // OpenTelemetry span ID
  metadata?: Record<string, unknown>;
}

await client.trackDecision({
  sessionId,
  reasoning: "Will search KB first before escalating",
  alternatives: ["escalate_to_human", "ask_clarifying_question"],
  confidence: 0.92
});

completeSession(params)

Complete a session with final metrics.
/** Parameters for `completeSession`. Final outcome and metrics for a session; only `sessionId` is required. */
interface CompleteSessionParams {
  sessionId: string;
  success?: boolean;
  failureReason?: string;         // Human-readable reason, used when success is false
  estimatedCost?: number;
  customMetrics?: Record<string, number>;
  durationMs?: number;
  promptTokens?: number;
  completionTokens?: number;
  totalTokens?: number;
  userInput?: string;
  assistantOutput?: string;
  output?: string;                // Alias for assistantOutput
}

// Success
await client.completeSession({
  sessionId,
  success: true,
  estimatedCost: 0.045,
  promptTokens: 1500,
  completionTokens: 500,
  customMetrics: {
    satisfaction: 4.5
  }
});

// Failure
await client.completeSession({
  sessionId,
  success: false,
  failureReason: "API rate limit exceeded"
});

trackError(params)

Track an error event.
await client.trackError({
  sessionId,
  errorMessage: "API rate limit exceeded",
  errorType: "RateLimitError",
  toolName: "search_kb",
  stackTrace: error.stack,
});

begin(params) / interaction.finish()

Simplified API that auto-manages sessions. Returns an Interaction object.
import { sentrial } from '@sentrial/sdk';

sentrial.configure({ apiKey: process.env.SENTRIAL_API_KEY });

const interaction = await sentrial.begin({
  userId: 'user_123',
  event: 'support_request',
  input: 'Help me reset my password',
  convoId: 'conv_789',
});

// Your agent logic...
const response = await agent.run(userInput);

// Track tool calls on the interaction
await interaction.trackToolCall({
  toolName: 'search_kb',
  toolInput: { query: 'password reset' },
  toolOutput: { results: ['KB-001'] },
});

await interaction.finish({ output: response, success: true });

LLM Auto-Wrappers

Automatically track all LLM calls with token counts, cost, and latency.
import { sentrial, wrapOpenAI, wrapAnthropic, wrapGoogle } from '@sentrial/sdk';
import OpenAI from 'openai';

sentrial.configure({ apiKey: process.env.SENTRIAL_API_KEY });

// Wrap your LLM client — all calls are now auto-tracked
const openai = wrapOpenAI(new OpenAI());

const interaction = await sentrial.begin({
  userId: 'user_123',
  event: 'chat',
  input: userQuery,
});

// This call is automatically tracked with tokens, cost, and duration
const response = await openai.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: userQuery }],
});

await interaction.finish({ output: response.choices[0].message.content });
Also available: wrapAnthropic(client), wrapGoogle(model), wrapLLM(client) (auto-detect).

Decorators & Higher-Order Functions

import { withSession, withTool, setClient, SentrialClient } from '@sentrial/sdk';

setClient(new SentrialClient({ apiKey: process.env.SENTRIAL_API_KEY }));

// Wrap a tool function — auto-tracks inputs, outputs, and errors
const searchKB = withTool('search_kb', async (query: string) => {
  return await kb.search(query);
});

// Wrap an agent function — auto-creates session, tracks all tools inside
const handleRequest = withSession('support-agent', async (userId: string, input: string) => {
  const results = await searchKB(input);
  return results;
});

Event Batching

Queue tracking calls and flush them periodically instead of sending each one immediately. Reduces HTTP overhead for high-throughput agents.
const client = new SentrialClient({
  apiKey: process.env.SENTRIAL_API_KEY,
  batching: {
    enabled: true,
    flushIntervalMs: 2000,  // Flush every 2s (default: 1000)
    flushThreshold: 20,     // Flush after 20 events (default: 10)
    maxQueueSize: 500,      // Drop events if queue exceeds this (default: 1000)
  },
});

// trackToolCall, trackDecision, trackError are queued and batched
// createSession, completeSession always bypass the batcher

// Flush manually before shutdown
await client.flush();
await client.shutdown();

PII Redaction

Automatically redact sensitive data before it leaves your infrastructure.
// Auto-fetch your org's PII config from the server
const client1 = new SentrialClient({ apiKey: '...', pii: true });

// Or configure locally with full control
const client2 = new SentrialClient({
  apiKey: '...',
  pii: {
    enabled: true,
    mode: 'label',            // 'label' | 'hash' | 'remove'
    fields: ['userInput', 'assistantOutput', 'metadata'],
    builtinPatterns: {
      emails: true,
      phones: true,
      ssns: true,
      creditCards: true,
    },
    customPatterns: [
      { label: 'api_key', pattern: /sk-[a-zA-Z0-9]{32,}/ },
    ],
  },
});

Cost Calculation

Exported helper functions for calculating LLM API costs. Updated pricing for latest models.
import { calculateOpenAICost, calculateAnthropicCost, calculateGoogleCost } from '@sentrial/sdk';

const cost = calculateOpenAICost({
  model: 'gpt-4o',
  inputTokens: 1000,
  outputTokens: 500,
});
// Returns: 0.0075 (USD)

calculateAnthropicCost({ model: 'claude-sonnet-4', inputTokens: 1000, outputTokens: 500 });
calculateGoogleCost({ model: 'gemini-2.5-flash', inputTokens: 1000, outputTokens: 500 });
Supported models include GPT-5, GPT-4.1, O-series, Claude Opus/Sonnet 4, Claude 3.5, Gemini 2.5/2.0, and more.

Complete Example

import { SentrialClient, calculateOpenAICost } from '@sentrial/sdk';
import OpenAI from 'openai';

const sentrial = new SentrialClient({
  apiKey: process.env.SENTRIAL_API_KEY!
});

const openai = new OpenAI();

/**
 * Handle a user request end-to-end with Sentrial session tracking.
 *
 * Creates a session, runs the LLM call, and completes the session with
 * cost/token metrics on success or a failure reason on error.
 *
 * @param userInput - Raw user message to send to the model.
 * @param userId - External user ID attached to the session.
 * @returns The assistant's reply text (null if the model returned no content).
 * @throws Re-throws any error from the OpenAI call after recording the failure.
 */
async function handleUserRequest(userInput: string, userId: string): Promise<string | null> {
  // Only append an ellipsis when the input was actually truncated.
  const title = userInput.length > 40 ? `${userInput.slice(0, 40)}...` : userInput;

  // Start session. createSession returns string | null — null when
  // failSilently (the default) swallows a failed request, so guard below.
  const sessionId = await sentrial.createSession({
    name: `Support: ${title}`,
    agentName: "support-agent",
    userId
  });

  try {
    // Measure wall-clock duration of the LLM call for session metrics.
    const startTime = Date.now();

    const completion = await openai.chat.completions.create({
      model: "gpt-4o",
      messages: [{ role: "user", content: userInput }]
    });

    const response = completion.choices[0].message.content;
    const usage = completion.usage;

    // Skip metric reporting if session creation failed silently.
    if (sessionId) {
      await sentrial.completeSession({
        sessionId,
        success: true,
        estimatedCost: calculateOpenAICost({
          model: 'gpt-4o',
          inputTokens: usage?.prompt_tokens ?? 0,
          outputTokens: usage?.completion_tokens ?? 0,
        }),
        promptTokens: usage?.prompt_tokens,
        completionTokens: usage?.completion_tokens,
        durationMs: Date.now() - startTime
      });
    }

    return response;

  } catch (error: unknown) {
    // catch variables are `unknown` under strict TS — narrow before reading .message.
    const failureReason = error instanceof Error ? error.message : String(error);
    if (sessionId) {
      await sentrial.completeSession({
        sessionId,
        success: false,
        failureReason
      });
    }
    throw error;
  }
}

Environment Variables

# .env
SENTRIAL_API_KEY=sentrial_live_xxxxxxxxxxxxx
SENTRIAL_API_URL=https://api.sentrial.com  # Optional

Next Steps