Manual Tracking

When to Use Manual Tracking

Custom Frameworks

Building with a custom agent framework, not LangChain.

Raw OpenAI/Anthropic

Using the OpenAI or Anthropic SDKs directly.

Specific Events

Only want to track certain events, not everything.

Non-Python

Using JavaScript/TypeScript or another language.

Basic Pattern

The simple begin/finish pattern works for most use cases:
import sentrial

sentrial.configure(api_key="sentrial_live_xxx")

def handle_request(user_input: str, user_id: str):
    """Serve one chat request, recording it via Sentrial's begin/finish API.

    The interaction is always finished: with the agent's output and an
    estimated cost on success, or with the exception text on failure
    (the exception is then re-raised for the caller).
    """
    # Open the tracked interaction before any agent work runs.
    interaction = sentrial.begin(
        user_id=user_id, event="chat_request", input=user_input
    )

    try:
        response = your_agent.run(user_input)
        interaction.finish(output=response, success=True, estimated_cost=0.023)
    except Exception as exc:
        # Record the failure reason before letting the error propagate.
        interaction.finish(success=False, failure_reason=str(exc))
        raise

    return response

Full Control Pattern

For more granular tracking, use the SentrialClient directly:
from sentrial import SentrialClient
import openai

client = SentrialClient(api_key="sentrial_live_xxx")

def handle_request(user_input: str, user_id: str):
    """Serve one request with fine-grained Sentrial tracking.

    Creates a session, records the agent's decision and the database
    search tool call, asks the LLM, then completes the session with
    token counts and estimated cost — or with the error message on
    failure, in which case the exception is re-raised.
    """
    # Every tracked event below attaches to this session.
    session_id = client.create_session(
        name=f"Request: {user_input[:40]}...",
        agent_name="my-custom-agent",
        user_id=user_id,
        metadata={"source": "api"},
    )

    try:
        # Record the strategy chosen and the alternatives passed over.
        client.track_decision(
            session_id=session_id,
            reasoning="User asked a question, will search then respond",
            alternatives=["direct_response", "clarify_question"],
        )

        # Run the search tool and log its input/output pair.
        hits = search_database(user_input)
        client.track_tool_call(
            session_id=session_id,
            tool_name="search_database",
            tool_input={"query": user_input},
            tool_output={"results": hits},
            reasoning="Searching for relevant information",
        )

        # Ask the model for the final answer.
        completion = openai.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": user_input},
            ],
        )
        answer = completion.choices[0].message.content
        usage = completion.usage

        # Close out the session with cost and token accounting.
        client.complete_session(
            session_id=session_id,
            success=True,
            estimated_cost=calculate_cost(usage),
            prompt_tokens=usage.prompt_tokens,
            completion_tokens=usage.completion_tokens,
        )
        return answer

    except Exception as exc:
        # Mark the session failed with the reason, then propagate.
        client.complete_session(
            session_id=session_id,
            success=False,
            failure_reason=str(exc),
        )
        raise

Tracking Tool Calls

Wrap your tool functions to automatically track them:
def tracked_tool(client, session_id):
    """Decorator factory that reports each call of the wrapped tool.

    Every invocation of the decorated function is sent to Sentrial via
    ``client.track_tool_call`` — with the result on success, or with the
    error message on failure (the exception is then re-raised).

    Args:
        client: A SentrialClient (or compatible object) exposing
            ``track_tool_call``.
        session_id: The session the tool calls belong to.
    """
    import functools  # local import keeps this snippet self-contained

    def decorator(func):
        # functools.wraps preserves func.__name__/__doc__ on the wrapper,
        # so the decorated tool stays introspectable (the original code
        # lost these, which is confusing when the tracker itself reports
        # tool_name=func.__name__).
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            tool_input = {"args": args, "kwargs": kwargs}

            try:
                result = func(*args, **kwargs)
            except Exception as e:
                # Record the failure, then let the exception propagate.
                client.track_tool_call(
                    session_id=session_id,
                    tool_name=func.__name__,
                    tool_input=tool_input,
                    tool_output={"error": str(e)},
                )
                raise

            # Success path: tracked outside the try so a failure inside
            # track_tool_call itself is not double-reported as a tool error.
            client.track_tool_call(
                session_id=session_id,
                tool_name=func.__name__,
                tool_input=tool_input,
                tool_output={"result": result},
            )
            return result

        return wrapper
    return decorator


# Usage
@tracked_tool(client, session_id)
def search_database(query: str):
    """Search the project database for *query*; each call is reported to Sentrial by the decorator."""
    return database.search(query)

@tracked_tool(client, session_id)
def send_email(to: str, subject: str, body: str):
    """Send an email via the project's email service; each call is reported to Sentrial by the decorator."""
    return email_service.send(to, subject, body)

TypeScript Example

import { SentrialClient } from '@sentrial/sdk';
import OpenAI from 'openai';

// Sentrial client; the API key is read from the environment.
const sentrial = new SentrialClient({
  apiKey: process.env.SENTRIAL_API_KEY!
});

// OpenAI client — per the SDK docs it reads OPENAI_API_KEY from the
// environment when no key is passed explicitly.
const openai = new OpenAI();

/**
 * Serve one request with fine-grained Sentrial tracking.
 *
 * Creates a session, records the database-search tool call, asks the
 * LLM, and completes the session with token counts on success — or
 * with the error message on failure (the error is then re-thrown).
 */
async function handleRequest(userInput: string, userId: string) {
  // Every tracked event below attaches to this session.
  const sessionId = await sentrial.createSession({
    name: `Request: ${userInput.slice(0, 40)}...`,
    agentName: "my-custom-agent",
    userId
  });

  try {
    // Run the search tool and record its input/output pair.
    const searchResults = await searchDatabase(userInput);
    await sentrial.trackToolCall({
      sessionId,
      toolName: "search_database",
      toolInput: { query: userInput },
      toolOutput: { results: searchResults },
    });

    // Ask the model for the final answer.
    const completion = await openai.chat.completions.create({
      model: "gpt-4o",
      messages: [{ role: "user", content: userInput }]
    });

    const response = completion.choices[0].message.content;

    // Close out the session with token accounting.
    await sentrial.completeSession({
      sessionId,
      success: true,
      promptTokens: completion.usage?.prompt_tokens,
      completionTokens: completion.usage?.completion_tokens,
    });

    return response;

  } catch (error) {
    // In TypeScript a catch variable is `unknown` (strict mode /
    // useUnknownInCatchVariables), so `error.message` does not compile.
    // Narrow to Error before reading .message.
    const failureReason =
      error instanceof Error ? error.message : String(error);
    await sentrial.completeSession({
      sessionId,
      success: false,
      failureReason,
    });
    throw error;
  }
}

Best Practices

Call complete_session() or finish() even on errors to ensure data is recorded.
Focus on tool calls and decisions, not every line of code.
When success=False, always include failure_reason for better diagnosis.
Same agent_name groups sessions together in the dashboard.
Use the cost calculation helpers or track real token usage.

Next Steps