Untrace is currently in beta — route LLM traces to any observability platform.
Connect Untrace with LLM providers and observability platforms
{ "platform": "webhook", "config": { "url": "https://your-api.com/traces", "headers": { "Authorization": "Bearer your-token", "X-Custom-Header": "value" }, "retry": { "maxAttempts": 3, "backoffMs": 1000 } } }
/**
 * The payload describing a single captured LLM trace. Top-level fields
 * identify the trace and the emitting service; `trace` carries the call
 * details (input, output, metrics, and any error).
 */
interface TracePayload {
  /** Unique identifier for this trace. */
  id: string;
  /** When the trace was captured. NOTE(review): format (ISO 8601?) not shown here — confirm. */
  timestamp: string;
  /** The service that produced the trace. */
  service: {
    name: string;
    version?: string;
    environment?: string;
  };
  /** Details of the traced LLM call. */
  trace: {
    model: string;
    provider: string;
    operation: 'chat' | 'completion' | 'embedding';
    /** Request side: chat messages and/or a raw prompt, plus call parameters. */
    input: {
      messages?: Message[];
      prompt?: string;
      parameters: Record<string, any>;
    };
    /** Response side — presumably which field is set depends on `operation`; confirm against emitter. */
    output: {
      content?: string;
      choices?: Choice[];
      embedding?: number[];
    };
    /** Latency, token counts, and (optionally) computed cost. */
    metrics: {
      latencyMs: number;
      promptTokens: number;
      completionTokens: number;
      totalTokens: number;
      cost?: {
        prompt: number;
        completion: number;
        total: number;
        currency: string;
      };
    };
    /** Arbitrary caller-supplied metadata. */
    metadata?: Record<string, any>;
    /** Present only when the traced call failed. */
    error?: {
      type: string;
      message: string;
      code?: string;
    };
  };
}
routing:
  rules:
    # Development traces to Langfuse
    - name: "Dev to Langfuse"
      condition:
        environment: "development"
      destination: "langfuse"
    # Production GPT-4 to LangSmith
    - name: "Prod GPT-4"
      condition:
        environment: "production"
        model: "gpt-4*"
      destination: "langsmith"
    # Errors to custom webhook
    - name: "Error Handler"
      condition:
        error: true
      destinations:
        - "langsmith"
        - platform: "webhook"
          url: "https://alerts.company.com"
// Initialize Untrace with per-provider capture options.
init({
  apiKey: 'your-key',
  providers: {
    openai: {
      captureStreaming: true,
      includePromptTemplates: true,
    },
    anthropic: {
      captureSystemPrompts: true,
      maskSensitiveData: true,
    },
    langchain: {
      traceFullChain: true,
      includeIntermediateSteps: true,
    },
  },
});
// Check which providers are currently instrumented.
console.log(untrace.getInstrumentedProviders());

// Force instrumentation of a specific client instance.
const client = untrace.instrument('openai', openaiClient);