agentworks-sdk
v0.1.1
Published
AgentWorks SDK - Instrument and observe multi-agent AI systems
Maintainers
Readme
AgentWorks JavaScript SDK
Instrument and observe your multi-agent AI systems with a single line of code.
Installation
npm install agentworks
# or
yarn add agentworks
# or
pnpm add agentworks
Quick Start
import { configure, traceAgent, traceTool, traceLLM } from 'agentworks';
import OpenAI from 'openai';
const openai = new OpenAI();
// Configure SDK
configure({
ingestEndpoint: 'http://localhost:8080',
orgId: 'my-org',
projectId: 'my-project',
});
// Trace your agent
await traceAgent('support-bot', async () => {
// Trace tool calls
const user = await traceTool('fetch_user_data', async () => {
return await fetchUserData(userId);
});
// Trace LLM calls with automatic error handling and cost tracking
await traceLLM(
async (capture) => {
const response = await openai.chat.completions.create({
model: 'gpt-4o-mini',
messages: [
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: `Summarize: ${user.history}` }
],
});
capture({
completion: response.choices[0].message.content || '',
promptTokens: response.usage?.prompt_tokens || 0,
completionTokens: response.usage?.completion_tokens || 0,
});
return response;
},
{
model: 'gpt-4o-mini',
provider: 'openai',
systemPrompt: 'You are a helpful assistant.',
userPrompt: `Summarize: ${user.history}`,
}
);
}, 'ticket-123');
Features
- Zero-overhead instrumentation: <5ms latency per span
- Automatic cost tracking: Built-in pricing for OpenAI, Anthropic, Google
- PII detection & redaction: Protect sensitive data automatically
- TypeScript support: Full type definitions included
- Framework agnostic: Works with any JavaScript/TypeScript agent framework
API Reference
Configuration
configure({
ingestEndpoint: 'http://localhost:8080', // AgentWorks API endpoint
apiKey: 'aw_...', // API key (optional for dev)
orgId: 'my-org', // Organization ID
projectId: 'my-project', // Project ID
redactPii: true, // Enable PII redaction
debug: false, // Enable debug logging
});
Tracing
traceAgent(agentId, fn, workflowId?, attributes?)
Trace an agent execution.
await traceAgent('support-bot', async () => {
// Agent logic here
}, 'ticket-123', { 'user.id': '12345' });
traceTool(name, fn, attributes?)
Trace a tool execution.
const result = await traceTool('stripe_refund', async () => {
// Tool logic here
return await stripe.refunds.create({ amount: 5000 });
}, { amount: '50.00' });
traceDecision(fn, policy?, attributes?)
Trace a decision point.
const model = await traceDecision(async () => {
return selectModel(task);
}, 'routing-v1', { task: 'classification' });
llmCall(params)
Trace an LLM call with automatic cost calculation and PII detection.
Note: This function does NOT capture errors automatically. Consider using traceLLM() for better error handling.
const result = await llmCall({
model: 'gpt-4',
provider: 'openai',
prompt: 'Classify: ...',
completion: 'Category: Support',
promptTokens: 100,
completionTokens: 10,
attributes: { temperature: 0.7 },
});
// Returns: { traceId, spanId, costUsd, piiDetected }
traceLLM(fn, params) ⭐ Recommended
Trace an LLM call with automatic error handling, cost tracking, and PII detection.
Advantages over llmCall():
- ✅ Automatically captures and records errors
- ✅ Proper error status in dashboard
- ✅ Complete stack traces for debugging
- ✅ Wraps the actual LLM call
import OpenAI from 'openai';
const openai = new OpenAI();
// Wrap your LLM call with automatic error handling
const response = await traceLLM(
async (capture) => {
// Your LLM call here
const result = await openai.chat.completions.create({
model: 'gpt-4o-mini',
messages: [
{ role: 'system', content: 'You are helpful.' },
{ role: 'user', content: 'Hello!' }
],
});
// Capture the response
capture({
completion: result.choices[0].message.content || '',
promptTokens: result.usage?.prompt_tokens || 0,
completionTokens: result.usage?.completion_tokens || 0,
});
return result;
},
{
model: 'gpt-4o-mini',
provider: 'openai',
systemPrompt: 'You are helpful.',
userPrompt: 'Hello!',
}
);
// Any errors are automatically recorded to AgentWorks!
Utilities
getCurrentTraceId()
Get the current trace ID.
import { getCurrentTraceId } from 'agentworks';
const traceId = getCurrentTraceId();
getCurrentSpanId()
Get the current span ID.
import { getCurrentSpanId } from 'agentworks';
const spanId = getCurrentSpanId();
Supported Models
The SDK includes built-in pricing for:
- OpenAI: GPT-4, GPT-4 Turbo, GPT-4o, GPT-3.5 Turbo
- Anthropic: Claude 3 Opus, Sonnet, Haiku, Claude 3.5 Sonnet
- Google: Gemini Pro, Gemini 1.5 Pro/Flash
PII Detection
Automatically detects and redacts:
- Email addresses
- Phone numbers
- Social Security Numbers
- Credit card numbers
- API keys
Enable PII redaction:
configure({
redactPii: true,
});
TypeScript Support
This SDK is written in TypeScript and includes full type definitions.
import type { Config, SpanAttributes, LLMCallResult } from 'agentworks';
Examples
Express.js API
import express from 'express';
import { configure, traceAgent, llmCall } from 'agentworks';
configure({
ingestEndpoint: process.env.AGENTWORKS_ENDPOINT,
apiKey: process.env.AGENTWORKS_API_KEY,
orgId: 'my-org',
projectId: 'api',
});
app.post('/chat', async (req, res) => {
await traceAgent('chat-api', async () => {
const response = await generateResponse(req.body.message);
await llmCall({
model: 'gpt-4',
provider: 'openai',
prompt: req.body.message,
completion: response,
promptTokens: 100,
completionTokens: 50,
});
res.json({ response });
}, req.body.conversationId);
});
LangChain Integration
import { ChatOpenAI } from 'langchain/chat_models/openai';
import { traceAgent, llmCall } from 'agentworks';
const chat = new ChatOpenAI({ modelName: 'gpt-4' });
await traceAgent('langchain-bot', async () => {
const response = await chat.call([
{ role: 'user', content: 'Hello!' }
]);
await llmCall({
model: 'gpt-4',
provider: 'openai',
prompt: 'Hello!',
completion: response.text,
promptTokens: 10,
completionTokens: 20,
});
});
Development
# Install dependencies
npm install
# Build
npm run build
# Test
npm test
# Lint
npm run lint
# Type check
npm run type-check
License
MIT
