@cherrystudio/analytics-client
v1.1.0
Published
Analytics client for Cherry Studio applications
Readme
@cherrystudio/analytics-client
Analytics client SDK for Cherry Studio applications. Supports batch event tracking with automatic buffering and flush.
Installation
npm install @cherrystudio/analytics-client
# or
pnpm add @cherrystudio/analytics-client
# or
yarn add @cherrystudio/analytics-client

Quick Start
import { AnalyticsClient } from '@cherrystudio/analytics-client'
// Initialize the client
const analytics = new AnalyticsClient({
clientId: 'user-uuid-here',
channel: 'cherry-studio',
})
// Track token usage
analytics.trackTokenUsage({
provider: 'openai',
model: 'gpt-4',
input_tokens: 100,
output_tokens: 200,
})
// Flush before app exit
await analytics.flush()

API
Constructor Options
interface AnalyticsClientOptions {
/** Analytics service base URL (default: https://analytics.cherry-ai.com) */
baseUrl?: string
/** Client unique identifier (UUID) */
clientId: string
/** Channel/application name */
channel: string
/** Enable automatic batching (default: true) */
autoBatch?: boolean
/** Batch size before auto-flush (default: 10) */
batchSize?: number
/** Batch flush interval in milliseconds (default: 5000) */
flushInterval?: number
/** Request timeout in milliseconds (default: 10000) */
timeout?: number
/** Custom fetch function (for Node.js or custom implementations) */
fetch?: typeof fetch
/** Called when an error occurs */
onError?: (error: Error) => void
}

Methods
trackTokenUsage(data, timestamp?)
Track AI token usage.
analytics.trackTokenUsage({
provider: 'openai', // AI provider name
model: 'gpt-4', // Model name
input_tokens: 100, // Input token count
output_tokens: 200, // Output token count
})
// With custom timestamp (for offline tracking)
analytics.trackTokenUsage(data, new Date('2025-01-15T10:30:00Z'))

track(eventType, data, timestamp?)
Track a generic event. Useful for custom event types.
analytics.track('custom_event', {
action: 'button_click',
value: 42,
})

flush()
Flush all pending events to the server. Returns null if queue is empty.
const result = await analytics.flush()
// { success: true, count: 5 }

sendImmediate(eventType, data, timestamp?)
Send an event immediately without batching.
await analytics.sendImmediate('token_usage', {
provider: 'anthropic',
model: 'claude-3',
input_tokens: 50,
output_tokens: 150,
})

setClientId(clientId)
Update the client ID (e.g., after user login).
analytics.setClientId('new-user-uuid')

getQueueSize()
Get the current number of pending events in the queue.
const pending = analytics.getQueueSize()

destroy()
Clean up the client. Flushes remaining events and stops timers.
await analytics.destroy()

Examples
Electron App
import { AnalyticsClient } from '@cherrystudio/analytics-client'
import { app } from 'electron'
const analytics = new AnalyticsClient({
clientId: getMachineId(),
channel: 'cherry-studio',
onError: (error) => console.error('Analytics error:', error),
})
// Track usage
analytics.trackTokenUsage({
provider: 'openai',
model: 'gpt-4',
input_tokens: 100,
output_tokens: 200,
})
// Flush on app quit
app.on('before-quit', async (event) => {
event.preventDefault()
await analytics.destroy()
app.exit()
})

Node.js with node-fetch
import { AnalyticsClient } from '@cherrystudio/analytics-client'
import fetch from 'node-fetch'
const analytics = new AnalyticsClient({
clientId: 'server-instance-id',
channel: 'cherryin-backend',
fetch: fetch as unknown as typeof globalThis.fetch,
})

Disable Auto Batching
const analytics = new AnalyticsClient({
clientId: 'user-uuid',
channel: 'cherry-studio',
autoBatch: false, // Disable auto batching
})
// Manually send each event
await analytics.sendImmediate('token_usage', { ... })

Custom Batch Settings
const analytics = new AnalyticsClient({
clientId: 'user-uuid',
channel: 'cherry-studio',
batchSize: 50, // Flush when 50 events accumulated
flushInterval: 10000, // Or every 10 seconds
})

License
MIT
