@ai-queue/types
v1.5.0
Published
This document provides a reference for the TypeScript types used in the AI Queue API. These types can be imported from the `@ai-queue/types` package to ensure type safety when integrating with the API.
Readme
AI Queue TypeScript Types Documentation
This document provides a reference for the TypeScript types used in the AI Queue API. These types can be imported from the @ai-queue/types package to ensure type safety when integrating with the API.
Installation
npm install @ai-queue/types
# or
yarn add @ai-queue/types

Core Types
Job Types
import { JobType, JobTypes } from '@ai-queue/types';
// Available job types
const jobTypes: Record<string, JobType> = JobTypes;
// {
// SPEECH: "speech",
// IMAGE: "image",
// TRANSCRIPTION: "transcription",
// STORAGE: "storage"
// }

Job Status
import { JobStatusType, JobStatus } from '@ai-queue/types';
// Available job status values
const statusValues: Record<string, JobStatusType> = JobStatus;
// {
// PENDING: "pending",
// PROCESSING: "processing",
// COMPLETED: "completed",
// FAILED: "failed"
// }

Batch Status
import { BatchStatusType, BatchStatus } from '@ai-queue/types';
// Available batch status values (extends JobStatus)
const batchStatusValues: Record<string, BatchStatusType> = BatchStatus;
// {
// PENDING: "pending",
// PROCESSING: "processing",
// COMPLETED: "completed",
// FAILED: "failed",
// PARTIAL: "partial"
// }

Providers
import {
SpeechProvider, SpeechProviders,
TranscriptionProvider, TranscriptionProviders,
ImageProvider, ImageProviders,
StorageProvider, StorageProviders
} from '@ai-queue/types';
// Available speech providers
const speechProviders: Record<string, SpeechProvider> = SpeechProviders;
// {
// AWS: "aws",
// ELEVENLABS: "elevenlabs",
// OPENAI: "openai",
// GOOGLE: "google",
// DEEPGRAM: "deepgram"
// }
// Available transcription providers
const transcriptionProviders: Record<string, TranscriptionProvider> = TranscriptionProviders;
// {
// DEEPGRAM: "deepgram"
// }
// Available image providers
const imageProviders: Record<string, ImageProvider> = ImageProviders;
// {
// GETIMG: "getimg",
// OPENAI: "openai"
// }
// Available storage providers
const storageProviders: Record<string, StorageProvider> = StorageProviders;
// {
// AWS: "aws",
// CL: "cloudflare",
// BACKBLAZE: "backblaze"
// }

Job Data Types
These types define the input data structure for different job types:
Speech Job Data
import { SpeechJobData } from '@ai-queue/types';
const speechJobData: SpeechJobData = {
text: "Hello world",
provider: "elevenlabs",
voiceId: "voice-id-here",
config: {
// Provider-specific configuration
stability: 0.5, // ElevenLabs specific
similarity_boost: 0.7, // ElevenLabs specific
style: 0.3, // ElevenLabs specific
use_speaker_boost: true, // ElevenLabs specific
// OR
pitch: 10, // Google specific
speed: 1.5 // Google specific
}
};

Image Job Data
import { ImageJobData, ImageProviders } from '@ai-queue/types';
const getImgJobData: ImageJobData = {
prompt: "A sunset over mountains",
config: {
provider: ImageProviders.GETIMG,
config: {
model: "flux-schnell",
width: 1024,
height: 1024,
seed: 12345,
steps: 50
}
}
};
const openAiImageEditJob: ImageJobData = {
prompt: "Make the sky golden hour and remove tourists",
config: {
provider: ImageProviders.OPENAI,
config: {
mode: "image-to-image",
model: "gpt-image-1",
n: 2,
initImageUrl: "https://example.com/original.png",
maskImageUrl: "https://example.com/mask.png",
promptStrength: 0.7,
size: "1024x1024"
}
}
};

Transcription Job Data
import { TranscriptionJobData } from '@ai-queue/types';
const transcriptionJobData: TranscriptionJobData = {
audioUrl: "https://example.com/audio.mp3",
provider: "deepgram",
config: {
language: "en-US",
detectLanguage: true
}
};

Storage Job Data
import { StorageJobData } from '@ai-queue/types';
const storageJobData: StorageJobData = {
fileUrl: "https://example.com/file.pdf",
provider: "cloudflare",
path: "documents/file.pdf",
config: {
public: true,
cacheControl: "max-age=86400"
}
};

Job Result Types
These types define the output data structure for different job types:
Speech Job Result
import { SpeechResult } from '@ai-queue/types';
const speechResult: SpeechResult = {
url: "https://storage.example.com/speech/file.mp3",
mimeType: "audio/mpeg"
};

Image Job Result
import { ImageResult } from '@ai-queue/types';
const imageResult: ImageResult = {
url: "https://storage.example.com/images/generated.png",
seed: 12345, // Optional
mode: "image-to-image",
sourceImageUrl: "https://example.com/original.png",
variants: [
"https://storage.example.com/images/generated-1.png",
"https://storage.example.com/images/generated-2.png"
]
};

Transcription Job Result
import { TranscriptionResult, Word } from '@ai-queue/types';
const transcriptionResult: TranscriptionResult = {
transcript: "Hello world, this is a transcription.",
words: [
{
word: "Hello",
start: 0.0,
end: 0.5,
confidence: 0.98
},
{
word: "world",
start: 0.6,
end: 1.1,
confidence: 0.95
}
// More words...
],
language: "en-US"
};

Storage Job Result
import { StorageResult } from '@ai-queue/types';
const storageResult: StorageResult = {
url: "https://storage.example.com/documents/file.pdf"
};

Complete Job Types
These types combine the base job fields with type-specific data and results:
Speech Job
import { SpeechJob } from '@ai-queue/types';
const speechJob: SpeechJob = {
id: "job-123",
type: "speech",
status: "completed",
createdAt: new Date(),
updatedAt: new Date(),
data: {
text: "Hello world",
provider: "elevenlabs",
voiceId: "voice-id-here"
},
result: {
url: "https://storage.example.com/speech/file.mp3",
mimeType: "audio/mpeg"
},
metadata: {
userId: "user-123",
projectId: "project-456"
}
};

Image Job
import { ImageJob, ImageProviders } from '@ai-queue/types';
const imageJob: ImageJob = {
id: "job-456",
type: "image",
status: "completed",
createdAt: new Date(),
updatedAt: new Date(),
data: {
prompt: "A sunset over mountains",
config: {
provider: ImageProviders.OPENAI,
config: {
model: "gpt-image-1",
n: 1
}
}
},
result: {
url: "https://storage.example.com/images/generated.png",
seed: 12345,
mode: "text-to-image"
}
};

Transcription Job
import { TranscriptionJob } from '@ai-queue/types';
const transcriptionJob: TranscriptionJob = {
id: "job-789",
type: "transcription",
status: "completed",
createdAt: new Date(),
updatedAt: new Date(),
data: {
audioUrl: "https://example.com/audio.mp3",
provider: "deepgram"
},
result: {
transcript: "Hello world, this is a transcription.",
words: [
// Word objects with timing information
],
language: "en-US"
}
};

Storage Job
import { StorageJob } from '@ai-queue/types';
const storageJob: StorageJob = {
id: "job-012",
type: "storage",
status: "completed",
createdAt: new Date(),
updatedAt: new Date(),
data: {
fileUrl: "https://example.com/file.pdf",
path: "documents/file.pdf"
},
result: {
url: "https://storage.example.com/documents/file.pdf"
}
};

Generic Job Type
import { Job } from '@ai-queue/types';
// Job can be any of the specific job types
function processJob(job: Job) {
switch (job.type) {
case "speech":
// TypeScript knows this is a SpeechJob
console.log(job.data.text);
if (job.result) console.log(job.result.url);
break;
case "image":
// TypeScript knows this is an ImageJob
console.log(job.data.prompt);
if (job.result) console.log(job.result.url);
break;
// Handle other job types...
}
}

Queue Types
These types are used when submitting jobs to the queue:
Queue Job
import { QueueJob } from '@ai-queue/types';
// Submit a single job to the queue
const queueJob: QueueJob = {
type: "speech",
data: {
text: "Hello world",
provider: "elevenlabs",
voiceId: "voice-id-here"
},
webhookUrl: "https://example.com/webhook",
metadata: {
userId: "user-123"
}
};

Batch Queue Job
import { BatchQueueJob, ImageProviders } from '@ai-queue/types';
// Submit a batch of jobs to the queue
const batchQueueJob: BatchQueueJob = {
type: "image",
jobs: [
{
data: {
prompt: "A sunset over mountains",
config: {
provider: ImageProviders.GETIMG,
config: {
width: 1024,
height: 1024
}
}
},
metadata: {
description: "Mountain sunset"
}
},
{
data: {
prompt: "A beach at dawn",
config: {
provider: ImageProviders.OPENAI,
config: {
model: "gpt-image-1",
n: 2
}
}
},
metadata: {
description: "Beach dawn"
}
}
],
webhookUrl: "https://example.com/batch-webhook",
batchMetadata: {
projectId: "project-456"
}
};

Queue Response Types
import { QueueJobResponse, BatchQueueJobResponse } from '@ai-queue/types';
// Response when submitting a single job
const queueResponse: QueueJobResponse = {
jobId: "job-123",
status: "pending"
};
// Response when submitting a batch
const batchResponse: BatchQueueJobResponse = {
batchId: "batch-123",
status: "pending",
jobCount: 2
};

Webhook Types
These types define the structure of webhook notifications:
Job Webhook Notification
import { WebhookNotification } from '@ai-queue/types';
// Example webhook handler
function handleWebhook(notification: WebhookNotification) {
console.log(`Job ${notification.jobId} status: ${notification.status}`);
if (notification.status === "completed") {
switch (notification.type) {
case "speech":
// Process speech result
console.log(`Audio URL: ${notification.result.url}`);
break;
case "image":
// Process image result
console.log(`Image URL: ${notification.result.url}`);
break;
// Handle other job types...
}
} else if (notification.status === "failed") {
console.error(`Error: ${notification.error}`);
}
}

Batch Webhook Notification
import { BatchWebhookNotification } from '@ai-queue/types';
// Example batch webhook handler
function handleBatchWebhook(notification: BatchWebhookNotification) {
console.log(`Batch ${notification.batchId} status: ${notification.status}`);
console.log(`Summary: ${notification.summary.completed}/${notification.summary.total} completed`);
// Process individual results
notification.results.forEach(result => {
if (result.success) {
console.log(`Job succeeded: ${JSON.stringify(result.data)}`);
} else {
console.error(`Job failed: ${result.error}`);
}
});
}

Storage Types
Types for generating presigned URLs for file uploads:
import { GeneratePresignedUrl, PresignedUrlResponse } from '@ai-queue/types';
// Request to generate a presigned URL
const generateRequest: GeneratePresignedUrl = {
fileType: "image",
contentType: "image/png",
expiresIn: 3600 // seconds
};
// Response with presigned URL
const presignedResponse: PresignedUrlResponse = {
presignedUrl: "https://storage.example.com/upload?signature=abc123",
fileUrl: "https://storage.example.com/images/file.png"
};

Using Zod Schemas for Validation
The package exports Zod schemas for runtime validation with improved type safety:
import {
SpeechProviderConfigSchema,
ImageProviderConfigSchema,
TranscriptionProviderConfigSchema,
StorageProviderConfigSchema
} from '@ai-queue/types';
// Validate speech configuration
const speechConfig = {
provider: "elevenlabs",
voiceId: "voice-123",
config: {
stability: 0.5,
similarity_boost: 0.7
}
};
const validationResult = SpeechProviderConfigSchema.safeParse(speechConfig);
if (validationResult.success) {
const validConfig = validationResult.data;
// Config is now fully typed and validated
} else {
console.error("Validation errors:", validationResult.error);
}
// Provider-specific type checking
if (speechConfig.provider === "elevenlabs") {
// TypeScript knows this is ElevenLabs configuration
const stability = speechConfig.config.stability;
} else if (speechConfig.provider === "google") {
// TypeScript knows this is Google configuration
const pitch = speechConfig.config.pitch;
}

UI Configuration Metadata
The package provides metadata for generating UIs for provider configuration options. This makes it easy to create forms with appropriate input types, validation ranges, and default values.
import {
PROVIDER_SETTINGS,
ConfigFieldMetadata,
getProviderSettings,
SpeechProviders
} from '@ai-queue/types';
// Access all settings for a specific service type
const allSpeechSettings = PROVIDER_SETTINGS.speech;
// {
// 'elevenlabs': { ... },
// 'google': { ... },
// 'deepgram': { ... },
// 'openai': { ... },
// 'aws': { ... }
// }
// Access settings for a specific provider
const elevenLabsSettings = PROVIDER_SETTINGS.speech[SpeechProviders.ELEVENLABS];
// {
// voiceId: { type: 'text', label: 'Voice ID', required: true },
// stability: { type: 'percentage', label: 'Stability', min: 0, max: 1, defaultValue: 0.5 },
// similarity_boost: { type: 'percentage', label: 'Similarity Boost', min: 0, max: 1, defaultValue: 0.75 },
// style: { type: 'percentage', label: 'Style', min: 0, max: 1, defaultValue: 0 },
// use_speaker_boost: { type: 'boolean', label: 'Use Speaker Boost', defaultValue: false }
// }
// Use the helper function to get settings by job type and provider
const googleSettings = getProviderSettings('speech', 'google');
// {
// voiceId: { type: 'text', label: 'Voice ID', required: true },
// pitch: { type: 'number', label: 'Pitch', min: -20, max: 20, defaultValue: 0 },
// speed: { type: 'number', label: 'Speed', min: 0.25, max: 4.0, defaultValue: 1.0 }
// }
// Each field has metadata for generating UI components
const stabilityField: ConfigFieldMetadata = elevenLabsSettings.stability;
// {
// type: 'percentage',
// label: 'Stability',
// min: 0,
// max: 1,
// defaultValue: 0.5
// }
// Example of using the metadata to generate a form field
function renderFormField(name: string, field: ConfigFieldMetadata) {
switch (field.type) {
case 'text':
return `<input type="text" name="${name}" ${field.required ? 'required' : ''} />`;
case 'number':
case 'percentage':
return `<input type="range" name="${name}"
min="${field.min}" max="${field.max}"
value="${field.defaultValue}" step="0.01" />`;
case 'boolean':
return `<input type="checkbox" name="${name}" ${field.defaultValue ? 'checked' : ''} />`;
default:
return '';
}
}
// Generate a complete form for a provider
function generateProviderForm(jobType: string, provider: string) {
const settings = getProviderSettings(jobType, provider);
return Object.entries(settings)
.map(([name, field]) => {
return `
<div class="form-group">
<label>${field.label}</label>
${renderFormField(name, field)}
</div>
`;
})
.join('');
}
// Example usage
const elevenLabsForm = generateProviderForm('speech', 'elevenlabs');