@ignitionai/sdk
v0.1.0
Published
TypeScript SDK for IgnitionAI RAG platform
Downloads
40
Maintainers
Readme
@ignitionai/sdk
TypeScript SDK for the IgnitionAI RAG platform. Zero dependencies, works everywhere: Node.js 18+, Bun, Deno, edge runtimes, and browsers.
Installation
npm install @ignitionai/sdk
# or
bun add @ignitionai/sdk

Setup
import { IgnitionAI } from "@ignitionai/sdk";
const client = new IgnitionAI({
apiKey: "ign_xxxxxxxxxxxx",
});

Or use the IGNITION_API_KEY environment variable:
const client = new IgnitionAI();

Configuration Options
const client = new IgnitionAI({
apiKey: "ign_xxxx", // required (or IGNITION_API_KEY env)
baseURL: "https://custom.api", // default: https://api.ignitionai.dev
maxRetries: 3, // default: 2
timeout: 60_000, // default: 30_000ms
fetch: customFetch, // custom fetch implementation
});

RAG Chat
Non-streaming
const response = await client.chat.send({
collectionId: "coll_xxxx",
query: "What is the refund policy?",
});
console.log(response.answer);
console.log(`${response.sources.length} sources used`);

Streaming
const stream = client.chat.stream({
collectionId: "coll_xxxx",
query: "Explain the onboarding process",
});
for await (const event of stream) {
if (event.type === "chunk") {
process.stdout.write(event.content);
}
if (event.type === "sources") {
console.log("\nSources:", event.sources.length);
}
}

Text-only stream
for await (const text of stream.toTextStream()) {
process.stdout.write(text);
}

Collect full response
const { text, sources, metrics } = await stream.getFullResponse();

Progressive Streaming
Track RAG pipeline stages in real time:
const stream = client.chat.streamProgressive({
collectionId: "coll_xxxx",
query: "Compare pricing plans",
});
for await (const event of stream) {
switch (event.type) {
case "stage":
console.log(`[${event.stage}] ${event.elapsed}ms`);
break;
case "sources":
console.log(`Found ${event.sources.length} sources`);
break;
case "chunk":
process.stdout.write(event.content);
break;
case "metrics":
console.log(`\nTotal: ${event.pipelineMetrics.totalTimeMs}ms`);
break;
}
}

Agent Chat
const stream = client.agentChat.stream("agent_xxxx", {
query: "How do I configure SSO?",
// sessionId auto-generated if omitted
});
for await (const event of stream) {
switch (event.type) {
case "chunk":
process.stdout.write(event.content);
break;
case "tool_call":
console.log(`\n[Tool: ${event.name}]`);
break;
case "tool_result":
console.log(`[Result: ${event.name}]`);
break;
case "sources":
console.log(`\n${event.sources.length} sources`);
break;
}
}

Agents
// List
const agents = await client.agents.list();
// Get
const agent = await client.agents.get("agent_xxxx");
// Create
const agent = await client.agents.create({
name: "Support Bot",
collectionId: "coll_xxxx",
model: "gpt-4o-mini",
temperature: 30,
enabledBuiltinTools: ["web_search", "current_date"],
});
// Generate config with AI wizard
const config = await client.agents.generate({
description: "A customer support agent for our SaaS platform",
language: "en",
});
// Update
await client.agents.update("agent_xxxx", { temperature: 50 });
// Delete (soft)
await client.agents.delete("agent_xxxx");
// Delete (permanent)
await client.agents.delete("agent_xxxx", true);

Collections
// List
const collections = await client.collections.list();
// Create
const coll = await client.collections.create({
name: "Product Docs",
description: "All product documentation",
});
// Search
const results = await client.collections.search("coll_xxxx", {
query: "authentication",
limit: 10,
filters: [{ field: "category", operator: "eq", value: "security" }],
});
// Browse chunks
const { chunks, total, hasMore } = await client.collections.chunks("coll_xxxx", {
limit: 20,
type: "text",
});
// Stats & insights
const stats = await client.collections.stats("coll_xxxx");
const insights = await client.collections.insights("coll_xxxx");
// Update / Delete
await client.collections.update("coll_xxxx", { name: "Renamed" });
await client.collections.delete("coll_xxxx");

Ingestion
// Ingest from URL
await client.ingest.url({
collectionId: "coll_xxxx",
url: "https://docs.example.com/guide",
});
// Ingest raw text
await client.ingest.document({
collectionId: "coll_xxxx",
content: "Your document content...",
filename: "guide.txt",
});
// Upload file
await client.ingest.file({
collectionId: "coll_xxxx",
file: new File([buffer], "report.pdf", { type: "application/pdf" }),
});
// Crawl website
await client.ingest.crawl({
collectionId: "coll_xxxx",
url: "https://docs.example.com",
maxPages: 50,
});
// Import HuggingFace dataset
await client.ingest.dataset({
collectionId: "coll_xxxx",
source: "huggingface",
datasetId: "squad",
split: "train",
maxRows: 1000,
});

MCP (Model Context Protocol)
// List available prompts and resources
const prompts = await client.mcp.prompts("agent_xxxx");
const resources = await client.mcp.resources("agent_xxxx");
// Get a prompt
const prompt = await client.mcp.getPrompt("agent_xxxx", {
serverName: "my-server",
promptName: "summarize",
});
// Read a resource
const resource = await client.mcp.readResource("agent_xxxx", {
serverName: "my-server",
uri: "resource://data",
});
// Test server connectivity
const result = await client.mcp.testServer({
url: "https://mcp.example.com",
transport: "sse",
});

Error Handling
All errors extend IgnitionAIError. HTTP errors are automatically mapped:
import {
IgnitionAI,
RateLimitError,
AuthenticationError,
NotFoundError,
} from "@ignitionai/sdk";
try {
await client.chat.send({ collectionId: "coll_xxxx", query: "Hello" });
} catch (error) {
if (error instanceof RateLimitError) {
console.log(`Rate limited. Retry after ${error.retryAfter}s`);
} else if (error instanceof AuthenticationError) {
console.log("Invalid API key");
} else if (error instanceof NotFoundError) {
console.log("Resource not found");
}
}

| Error Class | Status Code |
|---|---|
| BadRequestError | 400 |
| AuthenticationError | 401 |
| PermissionError | 403 |
| NotFoundError | 404 |
| RateLimitError | 429 |
| InternalServerError | 500+ |
| APIConnectionError | Network failure |
| APIConnectionTimeoutError | Timeout |
Retries are automatic for status codes 408, 429, 500, 502, 503, 504 with exponential backoff.
Vercel AI SDK Integration
Drop-in adapter for Next.js with useChat():
Route Handler (app/api/chat/route.ts)
import { IgnitionAI } from "@ignitionai/sdk";
import { toAIStreamResponse } from "@ignitionai/sdk/adapters/vercel-ai";
const client = new IgnitionAI();
export async function POST(req: Request) {
const { messages } = await req.json();
const stream = client.chat.stream({
collectionId: process.env.COLLECTION_ID!,
query: messages.at(-1).content,
history: messages.slice(0, -1),
});
return toAIStreamResponse(stream);
}

Client Component (app/chat/page.tsx)
"use client";
import { useChat } from "ai/react";
export default function Chat() {
const { messages, input, handleInputChange, handleSubmit } = useChat();
return (
<div>
{messages.map((m) => (
<div key={m.id}>
{m.role}: {m.content}
</div>
))}
<form onSubmit={handleSubmit}>
<input value={input} onChange={handleInputChange} />
</form>
</div>
);
}

Works with AgentChatStream too:
const stream = client.agentChat.stream(agentId, { query, sessionId });
return toAIStreamResponse(stream);

Environment Variables
| Variable | Description | Required |
|---|---|---|
| IGNITION_API_KEY | API key (starts with ign_) | Yes (or pass in constructor) |
| IGNITION_BASE_URL | Custom API URL | No |
Requirements
- Node.js 18+ / Bun / Deno
- Zero dependencies
License
MIT
