@kaiko.io/rescript-reindexed
v8.2.2
Published
Kind of an IndexedDB ORM written in ReScript with no runtime dependencies.
Readme
ReIndexed
A type-safe IndexedDB ORM for ReScript with zero runtime dependencies. ReIndexed provides an elegant, functional API for working with IndexedDB, complete with migrations, transactions, and high-performance batch operations.
Run live tests: https://kaiko-systems.gitlab.io/ReIndexed/
Features
- 🎯 Type-safe: Full type safety with ReScript's type system
- 🚀 High Performance: Minimal transaction overhead with bulk operations
- 📦 Zero Dependencies: No runtime dependencies
- 🔄 Migrations: Versioned schema migrations with automatic upgrades
- 🛡️ Error Handling: Both unsafe (exception-throwing) and safe (Result-based) APIs
- 🔍 Rich Queries: Complex queries with And/Or, pagination, and cursor-based operations
- ⚡ Transactions: Automatic transaction management with full control when needed
Installation
npm install @kaiko.io/rescript-reindexed

Add to your rescript.json:
{
"bs-dependencies": ["@kaiko.io/rescript-reindexed"]
}

Quick Start
// 1. Define your data model
module Vessel = {
module Def = {
type t = {
id: string,
name: string,
age: int,
flag: option<string>
}
type index = [#id | #name | #age | #flag]
}
include ReIndexed.MakeModel(Def)
}
// 2. Define your database with migrations
module Database = ReIndexed.MakeDatabase({
let migrations = () => [
// Migration 0: Create object store
_ => async (db, _transaction) => {
let vessels = db->IDB.Migration.Database.createObjectStore("vessels")
vessels->IDB.Migration.Store.createIndex("name", "name")
vessels->IDB.Migration.Store.createIndex("age", "age")
Ok()
}
]
})
// 3. Define your query interface
module Query = Database.MakeQuery({
type read = {vessels: Vessel.read}
type write = {vessels: Vessel.actions}
type response = {vessels: array<Vessel.t>}
type mapper = {vessels?: Vessel.t => ReIndexedCommands.command<Vessel.t>}
type aggregator<'state> = {
vessels?: ('state, Vessel.t) => ('state, ReIndexedCommands.flow)
}
})
// 4. Connect and use
let main = async () => {
// Connect to database
switch await Database.connect("my-database") {
| Error(e) => Console.error("Failed to connect", e)
| Ok(_db) => {
// Write data
let _ = await {
...Query.makeWrite(),
vessels: [
Vessel.save({id: "v1", name: "Aurora", age: 5, flag: Some("us")}),
Vessel.save({id: "v2", name: "Borealis", age: 10, flag: Some("ca")}),
]
}->Query.write
// Read data
let {vessels} = await {
...Query.makeRead(),
vessels: Vessel.All
}->Query.read
Console.log2("Vessels:", vessels)
}
}
}

Core Concepts
Models
Models define your data structures and provide type-safe operations. ReIndexed provides two model makers:
- MakeModel: For models with string IDs
- MakeIdModel: For models with custom ID types
// Simple model with string ID
module Staff = {
module Def = {
type t = {
id: string,
name: string,
age: int,
position: [#shore | #crew]
}
type index = [#id | #name | #age | #position]
}
include ReIndexed.MakeModel(Def)
}
// Model with custom ID type
module VesselId: ReIndexed.Identifier = {
type t
external fromString: string => t = "%identity"
external toString: t => string = "%identity"
external manyFromString: array<string> => array<t> = "%identity"
external manyToString: array<t> => array<string> = "%identity"
}
module Vessel = {
module Def = {
type t = {id: VesselId.t, name: string, age: int}
type index = [#id | #name | #age]
}
include ReIndexed.MakeIdModel(Def, VesselId)
}

Database and Migrations
Databases are created with versioned migrations. Each migration receives the database and transaction:
module Database = ReIndexed.MakeDatabase({
let migrations = () => [
// Migration 0: Create initial schema
_ => async (db, _transaction) => {
let vessels = db->IDB.Migration.Database.createObjectStore("vessels")
vessels->IDB.Migration.Store.createIndex("name", "name")
vessels->IDB.Migration.Store.createIndex("age", "age")
Ok()
},
// Migration 1: Seed initial data
_ => async (_db, transaction) => {
// Use ReIndexedPatterns.MakeWriter or custom logic
Ok()
},
// Migration 2: Add new index
_ => async (_db, transaction) => {
let vessels = transaction->IDB.Migration.Transaction.objectStore("vessels")
vessels->IDB.Migration.Store.createIndex("flag", "flag")
Ok()
}
]
})
// Connect to database
let result = await Database.connect("my-app-db")

Query Interface
The query interface is defined for each database and provides type-safe access:
module Query = Database.MakeQuery({
// Read specification - what you can query
type read = {
vessels: Vessel.read,
staff: Staff.read
}
// Write specification - what you can modify
type write = {
vessels: Vessel.actions,
staff: Staff.actions
}
// Response type - what you get back
type response = {
vessels: array<Vessel.t>,
staff: array<Staff.t>
}
// Mapper for transformations
type mapper = {
vessels?: Vessel.t => ReIndexedCommands.command<Vessel.t>,
staff?: Staff.t => ReIndexedCommands.command<Staff.t>
}
// Aggregator for reductions
type aggregator<'state> = {
vessels?: ('state, Vessel.t) => ('state, ReIndexedCommands.flow),
staff?: ('state, Staff.t) => ('state, ReIndexedCommands.flow)
}
})

Query Operations
Read Operations
Read data from one or more object stores:
// Read all vessels
let {vessels} = await {...Query.makeRead(), vessels: All}->Query.read
// Read by ID
let {vessels} = await {...Query.makeRead(), vessels: Get("vessel-123")}->Query.read
// Read with complex query
let {vessels} = await {
...Query.makeRead(),
vessels: Between(#age, Incl(10->Query.value), Excl(20->Query.value))
}->Query.read
// Read from multiple stores
let {vessels, staff} = await {
...Query.makeRead(),
vessels: All,
staff: Is(#position, "crew")
}->Query.read

Write Operations
Write data to one or more object stores:
// Save records
let _ = await {
...Query.makeWrite(),
vessels: [
Save({id: "v1", name: "Aurora", age: 5, flag: None}),
Save({id: "v2", name: "Borealis", age: 10, flag: Some("ca")})
]
}->Query.write
// Delete records by IDs
let _ = await {
...Query.makeWrite(),
vessels: [Delete("v1"), Delete("v2")]
}->Query.write
// Clear entire store
let _ = await {...Query.makeWrite(), vessels: [Clear]}->Query.write
// Mix operations
let _ = await {
...Query.makeWrite(),
vessels: [Clear, Save(vessel1), Save(vessel2)]
}->Query.write

Do - Combined Read/Write Operations
Execute multiple reads and writes in a single transaction:
let {vessels, staff} = await [
// First read vessels
Query.Read(_ => {...Query.makeRead(), vessels: All}),
// Then write staff based on previous results
Query.Write(response => {
let vesselCount = response.vessels->Array.length
{
...Query.makeWrite(),
staff: [Save({
id: "s1",
name: "Captain",
count: vesselCount
})]
}
})
]->Query.do
Console.log2("Results:", {vessels, staff})

Map - Transform and Update
Read records, transform them, and write back in a single transaction:
// Update all vessels
await {
vessels: All,
staff: NoOp
}->Query.map({
vessels: vessel => Update({...vessel, age: vessel.age + 1})
})
// Conditional updates
await {
vessels: All,
staff: NoOp
}->Query.map({
vessels: vessel =>
vessel.age < 18 ? Delete : Update({...vessel, flag: Some("adult")})
})
// Transform specific records
await {
vessels: In(["v1", "v2", "v3"]),
staff: NoOp
}->Query.map({
vessels: vessel => Update({...vessel, name: vessel.name ++ " (Updated)"})
})

Map commands:
- Next - Skip this record, continue to next
- Update(record) - Update the record and continue
- Delete - Delete the record and continue
- Stop - Stop iteration immediately
Aggregate - Reduce Over Records
Reduce records to a single value:
// Sum ages
let totalAge = await {
vessels: All,
staff: NoOp
}->Query.aggregate(0, {
vessels: (sum, vessel) => (sum + vessel.age, Next)
})
// Count records
let count = await {
vessels: Gte(#age, "18"),
staff: NoOp
}->Query.aggregate(0, {
vessels: (count, _vessel) => (count + 1, Next)
})
// Build custom data structure
let byFlag = await {
vessels: All,
staff: NoOp
}->Query.aggregate(Map.String.empty, {
vessels: (acc, vessel) => {
switch vessel.flag {
| Some(flag) => (acc->Map.String.set(flag, vessel), Next)
| None => (acc, Next)
}
}
})
// Early termination
let firstOld = await {
vessels: All,
staff: NoOp
}->Query.aggregate(None, {
vessels: (result, vessel) =>
vessel.age >= 100 ? (Some(vessel), Stop) : (result, Next)
})

Aggregate flow:
- Next - Continue to next record
- Stop - Stop iteration and return current state
Batch Operations
batch works like do but without accumulating responses between steps.
Use it to combine multiple independent write and map operations in a single
transaction:
// Write to some stores, then map over others — all in one transaction
await [
Write({
...Query.makeWrite(),
vessels: newVessels->Array.map(Vessel.save)
}),
Map(
{...Query.makeRead(), staff: All},
{...Query.makeMapper(), staff: s => Update({...s, active: true})}
)
]->Query.batch

Each step in the batch runs sequentially, so a Map can see records written
by a preceding Write. Unlike do, you don't get intermediate responses —
operations are provided upfront.
When to use batch instead of do:
- You need to combine writes and maps but don't need to thread responses between steps.
- You want the simplicity of fire-and-forget bulk operations.
Note: If all your operations target the same stores, a single write or
map call is simpler and equally performant — no need for batch.
Query Expressions
ReIndexed supports a rich query language for filtering records:
Basic Queries
All // All records
Get("id") // Single record by ID
In(["id1", "id2", "id3"]) // Records matching IDs
NotIn(["id1", "id2"]) // Records not matching IDs
NoOp // No operation (skip this store)

Index Queries
Is(#name, "Aurora") // Exact match
NotNull(#flag) // Has non-null value
Lt(#age, "18") // Less than
Lte(#age, "18") // Less than or equal
Gt(#age, "65") // Greater than
Gte(#age, "18") // Greater than or equal
Between(#age, Incl("18"), Excl("65")) // Range (inclusive/exclusive bounds)
AnyOf(#flag, ["us", "ca", "uk"]) // Match any of values
NoneOf(#flag, ["de", "fr"]) // Match none of values
StartsWith(#name, "MS ") // String prefix match

Aggregation Queries
Min(#age) // Record with minimum age
Max(#age) // Record with maximum age

Compound Queries
// AND - Records matching both conditions
And(
Gte(#age, "18"),
Lt(#age, "65")
)
// OR - Records matching either condition
Or(
Is(#flag, "us"),
Is(#flag, "ca")
)
// Complex combinations
And(
Or(
Is(#flag, "us"),
Is(#flag, "ca")
),
Gte(#age, "18")
)

Pagination
// Limit results
Limit(10, All)
// Skip and limit
Offset(20, Limit(10, All))
// Can be combined with any query
Limit(5, And(
Gte(#age, "18"),
Is(#flag, "us")
))

Error Handling
ReIndexed provides both unsafe (exception-throwing) and safe (Result-based) APIs:
Unsafe API (Default)
// Throws exception on error
let {vessels} = await Query.read({...Query.makeRead(), vessels: All})

Safe API
// Returns Result<response, exn>
switch await Query.Safe.read({...Query.makeRead(), vessels: All}) {
| Ok({vessels}) => Console.log2("Success:", vessels)
| Error(exn) => Console.error2("Failed:", exn)
}
// All operations have Safe variants
switch await Query.Safe.write({...Query.makeWrite(), vessels: [...]}) {
| Ok(_) => Console.log("Write succeeded")
| Error(exn) => Console.error2("Write failed:", exn)
}
switch await Query.Safe.map({vessels: All, staff: NoOp}, {...}) {
| Ok() => Console.log("Map succeeded")
| Error(exn) => Console.error2("Map failed:", exn)
}
switch await Query.Safe.batch([...]) {
| Ok() => Console.log("Batch succeeded")
| Error(exn) => Console.error2("Batch failed:", exn)
}

Advanced Topics
Database Connection Management
// Connect
switch await Database.connect("my-database") {
| Ok(db) => Console.log("Connected")
| Error(e) => Console.error2("Connection failed:", e)
}
// Disconnect
Database.disconnect()
// Drop database (⚠️ destroys all data)
switch await Database.drop() {
| Ok() => Console.log("Database dropped")
| Error(e) => Console.error2("Drop failed:", e)
}

Unbound Queries
For working with multiple database instances:
module UnboundQuery = ReIndexed.MakeUnboundQuery(QueryDef)
// Use with specific database instance
let {vessels} = await UnboundQuery.read(
db,
{...UnboundQuery.makeRead(), vessels: All}
)

Transaction Patterns
For lower-level transaction control, use Database.Patterns:
module VesselCounter = Patterns.MakeCounter({
type t = Vessel.t
let storeName = "vessels"
let predicate = _ => true
})
switch Patterns.transaction(["vessels"], #readonly) {
| Error(msg) => Console.error(msg)
| Ok(transaction) => {
let count = await VesselCounter.do(transaction)
Console.log2("Vessel count:", count)
}
}

Custom Identifiers
Create custom ID types with validation:
module VesselId: ReIndexed.Identifier = {
type t
let fromString = str => {
// Validate format
if !Js.Re.test_(str, %re("/^v-[0-9a-f]+$/")) {
JsError.throwWithMessage("Invalid vessel ID format")
}
str->Obj.magic
}
external toString: t => string = "%identity"
let manyFromString = ids => ids->Array.map(fromString)
let manyToString = ids => ids->Array.map(toString)
}

API Stability
The ReIndexed module API is stable and follows semantic versioning. Breaking changes will only occur in major version bumps.
The ReIndexedPatterns and IDB.Migration.Utils modules are experimental and may have breaking changes in minor versions.
Performance Tips
- Minimize transactions — batch writes into a single write or batch call instead of many individual calls
- Create indexes on frequently queried fields
- Use In() or AnyOf() instead of Or() when possible (uses efficient cursor seeking)
- Limit results early with Limit() to avoid processing unnecessary records
- Use aggregate instead of reading all records when you only need a computed value
- Use NoOp for stores you don't need to query
Examples
See the test suite for comprehensive examples.
Live tests: https://kaiko-systems.gitlab.io/ReIndexed/
API Reference
Core Modules
- ReIndexed.MakeModel - Create a model with string IDs
- ReIndexed.MakeIdModel - Create a model with custom ID types
- ReIndexed.MakeDatabase - Create a database with migrations
- Database.MakeQuery - Create bound query interface
- ReIndexed.MakeUnboundQuery - Create unbound query interface
Query Operations
- read(read) - Read from object stores
- write(write) - Write to object stores
- do(array<query>) - Execute combined read/write operations
- map(read, mapper) - Transform and update records
- aggregate(read, state, aggregator) - Reduce records to a value
- batch(array<batchOp>) - Combine multiple write/map operations in a single transaction
Commands
Map commands:
- Next - Continue without changes
- Update(record) - Update and continue
- Delete - Delete and continue
- Stop - Stop iteration
Aggregate flow:
- Next - Continue
- Stop - Stop and return
Write operations:
- Save(record) - Insert or update record
- Delete(id) - Delete by ID
- Clear - Clear all records in store
License
MIT
Contributing
Issues and pull requests welcome at https://gitlab.com/kaiko-systems/ReIndexed
