@openmodex/react
v0.1.2
Published
React hooks for the OpenModex API — useChat, useCompletion, useModels
Downloads
186
Maintainers
Readme
OpenModex React
React hooks for the OpenModex API. Build AI-powered chat interfaces with streaming support, state management, and model discovery.
Installation
npm install @openmodex/react
Requires React 18+.
Setup
Wrap your app with OpenModexProvider to share configuration across all hooks:
import { OpenModexProvider } from '@openmodex/react';
function App() {
return (
<OpenModexProvider apiKey="omx_sk_..." baseURL="https://api.openmodex.com/v1">
<ChatApp />
</OpenModexProvider>
);
}
When using the provider, hooks inherit apiKey and baseURL automatically — no need to pass them individually.
Hooks
useChat
Full-featured chat hook with streaming, message history, and abort support.
import { useChat } from '@openmodex/react';
function ChatApp() {
const { messages, input, setInput, sendMessage, isLoading, stop } = useChat({
apiKey: 'omx_sk_...',
model: 'gpt-4o',
});
return (
<div>
{messages.map((m) => (
<div key={m.id}>
<strong>{m.role}:</strong> {m.content}
</div>
))}
<input value={input} onChange={(e) => setInput(e.target.value)} />
<button onClick={() => sendMessage()} disabled={isLoading}>
Send
</button>
{isLoading && <button onClick={stop}>Stop</button>}
</div>
);
}
Options
| Option | Type | Description |
|--------|------|-------------|
| apiKey | string | API key for authentication |
| baseURL | string | API base URL (default: https://api.openmodex.com/v1) |
| model | string | Model to use (default: gpt-4o) |
| initialMessages | ChatMessage[] | Pre-populate conversation |
| system | string | System prompt |
| temperature | number | Sampling temperature (0-2) |
| max_tokens | number | Maximum tokens in the response |
| top_p | number | Nucleus sampling parameter |
| routing | RoutingConfig | OpenModex routing strategy (cost_optimized, latency_optimized, quality_optimized) |
| onFinish | (message) => void | Called when response completes |
| onError | (error) => void | Called on error |
Returns
| Property | Type | Description |
|----------|------|-------------|
| messages | ChatMessage[] | All messages in the conversation |
| setMessages | Dispatch | Manually set messages |
| input | string | Current input value |
| setInput | Dispatch | Update input |
| sendMessage | (content?) => Promise | Send a message |
| reload | () => Promise | Re-send the last user message |
| append | (message) => Promise | Append a message and trigger a response |
| isLoading | boolean | Request in progress |
| error | Error \| null | Last error |
| stop | () => void | Abort current request |
| metadata | object \| null | OpenModex response metadata (request ID, provider, latency, etc.) |
useCompletion
Single-turn completion hook with streaming.
import { useCompletion } from '@openmodex/react';
function CompletionApp() {
const { completion, input, setInput, complete, isLoading } = useCompletion({
apiKey: 'omx_sk_...',
model: 'gpt-4o',
temperature: 0.8,
max_tokens: 256,
routing: { strategy: 'cost_optimized' },
});
return (
<div>
<input value={input} onChange={(e) => setInput(e.target.value)} />
<button onClick={() => complete()} disabled={isLoading}>Complete</button>
<p>{completion}</p>
</div>
);
}
Additional Options
| Option | Type | Description |
|--------|------|-------------|
| temperature | number | Sampling temperature (0-2) |
| max_tokens | number | Maximum tokens in the response |
| routing | RoutingConfig | OpenModex routing strategy |
useModels
Fetch and filter available models from the API.
import { useModels } from '@openmodex/react';
function ModelList() {
const { models, isLoading, error } = useModels({
apiKey: 'omx_sk_...',
category: 'chat',
provider: 'openai',
search: 'gpt',
sort: 'name',
});
if (isLoading) return <p>Loading...</p>;
if (error) return <p>Error: {error.message}</p>;
return (
<ul>
{models.map((m) => (
<li key={m.id}>{m.name} — {m.provider}</li>
))}
</ul>
);
}
Filtering Options
| Option | Type | Description |
|--------|------|-------------|
| category | string | Filter by model category (chat, completion, embedding) |
| provider | string | Filter by provider (openai, anthropic, google, etc.) |
| search | string | Search models by name |
| sort | string | Sort results (name, provider, created) |
useModelCompare
Compare two or more models side by side.
import { useModelCompare } from '@openmodex/react';
function ModelComparison() {
const { comparison, isLoading, error } = useModelCompare({
apiKey: 'omx_sk_...',
models: ['openai/gpt-4o', 'anthropic/claude-3.5-sonnet'],
});
if (isLoading) return <p>Comparing...</p>;
if (error) return <p>Error: {error.message}</p>;
return (
<div>
<p>Cheapest: {comparison.highlights?.cheapest}</p>
<p>Fastest: {comparison.highlights?.fastest}</p>
<table>
<thead>
<tr><th>Model</th><th>Provider</th><th>Quality</th><th>Cost</th></tr>
</thead>
<tbody>
{comparison.models.map((m) => (
<tr key={m.id}>
<td>{m.name}</td>
<td>{m.provider}</td>
<td>{m.quality_score}</td>
<td>{m.pricing?.per_1k_tokens}</td>
</tr>
))}
</tbody>
</table>
</div>
);
}
useEmbedding
Generate embeddings for text input.
import { useEmbedding } from '@openmodex/react';
function EmbeddingApp() {
const { embedding, embed, isLoading, error } = useEmbedding({
apiKey: 'omx_sk_...',
model: 'text-embedding-3-small',
});
return (
<div>
<button onClick={() => embed('Hello world')} disabled={isLoading}>
Generate Embedding
</button>
{embedding && <p>Dimensions: {embedding.length}</p>}
</div>
);
}
Links
License
MIT
