# @smarthivelabs-devs/ai-nexus-expo
Expo / React Native SDK for the SmartHive AI Nexus. Provides hooks, audio recording, TTS playback, and real-time voice calls backed by LiveKit WebRTC — all wired to the native audio session on iOS and Android.
**What this package is:** A React Native–specific layer built on `@smarthivelabs-devs/ai-nexus`. `livekit-client` and `@livekit/react-native-webrtc` install automatically as bundled dependencies.
## Requirements

- React Native ≥ 0.73 or Expo SDK ≥ 50
- `@smarthivelabs-devs/ai-nexus` ≥ 1.0.0 (peer dependency)
- `expo-av` ≥ 14.0.0 (peer dependency)
## Installation

```bash
yarn add @smarthivelabs-devs/ai-nexus-expo @smarthivelabs-devs/ai-nexus expo-av
```

`livekit-client` and `@livekit/react-native-webrtc` come pre-bundled — you do not need to install them separately.
### iOS permissions

Add to `app.json` / `app.config.js`:

```json
{
  "expo": {
    "ios": {
      "infoPlist": {
        "NSMicrophoneUsageDescription": "Used for voice conversations with AI agents"
      }
    }
  }
}
```

For bare React Native (non-Expo), add to `ios/YourApp/Info.plist`:

```xml
<key>NSMicrophoneUsageDescription</key>
<string>Used for voice conversations with AI agents</string>
```

### Android permissions
In `app.json`:

```json
{
  "expo": {
    "android": {
      "permissions": ["android.permission.RECORD_AUDIO"]
    }
  }
}
```

For bare React Native, add to `android/app/src/main/AndroidManifest.xml`:

```xml
<uses-permission android:name="android.permission.RECORD_AUDIO" />
```

## Environment Setup
```bash
# Your API key from the SmartHive AI Nexus dashboard
# Use EXPO_PUBLIC_ prefix only if you're pointing at a proxy — never expose the real key
AI_NEXUS_API_KEY=nexus_live_xxxxxxxxxxxxxxxxxxxx

# Optional — staging or self-hosted instance
AI_NEXUS_BASE_URL=https://api.smarthivelabs.dev
```

For production mobile apps, do not ship your API key in the bundle. Either:

- Fetch a short-lived token from your backend, or
- Point the client at your own server, which proxies requests and injects the key

```ts
// Option A — real key for internal/dev builds
const nexus = new AiNexus({ apiKey: process.env.AI_NEXUS_API_KEY! });

// Option B — proxy for production
const nexus = new AiNexus({ apiKey: 'mobile-client', baseUrl: 'https://yourapi.com/nexus' });
```

## Quick Start
```tsx
import React from 'react';
import { View, FlatList, Text, Button, ActivityIndicator } from 'react-native';
import { AiNexus } from '@smarthivelabs-devs/ai-nexus';
import { useChat } from '@smarthivelabs-devs/ai-nexus-expo';

const nexus = new AiNexus({ apiKey: process.env.AI_NEXUS_API_KEY! });

export function ChatScreen() {
  const { messages, isLoading, append } = useChat({
    client: nexus,
    model: 'gpt-4o',
  });

  return (
    <View>
      <FlatList
        data={messages}
        keyExtractor={(_, i) => String(i)}
        renderItem={({ item }) => (
          <Text>{item.role}: {String(item.content)}</Text>
        )}
      />
      {isLoading && <ActivityIndicator />}
      <Button title="Send" onPress={() => append('Hello!')} />
    </View>
  );
}
```

## Hook: useChat
Streaming chat with real-time token-by-token rendering.
```tsx
import React, { useState } from 'react';
import {
  View, Text, TextInput, Button, FlatList, KeyboardAvoidingView,
} from 'react-native';
import { useChat } from '@smarthivelabs-devs/ai-nexus-expo';

// `nexus` is the AiNexus instance from Quick Start; `styles` is your StyleSheet.

function ChatScreen() {
  const [input, setInput] = useState('');
  const { messages, isLoading, error, append, stop, setMessages } = useChat({
    client: nexus, // AiNexus instance
    model: 'claude-sonnet-4-6',
    // Optional — prepended as the system message on every request
    systemPrompt: 'You are a helpful mobile assistant. Keep answers concise.',
    onFinish: (msg) => console.log('Response complete:', msg.content),
    onError: (err) => console.error('Chat error:', err.message),
  });

  return (
    <KeyboardAvoidingView behavior="padding">
      <FlatList
        data={messages}
        keyExtractor={(_, i) => String(i)}
        renderItem={({ item }) => (
          <View style={item.role === 'user' ? styles.userBubble : styles.agentBubble}>
            <Text>{String(item.content)}</Text>
          </View>
        )}
      />
      {isLoading && <Text>Thinking...</Text>}
      {error && <Text style={styles.error}>{error.message}</Text>}
      <View style={styles.inputRow}>
        <TextInput value={input} onChangeText={setInput} placeholder="Type a message..." />
        <Button title="Send" onPress={() => { append(input); setInput(''); }} disabled={isLoading} />
        <Button title="Stop" onPress={stop} disabled={!isLoading} />
      </View>
    </KeyboardAvoidingView>
  );
}
```

Options:
| Option | Type | Description |
|--------|------|-------------|
| client | AiNexus | Required. The AI Nexus client instance. |
| model | string | Required. Model slug to use (e.g. 'gpt-4o', 'claude-sonnet-4-6'). |
| systemPrompt | string? | Prepended as a system message. |
| onFinish | (msg: ChatMessage) => void | Called when the assistant finishes streaming. |
| onError | (err: Error) => void | Called on any error. |
Returns: `{ messages, isLoading, error, append(content), stop(), setMessages }`
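
`stop()` cancels an in-flight stream and `setMessages` replaces the message list directly, which is handy for a "new chat" action. A minimal sketch, assuming `stop()` is safe to call while idle:

```tsx
// Inside a component using useChat:
const handleNewChat = () => {
  stop();          // cancel any in-flight stream
  setMessages([]); // clear the conversation history
};
```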
## Hook: useVoiceCall
Full real-time voice session for React Native. Handles microphone permissions, iOS audio session configuration, LiveKit WebRTC connection, and transcript polling.
```tsx
import React, { useRef, useEffect } from 'react';
import {
  View, Text, TouchableOpacity, ScrollView, ActivityIndicator, Alert,
} from 'react-native';
import { Room } from 'livekit-client';
import { useVoiceCall } from '@smarthivelabs-devs/ai-nexus-expo';

// `nexus` is the AiNexus instance from Quick Start; `styles` is your StyleSheet.

function VoiceCallScreen() {
  const roomRef = useRef<Room | null>(null);

  const {
    state,        // 'idle' | 'connecting' | 'active' | 'ending' | 'ended' | 'error'
    sessionId,
    livekitUrl,   // wss:// URL — connect your LiveKit Room here
    livekitToken, // JWT — participant token for the LiveKit room
    isMuted,
    toggleMute,
    transcript,   // { role: 'user'|'agent', text: string, timestamp: string }[]
    error,
    start,
    end,
    startRecording,
    stopRecording,
  } = useVoiceCall({
    client: nexus,
    agentId: 'agent_abc123',
    mode: 'voice', // 'voice' | 'text' | 'duplex'
    features: ['transcription', 'tts', 'vad'],
    // How often to poll for new transcript entries (ms). Default: 2000
    pollIntervalMs: 2_000,
    onTranscript: (entry) => {
      console.log(`${entry.role}: ${entry.text}`);
    },
    onError: (err) => Alert.alert('Voice call error', err.message),
  });

  // Connect to LiveKit once the session starts
  useEffect(() => {
    if (state === 'active' && livekitUrl && livekitToken) {
      const room = new Room();
      roomRef.current = room;
      room.connect(livekitUrl, livekitToken).catch(console.error);
      return () => { void room.disconnect(); };
    }
  }, [state, livekitUrl, livekitToken]);

  const handleStop = async () => {
    const url = await stopRecording('mp4');
    if (url) console.log('Recording saved at:', url);
  };

  return (
    <View>
      <Text>Status: {state}</Text>
      {state === 'idle' && (
        <TouchableOpacity onPress={start} style={styles.startBtn}>
          <Text>Start Voice Call</Text>
        </TouchableOpacity>
      )}
      {state === 'connecting' && <ActivityIndicator />}
      {state === 'active' && (
        <View>
          <TouchableOpacity onPress={end} style={styles.endBtn}>
            <Text>End Call</Text>
          </TouchableOpacity>
          <TouchableOpacity onPress={toggleMute}>
            <Text>{isMuted ? '🔇 Unmute' : '🎤 Mute'}</Text>
          </TouchableOpacity>
          <TouchableOpacity onPress={startRecording}>
            <Text>Record</Text>
          </TouchableOpacity>
          <TouchableOpacity onPress={handleStop}>
            <Text>Stop Recording</Text>
          </TouchableOpacity>
        </View>
      )}
      {error && <Text style={styles.error}>{error.message}</Text>}
      <ScrollView>
        {transcript.map((t, i) => (
          <View key={i} style={t.role === 'user' ? styles.userLine : styles.agentLine}>
            <Text style={styles.role}>{t.role}</Text>
            <Text>{t.text}</Text>
          </View>
        ))}
      </ScrollView>
    </View>
  );
}
```

How it works:
- `start()` — requests mic permission, configures `AVAudioSession` for recording on iOS, calls `nexus.realtime.sessions.create()`, and returns `livekitUrl` + `livekitToken`
- Your component connects to the LiveKit room using those credentials
- The hook polls `nexus.realtime.sessions.get()` every `pollIntervalMs` ms for new transcript entries (see the sketch below)
- `end()` — sends the end request, resets the iOS audio session, and stops polling
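
For reference, here is a rough sketch of the lifecycle the hook automates, using the core SDK directly. The call shapes follow this README's other examples; the argument to `sessions.get()` and the `transcript` field on its response are assumptions, so check the core SDK types before relying on them:

```ts
// Rough sketch of what useVoiceCall does internally (not the hook's actual source).
// Assumes sessions.get(sessionId) returns an object with a `transcript` array.
const session = await nexus.realtime.sessions.create({
  agentId: 'agent_abc123',
  mode: 'voice',
  features: ['transcription', 'tts', 'vad'],
});

let seen = 0;
const timer = setInterval(async () => {
  const latest = await nexus.realtime.sessions.get(session.sessionId);
  for (const entry of latest.transcript.slice(seen)) {
    console.log(`${entry.role}: ${entry.text}`);
  }
  seen = latest.transcript.length;
}, 2_000);

// ...connect a LiveKit Room with session.livekitUrl / session.livekitToken...

// Teardown
clearInterval(timer);
await nexus.realtime.sessions.end(session.sessionId);
```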
VoiceCallState transitions:

```
idle → connecting → active → ending → ended
                  ↘ error
```

Options:
| Option | Type | Description |
|--------|------|-------------|
| client | AiNexus | Required. The AI Nexus client. |
| agentId | string | Required. Agent to connect to. |
| mode | RealtimeMode? | 'voice' (default), 'text', or 'duplex' |
| features | RealtimeFeature[]? | 'transcription', 'tts', 'vad', 'recording' |
| pollIntervalMs | number? | Transcript poll interval in ms. Default: 2000 |
| onTranscript | (entry) => void | Called for each new transcript entry |
| onError | (err: Error) => void | Called on any error |
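
Several of these states are transient, so a small lookup that maps `VoiceCallState` to a user-facing label keeps render logic tidy. A minimal sketch; the label strings are illustrative:

```ts
import type { VoiceCallState } from '@smarthivelabs-devs/ai-nexus-expo';

// Illustrative labels only; adapt the copy to your UI.
const STATUS_LABELS: Record<VoiceCallState, string> = {
  idle: 'Ready to call',
  connecting: 'Connecting...',
  active: 'In call',
  ending: 'Hanging up...',
  ended: 'Call ended',
  error: 'Something went wrong',
};

// Usage inside a component:
// <Text>{STATUS_LABELS[state]}</Text>
```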
## Hook: useAudioRecording
Low-level hook for recording audio and sending it to Whisper for transcription.
```tsx
import React, { useEffect } from 'react';
import { View, Text, TouchableOpacity } from 'react-native';
import { useAudioRecording } from '@smarthivelabs-devs/ai-nexus-expo';

// `nexus` is the AiNexus instance from Quick Start; `styles` is your StyleSheet.

function VoiceNoteScreen() {
  const {
    isRecording,
    uri,            // local file path after recording
    transcript,     // transcribed text after calling transcribe()
    isTranscribing,
    error,
    requestPermissions,
    start,
    stop,
    transcribe,
  } = useAudioRecording();

  useEffect(() => {
    // Request permissions on mount
    requestPermissions();
  }, []);

  // Hold-to-record: stop on release, then transcribe the recording
  const handleRelease = async () => {
    await stop();
    const result = await transcribe(nexus, 'whisper-large-v3');
    console.log('Transcription:', result.text);
  };

  return (
    <View>
      <TouchableOpacity onPressIn={start} onPressOut={handleRelease} style={styles.recordBtn}>
        <Text>{isRecording ? '⏹ Recording...' : '🎙 Hold to record'}</Text>
      </TouchableOpacity>
      {isTranscribing && <Text>Transcribing...</Text>}
      {transcript && <Text>{transcript}</Text>}
      {uri && <Text style={styles.muted}>Saved: {uri}</Text>}
      {error && <Text style={styles.error}>{error.message}</Text>}
    </View>
  );
}
```

Returns:
| Field | Type | Description |
|-------|------|-------------|
| isRecording | boolean | true while recording is in progress |
| uri | string \| null | Local file path after stop() |
| transcript | string \| null | Transcribed text after transcribe() |
| isTranscribing | boolean | true while STT request is in flight |
| error | Error \| null | Last error |
| requestPermissions() | () => Promise<boolean> | Request mic permission, returns true if granted |
| start() | () => Promise<void> | Start recording |
| stop() | () => Promise<string \| null> | Stop recording, return local URI |
| transcribe(client, model?) | (AiNexus, string?) => Promise<AudioTranscribeResponse> | Upload and transcribe the last recording |
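
`requestPermissions()` resolves to `false` when the user denies access, so it is worth gating `start()` on the result rather than calling it blindly. A minimal sketch, assuming `Alert` is imported from `react-native`:

```tsx
import { Alert } from 'react-native';

// Inside a component using useAudioRecording:
const handleRecord = async () => {
  const granted = await requestPermissions();
  if (!granted) {
    Alert.alert('Microphone needed', 'Enable microphone access in Settings to record.');
    return;
  }
  await start();
};
```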
## Class: ExpoAudioRecorder

Direct class API for recording — use when you need more control than the hook provides.

```ts
import { ExpoAudioRecorder } from '@smarthivelabs-devs/ai-nexus-expo';

const recorder = new ExpoAudioRecorder();

// Check/request microphone permission
const granted = await recorder.requestPermissions();
if (!granted) throw new Error('Mic permission denied');

// Start recording (configures AVAudioSession on iOS automatically)
await recorder.start();
// ... user speaks ...

// Stop and get local file URI
const uri = await recorder.stop();
console.log('Recorded to:', uri);

// Upload to SmartHive storage and transcribe
const transcript = await recorder.uploadAndTranscribe(nexus, {
  model: 'whisper-large-v3',
  language: 'en',
});
console.log(transcript.text);

// Helpers
recorder.isRecording(); // boolean
recorder.getUri();      // string | null — path of the last recording
```

Default recording settings:

- iOS / Android: `.m4a`, 44100 Hz, mono, 128 kbps AAC
- Web: browser default
## Class: ExpoAudioPlayer

Play TTS audio from an ArrayBuffer (returned by `nexus.audio.speak()`).

```ts
import { ExpoAudioPlayer } from '@smarthivelabs-devs/ai-nexus-expo';

const player = new ExpoAudioPlayer();

// Option A — play a raw ArrayBuffer
const tts = await nexus.audio.speak({
  text: 'Hello from SmartHive!',
  model: 'tts-1-hd',
  voiceProfile: 'nova',
  response_format: 'mp3',
});
await player.play(tts.data);

// Option B — speak shortcut (fetches + plays in one call)
await player.speak(nexus, {
  text: 'How can I help you today?',
  voiceProfile: 'nova',
});

// Controls
await player.pause();
await player.resume();
await player.stop();   // unloads the sound
player.isPlaying();    // boolean
```
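
The recorder and player compose into a simple push-to-talk round trip: record, transcribe, then speak a reply. A minimal sketch using only the APIs shown above; the fixed-duration recording and echoed reply are illustrative (in a real app you would generate the reply with the chat API):

```ts
import { ExpoAudioRecorder, ExpoAudioPlayer } from '@smarthivelabs-devs/ai-nexus-expo';

// `nexus` is the AiNexus instance from Quick Start.
async function voiceRoundTrip() {
  const recorder = new ExpoAudioRecorder();
  const player = new ExpoAudioPlayer();

  if (!(await recorder.requestPermissions())) return;

  await recorder.start();
  await new Promise((r) => setTimeout(r, 3_000)); // record for ~3 s (illustrative)
  await recorder.stop();

  const heard = await recorder.uploadAndTranscribe(nexus, { model: 'whisper-large-v3' });
  console.log('You said:', heard.text);

  // Echo the transcription back as speech
  await player.speak(nexus, {
    text: `You said: ${heard.text}`,
    voiceProfile: 'nova',
  });
}
```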
## Class: LiveKitSession

Direct wrapper around `livekit-client`'s `Room` for cases where you want to manage the WebRTC session manually (instead of using the hook).
```ts
import { LiveKitSession } from '@smarthivelabs-devs/ai-nexus-expo';

// 1. Create a realtime session via the Nexus API
const session = await nexus.realtime.sessions.create({
  agentId: 'agent_abc123',
  mode: 'voice',
  features: ['transcription', 'tts', 'vad'],
});

// 2. Connect LiveKit
const livekit = new LiveKitSession();
await livekit.connect({
  url: session.livekitUrl,
  token: session.livekitToken,
  onConnected: () => console.log('Connected to LiveKit room'),
  onDisconnected: () => console.log('Disconnected'),
  onError: (err) => console.error('LiveKit error:', err),
});

// 3. Mic control
await livekit.setMicrophoneEnabled(true);  // start publishing audio
await livekit.setMicrophoneEnabled(false); // mute

// 4. Session state
console.log(livekit.getState());
// 'idle' | 'connecting' | 'connected' | 'disconnected' | 'error'

// 5. Disconnect
livekit.disconnect();

// 6. End the Nexus session
await nexus.realtime.sessions.end(session.sessionId);
```

## Complete Example: Voice-to-Text Note Taking
```tsx
import React, { useState, useEffect } from 'react';
import { View, Text, TouchableOpacity, FlatList, StyleSheet } from 'react-native';
import { AiNexus } from '@smarthivelabs-devs/ai-nexus';
import { useAudioRecording } from '@smarthivelabs-devs/ai-nexus-expo';

const nexus = new AiNexus({ apiKey: process.env.EXPO_PUBLIC_AI_NEXUS_KEY! });

export function VoiceNotesScreen() {
  const [notes, setNotes] = useState<string[]>([]);
  const { isRecording, transcript, isTranscribing, error, requestPermissions, start, stop, transcribe } = useAudioRecording();

  useEffect(() => {
    requestPermissions();
  }, []);

  useEffect(() => {
    if (transcript) setNotes(prev => [transcript, ...prev]);
  }, [transcript]);

  const handlePress = async () => {
    if (isRecording) {
      await stop();
      await transcribe(nexus);
    } else {
      await start();
    }
  };

  return (
    <View style={styles.container}>
      <TouchableOpacity style={[styles.btn, isRecording && styles.recording]} onPress={handlePress}>
        <Text style={styles.btnText}>
          {isRecording ? '⏹ Stop' : isTranscribing ? 'Transcribing...' : '🎙 Record'}
        </Text>
      </TouchableOpacity>
      {error && <Text style={styles.error}>{error.message}</Text>}
      <FlatList
        data={notes}
        keyExtractor={(_, i) => String(i)}
        renderItem={({ item }) => <Text style={styles.note}>{item}</Text>}
      />
    </View>
  );
}

const styles = StyleSheet.create({
  container: { flex: 1, padding: 16 },
  btn: { backgroundColor: '#007AFF', padding: 16, borderRadius: 8, alignItems: 'center' },
  recording: { backgroundColor: '#FF3B30' },
  btnText: { color: 'white', fontWeight: 'bold', fontSize: 16 },
  error: { color: 'red', marginTop: 8 },
  note: { padding: 12, borderBottomWidth: 1, borderBottomColor: '#E5E5EA' },
});
```

## TypeScript
All types are exported:
```ts
import type {
  // Hooks
  UseExpoChatOptions, UseExpoChatReturn,
  UseExpoVoiceCallOptions, UseExpoVoiceCallReturn,
  VoiceCallState, TranscriptEntry,
  UseAudioRecordingReturn,
  // Audio
  RecorderOptions,
  // LiveKit
  LiveKitSessionOptions, LiveKitSessionState,
} from '@smarthivelabs-devs/ai-nexus-expo';

// Core SDK types
import type {
  ChatMessage,
  AudioTranscribeResponse,
  RealtimeMode,
  RealtimeFeature,
} from '@smarthivelabs-devs/ai-nexus';
```
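
For example, `TranscriptEntry` matches the transcript shape shown in the `useVoiceCall` section, so callbacks can be typed explicitly:

```ts
import type { TranscriptEntry } from '@smarthivelabs-devs/ai-nexus-expo';

// A typed onTranscript handler
const logEntry = (entry: TranscriptEntry) => {
  console.log(`[${entry.timestamp}] ${entry.role}: ${entry.text}`);
};
```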
## License

MIT — © SmartHive Labs
