telygent-ui
v0.1.5
Published
Telygent UI CLI
Downloads
295
Readme
telygent-ui
A shadcn-style component registry + CLI for Telygent UI.
CLI workflow
npx telygent-ui setup
npx telygent-ui add chat-interface

Adapter pattern
Wrap your app with ChatProvider and supply an adapter using your data library of choice.
import {ChatProvider, type SendMessageInput, type SendMessageResult, type ChatMessage} from "@/components/ai/ChatProvider";
import {ChatInterface} from "@/components/ai/ChatInterface";
// Request/response adapter backed by a plain HTTP API.
// Swap `api` for your data layer of choice (React Query, Axios, RTK Query, …).
const adapter = {
  async sendMessage({message, conversationId}: SendMessageInput): Promise<SendMessageResult> {
    const reply = await api.send({message, conversationId});
    // Wrap the server reply as an assistant-authored chat message.
    return {
      conversationId: reply.conversationId,
      message: {
        role: "assistant",
        content: reply.content,
        createdAt: new Date(),
        visualizations: reply.visualizations,
      },
    };
  },

  async getHistory(conversationId: string): Promise<ChatMessage[]> {
    const {messages} = await api.history(conversationId);
    // Normalize stored records into ChatMessage objects; anything that is
    // not explicitly from the assistant is treated as a user message.
    return messages.map((record) => {
      const role = record.role === "assistant" ? "assistant" : "user";
      return {
        role,
        content: record.content,
        createdAt: new Date(record.timestamp),
        visualizations: record.visualizations ?? [],
        summaryCards: record.summaryCards ?? [],
      };
    });
  },
};
// Minimal page: provide the adapter once, render the chat UI inside it.
export default function Page() {
  // Replace with a real conversation identifier from your routing/state.
  const activeConversation = "your-conversation-id";
  return (
    <ChatProvider adapter={adapter}>
      <ChatInterface conversationId={activeConversation} aiName="Telygent" />
    </ChatProvider>
  );
}

SSE adapter example
If your backend streams SSE updates, implement sendMessageStream:
import {ChatProvider, type SendMessageInput, type ChatStreamEvent} from "@/components/ai/ChatProvider";
import {ChatInterface} from "@/components/ai/ChatInterface";
/**
 * Bridge a fetch-based SSE endpoint into ChatProvider's streaming adapter.
 * Reads the response body chunk by chunk, extracts complete `data:` lines,
 * and yields each JSON payload as a ChatStreamEvent.
 *
 * Fixes over the naive version: flushes the decoder and any final line that
 * arrives without a trailing newline (previously dropped), checks HTTP
 * status before parsing, and releases the reader lock on exit.
 */
async function* streamChat(input: SendMessageInput): AsyncIterable<ChatStreamEvent> {
  const response = await fetch("/api/chat/stream", {
    method: "POST",
    headers: {"Content-Type": "application/json"},
    body: JSON.stringify({
      question: input.message,
      conversationId: input.conversationId,
    }),
  });
  // Fail fast on HTTP errors; otherwise JSON.parse below would choke on an
  // error page instead of surfacing the real problem.
  if (!response.ok) {
    throw new Error(`Stream request failed: ${response.status}`);
  }
  if (!response.body) {
    throw new Error("No stream body");
  }
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  // Parse every complete line currently in `buffer`; keep the trailing
  // partial line around for the next chunk.
  const flush = function* (): Generator<ChatStreamEvent> {
    const lines = buffer.split("\n");
    buffer = lines.pop() ?? "";
    for (const line of lines) {
      const trimmed = line.trim();
      if (!trimmed.startsWith("data:")) continue;
      const payload = trimmed.replace(/^data:\s*/, "");
      if (!payload) continue;
      yield JSON.parse(payload) as ChatStreamEvent;
    }
  };
  try {
    while (true) {
      const {value, done} = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, {stream: true});
      yield* flush();
    }
    // Final flush: emit bytes the decoder is still holding, then force the
    // last (possibly newline-less) line through the parser.
    buffer += decoder.decode();
    buffer += "\n";
    yield* flush();
  } finally {
    reader.releaseLock();
  }
}
// Stream-only adapter: all traffic goes through sendMessageStream, so the
// non-streaming entry point deliberately rejects if it is ever called.
const adapter = {
  sendMessage: async () => {
    throw new Error("sendMessage not used when streaming");
  },
  sendMessageStream: streamChat,
};
export default function Page() {
return (
<ChatProvider adapter={adapter}>
<ChatInterface conversationId="conv_123" />
</ChatProvider>
);
}RTK Query SSE example
If you already have an RTK Query SSE endpoint, you can bridge it with the helper hook:
import {ChatProvider} from "@/components/ai/ChatProvider";
import {ChatInterface} from "@/components/ai/ChatInterface";
import {useRtkStreamAdapter} from "@/components/ai/hooks/use-rtk-stream-adapter";
import {useSendAiStreamMessageQuery} from "@/redux/api/generalApi";
// Bridge an existing RTK Query SSE endpoint into the chat UI.
export default function Page({conversationId}: {conversationId: string}) {
  const {sendMessageStream} = useRtkStreamAdapter({useSendAiStreamMessageQuery});
  // Mirror the plain SSE example: pair the stream with a throwing
  // sendMessage instead of casting to `any`, so the adapter stays
  // type-checked by ChatProvider.
  const adapter = {
    sendMessage: async () => {
      throw new Error("sendMessage not used when streaming");
    },
    sendMessageStream,
  };
  return (
    <ChatProvider adapter={adapter}>
      <ChatInterface conversationId={conversationId} />
    </ChatProvider>
  );
}

Styles
Import the MDX styles once (for example in your global CSS):
@import "@/components/ai/ai-mdx.css";