# @streamstraight/client
Streamstraight improves long-running LLM streaming to your frontend by replacing server-sent events with WebSockets. We ensure streams resume on reconnection, enable LLM requests in async jobs, and keep user-facing AI loading states reliable.
@streamstraight/client is Streamstraight's browser client SDK. An LLM stream provided to @streamstraight/server can be fetched and tailed with this client SDK.
```sh
npm install --save @streamstraight/client
```

## Usage
In a React app:
```tsx
import { useEffect } from "react";

import { connectStreamstraightClient } from "@streamstraight/client";
import type OpenAI from "openai";

async function fetchStreamstraightToken(): Promise<string> {
  const response = await fetch("/api/streamstraight-token", { method: "POST" });
  if (!response.ok) {
    throw new Error("Failed to fetch Streamstraight token");
  }
  const { jwtToken } = (await response.json()) as { jwtToken: string };
  return jwtToken;
}

export default function AiChatPage() {
  useEffect(() => {
    // Obtain your streamId from your backend here
    const streamId = "chat-123";
    handleLlmStream({ streamId });
  }, []);

  async function handleLlmStream({ streamId }: { streamId: string }) {
    const ssClient =
      await connectStreamstraightClient<OpenAI.Responses.ResponseStreamEvent>(
        { fetchToken: fetchStreamstraightToken },
        { streamId },
      );
    for await (const chunk of ssClient.toAsyncIterable()) {
      // Handle each chunk as it's streamed
    }
  }

  // Render your UI here
}
```
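Each chunk is typed as `OpenAI.Responses.ResponseStreamEvent`. As a minimal sketch of one way to surface the streamed text, the loop below accumulates OpenAI Responses API `response.output_text.delta` events into React state (the `text`/`setText` state is an illustrative `useState` hook inside your component; adapt the event handling to whatever event types your stream emits):

```tsx
const [text, setText] = useState(""); // inside AiChatPage; add `useState` to the react import

for await (const chunk of ssClient.toAsyncIterable()) {
  if (chunk.type === "response.output_text.delta") {
    // Each delta event carries the next slice of generated text.
    setText((prev) => prev + chunk.delta);
  }
  // Other event types (tool calls, completion markers, etc.) can be
  // handled with additional cases here.
}
```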
For more information, visit docs.streamstraight.com.
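The `fetchStreamstraightToken` helper above assumes a backend route that mints a short-lived Streamstraight JWT for the browser. A hypothetical Express sketch of that route follows; `mintStreamstraightToken` is a stand-in for however you generate tokens with your Streamstraight server credentials, not a real export shown here (see docs.streamstraight.com for the actual server-side API):

```ts
import express from "express";

// Stand-in for your real token-minting logic; name and signature are
// illustrative only.
declare function mintStreamstraightToken(): Promise<string>;

const app = express();

// POST /api/streamstraight-token: returns { jwtToken }, the shape the
// client-side fetchStreamstraightToken expects.
app.post("/api/streamstraight-token", async (_req, res) => {
  const jwtToken = await mintStreamstraightToken();
  res.json({ jwtToken });
});
```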
