React Router v7 and Remix v2 let you write client and server code in a single framework. Streamstraight integrates easily, allowing you to stream LLM responses from your server to your client, all within React Router/Remix.

1. Install Streamstraight packages

Install the Streamstraight server SDK inside your React Router app so you can forward LLM responses to Streamstraight.
npm install --save @streamstraight/server @streamstraight/client
Create a .env entry for the API key you generate in the Streamstraight dashboard. In production, make sure to add this API key to your server secrets.
STREAMSTRAIGHT_API_KEY=your-api-key
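If you want to fail fast when the key is missing, you can centralize the lookup in a small server-only helper. The file path and function name below are just illustrative; adapt them to your project layout.
// app/lib/env.server.ts (hypothetical helper; not part of the Streamstraight SDK)
export function requireStreamstraightApiKey(): string {
  const apiKey = process.env.STREAMSTRAIGHT_API_KEY;
  if (!apiKey) {
    throw new Error("STREAMSTRAIGHT_API_KEY is not set");
  }
  return apiKey;
}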

2. Modify your React Router server action

Modify your server route to kick off your LLM generation and pipe its stream into Streamstraight. This should be whichever route receives and handles the user’s prompt.
app/routes/api/new-user-message.ts
import { streamstraightServer } from "@streamstraight/server";
import OpenAI from "openai";

import type { Route } from "./+types/new-user-message";

// Not using the OpenAI SDK? Use any ReadableStream or AsyncIterable.
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

export async function action({ request }: Route.ActionArgs) {
  if (request.method !== "POST") {
    return new Response("Method not allowed", { status: 405 });
  }

  const formData = await request.formData();
  const messageId = formData.get("messageId");
  const userInput = formData.get("userInput");

  // Validate the form values so TypeScript knows they're strings.
  if (typeof messageId !== "string" || typeof userInput !== "string") {
    return new Response("Bad request", { status: 400 });
  }

  const llmResponseStream = await openai.responses.stream({
    model: "gpt-4.1-mini",
    input: [
      {
        role: "user",
        content: [{ type: "input_text", text: userInput }],
      },
      },
    ],
  });

  const ssServer = await streamstraightServer(
    { apiKey: process.env.STREAMSTRAIGHT_API_KEY },
    { streamId: messageId },
  );

  // Run this in the background so you don't block your network request.
  // Note that if you're running this on serverless functions, you will
  // need to wrap this in a `after` or `waitUntil` function.
  void ssServer.stream(llmResponseStream);

  return { messageId };
}
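On serverless platforms the function instance can be frozen as soon as the response returns, which would cut the stream short. A minimal sketch for Vercel, assuming the waitUntil helper from @vercel/functions is available in your runtime (other hosts expose similar APIs):
import { waitUntil } from "@vercel/functions";

// Instead of `void ssServer.stream(llmResponseStream);`, keep the function
// instance alive until the stream has been fully forwarded to Streamstraight.
waitUntil(ssServer.stream(llmResponseStream));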

3. Mint a client token in a server action

Create an action (for example, app/routes/api/streamstraight-token.ts) that mints a Streamstraight client JWT. Your frontend will call this action before connecting to the stream.
Because fetchClientToken mints a new JWT on every call, we don’t recommend generating the token in your loader function; loaders rerun on every navigation and revalidation, so mint the token on demand from an action instead.
app/routes/api/streamstraight-token.ts
import { fetchClientToken } from "@streamstraight/server";

import type { Route } from "./+types/streamstraight-token";

export async function action({ request }: Route.ActionArgs) {
  if (request.method !== "POST") {
    return new Response("Method not allowed", { status: 405 });
  }

  // Ensure your user is authenticated before minting tokens
  const token = await fetchClientToken({
    apiKey: process.env.STREAMSTRAIGHT_API_KEY,
  });

  return { token };
}
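As the comment above notes, only mint tokens for authenticated users. A sketch of gating the same action with a session check, assuming a hypothetical requireUser helper from your own auth setup:
// Hypothetical auth helper from your own app; adjust to your session setup.
import { requireUser } from "~/lib/auth.server";

export async function action({ request }: Route.ActionArgs) {
  if (request.method !== "POST") {
    return new Response("Method not allowed", { status: 405 });
  }

  // Throws a redirect (or a 401 response) when the request has no valid session.
  await requireUser(request);

  const token = await fetchClientToken({
    apiKey: process.env.STREAMSTRAIGHT_API_KEY,
  });

  return { token };
}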

4. Connect to the stream in your route component

In your chat UI, you’ll need to ensure a few things happen:
  • Your client fetches a Streamstraight client JWT
  • Your client has a unique streamId or messageId that is the same as what’s on the server
  • Your client connects to Streamstraight and handles the resulting chunks
You can do these however you want; below is an example:
app/routes/chat-page.tsx
import { createId } from "@paralleldrive/cuid2";
import { connectStreamstraightClient } from "@streamstraight/client";
import type OpenAI from "openai";
import { useCallback, useState } from "react";
import { useFetcher } from "react-router";

import type { Route } from "./+types/chat-page";
import { action as newUserMessageAction } from "./api/new-user-message";
import { action as ssTokenAction } from "./api/streamstraight-token";

export async function loader({ request }: Route.LoaderArgs) {
  // Your loader
}

export async function action({ request }: Route.ActionArgs) {
  // Your action
}

async function fetchStreamstraightToken() {
  const response = await fetch("/api/streamstraight-token", { method: "POST" });
  if (!response.ok) {
    throw new Error("Failed to fetch Streamstraight token");
  }
  const data = (await response.json()) as { token: string };
  return data.token;
}

export default function ChatPage({ loaderData, actionData }: Route.ComponentProps) {
  const [textInput, setTextInput] = useState<string>("");
  const [assistantResponse, setAssistantResponse] = useState<string>("");
  const newUserMessageFetcher = useFetcher<typeof newUserMessageAction>();

  function handleSendMessage({ userInput }: { userInput: string }) {
    const messageId = createId(); // Assign a unique ID for this message stream

    // You can start listening for the stream in the background before
    // making the network request to your backend.
    void handleMessageStream({ messageId });

    // Make the network request to your backend to start LLM generation.
    newUserMessageFetcher.submit({ messageId, userInput }, { method: "POST" });
  }

  async function handleMessageStream({ messageId }: { messageId: string }) {
    const ssClient =
      // Type each chunk however you want; you're not tied to the OpenAI example here
      // Just make sure the type matches what's on your server
      await connectStreamstraightClient<OpenAI.Responses.ResponseStreamEvent>(
        { fetchToken: fetchStreamstraightToken },
        { streamId: messageId },
      );

    for await (const chunk of ssClient.toAsyncIterable()) {
      // Handle each chunk as it's streamed. The example code below
      // demonstrates handling the OpenAI response stream.
      if (chunk.type !== "response.output_text.delta") continue;
      setAssistantResponse((prev) => prev + chunk.delta);
    }
  }

  return (
    <div>
      {/* Your chat UI */}
      <input value={textInput} onChange={(e) => setTextInput(e.target.value)} />
      <button onClick={() => handleSendMessage({ userInput: textInput })}>Send</button>
    </div>
  );
}
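If you also want to know when generation has finished (for example, to hide a loading indicator), you can branch on the terminal chunk inside the same loop. The sketch below assumes the OpenAI Responses stream emits a response.completed event and that you track a hypothetical isStreaming state; adjust it to whatever chunk types your server forwards.
for await (const chunk of ssClient.toAsyncIterable()) {
  if (chunk.type === "response.output_text.delta") {
    setAssistantResponse((prev) => prev + chunk.delta);
  } else if (chunk.type === "response.completed") {
    // The model is done; flip any "generating…" UI state here.
    setIsStreaming(false);
  }
}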