Often, you’ll want to process an LLM stream into your own format before passing it to Streamstraight. The example below shows how to do this with an OpenAI stream; the same pattern works with any LLM provider SDK:
import OpenAI from "openai";

// streamstraightServer is assumed to be imported from the Streamstraight server SDK.
const openai = new OpenAI(); // reads OPENAI_API_KEY from the environment

async function startLlmStream({
  userPrompt,
  messageId,
}: {
  userPrompt: string;
  messageId: string;
}) {
  const llmResponseStream = await openai.responses.stream({
    model: "gpt-4.1-mini",
    input: [
      {
        role: "user",
        content: [{ type: "input_text", text: userPrompt }],
      },
    ],
  });

  // Calling the async generator returns an AsyncIterable immediately; no await needed.
  const transformedStream = transformStream(llmResponseStream);

  const ssServer = await streamstraightServer(
    { apiKey: process.env.STREAMSTRAIGHT_API_KEY },
    { streamId: messageId },
  );

  // Stream to Streamstraight in the background asynchronously
  void ssServer.stream(transformedStream);
}

// The custom chunk format delivered to Streamstraight:
type Chunk =
  | { type: "text"; textDelta: string }
  | { type: "toolCall"; toolName: string; toolArgsDelta: string }
  | { type: "toolArgs"; toolArgsDelta: string };

async function* transformStream(
  stream: AsyncIterable<OpenAI.Responses.ResponseStreamEvent>,
): AsyncIterable<Chunk> {
  for await (const chunk of stream) {
    if (chunk.type === "response.output_text.delta") {
      // Incremental text output from the model
      yield { type: "text", textDelta: chunk.delta };
    } else if (chunk.type === "response.function_call_arguments.delta") {
      // Incremental tool-call arguments
      yield { type: "toolArgs", toolArgsDelta: chunk.delta };
    } else if (
      chunk.type === "response.output_item.added" &&
      chunk.item.type === "function_call"
    ) {
      // A new tool call started: emit its name plus any initial arguments
      yield {
        type: "toolCall",
        toolName: chunk.item.name,
        toolArgsDelta: chunk.item.arguments,
      };
    }
    // All other event types (e.g. lifecycle events) are dropped.
  }
}
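
For context, here’s one way the function above might be wired up. This is a sketch assuming an Express app; the /chat route, request shape, and use of randomUUID are illustrative, not part of the Streamstraight API:

import express from "express";
import { randomUUID } from "node:crypto";

const app = express();
app.use(express.json());

app.post("/chat", async (req, res) => {
  // Use a fresh message ID as the Streamstraight stream ID.
  const messageId = randomUUID();

  // startLlmStream resolves once the LLM request and Streamstraight
  // connection are set up; chunks keep flowing in the background.
  await startLlmStream({ userPrompt: req.body.prompt, messageId });

  // Return the ID so the client knows which stream to subscribe to.
  res.json({ messageId });
});

You can also sanity-check transformStream in isolation by feeding it hand-built events. The mock below is a sketch that only populates the fields the transform actually reads; the cast skips the event types’ other required properties:

// Hand-built events covering only the fields transformStream reads.
const fakeEvents = [
  {
    type: "response.output_item.added",
    item: { type: "function_call", name: "get_weather", arguments: "" },
  },
  { type: "response.function_call_arguments.delta", delta: '{"city":"Par' },
  { type: "response.output_text.delta", delta: "Hello" },
] as unknown as OpenAI.Responses.ResponseStreamEvent[];

async function* mockEvents(): AsyncIterable<OpenAI.Responses.ResponseStreamEvent> {
  yield* fakeEvents;
}

for await (const chunk of transformStream(mockEvents())) {
  console.log(chunk);
}
// -> { type: "toolCall", toolName: "get_weather", toolArgsDelta: "" }
// -> { type: "toolArgs", toolArgsDelta: '{"city":"Par' }
// -> { type: "text", textDelta: "Hello" }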