I tried openai-streams + nextjs-openai, they only works for Node 18+, however, they failed on Node 17 and lower. I'm restricted to Node 17 and lower as Digital Oceans App Platform currently not supporting Node 18.
I also tried this method which works well on client side, but it exposes the API key. I want to implement within the NextJS API route, but I'm unable to pass the streaming response to the client.
With the code below, I can only get the first chunk of response from the API route, and not able to get the streaming response to have the ChatGPT effect. Please kindly help.
// /api/prompt.js
import { Configuration, OpenAIApi } from "openai";
import { Readable } from "readable-stream";
const configuration = new Configuration({
apiKey: process.env.NEXT_PUBLIC_OPENAI_API_KEY,
});
const openai = new OpenAIApi(configuration);
export default async function handler(req, res) {
const completion = await openai.createCompletion(
{
model: "text-davinci-003",
prompt: "tell me a story",
max_tokens: 500,
stream: true,
},
{ responseType: "stream" }
);
completion.data.on("data", async (data) => {
const lines = data
.toString()
.split("\n")
.filter((line) => line.trim() !== "");
for (const line of lines) {
const message = line.replace(/^data: /, "");
if (message === "[DONE]") {
return;
}
try {
const parsed = JSON.parse(message);
const string = parsed.choices[0].text;
Readable.from(string).pipe(res);
} catch (error) {
console.error("Could not JSON parse stream message", message, error);
}
}
});
// /components/Completion.js
export default function Completion() {
const [text, setText] = useState();
const generate = async () => {
const response = await fetch("/api/prompt");
console.log("response: ", response);
const text = await response.text();
console.log("text: ", text);
setText((state) => state + text);
};
// ... rest
}
await openai.chat.completions.create
. Is it awaiting a response from OpenAI API? It's not clear to me whether our stream to the end client starts once we have the whole response from OpenAI servers, or are we sending chunks as soon as we have the first chunk of data from OpenAI? Perhaps you can clarify this doubt @vedant-agarwala, thank you! – Corriecorriedale