Skip to content

Commit 4048ce3

Browse files
authored
fix (ai): add tests and examples for openai responses (#6825)
## background frontend testing was needed for the reasoning output of the openai responses model ## summary - add frontend test pages for openai responses reasoning - added reasoning-start, reasoning-delta, and reasoning-end parts to ui-message-stream-parts ## tasks - [x] test pages with useChat and DefaultChatTransport - [x] raw streaming verification tests ## future work * add any other providers if needed
1 parent f77bc38 commit 4048ce3

File tree

5 files changed

+187
-0
lines changed

5 files changed

+187
-0
lines changed

‎.changeset/four-gorillas-sell.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
'ai': patch
3+
---
4+
5+
fix (ai): add tests and examples for openai responses
Lines changed: 74 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,74 @@
1+
import { openai } from '@ai-sdk/openai';
2+
import 'dotenv/config';
3+
4+
async function main() {
5+
const model = openai.responses('o3-mini');
6+
7+
console.log('=== OPENAI RESPONSES RAW STREAMING CHUNKS ===');
8+
9+
const { stream } = await model.doStream({
10+
prompt: [
11+
{
12+
role: 'user',
13+
content: [
14+
{ type: 'text', text: 'How many "r"s are in the word "strawberry"?' },
15+
],
16+
},
17+
],
18+
providerOptions: {
19+
openai: {
20+
reasoningEffort: 'low',
21+
reasoningSummary: 'auto',
22+
},
23+
},
24+
includeRawChunks: true,
25+
});
26+
27+
let textChunkCount = 0;
28+
let reasoningChunkCount = 0;
29+
let rawChunkCount = 0;
30+
let fullText = '';
31+
let fullReasoning = '';
32+
33+
const reader = stream.getReader();
34+
35+
try {
36+
while (true) {
37+
const { done, value: chunk } = await reader.read();
38+
if (done) break;
39+
40+
if (chunk.type === 'raw') {
41+
rawChunkCount++;
42+
console.log(
43+
'Raw chunk',
44+
rawChunkCount,
45+
':',
46+
JSON.stringify(chunk.rawValue),
47+
);
48+
} else {
49+
console.log('Processed chunk:', chunk.type, JSON.stringify(chunk));
50+
}
51+
52+
if (chunk.type === 'text-delta') {
53+
textChunkCount++;
54+
fullText += chunk.delta;
55+
}
56+
57+
if (chunk.type === 'reasoning-delta') {
58+
reasoningChunkCount++;
59+
fullReasoning += chunk.delta;
60+
}
61+
}
62+
} finally {
63+
reader.releaseLock();
64+
}
65+
66+
console.log();
67+
console.log('Text chunks:', textChunkCount);
68+
console.log('Reasoning chunks:', reasoningChunkCount);
69+
console.log('Raw chunks:', rawChunkCount);
70+
console.log('Final text:', fullText);
71+
console.log('Final reasoning:', fullReasoning);
72+
}
73+
74+
main().catch(console.error);
Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
import { openai } from '@ai-sdk/openai';
2+
import { convertToModelMessages, streamText, UIMessage } from 'ai';
3+
4+
export const maxDuration = 30;
5+
6+
export async function POST(req: Request) {
7+
const { messages }: { messages: UIMessage[] } = await req.json();
8+
9+
const prompt = convertToModelMessages(messages);
10+
11+
const result = streamText({
12+
model: openai.responses('o3-mini'),
13+
prompt,
14+
providerOptions: {
15+
openai: {
16+
reasoningEffort: 'low',
17+
reasoningSummary: 'auto',
18+
},
19+
},
20+
});
21+
22+
return result.toUIMessageStreamResponse();
23+
}
Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
'use client';
2+
3+
import { useChat } from '@ai-sdk/react';
4+
import { DefaultChatTransport } from 'ai';
5+
import ChatInput from '@/component/chat-input';
6+
7+
export default function TestOpenAIResponses() {
8+
const { error, status, sendMessage, messages, regenerate, stop } = useChat({
9+
transport: new DefaultChatTransport({ api: '/api/chat-openai-responses' }),
10+
});
11+
12+
return (
13+
<div className="flex flex-col w-full max-w-md py-24 mx-auto stretch">
14+
<h1 className="mb-4 text-xl font-bold">
15+
OpenAI Responses Block-Based Streaming Test
16+
</h1>
17+
18+
{messages.map(m => (
19+
<div key={m.id} className="whitespace-pre-wrap mb-4">
20+
<div className="font-semibold mb-1">
21+
{m.role === 'user' ? 'User:' : 'AI:'}
22+
</div>
23+
{m.parts.map((part, index) => {
24+
if (part.type === 'text') {
25+
return <div key={index}>{part.text}</div>;
26+
} else if (part.type === 'reasoning') {
27+
return (
28+
<div
29+
key={index}
30+
className="mt-2 p-2 bg-blue-50 border-l-2 border-blue-300 text-blue-800 text-sm"
31+
>
32+
<strong>Reasoning:</strong> {part.text}
33+
</div>
34+
);
35+
}
36+
})}
37+
</div>
38+
))}
39+
40+
{(status === 'submitted' || status === 'streaming') && (
41+
<div className="mt-4 text-gray-500">
42+
{status === 'submitted' && <div>Loading...</div>}
43+
<button
44+
type="button"
45+
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
46+
onClick={stop}
47+
>
48+
Stop
49+
</button>
50+
</div>
51+
)}
52+
53+
{error && (
54+
<div className="mt-4">
55+
<div className="text-red-500">An error occurred.</div>
56+
<button
57+
type="button"
58+
className="px-4 py-2 mt-4 text-blue-500 border border-blue-500 rounded-md"
59+
onClick={() => regenerate()}
60+
>
61+
Retry
62+
</button>
63+
</div>
64+
)}
65+
66+
<ChatInput status={status} onSubmit={text => sendMessage({ text })} />
67+
</div>
68+
);
69+
}

‎packages/ai/src/ui-message-stream/ui-message-stream-parts.ts

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,22 @@ export const uiMessageStreamPartSchema = z.union([
5757
text: z.string(),
5858
providerMetadata: z.record(z.any()).optional(),
5959
}),
60+
z.strictObject({
61+
type: z.literal('reasoning-start'),
62+
id: z.string(),
63+
providerMetadata: z.record(z.any()).optional(),
64+
}),
65+
z.strictObject({
66+
type: z.literal('reasoning-delta'),
67+
id: z.string(),
68+
delta: z.string(),
69+
providerMetadata: z.record(z.any()).optional(),
70+
}),
71+
z.strictObject({
72+
type: z.literal('reasoning-end'),
73+
id: z.string(),
74+
providerMetadata: z.record(z.any()).optional(),
75+
}),
6076
z.strictObject({
6177
type: z.literal('reasoning-part-finish'),
6278
}),

0 commit comments

Comments
 (0)