
Commit 742b7be

lgrammel and dancer authored
feat: forward id, streaming start, streaming end of content blocks (#6812)
## Background

When streaming, it was not possible to distinguish different content parts of the same type (e.g. two text content parts) sent from the Anthropic and OpenAI APIs. This prevented building UIs where e.g. multiple reasoning blocks stream at the same time, or where a text part is followed by sources and then by further text parts.

## Summary

* text and reasoning parts have ids, and start/delta/end events when streaming

## Verification

Source streaming with an Anthropic server-side tool call in the UI.

## Future Work

* add a state field to UI message reasoning and text parts

---------

Co-authored-by: josh <josh@afterima.ge>
1 parent 23f4aad commit 742b7be
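
For orientation, the new content-block framing that the diffs below adopt looks roughly like this (a minimal sketch inferred from the updated examples in this commit; the type alias name is illustrative, not from the source):

```ts
// Sketch of the new framing: every text block carries an id, so multiple
// blocks of the same type can be told apart while they stream.
type TextStreamPartSketch =
  | { type: 'text-start'; id: string } // a content block opens
  | { type: 'text-delta'; id: string; delta: string } // incremental content
  | { type: 'text-end'; id: string }; // the block closes
```

Reasoning parts follow the same start/delta/end pattern, per the summary above.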

96 files changed: +11504 −7901 lines


.changeset/angry-dragons-tan.md
Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+---
+'@ai-sdk/provider': major
+---
+
+feat: forward id, streaming start, streaming end of content blocks

examples/ai-core/src/middleware/your-log-middleware.ts
Lines changed: 2 additions & 2 deletions

@@ -29,8 +29,8 @@ export const yourLogMiddleware: LanguageModelV2Middleware = {
       LanguageModelV2StreamPart
     >({
       transform(chunk, controller) {
-        if (chunk.type === 'text') {
-          generatedText += chunk.text;
+        if (chunk.type === 'text-delta') {
+          generatedText += chunk.delta;
         }

         controller.enqueue(chunk);
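
With per-block ids, a middleware can also accumulate deltas per block instead of into a single string. A minimal sketch, assuming only the chunk shapes visible in the diff above (the `Map`-based helper is illustrative, not part of this commit):

```ts
// Illustrative: keep one buffer per content block, keyed by id, so that
// interleaved blocks of the same type do not get mixed together.
const textByBlock = new Map<string, string>();

function handleChunk(chunk: { type: string; id?: string; delta?: string }) {
  if (chunk.type === 'text-start' && chunk.id != null) {
    textByBlock.set(chunk.id, ''); // open a fresh buffer for this block
  } else if (chunk.type === 'text-delta' && chunk.id != null) {
    textByBlock.set(
      chunk.id,
      (textByBlock.get(chunk.id) ?? '') + (chunk.delta ?? ''),
    );
  } else if (chunk.type === 'text-end' && chunk.id != null) {
    console.log(`block ${chunk.id} finished:`, textByBlock.get(chunk.id));
  }
}
```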

examples/ai-core/src/stream-object/mock.ts
Lines changed: 8 additions & 6 deletions

@@ -8,12 +8,14 @@ async function main() {
     model: new MockLanguageModelV2({
       doStream: async () => ({
         stream: convertArrayToReadableStream([
-          { type: 'text', text: '{ ' },
-          { type: 'text', text: '"content": ' },
-          { type: 'text', text: `"Hello, ` },
-          { type: 'text', text: `world` },
-          { type: 'text', text: `!"` },
-          { type: 'text', text: ' }' },
+          { type: 'text-start', id: '0' },
+          { type: 'text-delta', id: '0', delta: '{ ' },
+          { type: 'text-delta', id: '0', delta: '"content": ' },
+          { type: 'text-delta', id: '0', delta: `"Hello, ` },
+          { type: 'text-delta', id: '0', delta: `world` },
+          { type: 'text-delta', id: '0', delta: `!"` },
+          { type: 'text-delta', id: '0', delta: ' }' },
+          { type: 'text-end', id: '0' },
           {
             type: 'finish',
             finishReason: 'stop',
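
The id is what makes interleaved content blocks representable. A hypothetical mock stream with two concurrent text blocks (not part of this commit) could now be expressed as:

```ts
// Hypothetical: two text blocks ('0' and '1') streaming interleaved.
// Before this change, these deltas could not be attributed to either block.
const interleavedChunks = [
  { type: 'text-start', id: '0' },
  { type: 'text-start', id: '1' },
  { type: 'text-delta', id: '0', delta: 'first ' },
  { type: 'text-delta', id: '1', delta: 'second ' },
  { type: 'text-delta', id: '0', delta: 'block' },
  { type: 'text-delta', id: '1', delta: 'block' },
  { type: 'text-end', id: '0' },
  { type: 'text-end', id: '1' },
];
```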
Lines changed: 52 additions & 0 deletions

@@ -0,0 +1,52 @@
+import { cohere } from '@ai-sdk/cohere';
+import 'dotenv/config';
+
+async function main() {
+  const model = cohere('command-r-plus');
+
+  console.log('=== COHERE RAW STREAMING CHUNKS ===');
+
+  const { stream } = await model.doStream({
+    prompt: [
+      {
+        role: 'user',
+        content: [{ type: 'text', text: 'Count from 1 to 3 slowly.' }],
+      },
+    ],
+    includeRawChunks: true,
+  });
+
+  let textChunkCount = 0;
+  let rawChunkCount = 0;
+  let fullText = '';
+
+  const reader = stream.getReader();
+
+  try {
+    while (true) {
+      const { done, value: chunk } = await reader.read();
+      if (done) break;
+
+      if (chunk.type === 'raw') {
+        rawChunkCount++;
+        console.log(
+          'Raw chunk',
+          rawChunkCount,
+          ':',
+          JSON.stringify(chunk.rawValue),
+        );
+      } else {
+        // Count text deltas and reassemble the text so the summary
+        // logged below reflects what was streamed.
+        if (chunk.type === 'text-delta') {
+          textChunkCount++;
+          fullText += chunk.delta;
+        }
+        console.log('Processed chunk:', chunk.type, JSON.stringify(chunk));
+      }
+    }
+  } finally {
+    reader.releaseLock();
+  }
+
+  console.log();
+  console.log('Text chunks:', textChunkCount);
+  console.log('Raw chunks:', rawChunkCount);
+  console.log('Final text:', fullText);
+}
+
+main().catch(console.error);
Lines changed: 36 additions & 0 deletions

@@ -0,0 +1,36 @@
+import { mistral } from '@ai-sdk/mistral';
+import { streamText } from 'ai';
+import 'dotenv/config';
+
+async function main() {
+  const result = streamText({
+    model: mistral('mistral-small-latest'),
+    prompt: 'Count from 1 to 3 slowly.',
+    includeRawChunks: true,
+  });
+
+  let textChunkCount = 0;
+  let rawChunkCount = 0;
+
+  for await (const chunk of result.fullStream) {
+    if (chunk.type === 'text') {
+      textChunkCount++;
+      console.log('Text chunk', textChunkCount, ':', chunk.text);
+    } else if (chunk.type === 'raw') {
+      rawChunkCount++;
+      console.log(
+        'Raw chunk',
+        rawChunkCount,
+        ':',
+        JSON.stringify(chunk.rawValue),
+      );
+    }
+  }
+
+  console.log();
+  console.log('Text chunks:', textChunkCount);
+  console.log('Raw chunks:', rawChunkCount);
+  console.log('Final text:', await result.text);
+}
+
+main().catch(console.error);

examples/ai-core/src/stream-text/mock.ts
Lines changed: 5 additions & 3 deletions

@@ -7,9 +7,11 @@ async function main() {
     model: new MockLanguageModelV2({
       doStream: async () => ({
         stream: convertArrayToReadableStream([
-          { type: 'text', text: 'Hello' },
-          { type: 'text', text: ', ' },
-          { type: 'text', text: `world!` },
+          { type: 'text-start', id: '0' },
+          { type: 'text-delta', id: '0', delta: 'Hello' },
+          { type: 'text-delta', id: '0', delta: ', ' },
+          { type: 'text-delta', id: '0', delta: `world!` },
+          { type: 'text-end', id: '0' },
           {
             type: 'finish',
             finishReason: 'stop',
Lines changed: 51 additions & 0 deletions

@@ -0,0 +1,51 @@
+import { createOpenAICompatible } from '@ai-sdk/openai-compatible';
+import { streamText } from 'ai';
+import 'dotenv/config';
+
+async function main() {
+  const openaiCompatible = createOpenAICompatible({
+    baseURL: 'https://api.openai.com/v1',
+    name: 'openai-compatible',
+    headers: {
+      Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
+    },
+  });
+
+  const result = streamText({
+    model: openaiCompatible.completionModel('gpt-3.5-turbo-instruct'),
+    prompt: 'Hello, World!',
+    includeRawChunks: true,
+  });
+
+  let textChunkCount = 0;
+  let rawChunkCount = 0;
+  let otherChunkCount = 0;
+
+  for await (const chunk of result.fullStream) {
+    console.log('Chunk type:', chunk.type, 'Chunk:', JSON.stringify(chunk));
+
+    if (chunk.type === 'text') {
+      textChunkCount++;
+      console.log('Text chunk', textChunkCount, ':', chunk.text);
+    } else if (chunk.type === 'raw') {
+      rawChunkCount++;
+      console.log(
+        'Raw chunk',
+        rawChunkCount,
+        ':',
+        JSON.stringify(chunk.rawValue),
+      );
+    } else {
+      otherChunkCount++;
+      console.log('Other chunk', otherChunkCount, ':', chunk.type);
+    }
+  }
+
+  console.log();
+  console.log('Text chunks:', textChunkCount);
+  console.log('Raw chunks:', rawChunkCount);
+  console.log('Other chunks:', otherChunkCount);
+  console.log('Final text:', await result.text);
+}
+
+main().catch(console.error);

examples/ai-core/src/stream-text/smooth-stream-chinese.ts
Lines changed: 7 additions & 5 deletions

@@ -7,11 +7,13 @@ async function main() {
       doStream: async () => ({
         stream: simulateReadableStream({
           chunks: [
-            { type: 'text', text: '你好你好你好你好你好' },
-            { type: 'text', text: '你好你好你好你好你好' },
-            { type: 'text', text: '你好你好你好你好你好' },
-            { type: 'text', text: '你好你好你好你好你好' },
-            { type: 'text', text: '你好你好你好你好你好' },
+            { type: 'text-start', id: '0' },
+            { type: 'text-delta', id: '0', delta: '你好你好你好你好你好' },
+            { type: 'text-delta', id: '0', delta: '你好你好你好你好你好' },
+            { type: 'text-delta', id: '0', delta: '你好你好你好你好你好' },
+            { type: 'text-delta', id: '0', delta: '你好你好你好你好你好' },
+            { type: 'text-delta', id: '0', delta: '你好你好你好你好你好' },
+            { type: 'text-end', id: '0' },
             {
               type: 'finish',
               finishReason: 'stop',

examples/ai-core/src/stream-text/smooth-stream-japanese.ts
Lines changed: 6 additions & 5 deletions

@@ -7,11 +7,12 @@ async function main() {
       doStream: async () => ({
         stream: simulateReadableStream({
           chunks: [
-            { type: 'text', text: 'こんにちは' },
-            { type: 'text', text: 'こんにちは' },
-            { type: 'text', text: 'こんにちは' },
-            { type: 'text', text: 'こんにちは' },
-            { type: 'text', text: 'こんにちは' },
+            { type: 'text-start', id: '0' },
+            { type: 'text-delta', id: '0', delta: 'こんにちは' },
+            { type: 'text-delta', id: '0', delta: 'こんにちは' },
+            { type: 'text-delta', id: '0', delta: 'こんにちは' },
+            { type: 'text-delta', id: '0', delta: 'こんにちは' },
+            { type: 'text-end', id: '0' },
             {
               type: 'finish',
               finishReason: 'stop',
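
These mocks feed the smooth-stream examples, where a transform re-chunks the coarse text deltas before they reach the UI. A rough usage sketch, assuming the `smoothStream` / `experimental_transform` API of the `ai` package (the option shown is illustrative):

```ts
import { smoothStream, streamText, type LanguageModel } from 'ai';

// Assumed wiring: smoothStream splits incoming text deltas into smaller
// chunks (optionally delayed) to produce a smoother typing effect.
declare const model: LanguageModel; // e.g. the MockLanguageModelV2 above

const result = streamText({
  model,
  prompt: 'こんにちは',
  experimental_transform: smoothStream({ delayInMs: 10 }),
});
```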
Lines changed: 36 additions & 0 deletions

@@ -0,0 +1,36 @@
+import { xai } from '@ai-sdk/xai';
+import { streamText } from 'ai';
+import 'dotenv/config';
+
+async function main() {
+  const result = streamText({
+    model: xai('grok-beta'),
+    prompt: 'Count from 1 to 3 slowly.',
+    includeRawChunks: true,
+  });
+
+  let textChunkCount = 0;
+  let rawChunkCount = 0;
+
+  for await (const chunk of result.fullStream) {
+    if (chunk.type === 'text') {
+      textChunkCount++;
+      console.log('Text chunk', textChunkCount, ':', chunk.text);
+    } else if (chunk.type === 'raw') {
+      rawChunkCount++;
+      console.log(
+        'Raw chunk',
+        rawChunkCount,
+        ':',
+        JSON.stringify(chunk.rawValue),
+      );
+    }
+  }
+
+  console.log();
+  console.log('Text chunks:', textChunkCount);
+  console.log('Raw chunks:', rawChunkCount);
+  console.log('Final text:', await result.text);
+}
+
+main().catch(console.error);
