Skip to content

Commit d1a1aa1

Browse files
authored
chore (provider): merge rawRequest into request (language model v2) (#5604)
1 parent d91b50d commit d1a1aa1

36 files changed: +719 additions, −335 deletions

‎.changeset/cool-buckets-shout.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
'@ai-sdk/provider': major
3+
---
4+
5+
chore (provider): merge rawRequest into request (language model v2)

‎packages/ai/core/generate-object/generate-object.test.ts

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -308,8 +308,6 @@ describe('output = "object"', () => {
308308
id: 'test-id-from-model',
309309
timestamp: new Date(10000),
310310
modelId: 'test-response-model-id',
311-
},
312-
rawResponse: {
313311
headers: {
314312
'custom-response-header': 'response-header-value',
315313
},
@@ -350,8 +348,6 @@ describe('output = "object"', () => {
350348
id: 'test-id-from-model',
351349
timestamp: new Date(10000),
352350
modelId: 'test-response-model-id',
353-
},
354-
rawResponse: {
355351
headers: {
356352
'custom-response-header': 'response-header-value',
357353
},

‎packages/ai/core/generate-object/generate-object.ts

Lines changed: 7 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -464,9 +464,6 @@ export async function generateObject<SCHEMA, RESULT>({
464464
let finishReason: FinishReason;
465465
let usage: Parameters<typeof calculateLanguageModelUsage>[0];
466466
let warnings: CallWarning[] | undefined;
467-
let rawResponse:
468-
| { headers?: Record<string, string>; body?: unknown }
469-
| undefined;
470467
let response: LanguageModelResponseMetadata;
471468
let request: LanguageModelRequestMetadata;
472469
let logprobs: LogProbs | undefined;
@@ -548,6 +545,8 @@ export async function generateObject<SCHEMA, RESULT>({
548545
id: result.response?.id ?? generateId(),
549546
timestamp: result.response?.timestamp ?? currentDate(),
550547
modelId: result.response?.modelId ?? model.modelId,
548+
headers: result.response?.headers,
549+
body: result.response?.body,
551550
};
552551

553552
if (result.text === undefined) {
@@ -596,7 +595,6 @@ export async function generateObject<SCHEMA, RESULT>({
596595
finishReason = generateResult.finishReason;
597596
usage = generateResult.usage;
598597
warnings = generateResult.warnings;
599-
rawResponse = generateResult.rawResponse;
600598
logprobs = generateResult.logprobs;
601599
resultProviderMetadata = generateResult.providerMetadata;
602600
request = generateResult.request ?? {};
@@ -675,6 +673,8 @@ export async function generateObject<SCHEMA, RESULT>({
675673
id: result.response?.id ?? generateId(),
676674
timestamp: result.response?.timestamp ?? currentDate(),
677675
modelId: result.response?.modelId ?? model.modelId,
676+
headers: result.response?.headers,
677+
body: result.response?.body,
678678
};
679679

680680
if (objectText === undefined) {
@@ -722,7 +722,6 @@ export async function generateObject<SCHEMA, RESULT>({
722722
finishReason = generateResult.finishReason;
723723
usage = generateResult.usage;
724724
warnings = generateResult.warnings;
725-
rawResponse = generateResult.rawResponse;
726725
logprobs = generateResult.logprobs;
727726
resultProviderMetadata = generateResult.providerMetadata;
728727
request = generateResult.request ?? {};
@@ -753,7 +752,7 @@ export async function generateObject<SCHEMA, RESULT>({
753752
text: result,
754753
response,
755754
usage: calculateLanguageModelUsage(usage),
756-
finishReason: finishReason,
755+
finishReason,
757756
});
758757
}
759758

@@ -773,7 +772,7 @@ export async function generateObject<SCHEMA, RESULT>({
773772
text: result,
774773
response,
775774
usage: calculateLanguageModelUsage(usage),
776-
finishReason: finishReason,
775+
finishReason,
777776
});
778777
}
779778

@@ -827,11 +826,7 @@ export async function generateObject<SCHEMA, RESULT>({
827826
usage: calculateLanguageModelUsage(usage),
828827
warnings,
829828
request,
830-
response: {
831-
...response,
832-
headers: rawResponse?.headers,
833-
body: rawResponse?.body,
834-
},
829+
response,
835830
logprobs,
836831
providerMetadata: resultProviderMetadata,
837832
});

‎packages/ai/core/generate-object/stream-object.test.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -701,7 +701,7 @@ describe('streamObject', () => {
701701
},
702702
]),
703703
rawCall: { rawPrompt: 'prompt', rawSettings: {} },
704-
rawResponse: { headers: { call: '2' } },
704+
response: { headers: { call: '2' } },
705705
}),
706706
}),
707707
schema: z.object({ content: z.string() }),
@@ -746,7 +746,7 @@ describe('streamObject', () => {
746746
},
747747
]),
748748
rawCall: { rawPrompt: 'prompt', rawSettings: {} },
749-
rawResponse: { headers: { call: '2' } },
749+
response: { headers: { call: '2' } },
750750
}),
751751
}),
752752
schema: z.object({ content: z.string() }),

‎packages/ai/core/generate-object/stream-object.ts

Lines changed: 22 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -699,7 +699,7 @@ class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
699699
}
700700

701701
const {
702-
result: { stream, warnings, rawResponse, request },
702+
result: { stream, warnings, response, request },
703703
doStreamSpan,
704704
startTimestampMs,
705705
} = await retry(() =>
@@ -754,7 +754,7 @@ class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
754754
// pipe chunks through a transformation stream that extracts metadata:
755755
let accumulatedText = '';
756756
let textDelta = '';
757-
let response: {
757+
let fullResponse: {
758758
id: string;
759759
timestamp: Date;
760760
modelId: string;
@@ -846,10 +846,10 @@ class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
846846

847847
switch (chunk.type) {
848848
case 'response-metadata': {
849-
response = {
850-
id: chunk.id ?? response.id,
851-
timestamp: chunk.timestamp ?? response.timestamp,
852-
modelId: chunk.modelId ?? response.modelId,
849+
fullResponse = {
850+
id: chunk.id ?? fullResponse.id,
851+
timestamp: chunk.timestamp ?? fullResponse.timestamp,
852+
modelId: chunk.modelId ?? fullResponse.modelId,
853853
};
854854
break;
855855
}
@@ -867,22 +867,26 @@ class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
867867
usage = calculateLanguageModelUsage(chunk.usage);
868868
providerMetadata = chunk.providerMetadata;
869869

870-
controller.enqueue({ ...chunk, usage, response });
870+
controller.enqueue({
871+
...chunk,
872+
usage,
873+
response: fullResponse,
874+
});
871875

872876
// resolve promises that can be resolved now:
873877
self.usagePromise.resolve(usage);
874878
self.providerMetadataPromise.resolve(providerMetadata);
875879
self.responsePromise.resolve({
876-
...response,
877-
headers: rawResponse?.headers,
880+
...fullResponse,
881+
headers: response?.headers,
878882
});
879883

880884
// resolve the object promise with the latest object:
881885
const validationResult = outputStrategy.validateFinalResult(
882886
latestObjectJson,
883887
{
884888
text: accumulatedText,
885-
response,
889+
response: fullResponse,
886890
usage,
887891
},
888892
);
@@ -896,7 +900,7 @@ class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
896900
'No object generated: response did not match schema.',
897901
cause: validationResult.error,
898902
text: accumulatedText,
899-
response,
903+
response: fullResponse,
900904
usage,
901905
finishReason: finishReason,
902906
});
@@ -930,19 +934,19 @@ class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
930934
'ai.response.object': {
931935
output: () => JSON.stringify(object),
932936
},
933-
'ai.response.id': response.id,
934-
'ai.response.model': response.modelId,
937+
'ai.response.id': fullResponse.id,
938+
'ai.response.model': fullResponse.modelId,
935939
'ai.response.timestamp':
936-
response.timestamp.toISOString(),
940+
fullResponse.timestamp.toISOString(),
937941

938942
'ai.usage.promptTokens': finalUsage.promptTokens,
939943
'ai.usage.completionTokens':
940944
finalUsage.completionTokens,
941945

942946
// standardized gen-ai llm span attributes:
943947
'gen_ai.response.finish_reasons': [finishReason],
944-
'gen_ai.response.id': response.id,
945-
'gen_ai.response.model': response.modelId,
948+
'gen_ai.response.id': fullResponse.id,
949+
'gen_ai.response.model': fullResponse.modelId,
946950
'gen_ai.usage.input_tokens': finalUsage.promptTokens,
947951
'gen_ai.usage.output_tokens':
948952
finalUsage.completionTokens,
@@ -974,8 +978,8 @@ class DefaultStreamObjectResult<PARTIAL, RESULT, ELEMENT_STREAM>
974978
object,
975979
error,
976980
response: {
977-
...response,
978-
headers: rawResponse?.headers,
981+
...fullResponse,
982+
headers: response?.headers,
979983
},
980984
warnings,
981985
providerMetadata,

‎packages/ai/core/generate-text/generate-text.test.ts

Lines changed: 4 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -465,8 +465,6 @@ describe('result.response', () => {
465465
id: 'test-id-from-model',
466466
timestamp: new Date(10000),
467467
modelId: 'test-response-model-id',
468-
},
469-
rawResponse: {
470468
headers: {
471469
'custom-response-header': 'response-header-value',
472470
},
@@ -608,8 +606,6 @@ describe('options.maxSteps', () => {
608606
id: 'test-id-2-from-model',
609607
timestamp: new Date(10000),
610608
modelId: 'test-response-model-id',
611-
},
612-
rawResponse: {
613609
headers: {
614610
'custom-response-header': 'response-header-value',
615611
},
@@ -737,6 +733,10 @@ describe('options.maxSteps', () => {
737733
id: 'test-id-2-from-model',
738734
timestamp: new Date(10000),
739735
modelId: 'test-response-model-id',
736+
// test handling of custom response headers:
737+
headers: {
738+
'custom-response-header': 'response-header-value',
739+
},
740740
},
741741
sources: [
742742
{
@@ -755,12 +755,6 @@ describe('options.maxSteps', () => {
755755
},
756756
],
757757
usage: { completionTokens: 5, promptTokens: 30 },
758-
// test handling of custom response headers:
759-
rawResponse: {
760-
headers: {
761-
'custom-response-header': 'response-header-value',
762-
},
763-
},
764758
};
765759
}
766760
case 2: {

‎packages/ai/core/generate-text/generate-text.ts

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -364,6 +364,8 @@ A function that attempts to repair a tool call that failed to parse.
364364
id: result.response?.id ?? generateId(),
365365
timestamp: result.response?.timestamp ?? currentDate(),
366366
modelId: result.response?.modelId ?? model.modelId,
367+
headers: result.response?.headers,
368+
body: result.response?.body,
367369
};
368370

369371
// Add response information to the span:
@@ -527,9 +529,6 @@ A function that attempts to repair a tool call that failed to parse.
527529
request: currentModelResponse.request ?? {},
528530
response: {
529531
...currentModelResponse.response,
530-
headers: currentModelResponse.rawResponse?.headers,
531-
body: currentModelResponse.rawResponse?.body,
532-
533532
// deep clone msgs to avoid mutating past messages in multi-step:
534533
messages: structuredClone(responseMessages),
535534
},
@@ -591,8 +590,6 @@ A function that attempts to repair a tool call that failed to parse.
591590
request: currentModelResponse.request ?? {},
592591
response: {
593592
...currentModelResponse.response,
594-
headers: currentModelResponse.rawResponse?.headers,
595-
body: currentModelResponse.rawResponse?.body,
596593
messages: responseMessages,
597594
},
598595
logprobs: currentModelResponse.logprobs,

‎packages/ai/core/generate-text/stream-text.test.ts

Lines changed: 11 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -58,18 +58,18 @@ function createTestModel({
5858
},
5959
]),
6060
rawCall = { rawPrompt: 'prompt', rawSettings: {} },
61-
rawResponse = undefined,
61+
response = undefined,
6262
request = undefined,
6363
warnings,
6464
}: {
6565
stream?: ReadableStream<LanguageModelV2StreamPart>;
66-
rawResponse?: { headers: Record<string, string> };
66+
response?: { headers: Record<string, string> };
6767
rawCall?: { rawPrompt: string; rawSettings: Record<string, unknown> };
6868
request?: { body: string };
6969
warnings?: LanguageModelV2CallWarning[];
7070
} = {}): LanguageModelV2 {
7171
return new MockLanguageModelV2({
72-
doStream: async () => ({ stream, rawCall, rawResponse, request, warnings }),
72+
doStream: async () => ({ stream, rawCall, response, request, warnings }),
7373
});
7474
}
7575

@@ -1801,7 +1801,7 @@ describe('streamText', () => {
18011801
usage: { completionTokens: 10, promptTokens: 3 },
18021802
},
18031803
]),
1804-
rawResponse: { headers: { call: '2' } },
1804+
response: { headers: { call: '2' } },
18051805
}),
18061806
...defaultSettings(),
18071807
});
@@ -2152,7 +2152,7 @@ describe('streamText', () => {
21522152
},
21532153
},
21542154
]),
2155-
rawResponse: { headers: { call: '2' } },
2155+
response: { headers: { call: '2' } },
21562156
}),
21572157
tools: {
21582158
tool1: {
@@ -2364,7 +2364,7 @@ describe('streamText', () => {
23642364
},
23652365
]),
23662366
rawCall: { rawPrompt: 'prompt', rawSettings: {} },
2367-
rawResponse: { headers: { call: '1' } },
2367+
response: { headers: { call: '1' } },
23682368
};
23692369
}
23702370
case 1: {
@@ -2445,7 +2445,7 @@ describe('streamText', () => {
24452445
},
24462446
]),
24472447
rawCall: { rawPrompt: 'prompt', rawSettings: {} },
2448-
rawResponse: { headers: { call: '2' } },
2448+
response: { headers: { call: '2' } },
24492449
};
24502450
}
24512451
default:
@@ -2693,7 +2693,7 @@ describe('streamText', () => {
26932693
},
26942694
]),
26952695
rawCall: { rawPrompt: 'prompt', rawSettings: {} },
2696-
rawResponse: { headers: { call: '3' } },
2696+
response: { headers: { call: '3' } },
26972697
};
26982698
}
26992699
case 3: {
@@ -2752,7 +2752,7 @@ describe('streamText', () => {
27522752
},
27532753
]),
27542754
rawCall: { rawPrompt: 'prompt', rawSettings: {} },
2755-
rawResponse: { headers: { call: '3' } },
2755+
response: { headers: { call: '3' } },
27562756
};
27572757
}
27582758
default:
@@ -3842,7 +3842,7 @@ describe('streamText', () => {
38423842
},
38433843
},
38443844
]),
3845-
rawResponse: { headers: { call: '2' } },
3845+
response: { headers: { call: '2' } },
38463846
}),
38473847
tools: {
38483848
tool1: {
@@ -3897,7 +3897,7 @@ describe('streamText', () => {
38973897
},
38983898
},
38993899
]),
3900-
rawResponse: { headers: { call: '2' } },
3900+
response: { headers: { call: '2' } },
39013901
}),
39023902
tools: {
39033903
tool1: tool({

Comments: 0