Commit f30b4b3

Browse files
authored
feat(new-compiler): add nebius support (#1900)
* feat: add support for @ai-sdk/openai v3.0.1
* Updated pnpm-lock.yaml to include @ai-sdk/openai version 3.0.1 with zod@4.1.12.
* Modified package.json in new-compiler to include @ai-sdk/openai as a dependency.
* Enhanced model-factory.ts to support OpenAI model creation with optional custom base URL.
* feat: add support for OpenAI-compatible providers (Nebius, etc.)
* feat: add support for Chat Completions API for OpenAI-compatible providers
* feat: enhance model-factory tests and parsing logic
* feat: update model-factory tests and fix error message in createAiModel function
1 parent ab675d1 commit f30b4b3

File tree

6 files changed: +298 -3 lines changed


.changeset/lovely-badgers-serve.md

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
---
"@lingo.dev/compiler": minor
---

Add support for OpenAI-compatible providers (Nebius, Together AI, etc.) by using Chat Completions API when OPENAI_BASE_URL is set

Lines changed: 6 additions & 0 deletions

@@ -0,0 +1,6 @@
---
"@lingo.dev/compiler": minor
"@lingo.dev/_compiler": minor
---

Add support for OpenAI-compatible providers (e.g., Nebius) via OPENAI_BASE_URL environment variable
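
For a quick sense of how this is meant to be used, here is a minimal configuration sketch. It assumes the locale-to-model config shape exercised by the tests below; the variable name and the Nebius model id are illustrative placeholders, and how the config is handed to the compiler is not shown in this diff.

// OPENAI_BASE_URL and OPENAI_API_KEY are the variable names this commit reads;
// in practice they would live in a .env file or the shell environment, and the
// values below are placeholders.
process.env.OPENAI_BASE_URL = "https://api.studio.nebius.ai/v1/";
process.env.OPENAI_API_KEY = "<your-nebius-api-key>";

// The locale-to-model mapping keeps the "openai" provider prefix; the custom
// base URL decides which OpenAI-compatible backend actually serves requests.
const models = {
  "*:*": "openai:meta-llama/Meta-Llama-3.1-8B-Instruct",
};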

packages/new-compiler/package.json

Lines changed: 1 addition & 0 deletions

@@ -158,6 +158,7 @@
     "@ai-sdk/google": "3.0.1",
     "@ai-sdk/groq": "3.0.1",
     "@ai-sdk/mistral": "3.0.1",
+    "@ai-sdk/openai": "3.0.1",
     "@babel/core": "7.26.0",
     "@babel/generator": "7.28.5",
     "@babel/parser": "7.28.5",
Lines changed: 249 additions & 0 deletions
@@ -0,0 +1,249 @@
import { describe, expect, it, beforeEach, afterEach, vi } from "vitest";
import {
  parseModelString,
  getLocaleModel,
  validateAndGetApiKeys,
  createAiModel,
  getKeyFromEnv,
} from "./model-factory";

vi.mock("dotenv", () => ({
  config: vi.fn(() => ({ parsed: {} })),
}));

describe("model-factory", () => {
  describe("parseModelString", () => {
    it("should parse provider:model format correctly", () => {
      const result = parseModelString("openai:gpt-4");
      expect(result).toEqual({ provider: "openai", name: "gpt-4" });
    });

    it("should handle model names with colons", () => {
      const result = parseModelString("openai:ft:gpt-4:my-org:custom:id");
      expect(result).toEqual({
        provider: "openai",
        name: "ft:gpt-4:my-org:custom:id",
      });
    });

    it("should handle simple model names with dashes", () => {
      const result = parseModelString("groq:llama3-8b-8192");
      expect(result).toEqual({ provider: "groq", name: "llama3-8b-8192" });
    });

    it("should return undefined for invalid format", () => {
      expect(parseModelString("invalid")).toBeUndefined();
      expect(parseModelString("")).toBeUndefined();
    });
  });

  describe("getLocaleModel", () => {
    it("should match exact locale pair", () => {
      const config = {
        "en:fr": "openai:gpt-4",
        "*:*": "groq:llama3-8b-8192",
      };

      const result = getLocaleModel(config, "en", "fr");
      expect(result).toEqual({ provider: "openai", name: "gpt-4" });
    });

    it("should fallback to *:targetLocale wildcard pattern", () => {
      const config = {
        "*:fr": "openai:gpt-3.5-turbo",
        "*:*": "groq:llama3-8b-8192",
      };

      const result = getLocaleModel(config, "en", "fr");
      expect(result).toEqual({ provider: "openai", name: "gpt-3.5-turbo" });
    });

    it("should fallback to sourceLocale:* wildcard pattern", () => {
      const config = {
        "en:*": "openai:gpt-4",
        "*:*": "groq:llama3-8b-8192",
      };

      const result = getLocaleModel(config, "en", "de");
      expect(result).toEqual({ provider: "openai", name: "gpt-4" });
    });

    it("should return undefined when no match found", () => {
      const config = {
        "en:fr": "openai:gpt-4",
      };

      const result = getLocaleModel(config, "de", "es");
      expect(result).toBeUndefined();
    });
  });

  describe("validateAndGetApiKeys", () => {
    const originalEnv = process.env;

    beforeEach(() => {
      process.env = { ...originalEnv };
    });

    afterEach(() => {
      process.env = originalEnv;
    });

    it("should validate and return API keys for configured providers", () => {
      process.env.OPENAI_API_KEY = "test-openai-key";
      process.env.GROQ_API_KEY = "test-groq-key";

      const config = {
        "*:fr": "openai:gpt-4",
        "*:es": "groq:llama3-8b-8192",
      };

      const result = validateAndGetApiKeys(config);
      expect(result).toEqual({
        openai: "test-openai-key",
        groq: "test-groq-key",
      });
    });

    it("should skip providers that don't require API keys (like Ollama)", () => {
      const config = {
        "*:*": "ollama:llama3",
      };

      // Should not throw even without OLLAMA_API_KEY
      const result = validateAndGetApiKeys(config);
      expect(result).toEqual({});
    });

    it("should throw error for missing required API keys", () => {
      const config = {
        "*:*": "openai:gpt-4",
      };

      expect(() => validateAndGetApiKeys(config)).toThrow();
    });

    it("should validate lingo.dev provider when specified", () => {
      process.env.LINGODOTDEV_API_KEY = "test-lingo-key";

      const result = validateAndGetApiKeys("lingo.dev");
      expect(result).toEqual({
        "lingo.dev": "test-lingo-key",
      });
    });

    it("should throw error for unknown provider", () => {
      const config = {
        "*:*": "unknownprovider:model",
      };

      expect(() => validateAndGetApiKeys(config)).toThrow(
        /Unknown provider "unknownprovider"/,
      );
    });
  });

  describe("createAiModel", () => {
    const originalEnv = process.env;

    beforeEach(() => {
      process.env = { ...originalEnv };
    });

    afterEach(() => {
      process.env = originalEnv;
    });

    it("should support OpenAI with custom baseURL from env", () => {
      process.env.OPENAI_BASE_URL = "https://api.studio.nebius.ai/v1/";
      process.env.OPENAI_API_KEY = "test-key";

      const model = { provider: "openai", name: "gpt-4" };
      const keys = { openai: "test-key" };

      const result = createAiModel(model, keys);
      expect(result).toBeDefined();
    });

    it("should create OpenAI model without baseURL when not set", () => {
      delete process.env.OPENAI_BASE_URL;
      process.env.OPENAI_API_KEY = "test-key";

      const model = { provider: "openai", name: "gpt-4" };
      const keys = { openai: "test-key" };

      const result = createAiModel(model, keys);
      expect(result).toBeDefined();
    });

    it("should create Groq model", () => {
      const model = { provider: "groq", name: "llama3-8b-8192" };
      const keys = { groq: "test-groq-key" };

      const result = createAiModel(model, keys);
      expect(result).toBeDefined();
    });

    it("should create Google model", () => {
      const model = { provider: "google", name: "gemini-pro" };
      const keys = { google: "test-google-key" };

      const result = createAiModel(model, keys);
      expect(result).toBeDefined();
    });

    it("should create OpenRouter model", () => {
      const model = { provider: "openrouter", name: "anthropic/claude-3-opus" };
      const keys = { openrouter: "test-openrouter-key" };

      const result = createAiModel(model, keys);
      expect(result).toBeDefined();
    });

    it("should create Ollama model without API key", () => {
      const model = { provider: "ollama", name: "llama3" };
      const keys = {};

      const result = createAiModel(model, keys);
      expect(result).toBeDefined();
    });

    it("should create Mistral model", () => {
      const model = { provider: "mistral", name: "mistral-large-latest" };
      const keys = { mistral: "test-mistral-key" };

      const result = createAiModel(model, keys);
      expect(result).toBeDefined();
    });

    it("should throw error for unsupported provider", () => {
      const model = { provider: "unsupported", name: "model" };
      const keys = {};

      expect(() => createAiModel(model, keys)).toThrow(
        /Provider "unsupported" is not supported/,
      );
    });
  });

  describe("getKeyFromEnv", () => {
    const originalEnv = process.env;

    beforeEach(() => {
      process.env = { ...originalEnv };
    });

    afterEach(() => {
      process.env = originalEnv;
    });

    it("should get key from process.env", () => {
      process.env.TEST_KEY = "test-value";
      expect(getKeyFromEnv("TEST_KEY")).toBe("test-value");
    });

    it("should return undefined for missing key", () => {
      expect(getKeyFromEnv("NONEXISTENT_KEY")).toBeUndefined();
    });
  });
});
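
Condensing what the getLocaleModel tests above assert: the lookup starts from the exact "source:target" pair and falls back through the wildcard patterns, ending at "*:*". The combined config below is an illustrative sketch; each call corresponds to one of the behaviors pinned down by the tests.

// Hypothetical combined config covering the fallback cases tested above.
const config = {
  "en:fr": "openai:gpt-4",
  "*:fr": "openai:gpt-3.5-turbo",
  "en:*": "mistral:mistral-large-latest",
  "*:*": "groq:llama3-8b-8192",
};

getLocaleModel(config, "en", "fr"); // exact pair -> { provider: "openai", name: "gpt-4" }
getLocaleModel(config, "de", "fr"); // "*:fr"     -> { provider: "openai", name: "gpt-3.5-turbo" }
getLocaleModel(config, "en", "de"); // "en:*"     -> { provider: "mistral", name: "mistral-large-latest" }
getLocaleModel(config, "pt", "ja"); // "*:*"      -> { provider: "groq", name: "llama3-8b-8192" }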

packages/new-compiler/src/translators/lingo/model-factory.ts

Lines changed: 22 additions & 3 deletions

@@ -7,6 +7,7 @@ import { createGoogleGenerativeAI } from "@ai-sdk/google";
 import { createOpenRouter } from "@openrouter/ai-sdk-provider";
 import { ollama } from "ai-sdk-ollama";
 import { createMistral } from "@ai-sdk/mistral";
+import { createOpenAI } from "@ai-sdk/openai";
 import type { LanguageModel } from "ai";
 import * as dotenv from "dotenv";
 import * as path from "path";
@@ -148,8 +149,14 @@ export function getLocaleModel(
  * @throws Error if format is invalid
  */
 export function parseModelString(modelString: string): LocaleModel | undefined {
-  // Split on first colon only
-  const [provider, name] = modelString.split(":", 2);
+  // Split on first colon only to allow colons in model names
+  const colonIndex = modelString.indexOf(":");
+  if (colonIndex === -1) {
+    return undefined;
+  }
+
+  const provider = modelString.substring(0, colonIndex);
+  const name = modelString.substring(colonIndex + 1);
 
   if (!provider || !name) {
     return undefined;
@@ -251,7 +258,7 @@ export function createAiModel(
   if (providerConfig.apiKeyEnvVar && !apiKey) {
     throw new Error(
       `⚠️ ${providerConfig.name} API key not found. Please set ${providerConfig.apiKeyEnvVar} environment variable.\n\n` +
-        `This should not happen if validateAndFetchApiKeys() was called. Please restart the service.`,
+        `This should not happen if validateAndGetApiKeys() was called. Please restart the service.`,
     );
   }
 
@@ -263,6 +270,18 @@
     case "google":
       return createGoogleGenerativeAI({ apiKey: apiKey! })(model.name);
 
+    case "openai": {
+      // Support custom base URL for OpenAI-compatible providers (e.g., Nebius)
+      const baseURL = getKeyFromEnv("OPENAI_BASE_URL");
+
+      const provider = createOpenAI({
+        apiKey: apiKey!,
+        ...(baseURL && { baseURL }),
+      });
+
+      return provider.chat(model.name);
+    }
+
     case "openrouter":
       return createOpenRouter({ apiKey: apiKey! })(model.name);
 
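Finally, a minimal end-to-end sketch of how the exported factory functions compose, based on the signatures exercised in the test file above; the call site itself is hypothetical and not part of this commit.

import {
  parseModelString,
  validateAndGetApiKeys,
  createAiModel,
} from "./model-factory";

// Hypothetical call site; config shape and env handling mirror the tests above.
const config = { "*:*": "openai:gpt-4" };

// Throws if a required key (here OPENAI_API_KEY) is missing from the environment.
const keys = validateAndGetApiKeys(config);

// "openai:gpt-4" -> { provider: "openai", name: "gpt-4" }; only the first colon
// splits, so fine-tune ids like "openai:ft:gpt-4:my-org:custom:id" also parse.
const model = parseModelString(config["*:*"]);

// The "openai" case is built via provider.chat(model.name), i.e. the Chat
// Completions API, so when OPENAI_BASE_URL points at an OpenAI-compatible
// backend such as Nebius, requests are sent to that backend.
const aiModel = createAiModel(model!, keys);
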
pnpm-lock.yaml

Lines changed: 15 additions & 0 deletions
Some generated files are not rendered by default.

0 commit comments
