3 changes: 2 additions & 1 deletion js/plugins/evaluators/src/metrics/answer_relevancy.ts
@@ -22,6 +22,7 @@ import { loadPromptFile } from '@genkit-ai/dotprompt';
 import similarity from 'compute-cosine-similarity';
 import path from 'path';
 import * as z from 'zod';
+import { getDirName } from './helper.js';

 const AnswerRelevancyResponseSchema = z.object({
   question: z.string(),
@@ -47,7 +48,7 @@ export async function answerRelevancyScore<
     throw new Error('Output was not provided');
   }
   const prompt = await loadPromptFile(
-    path.resolve(__dirname, '../../prompts/answer_relevancy.prompt')
+    path.resolve(getDirName(), '../../prompts/answer_relevancy.prompt')
   );
   const response = await generate({
     model: judgeLlm,
5 changes: 3 additions & 2 deletions js/plugins/evaluators/src/metrics/faithfulness.ts
@@ -20,6 +20,7 @@ import { ModelArgument } from '@genkit-ai/ai/model';
 import { loadPromptFile } from '@genkit-ai/dotprompt';
 import path from 'path';
 import * as z from 'zod';
+import { getDirName } from './helper.js';

 const LongFormResponseSchema = z.object({ statements: z.array(z.string()) });

@@ -54,7 +55,7 @@ export async function faithfulnessScore<
     throw new Error('Output was not provided');
   }
   const longFormPrompt = await loadPromptFile(
-    path.resolve(__dirname, '../../prompts/faithfulness_long_form.prompt')
+    path.resolve(getDirName(), '../../prompts/faithfulness_long_form.prompt')
   );
   const longFormResponse = await generate({
     model: judgeLlm,
@@ -75,7 +76,7 @@
   const allStatements = statements.map((s) => `statement: ${s}`).join('\n');
   const allContext = context.join('\n');
   const nliPrompt = await loadPromptFile(
-    path.resolve(__dirname, '../../prompts/faithfulness_nli.prompt')
+    path.resolve(getDirName(), '../../prompts/faithfulness_nli.prompt')
   );
   const response = await generate({
     model: judgeLlm,
20 changes: 20 additions & 0 deletions js/plugins/evaluators/src/metrics/helper.ts
@@ -0,0 +1,20 @@
+/**
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** Helper function to get current directory, isolated in a separate file to work with ESM */
+export function getDirName() {
+  return __dirname;
+}
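The indirection works because `__dirname` is a CommonJS-only global: referencing it from a single helper gives the bundler one place to shim or rewrite it, instead of scattering the reference across every metric file. For comparison, a fully ESM-native variant would derive the directory from `import.meta.url` instead. A minimal sketch, not part of this PR, using only standard Node.js APIs:

import path from 'path';
import { fileURLToPath } from 'url';

/** Hypothetical ESM-native variant of getDirName (not in this PR). */
export function getDirName(): string {
  // import.meta.url is a file:// URL for the current module;
  // fileURLToPath converts it to a filesystem path for path.dirname.
  return path.dirname(fileURLToPath(import.meta.url));
}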
7 changes: 4 additions & 3 deletions js/plugins/evaluators/src/metrics/maliciousness.ts
@@ -19,6 +19,7 @@ import { ModelArgument } from '@genkit-ai/ai/model';
 import { loadPromptFile } from '@genkit-ai/dotprompt';
 import path from 'path';
 import * as z from 'zod';
+import { getDirName } from './helper.js';

 const MaliciousnessResponseSchema = z.object({
   reason: z.string(),
@@ -34,12 +35,12 @@ export async function maliciousnessScore<
 ): Promise<Score> {
   const d = dataPoint;
   try {
-    if (!d.context || !d.output) {
-      throw new Error('contexts and output are required');
+    if (!d.input || !d.output) {
+      throw new Error('input and output are required');
     }

     const prompt = await loadPromptFile(
-      path.resolve(__dirname, '../../prompts/maliciousness.prompt')
+      path.resolve(getDirName(), '../../prompts/maliciousness.prompt')
     );
     //TODO: safetySettings are gemini specific - pull these out so they are tied to the LLM
     const response = await generate({
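The guard change from `d.context` to `d.input` presumably aligns validation with what the maliciousness prompt actually consumes: the user input and the model output, rather than retrieval context.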
1 change: 1 addition & 0 deletions js/plugins/evaluators/tsup.config.ts
@@ -19,4 +19,5 @@ import { defaultOptions } from '../../tsup.common';

 export default defineConfig({
   ...(defaultOptions as Options),
+  shims: false,
 });
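For context: tsup's `shims` option injects an `import.meta.url` shim into CJS output and a `__dirname` shim into ESM output. Setting `shims: false` turns that injection off, plausibly so the build leaves the deliberately isolated `__dirname` in helper.ts alone. A standalone sketch of what the composed config amounts to, assuming `defaultOptions` supplies the shared entry and format settings from tsup.common:

import { defineConfig } from 'tsup';

export default defineConfig({
  entry: ['src/index.ts'], // assumed entry point
  format: ['cjs', 'esm'], // assumed dual-format build
  shims: false, // do not inject __dirname / import.meta.url shims
});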
20 changes: 20 additions & 0 deletions js/testapps/rag/src/index.ts
@@ -37,6 +37,26 @@ export default configureGenkit({
     googleAI({ apiVersion: ['v1', 'v1beta'] }),
     genkitEval({
       judge: geminiPro,
+      judgeConfig: {
+        safetySettings: [
+          {
+            category: 'HARM_CATEGORY_HATE_SPEECH',
+            threshold: 'BLOCK_NONE',
+          },
+          {
+            category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
+            threshold: 'BLOCK_NONE',
+          },
+          {
+            category: 'HARM_CATEGORY_HARASSMENT',
+            threshold: 'BLOCK_NONE',
+          },
+          {
+            category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
+            threshold: 'BLOCK_NONE',
+          },
+        ],
+      } as any,
       metrics: [GenkitMetric.FAITHFULNESS, GenkitMetric.MALICIOUSNESS],
     }),
     langchain({
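A note on the new judgeConfig: the maliciousness metric asks the judge model to reason about potentially harmful content, so Gemini's default safety filters could block the judge's own responses mid-evaluation; setting each harm category to `BLOCK_NONE` presumably keeps the evaluator from failing on exactly the inputs it is supposed to score. The `as any` cast mirrors the TODO in maliciousness.ts: judge config is currently Gemini-specific and not yet typed per model.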
21 changes: 11 additions & 10 deletions pnpm-lock.yaml

Some generated files are not rendered by default.