diff --git a/js/.changeset/little-sheep-watch.md b/js/.changeset/little-sheep-watch.md
new file mode 100644
index 000000000..0355e8956
--- /dev/null
+++ b/js/.changeset/little-sheep-watch.md
@@ -0,0 +1,5 @@
+---
+"@arizeai/openinference-vercel": patch
+---
+
+Updated the OpenInference semantic convention mapping to account for changes to the Vercel AI SDK semantic conventions
diff --git a/js/packages/openinference-vercel/src/AISemanticConventions.ts b/js/packages/openinference-vercel/src/AISemanticConventions.ts
index f2e01f984..67835f148 100644
--- a/js/packages/openinference-vercel/src/AISemanticConventions.ts
+++ b/js/packages/openinference-vercel/src/AISemanticConventions.ts
@@ -13,15 +13,15 @@ const AIPrefixes = {
   telemetry: "telemetry",
   prompt: "prompt",
   toolCall: "toolCall",
-  result: "result",
+  response: "response",
 } as const;
 
 const AIUsagePostfixes = {
   completion: "completion",
   promptTokens: "promptTokens",
 } as const;
 
-const AIResultPostfixes = {
+const AIResponsePostfixes = {
   text: "text",
   toolCalls: "toolCalls",
   object: "object",
@@ -50,14 +50,14 @@ const TOKEN_COUNT_COMPLETION =
 const TOKEN_COUNT_PROMPT =
   `${AI_PREFIX}.${AIPrefixes.usage}.${AIUsagePostfixes.promptTokens}` as const;
 
-const RESULT_TEXT =
-  `${AI_PREFIX}.${AIPrefixes.result}.${AIResultPostfixes.text}` as const;
+const RESPONSE_TEXT =
+  `${AI_PREFIX}.${AIPrefixes.response}.${AIResponsePostfixes.text}` as const;
 
-const RESULT_TOOL_CALLS =
-  `${AI_PREFIX}.${AIPrefixes.result}.${AIResultPostfixes.toolCalls}` as const;
+const RESPONSE_TOOL_CALLS =
+  `${AI_PREFIX}.${AIPrefixes.response}.${AIResponsePostfixes.toolCalls}` as const;
 
-const RESULT_OBJECT =
-  `${AI_PREFIX}.${AIPrefixes.result}.${AIResultPostfixes.object}` as const;
+const RESPONSE_OBJECT =
+  `${AI_PREFIX}.${AIPrefixes.response}.${AIResponsePostfixes.object}` as const;
 
 const PROMPT = `${AI_PREFIX}.${AIPrefixes.prompt}` as const;
 
@@ -88,9 +88,9 @@ export const AISemanticConventions = {
   SETTINGS,
   TOKEN_COUNT_COMPLETION,
   TOKEN_COUNT_PROMPT,
-  RESULT_TEXT,
-  RESULT_TOOL_CALLS,
-  RESULT_OBJECT,
+  RESPONSE_TEXT,
+  RESPONSE_TOOL_CALLS,
+  RESPONSE_OBJECT,
   PROMPT,
   PROMPT_MESSAGES,
   EMBEDDING_TEXT,
diff --git a/js/packages/openinference-vercel/src/constants.ts b/js/packages/openinference-vercel/src/constants.ts
index 99c64742f..7b3419c6f 100644
--- a/js/packages/openinference-vercel/src/constants.ts
+++ b/js/packages/openinference-vercel/src/constants.ts
@@ -44,10 +44,10 @@ export const AISemConvToOISemConvMap: Record<
     SemanticConventions.LLM_TOKEN_COUNT_COMPLETION,
   [AISemanticConventions.TOKEN_COUNT_PROMPT]:
     SemanticConventions.LLM_TOKEN_COUNT_PROMPT,
-  [AISemanticConventions.RESULT_TEXT]: SemanticConventions.OUTPUT_VALUE,
-  [AISemanticConventions.RESULT_TOOL_CALLS]:
+  [AISemanticConventions.RESPONSE_TEXT]: SemanticConventions.OUTPUT_VALUE,
+  [AISemanticConventions.RESPONSE_TOOL_CALLS]:
     SemanticConventions.MESSAGE_TOOL_CALLS,
-  [AISemanticConventions.RESULT_OBJECT]: SemanticConventions.OUTPUT_VALUE,
+  [AISemanticConventions.RESPONSE_OBJECT]: SemanticConventions.OUTPUT_VALUE,
   [AISemanticConventions.PROMPT]: SemanticConventions.INPUT_VALUE,
   [AISemanticConventions.PROMPT_MESSAGES]:
     SemanticConventions.LLM_INPUT_MESSAGES,
diff --git a/js/packages/openinference-vercel/src/utils.ts b/js/packages/openinference-vercel/src/utils.ts
index c008a7050..1e8e2625c 100644
--- a/js/packages/openinference-vercel/src/utils.ts
+++ b/js/packages/openinference-vercel/src/utils.ts
@@ -457,8 +457,8 @@ const getOpenInferenceAttributes = (attributes: Attributes): Attributes => {
         ...safelyGetInvocationParamAttributes(attributes),
       };
     case AISemanticConventions.PROMPT:
-    case AISemanticConventions.RESULT_OBJECT:
-    case AISemanticConventions.RESULT_TEXT: {
+    case AISemanticConventions.RESPONSE_OBJECT:
+    case AISemanticConventions.RESPONSE_TEXT: {
       return {
         ...openInferenceAttributes,
         ...safelyGetIOValueAttributes({
@@ -468,7 +468,7 @@ const getOpenInferenceAttributes = (attributes: Attributes): Attributes => {
         }),
       };
     }
-    case AISemanticConventions.RESULT_TOOL_CALLS:
+    case AISemanticConventions.RESPONSE_TOOL_CALLS:
       return {
         ...openInferenceAttributes,
         ...safelyGetToolCallMessageAttributes(attributes[convention]),
diff --git a/js/packages/openinference-vercel/test/OpenInferenceSpanProcessor.test.ts b/js/packages/openinference-vercel/test/OpenInferenceSpanProcessor.test.ts
index 6c40b1102..d80347523 100644
--- a/js/packages/openinference-vercel/test/OpenInferenceSpanProcessor.test.ts
+++ b/js/packages/openinference-vercel/test/OpenInferenceSpanProcessor.test.ts
@@ -165,7 +165,7 @@ const generateVercelAttributeTestCases = (): SpanProcessorTestCase[] => {
         ],
       );
       break;
-    case AISemanticConventions.RESULT_TEXT:
+    case AISemanticConventions.RESPONSE_TEXT:
       testCases.push([
         `${vercelSemanticConvention} to ${SemanticConventions.OUTPUT_VALUE} with MIME type ${MimeType.TEXT}`,
         {
@@ -182,7 +182,7 @@ const generateVercelAttributeTestCases = (): SpanProcessorTestCase[] => {
         },
       ]);
       break;
-    case AISemanticConventions.RESULT_OBJECT:
+    case AISemanticConventions.RESPONSE_OBJECT:
       testCases.push([
         `${vercelSemanticConvention} to ${SemanticConventions.OUTPUT_VALUE} with MIME type ${MimeType.JSON}`,
         {
@@ -201,7 +201,7 @@ const generateVercelAttributeTestCases = (): SpanProcessorTestCase[] => {
         },
       ]);
       break;
-    case AISemanticConventions.RESULT_TOOL_CALLS: {
+    case AISemanticConventions.RESPONSE_TOOL_CALLS: {
       const firstOutputMessageToolPrefix = `${SemanticConventions.LLM_OUTPUT_MESSAGES}.0.${SemanticConventions.MESSAGE_TOOL_CALLS}`;
       testCases.push([
         `${vercelSemanticConvention} to ${SemanticConventions.MESSAGE_TOOL_CALLS} on ${SemanticConventions.LLM_OUTPUT_MESSAGES}`,
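
Reviewer note: a minimal TypeScript sketch of what the ai.result.* to ai.response.* rename means for emitted span attributes. It assumes AI_PREFIX expands to the literal "ai" (AI_PREFIX is defined in AISemanticConventions.ts but outside the hunks above); the routing comments restate the constants.ts mapping and the updated tests rather than introduce anything new.

// Keys the renamed constants expand to, with the pre-change forms in comments.
// Assumes AI_PREFIX === "ai" (not shown in this diff).
const RESPONSE_TEXT = "ai.response.text"; // was "ai.result.text"
const RESPONSE_TOOL_CALLS = "ai.response.toolCalls"; // was "ai.result.toolCalls"
const RESPONSE_OBJECT = "ai.response.object"; // was "ai.result.object"

// How AISemConvToOISemConvMap routes them, per the constants.ts hunk and the
// expectations in OpenInferenceSpanProcessor.test.ts:
//   "ai.response.text"      -> SemanticConventions.OUTPUT_VALUE (MIME type text)
//   "ai.response.object"    -> SemanticConventions.OUTPUT_VALUE (MIME type JSON)
//   "ai.response.toolCalls" -> SemanticConventions.MESSAGE_TOOL_CALLS on the
//                              first LLM_OUTPUT_MESSAGES entry (index 0, per
//                              the test's firstOutputMessageToolPrefix)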