diff --git a/js/.changeset/cool-wasps-float.md b/js/.changeset/cool-wasps-float.md
new file mode 100644
index 000000000..2a4ef28ef
--- /dev/null
+++ b/js/.changeset/cool-wasps-float.md
@@ -0,0 +1,7 @@
+---
+"@arizeai/openinference-instrumentation-openai": minor
+"@arizeai/openinference-semantic-conventions": minor
+"@arizeai/openinference-vercel": minor
+---
+
+Support tool_call_id and tool_call.id
diff --git a/js/packages/openinference-instrumentation-openai/src/instrumentation.ts b/js/packages/openinference-instrumentation-openai/src/instrumentation.ts
index 580b95f4b..7bdfd4219 100644
--- a/js/packages/openinference-instrumentation-openai/src/instrumentation.ts
+++ b/js/packages/openinference-instrumentation-openai/src/instrumentation.ts
@@ -503,15 +503,21 @@ function getChatCompletionInputMessageAttributes(
     case "assistant":
       if (message.tool_calls) {
         message.tool_calls.forEach((toolCall, index) => {
+          const toolCallIndexPrefix = `${SemanticConventions.MESSAGE_TOOL_CALLS}.${index}.`;
+
+          // Add the tool call id if it exists
+          if (toolCall.id) {
+            attributes[
+              `${toolCallIndexPrefix}${SemanticConventions.TOOL_CALL_ID}`
+            ] = toolCall.id;
+          }
           // Make sure the tool call has a function
           if (toolCall.function) {
-            const toolCallIndexPrefix = `${SemanticConventions.MESSAGE_TOOL_CALLS}.${index}.`;
             attributes[
-              toolCallIndexPrefix + SemanticConventions.TOOL_CALL_FUNCTION_NAME
+              `${toolCallIndexPrefix}${SemanticConventions.TOOL_CALL_FUNCTION_NAME}`
             ] = toolCall.function.name;
             attributes[
-              toolCallIndexPrefix +
-                SemanticConventions.TOOL_CALL_FUNCTION_ARGUMENTS_JSON
+              `${toolCallIndexPrefix}${SemanticConventions.TOOL_CALL_FUNCTION_ARGUMENTS_JSON}`
             ] = toolCall.function.arguments;
           }
         });
@@ -521,8 +527,10 @@ function getChatCompletionInputMessageAttributes(
       attributes[SemanticConventions.MESSAGE_FUNCTION_CALL_NAME] = message.name;
       break;
     case "tool":
-      // There's nothing to add for the tool. There is a tool_id, but there are no
-      // semantic conventions for it
+      if (message.tool_call_id) {
+        attributes[`${SemanticConventions.MESSAGE_TOOL_CALL_ID}`] =
+          message.tool_call_id;
+      }
       break;
     case "system":
       // There's nothing to add for the system. Content is captured above
@@ -613,6 +621,12 @@ function getChatCompletionOutputMessageAttributes(
   if (message.tool_calls) {
     message.tool_calls.forEach((toolCall, index) => {
       const toolCallIndexPrefix = `${SemanticConventions.MESSAGE_TOOL_CALLS}.${index}.`;
+      // Add the tool call id if it exists
+      if (toolCall.id) {
+        attributes[
+          `${toolCallIndexPrefix}${SemanticConventions.TOOL_CALL_ID}`
+        ] = toolCall.id;
+      }
       // Double check that the tool call has a function
       // NB: OpenAI only supports tool calls with functions right now but this may change
       if (toolCall.function) {
@@ -759,6 +773,12 @@ function getToolAndFunctionCallAttributesFromStreamChunk(
   if (choice.delta.tool_calls) {
     choice.delta.tool_calls.forEach((toolCall, index) => {
       const toolCallIndexPrefix = `${SemanticConventions.MESSAGE_TOOL_CALLS}.${index}.`;
+      // Add the tool call id if it exists
+      if (toolCall.id) {
+        attributes[
+          `${toolCallIndexPrefix}${SemanticConventions.TOOL_CALL_ID}`
+        ] = toolCall.id;
+      }
       // Double check that the tool call has a function
       // NB: OpenAI only supports tool calls with functions right now but this may change
       if (toolCall.function) {
diff --git a/js/packages/openinference-instrumentation-openai/test/openai.test.ts b/js/packages/openinference-instrumentation-openai/test/openai.test.ts
index c39630336..85a826176 100644
--- a/js/packages/openinference-instrumentation-openai/test/openai.test.ts
+++ b/js/packages/openinference-instrumentation-openai/test/openai.test.ts
@@ -398,6 +398,7 @@ describe("OpenAIInstrumentation", () => {
         "llm.output_messages.0.message.role": "assistant",
         "llm.output_messages.0.message.tool_calls.0.tool_call.function.arguments": "{}",
         "llm.output_messages.0.message.tool_calls.0.tool_call.function.name": "getCurrentLocation",
+        "llm.output_messages.0.message.tool_calls.0.tool_call.id": "call_5ERYvu4iTGSvDlcDQjDP3g3J",
         "llm.provider": "openai",
         "llm.system": "openai",
         "llm.token_count.completion": 7,
@@ -420,8 +421,10 @@ describe("OpenAIInstrumentation", () => {
         "llm.input_messages.1.message.role": "assistant",
         "llm.input_messages.1.message.tool_calls.0.tool_call.function.arguments": "{}",
         "llm.input_messages.1.message.tool_calls.0.tool_call.function.name": "getCurrentLocation",
+        "llm.input_messages.1.message.tool_calls.0.tool_call.id": "call_5ERYvu4iTGSvDlcDQjDP3g3J",
         "llm.input_messages.2.message.content": "Boston",
         "llm.input_messages.2.message.role": "tool",
+        "llm.input_messages.2.message.tool_call_id": "call_5ERYvu4iTGSvDlcDQjDP3g3J",
         "llm.invocation_parameters": "{"model":"gpt-3.5-turbo","tools":[{"type":"function","function":{"name":"getCurrentLocation","parameters":{"type":"object","properties":{}},"description":"Get the current location of the user."}},{"type":"function","function":{"name":"getWeather","parameters":{"type":"object","properties":{"location":{"type":"string"}}},"description":"Get the weather for a location."}}],"tool_choice":"auto","stream":false}",
         "llm.model_name": "gpt-3.5-turbo-0613",
         "llm.output_messages.0.message.role": "assistant",
         "llm.output_messages.0.message.tool_calls.0.tool_call.function.arguments": "{ "location": "Boston" }",
         "llm.output_messages.0.message.tool_calls.0.tool_call.function.name": "getWeather",
+        "llm.output_messages.0.message.tool_calls.0.tool_call.id": "call_0LCdYLkdRUt3rV3dawoIFHBf",
         "llm.provider": "openai",
         "llm.system": "openai",
         "llm.token_count.completion": 15,
@@ -451,15 +455,19 @@ describe("OpenAIInstrumentation", () => {
         "llm.input_messages.1.message.role": "assistant",
"llm.input_messages.1.message.tool_calls.0.tool_call.function.arguments": "{}", "llm.input_messages.1.message.tool_calls.0.tool_call.function.name": "getCurrentLocation", + "llm.input_messages.1.message.tool_calls.0.tool_call.id": "call_5ERYvu4iTGSvDlcDQjDP3g3J", "llm.input_messages.2.message.content": "Boston", "llm.input_messages.2.message.role": "tool", + "llm.input_messages.2.message.tool_call_id": "call_5ERYvu4iTGSvDlcDQjDP3g3J", "llm.input_messages.3.message.role": "assistant", "llm.input_messages.3.message.tool_calls.0.tool_call.function.arguments": "{ "location": "Boston" }", "llm.input_messages.3.message.tool_calls.0.tool_call.function.name": "getWeather", + "llm.input_messages.3.message.tool_calls.0.tool_call.id": "call_0LCdYLkdRUt3rV3dawoIFHBf", "llm.input_messages.4.message.content": "{"temperature":52,"precipitation":"rainy"}", "llm.input_messages.4.message.role": "tool", + "llm.input_messages.4.message.tool_call_id": "call_0LCdYLkdRUt3rV3dawoIFHBf", "llm.invocation_parameters": "{"model":"gpt-3.5-turbo","tools":[{"type":"function","function":{"name":"getCurrentLocation","parameters":{"type":"object","properties":{}},"description":"Get the current location of the user."}},{"type":"function","function":{"name":"getWeather","parameters":{"type":"object","properties":{"location":{"type":"string"}}},"description":"Get the weather for a location."}}],"tool_choice":"auto","stream":false}", "llm.model_name": "gpt-3.5-turbo-0613", "llm.output_messages.0.message.content": "The weather in Boston this week is expected to be rainy with a temperature of 52 degrees.", @@ -598,6 +606,7 @@ describe("OpenAIInstrumentation", () => { "llm.output_messages.0.message.role": "assistant", "llm.output_messages.0.message.tool_calls.0.tool_call.function.arguments": "{}", "llm.output_messages.0.message.tool_calls.0.tool_call.function.name": "getWeather", + "llm.output_messages.0.message.tool_calls.0.tool_call.id": "call_PGkcUg2u6vYrCpTn0e9ofykY", "llm.provider": "openai", "llm.system": "openai", "llm.tools.0.tool.json_schema": "{"type":"function","function":{"name":"getCurrentLocation","parameters":{"type":"object","properties":{}},"description":"Get the current location of the user."}}", diff --git a/js/packages/openinference-semantic-conventions/src/trace/SemanticConventions.ts b/js/packages/openinference-semantic-conventions/src/trace/SemanticConventions.ts index 0c36669e9..97c6d66fe 100644 --- a/js/packages/openinference-semantic-conventions/src/trace/SemanticConventions.ts +++ b/js/packages/openinference-semantic-conventions/src/trace/SemanticConventions.ts @@ -74,6 +74,7 @@ export const MessageAttributePostfixes = { function_call_name: "function_call_name", function_call_arguments_json: "function_call_arguments_json", tool_calls: "tool_calls", + tool_call_id: "tool_call_id", } as const; export const MessageContentsAttributePostfixes = { @@ -89,6 +90,7 @@ export const ImageAttributesPostfixes = { export const ToolCallAttributePostfixes = { function_name: "function.name", function_arguments_json: "function.arguments", + id: "id", } as const; export const DocumentAttributePostfixes = { @@ -202,6 +204,12 @@ export const MESSAGE_NAME = export const MESSAGE_TOOL_CALLS = `${SemanticAttributePrefixes.message}.${MessageAttributePostfixes.tool_calls}` as const; +/** + * The id of the tool call on a "tool" role message + */ +export const MESSAGE_TOOL_CALL_ID = + `${SemanticAttributePrefixes.message}.${MessageAttributePostfixes.tool_call_id}` as const; + /** * tool_call.function.name */ @@ -214,6 +222,12 @@ 
 export const TOOL_CALL_FUNCTION_ARGUMENTS_JSON =
   `${SemanticAttributePrefixes.tool_call}.${ToolCallAttributePostfixes.function_arguments_json}` as const;
 
+/**
+ * The id of the tool call
+ */
+export const TOOL_CALL_ID =
+  `${SemanticAttributePrefixes.tool_call}.${ToolCallAttributePostfixes.id}` as const;
+
 /**
  * The LLM function call function name
  */
@@ -430,6 +444,8 @@ export const SemanticConventions = {
   MESSAGE_ROLE,
   MESSAGE_NAME,
   MESSAGE_TOOL_CALLS,
+  MESSAGE_TOOL_CALL_ID,
+  TOOL_CALL_ID,
   TOOL_CALL_FUNCTION_NAME,
   TOOL_CALL_FUNCTION_ARGUMENTS_JSON,
   MESSAGE_FUNCTION_CALL_NAME,
diff --git a/js/packages/openinference-vercel/src/AISemanticConventions.ts b/js/packages/openinference-vercel/src/AISemanticConventions.ts
index e854f739a..0590f1ea7 100644
--- a/js/packages/openinference-vercel/src/AISemanticConventions.ts
+++ b/js/packages/openinference-vercel/src/AISemanticConventions.ts
@@ -32,6 +32,7 @@ const AIPromptPostfixes = {
 } as const;
 
 const AIToolCallPostfixes = {
+  id: "id",
   name: "name",
   args: "args",
   result: "result",
@@ -68,6 +69,8 @@ const EMBEDDING_VECTOR = `${AI_PREFIX}.embedding` as const;
 const EMBEDDING_TEXTS = `${AI_PREFIX}.values` as const;
 const EMBEDDING_VECTORS = `${AI_PREFIX}.embeddings` as const;
 
+const TOOL_CALL_ID =
+  `${AI_PREFIX}.${AIPrefixes.toolCall}.${AIToolCallPostfixes.id}` as const;
 const TOOL_CALL_NAME =
   `${AI_PREFIX}.${AIPrefixes.toolCall}.${AIToolCallPostfixes.name}` as const;
 const TOOL_CALL_ARGS =
@@ -94,6 +97,7 @@ export const AISemanticConventions = {
   EMBEDDING_VECTOR,
   EMBEDDING_TEXTS,
   EMBEDDING_VECTORS,
+  TOOL_CALL_ID,
   TOOL_CALL_NAME,
   TOOL_CALL_ARGS,
   TOOL_CALL_RESULT,
diff --git a/js/packages/openinference-vercel/src/constants.ts b/js/packages/openinference-vercel/src/constants.ts
index 13c58f085..99c64742f 100644
--- a/js/packages/openinference-vercel/src/constants.ts
+++ b/js/packages/openinference-vercel/src/constants.ts
@@ -57,6 +57,7 @@ export const AISemConvToOISemConvMap: Record<
   [AISemanticConventions.EMBEDDING_TEXTS]: SemanticConventions.EMBEDDING_TEXT,
   [AISemanticConventions.EMBEDDING_VECTORS]:
     SemanticConventions.EMBEDDING_VECTOR,
+  [AISemanticConventions.TOOL_CALL_ID]: SemanticConventions.TOOL_CALL_ID,
   [AISemanticConventions.TOOL_CALL_NAME]: SemanticConventions.TOOL_NAME,
   [AISemanticConventions.TOOL_CALL_ARGS]: SemanticConventions.TOOL_PARAMETERS,
   [AISemanticConventions.TOOL_CALL_RESULT]: SemanticConventions.OUTPUT_VALUE,
diff --git a/js/packages/openinference-vercel/src/utils.ts b/js/packages/openinference-vercel/src/utils.ts
index 18bef0023..c008a7050 100644
--- a/js/packages/openinference-vercel/src/utils.ts
+++ b/js/packages/openinference-vercel/src/utils.ts
@@ -400,6 +400,11 @@ const getOpenInferenceAttributes = (attributes: Attributes): Attributes => {
         ...openInferenceAttributes,
         [openInferenceKey]: attributes[convention],
       };
+    case AISemanticConventions.TOOL_CALL_ID:
+      return {
+        ...openInferenceAttributes,
+        [openInferenceKey]: attributes[convention],
+      };
     case AISemanticConventions.TOOL_CALL_NAME:
       return {
         ...openInferenceAttributes,
diff --git a/js/packages/openinference-vercel/test/OpenInferenceSpanProcessor.test.ts b/js/packages/openinference-vercel/test/OpenInferenceSpanProcessor.test.ts
index 7ed0fdd48..6c40b1102 100644
--- a/js/packages/openinference-vercel/test/OpenInferenceSpanProcessor.test.ts
+++ b/js/packages/openinference-vercel/test/OpenInferenceSpanProcessor.test.ts
@@ -402,6 +402,22 @@ const generateVercelAttributeTestCases = (): SpanProcessorTestCase[] => {
         },
       ]);
       break;
+    case AISemanticConventions.TOOL_CALL_ID:
+      testCases.push([
+        `${vercelSemanticConvention} to ${SemanticConventions.TOOL_CALL_ID}`,
+        {
+          vercelFunctionName: "ai.toolCall",
+          vercelAttributes: {
+            [vercelSemanticConvention]: "test-tool-id",
+          },
+          addedOpenInferenceAttributes: {
+            [SemanticConventions.TOOL_CALL_ID]: "test-tool-id",
+            [SemanticConventions.OPENINFERENCE_SPAN_KIND]:
+              OpenInferenceSpanKind.TOOL,
+          },
+        },
+      ]);
+      break;
     case AISemanticConventions.TOOL_CALL_NAME:
       testCases.push([
         `${vercelSemanticConvention} to ${SemanticConventions.TOOL_NAME}`,
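
---

Reviewer note (not part of the patch): a minimal sketch of how the new constants compose into the flat attribute keys the snapshots above assert. It uses only exports introduced or touched by this diff; the `attributes` record, the index `0`, and the sample id values are illustrative.

```typescript
import { SemanticConventions } from "@arizeai/openinference-semantic-conventions";

const attributes: Record<string, string> = {};

// Assistant message carrying a tool call: the id is recorded per tool call
// under the indexed "message.tool_calls.<n>." prefix, as the instrumentation
// above builds it.
const toolCallIndexPrefix = `${SemanticConventions.MESSAGE_TOOL_CALLS}.0.`;
attributes[`${toolCallIndexPrefix}${SemanticConventions.TOOL_CALL_ID}`] =
  "call_5ERYvu4iTGSvDlcDQjDP3g3J";
// key: "message.tool_calls.0.tool_call.id"

// A "tool" role message echoing the id of the call it answers:
attributes[SemanticConventions.MESSAGE_TOOL_CALL_ID] =
  "call_5ERYvu4iTGSvDlcDQjDP3g3J";
// key: "message.tool_call_id"
```

The OpenAI instrumentation then nests these keys under `llm.input_messages.<n>.` / `llm.output_messages.<n>.`, which is exactly what snapshot keys such as `llm.input_messages.2.message.tool_call_id` assert in openai.test.ts. On the Vercel side, `ai.toolCall.id` maps straight to `tool_call.id` on a `TOOL` span, per the new `AISemConvToOISemConvMap` entry and the added span-processor test case.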