diff --git a/js/.changeset/honest-chefs-hang.md b/js/.changeset/honest-chefs-hang.md
new file mode 100644
index 000000000..bd2d149e4
--- /dev/null
+++ b/js/.changeset/honest-chefs-hang.md
@@ -0,0 +1,5 @@
+---
+"@arizeai/openinference-semantic-conventions": minor
+---
+
+Add llm.system and llm.provider to LLMAttributePostfixes record
diff --git a/js/packages/openinference-semantic-conventions/src/trace/SemanticConventions.ts b/js/packages/openinference-semantic-conventions/src/trace/SemanticConventions.ts
index 97c6d66fe..cda84a9d1 100644
--- a/js/packages/openinference-semantic-conventions/src/trace/SemanticConventions.ts
+++ b/js/packages/openinference-semantic-conventions/src/trace/SemanticConventions.ts
@@ -24,6 +24,8 @@ export const SemanticAttributePrefixes = {
 } as const;
 
 export const LLMAttributePostfixes = {
+  provider: "provider",
+  system: "system",
   model_name: "model_name",
   token_count: "token_count",
   input_messages: "input_messages",
@@ -165,12 +167,13 @@ export const LLM_MODEL_NAME =
  * The provider of the inferences. E.g. the cloud provider
  */
 export const LLM_PROVIDER =
-  `${SemanticAttributePrefixes.llm}.provider` as const;
+  `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.provider}` as const;
 
 /**
  * The AI product as identified by the client or server
  */
-export const LLM_SYSTEM = `${SemanticAttributePrefixes.llm}.system` as const;
+export const LLM_SYSTEM =
+  `${SemanticAttributePrefixes.llm}.${LLMAttributePostfixes.system}` as const;
 
 /** Token count for the completion by the llm */
 export const LLM_TOKEN_COUNT_COMPLETION =
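
For context, a minimal sketch of what this refactor means for consumers. It assumes SemanticAttributePrefixes.llm is "llm" and that the package re-exports these symbols from its root entry point (neither is shown in the diff). The change only moves the "provider" and "system" postfixes into the shared LLMAttributePostfixes record; the resulting attribute strings are unchanged, so no span data or downstream parsing should be affected.

```ts
import {
  LLM_PROVIDER,
  LLM_SYSTEM,
  LLMAttributePostfixes,
} from "@arizeai/openinference-semantic-conventions";

// The fully qualified attribute keys still resolve to the same strings
// as before the refactor; only how they are composed has changed.
console.log(LLM_PROVIDER); // "llm.provider"
console.log(LLM_SYSTEM); // "llm.system"

// The postfixes are now addressable on their own, alongside the
// existing entries like model_name and token_count.
console.log(LLMAttributePostfixes.provider); // "provider"
console.log(LLMAttributePostfixes.system); // "system"
```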