From f015bca24ce5757e8c7c604487c81889e3e84027 Mon Sep 17 00:00:00 2001 From: Nate Mar <67926244+nate-mar@users.noreply.github.com> Date: Fri, 17 Jan 2025 00:40:38 -0800 Subject: [PATCH] fix: remove token on crewai kickoff chain span (#1213) --- .../instrumentation/crewai/_wrappers.py | 17 ----------------- .../instrumentation/crewai/test_instrumentor.py | 9 ++++++--- python/tox.ini | 4 ++-- 3 files changed, 8 insertions(+), 22 deletions(-) diff --git a/python/instrumentation/openinference-instrumentation-crewai/src/openinference/instrumentation/crewai/_wrappers.py b/python/instrumentation/openinference-instrumentation-crewai/src/openinference/instrumentation/crewai/_wrappers.py index dc510b425..a13865b28 100644 --- a/python/instrumentation/openinference-instrumentation-crewai/src/openinference/instrumentation/crewai/_wrappers.py +++ b/python/instrumentation/openinference-instrumentation-crewai/src/openinference/instrumentation/crewai/_wrappers.py @@ -214,20 +214,6 @@ def __call__( ) try: crew_output = wrapped(*args, **kwargs) - usage_metrics = crew.usage_metrics - if isinstance(usage_metrics, dict): - if (prompt_tokens := usage_metrics.get("prompt_tokens")) is not None: - span.set_attribute(LLM_TOKEN_COUNT_PROMPT, int(prompt_tokens)) - if (completion_tokens := usage_metrics.get("completion_tokens")) is not None: - span.set_attribute(LLM_TOKEN_COUNT_COMPLETION, int(completion_tokens)) - if (total_tokens := usage_metrics.get("total_tokens")) is not None: - span.set_attribute(LLM_TOKEN_COUNT_TOTAL, int(total_tokens)) - else: - # version 0.51 and onwards - span.set_attribute(LLM_TOKEN_COUNT_PROMPT, usage_metrics.prompt_tokens) - span.set_attribute(LLM_TOKEN_COUNT_COMPLETION, usage_metrics.completion_tokens) - span.set_attribute(LLM_TOKEN_COUNT_TOTAL, usage_metrics.total_tokens) - except Exception as exception: span.set_status(trace_api.Status(trace_api.StatusCode.ERROR, str(exception))) span.record_exception(exception) @@ -298,6 +284,3 @@ def __call__( 
OPENINFERENCE_SPAN_KIND = SpanAttributes.OPENINFERENCE_SPAN_KIND OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE -LLM_TOKEN_COUNT_PROMPT = SpanAttributes.LLM_TOKEN_COUNT_PROMPT -LLM_TOKEN_COUNT_COMPLETION = SpanAttributes.LLM_TOKEN_COUNT_COMPLETION -LLM_TOKEN_COUNT_TOTAL = SpanAttributes.LLM_TOKEN_COUNT_TOTAL diff --git a/python/instrumentation/openinference-instrumentation-crewai/tests/openinference/instrumentation/crewai/test_instrumentor.py b/python/instrumentation/openinference-instrumentation-crewai/tests/openinference/instrumentation/crewai/test_instrumentor.py index eb52357e1..30b2005c1 100644 --- a/python/instrumentation/openinference-instrumentation-crewai/tests/openinference/instrumentation/crewai/test_instrumentor.py +++ b/python/instrumentation/openinference-instrumentation-crewai/tests/openinference/instrumentation/crewai/test_instrumentor.py @@ -113,9 +113,12 @@ def test_crewai_instrumentation( checked_spans += 1 assert attributes.get("openinference.span.kind") == "CHAIN" assert attributes.get("output.value") - assert attributes.get("llm.token_count.prompt") == 5751 - assert attributes.get("llm.token_count.completion") == 1793 - assert attributes.get("llm.token_count.total") == 7544 + # assert that there are no tokens on the kickoff chain so that we do not + # double count tokens when a user is also instrumenting with another instrumentor + # that provides token counts via the spans. 
+ assert attributes.get("llm.token_count.prompt") is None + assert attributes.get("llm.token_count.completion") is None + assert attributes.get("llm.token_count.total") is None assert span.status.is_ok elif span.name == "ToolUsage._use": checked_spans += 1 diff --git a/python/tox.ini b/python/tox.ini index d8ca0c2f4..6caf4410b 100644 --- a/python/tox.ini +++ b/python/tox.ini @@ -12,7 +12,7 @@ envlist = py3{9,12}-ci-{dspy,dspy-latest} py3{9,12}-ci-{langchain,langchain-latest} ; py3{9,12}-ci-{guardrails,guardrails-latest} - ; py3{10,12}-ci-{crewai,crewai-latest} + py3{10,12}-ci-{crewai,crewai-latest} py3{9,12}-ci-{haystack,haystack-latest} py3{8,12}-ci-{groq,groq-latest} py3{8,12}-ci-{litellm,litellm-latest} @@ -74,7 +74,7 @@ commands_pre = guardrails: uv pip install -r test-requirements.txt guardrails-latest: uv pip install -U 'guardrails-ai<0.5.1' 'httpx<0.28' crewai: uv pip install --reinstall {toxinidir}/instrumentation/openinference-instrumentation-crewai[test] - crewai-latest: uv pip install -U crewai 'httpx<0.28' + crewai-latest: uv pip install -U 'crewai==0.55.2' 'httpx<0.28' haystack: uv pip install --reinstall {toxinidir}/instrumentation/openinference-instrumentation-haystack[test] haystack-latest: uv pip install -U haystack-ai 'httpx<0.28' groq: uv pip install --reinstall {toxinidir}/instrumentation/openinference-instrumentation-groq[test]