Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: remove token on crewai kickoff chain span #1213

Merged
merged 12 commits into from
Jan 17, 2025
Original file line number Diff line number Diff line change
Expand Up @@ -214,20 +214,6 @@ def __call__(
)
try:
crew_output = wrapped(*args, **kwargs)
usage_metrics = crew.usage_metrics
if isinstance(usage_metrics, dict):
if (prompt_tokens := usage_metrics.get("prompt_tokens")) is not None:
span.set_attribute(LLM_TOKEN_COUNT_PROMPT, int(prompt_tokens))
if (completion_tokens := usage_metrics.get("completion_tokens")) is not None:
span.set_attribute(LLM_TOKEN_COUNT_COMPLETION, int(completion_tokens))
if (total_tokens := usage_metrics.get("total_tokens")) is not None:
span.set_attribute(LLM_TOKEN_COUNT_TOTAL, int(total_tokens))
else:
# version 0.51 and onwards
span.set_attribute(LLM_TOKEN_COUNT_PROMPT, usage_metrics.prompt_tokens)
span.set_attribute(LLM_TOKEN_COUNT_COMPLETION, usage_metrics.completion_tokens)
span.set_attribute(LLM_TOKEN_COUNT_TOTAL, usage_metrics.total_tokens)

except Exception as exception:
span.set_status(trace_api.Status(trace_api.StatusCode.ERROR, str(exception)))
span.record_exception(exception)
Expand Down Expand Up @@ -298,6 +284,3 @@ def __call__(
OPENINFERENCE_SPAN_KIND = SpanAttributes.OPENINFERENCE_SPAN_KIND
OUTPUT_VALUE = SpanAttributes.OUTPUT_VALUE
OUTPUT_MIME_TYPE = SpanAttributes.OUTPUT_MIME_TYPE
LLM_TOKEN_COUNT_PROMPT = SpanAttributes.LLM_TOKEN_COUNT_PROMPT
LLM_TOKEN_COUNT_COMPLETION = SpanAttributes.LLM_TOKEN_COUNT_COMPLETION
LLM_TOKEN_COUNT_TOTAL = SpanAttributes.LLM_TOKEN_COUNT_TOTAL
Original file line number Diff line number Diff line change
Expand Up @@ -113,9 +113,12 @@ def test_crewai_instrumentation(
checked_spans += 1
assert attributes.get("openinference.span.kind") == "CHAIN"
assert attributes.get("output.value")
assert attributes.get("llm.token_count.prompt") == 5751
assert attributes.get("llm.token_count.completion") == 1793
assert attributes.get("llm.token_count.total") == 7544
        # assert that there are no tokens on the kickoff chain so that we do not
        # double count tokens when a user is also instrumenting with another
        # instrumentor that provides token counts via its spans.
assert attributes.get("llm.token_count.prompt") is None
assert attributes.get("llm.token_count.completion") is None
assert attributes.get("llm.token_count.total") is None
assert span.status.is_ok
elif span.name == "ToolUsage._use":
checked_spans += 1
Expand Down
4 changes: 2 additions & 2 deletions python/tox.ini
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ envlist =
py3{9,12}-ci-{dspy,dspy-latest}
py3{9,12}-ci-{langchain,langchain-latest}
; py3{9,12}-ci-{guardrails,guardrails-latest}
; py3{10,12}-ci-{crewai,crewai-latest}
py3{10,12}-ci-{crewai,crewai-latest}
py3{9,12}-ci-{haystack,haystack-latest}
py3{8,12}-ci-{groq,groq-latest}
py3{8,12}-ci-{litellm,litellm-latest}
Expand Down Expand Up @@ -74,7 +74,7 @@ commands_pre =
guardrails: uv pip install -r test-requirements.txt
guardrails-latest: uv pip install -U 'guardrails-ai<0.5.1' 'httpx<0.28'
crewai: uv pip install --reinstall {toxinidir}/instrumentation/openinference-instrumentation-crewai[test]
crewai-latest: uv pip install -U crewai 'httpx<0.28'
crewai-latest: uv pip install -U 'crewai==0.55.2' 'httpx<0.28'
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Works up to 0.55.2 -- running with this for now and will come back to fix it for versions after this.

haystack: uv pip install --reinstall {toxinidir}/instrumentation/openinference-instrumentation-haystack[test]
haystack-latest: uv pip install -U haystack-ai 'httpx<0.28'
groq: uv pip install --reinstall {toxinidir}/instrumentation/openinference-instrumentation-groq[test]
Expand Down
Loading