Merge pull request #2012 from Giskard-AI/fix/gemini-token-computation
Fixed token count on Gemini client
kevinmessiaen authored Aug 29, 2024
2 parents bee132d + 487c0f0 commit f6c665e
Showing 2 changed files with 5 additions and 3 deletions.
4 changes: 2 additions & 2 deletions giskard/llm/client/gemini.py
@@ -79,8 +79,8 @@ def complete(
             raise LLMConfigurationError(AUTH_ERROR_MESSAGE) from err
 
         self.logger.log_call(
-            prompt_tokens=self._client.count_tokens([m.content for m in messages]),
-            sampled_tokens=self._client.count_tokens(completion.text),
+            prompt_tokens=self._client.count_tokens([m.content for m in messages]).total_tokens,
+            sampled_tokens=self._client.count_tokens(completion.text).total_tokens,
             model=self.model,
             client_class=self.__class__.__name__,
             caller_id=caller_id,
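Context for the fix: in the google-generativeai SDK, count_tokens returns a CountTokensResponse object rather than a plain integer, so the old code handed that whole object to log_call where an int was expected. A minimal sketch of the API shape (the model name is illustrative, and the printed count is approximate):

    import google.generativeai as genai

    model = genai.GenerativeModel("gemini-1.5-flash")  # illustrative model name

    response = model.count_tokens("Why is the sky blue?")
    print(response)               # a CountTokensResponse, e.g. "total_tokens: 8"
    print(response.total_tokens)  # the plain int the token logger expects

Accessing total_tokens on the response, as the diff above now does, yields the integer count.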
4 changes: 3 additions & 1 deletion tests/llm/test_llm_client.py
@@ -136,7 +136,9 @@ def test_gemini_client():
         return_value=Mock(text="This is a test!", candidates=[Mock(content=Mock(role="assistant"))])
     )
     gemini_api_client.count_tokens = MagicMock(
-        side_effect=lambda text: sum(len(t.split()) for t in text) if isinstance(text, list) else len(text.split())
+        side_effect=lambda text: Mock(
+            total_tokens=sum(len(t.split()) for t in text) if isinstance(text, list) else len(text.split())
+        )
     )
 
     # Initialize the GeminiClient with the mocked gemini_api_client
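The updated mock mirrors that response shape: wrapping the word count in Mock(total_tokens=...) gives the stub an object exposing a total_tokens attribute, just like the real client, instead of returning a bare int. An illustrative call (names follow the test above; this is not part of the test file):

    result = gemini_api_client.count_tokens("This is a test!")
    assert result.total_tokens == 4  # whitespace word count stands in for real tokenization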
