Commit 1d20ec7

Make sure llm-prompt never returns bad characters to the monitor
jeremyfowers committed Jan 20, 2025
1 parent 0a9897e commit 1d20ec7
Showing 1 changed file with 5 additions and 1 deletion.
src/lemonade/tools/chat.py (6 changes: 5 additions & 1 deletion)
@@ -26,6 +26,10 @@
 END_OF_STREAM = "</s>"
 
 
+def sanitize_string(input_string):
+    return input_string.encode("utf-8", "ignore").decode("utf-8")
+
+
 class LLMPrompt(Tool):
     """
     Send a prompt to an LLM instance and print the response to the screen.

@@ -105,7 +109,7 @@ def run(
         state.save_stat(Keys.PROMPT_TOKENS, len_tokens_in)
         state.save_stat(Keys.PROMPT, prompt)
         state.save_stat(Keys.RESPONSE_TOKENS, len_tokens_out)
-        state.save_stat(Keys.RESPONSE, response_text)
+        state.save_stat(Keys.RESPONSE, sanitize_string(response_text))
 
         return state
 
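For reference, here is a minimal, self-contained sketch of how the new helper behaves (the lone-surrogate input is an illustrative example, not taken from the commit): any character that cannot be encoded as UTF-8 is dropped by errors="ignore" before the bytes are decoded back to str, so the value saved under Keys.RESPONSE is always valid UTF-8.

    # Copy of the helper from the diff above, for a standalone example.
    def sanitize_string(input_string):
        return input_string.encode("utf-8", "ignore").decode("utf-8")

    # A lone surrogate (as can appear after lossy decoding of model output)
    # cannot be encoded as UTF-8, so it is silently dropped.
    raw = "hello \udcfe world"
    print(sanitize_string(raw))  # prints "hello  world" (surrogate removed)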
