From 1d20ec726b910388caa30abef169d9354891a885 Mon Sep 17 00:00:00 2001
From: Jeremy Fowers
Date: Mon, 20 Jan 2025 16:10:28 -0500
Subject: [PATCH] Make sure llm-prompt never returns bad characters to the monitor

---
 src/lemonade/tools/chat.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/lemonade/tools/chat.py b/src/lemonade/tools/chat.py
index f3ca496..87c855e 100644
--- a/src/lemonade/tools/chat.py
+++ b/src/lemonade/tools/chat.py
@@ -26,6 +26,10 @@
 END_OF_STREAM = ""
 
 
+def sanitize_string(input_string):
+    return input_string.encode("utf-8", "ignore").decode("utf-8")
+
+
 class LLMPrompt(Tool):
     """
     Send a prompt to an LLM instance and print the response to the screen.
@@ -105,7 +109,7 @@ def run(
         state.save_stat(Keys.PROMPT_TOKENS, len_tokens_in)
         state.save_stat(Keys.PROMPT, prompt)
         state.save_stat(Keys.RESPONSE_TOKENS, len_tokens_out)
-        state.save_stat(Keys.RESPONSE, response_text)
+        state.save_stat(Keys.RESPONSE, sanitize_string(response_text))
 
         return state