From c3833c4c0fa6297ee353048e62077576c0f89e65 Mon Sep 17 00:00:00 2001
From: Maksym
Date: Sat, 17 Feb 2024 21:45:19 -0500
Subject: [PATCH] LLMFactCheck

---
 src/load_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/load_model.py b/src/load_model.py
index 92b53bf..e61d346 100644
--- a/src/load_model.py
+++ b/src/load_model.py
@@ -28,7 +28,7 @@ def load_model(model_type, use_icl):
     if model_type == 'llama':
         # Load a Llama model
         model_name = "TheBloke/Llama-2-70B-Chat-GGUF"
-        model_path = hf_hub_download(repo_id=model_name, filename="llama-2-70b-chat.Q2_K.gguf")
+        model_path = hf_hub_download(repo_id=model_name, filename="llama-2-70b-chat.Q5_K_S.gguf")
         model = Llama(model_path=model_path, n_threads=10, n_batch=512, n_gpu_layers=64, n=128, mlock=True)
         if use_icl:
             return prepare_icl(model, model_type)
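
Note for reviewers (not part of the patch): a minimal sketch of how the changed branch could be exercised to confirm the heavier Q5_K_S GGUF still downloads and loads. It assumes llama-cpp-python and huggingface_hub are installed, that src/ is importable, and that load_model returns the llama_cpp.Llama instance when use_icl is False; none of that is shown in this diff.

# Hypothetical smoke test, not included in the patch.
from src.load_model import load_model

# With use_icl=False, load_model is assumed to return the llama_cpp.Llama object directly.
model = load_model(model_type='llama', use_icl=False)

# llama_cpp.Llama instances are callable; a short completion checks that the
# Q5_K_S weights were fetched by hf_hub_download and loaded without error.
output = model("Q: What is the capital of France? A:", max_tokens=16)
print(output["choices"][0]["text"])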