From d878e2e7043a83a21af90d9884bf86bd0a0128be Mon Sep 17 00:00:00 2001
From: Maksym
Date: Tue, 27 Feb 2024 21:47:53 -0500
Subject: [PATCH] LLMFactCheck

---
 src/load_model.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/load_model.py b/src/load_model.py
index 4ea9525..71e9645 100644
--- a/src/load_model.py
+++ b/src/load_model.py
@@ -29,7 +29,7 @@ def load_model(model_type, use_icl):
         # Load a Llama model
         model_name = "TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
         model_path = hf_hub_download(repo_id=model_name, filename="mixtral-8x7b-instruct-v0.1.Q5_K_M.gguf")
-        model = Llama(model_path=model_path, n_threads=10000, n_batch=512, n_gpu_layers=5000, n=1280, mlock=True)
+        model = Llama(model_path=model_path, n_threads=100000, n_batch=512, n_gpu_layers=5000, n=1280, mlock=True)
         if use_icl:
             return prepare_icl(model, model_type)
         return model
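
For reviewers, a minimal usage sketch of the patched function (not part of the diff). It assumes llama-cpp-python and huggingface_hub are installed, that src/ is importable, and that a model_type value such as "llama" selects the Mixtral branch shown in the hunk; that selector value is hypothetical and taken only for illustration.

    # Hypothetical usage sketch; not included in the patch.
    # Assumes src/ is on the Python path and that model_type="llama"
    # routes to the branch touched by this change.
    from src.load_model import load_model

    # Download the GGUF file (via hf_hub_download inside load_model) and
    # build the llama-cpp-python model with the patched thread setting.
    model = load_model(model_type="llama", use_icl=False)

    # Llama objects are callable; __call__ runs a completion request.
    output = model(
        "Claim: The mitochondria is the powerhouse of the cell. True or False?",
        max_tokens=32,
        temperature=0.0,
    )
    print(output["choices"][0]["text"])

For reference, in llama-cpp-python n_threads sets the number of CPU threads llama.cpp uses for generation, n_batch is the prompt-processing batch size, and n_gpu_layers controls how many layers are offloaded to the GPU.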