LLMFactCheck
mlupei committed Feb 18, 2024
1 parent 1790b42 · commit eff6beb
Showing 2 changed files with 3 additions and 3 deletions.
src/get_result.py (2 changes: 1 addition & 1 deletion)

```diff
@@ -23,7 +23,7 @@ def get_result(model_info, prompt, model_type):
         # If using a standalone Llama model
         model = model_info
         full_prompt = prompt
-        prompt = full_prompt[3:]
+        prompt = full_prompt
         prompt_template=f'''SYSTEM: You are scientist. Read carefully and answer only yes or no.
         USER: {prompt}
```
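This change stops trimming the first three characters off the incoming prompt (`full_prompt[3:]`) and passes it through unchanged. For context, here is a minimal sketch of how this standalone branch might run the resulting template through llama-cpp-python; the `ask_yes_no` helper and the `max_tokens`/`stop` values are illustrative assumptions, not code from this commit:

```python
from llama_cpp import Llama

def ask_yes_no(model: Llama, prompt: str) -> str:
    # Hypothetical helper: after this commit the prompt is used as-is;
    # previously full_prompt[3:] dropped its first three characters.
    prompt_template = f'''SYSTEM: You are scientist. Read carefully and answer only yes or no.
USER: {prompt}
ASSISTANT:'''
    # Llama instances are callable; max_tokens and stop are assumed values.
    output = model(prompt_template, max_tokens=8, stop=["USER:"])
    return output["choices"][0]["text"].strip()
```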
src/load_model.py (4 changes: 2 additions & 2 deletions)

```diff
@@ -27,8 +27,8 @@ def load_model(model_type, use_icl):
     """
     if model_type == 'llama':
         # Load a Llama model
-        model_name = "TheBloke/Llama-2-70B-Chat-GGUF"
-        model_path = hf_hub_download(repo_id=model_name, filename="llama-2-70b-chat.Q4_K_M.gguf")
+        model_name = "TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
+        model_path = hf_hub_download(repo_id=model_name, filename="mixtral-8x7b-instruct-v0.1.Q5_K_M.gguf")
         model = Llama(model_path=model_path, n_threads=10, n_batch=512, n_gpu_layers=128, n=128, mlock=True)
         if use_icl:
             return prepare_icl(model, model_type)
```
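This hunk swaps the standalone GGUF backend from Llama-2-70B-Chat to Mixtral-8x7B-Instruct-v0.1. Below is a self-contained sketch of the same loading path, assuming `huggingface_hub` and `llama-cpp-python` are installed. Note two assumptions: current llama-cpp-python documents the memory-locking flag as `use_mlock` (the diff passes `mlock=True`), and `n=128` is not a documented `Llama` argument, so the sketch omits it:

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Fetch the quantized Mixtral weights from the Hugging Face Hub
# (a large download, tens of GB; cached locally after the first run).
model_name = "TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF"
model_path = hf_hub_download(
    repo_id=model_name,
    filename="mixtral-8x7b-instruct-v0.1.Q5_K_M.gguf",
)

# Parameter values mirror the diff; use_mlock is the documented
# spelling of the memory-locking flag in llama-cpp-python.
model = Llama(
    model_path=model_path,
    n_threads=10,      # CPU threads used for generation
    n_batch=512,       # tokens processed per batch during prompt eval
    n_gpu_layers=128,  # offload up to 128 layers to the GPU
    use_mlock=True,    # lock model memory to prevent swapping
)
```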
