From c907eae8eb368a1018360700c8f28172add54e64 Mon Sep 17 00:00:00 2001
From: Maksym
Date: Wed, 28 Feb 2024 15:44:50 -0500
Subject: [PATCH] LLMFactCheck

---
 src/get_result.py | 12 ++----------
 src/processing.py |  9 ++++++++-
 2 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/src/get_result.py b/src/get_result.py
index d6189fd..bb05fc2 100644
--- a/src/get_result.py
+++ b/src/get_result.py
@@ -23,22 +23,14 @@ def get_result(model_info, prompt, model_type):
     else:
         # If using a standalone Llama model
         model = model_info
-        full_prompt = prompt
-        prompt = full_prompt
-        prompt_template=f'''SYSTEM: You are a computational biologist tasked with evaluating scientific claims. Your role requires you to apply critical thinking and your expertise to interpret data and research findings accurately. When responding, please start with 'Yes' or 'No' to directly address the query posed. Follow this with a comprehensive justification of your decision, integrating relevant scientific knowledge, the specifics of the case at hand, and any potential implications or nuances that may influence the interpretation of the evidence provided.
-
-        USER: {prompt}
-
-        ASSISTANT:
-
-        '''
+        prompt_template = prompt
         prompt_chunks = [prompt_template]
         result_text = ""
         for chunk in prompt_chunks:
             # Interact with the Llama model
             print(chunk)
             response = model(prompt=chunk, max_tokens=256, temperature=0.01,
-                             top_p=0.95, repeat_penalty=1.2, top_k=150, echo=True)
+                             top_p=0.95, repeat_penalty=1.2, top_k=150, echo=False)
             result_text += response["choices"][0]["text"]
             print(result_text)
     return result_text
diff --git a/src/processing.py b/src/processing.py
index 5412d0f..6ddc12e 100644
--- a/src/processing.py
+++ b/src/processing.py
@@ -70,9 +70,16 @@ def process_triple(model_info, sentence_id, sentence, triple,
     #time.sleep(5) # Sleep for 3 seconds
     if (triple_sentence_id, predicate_id) not in progress:
         prompt = create_prompt(triple_text, sentence, model_info[1] if use_icl else "")
+        prompt=f'''SYSTEM: You are a computational biologist tasked with evaluating scientific claims. Your role requires you to apply critical thinking and your expertise to interpret data and research findings accurately. When responding, please start with 'Yes' or 'No' to directly address the query posed. Follow this with a comprehensive justification of your decision, integrating relevant scientific knowledge, the specifics of the case at hand, and any potential implications or nuances that may influence the interpretation of the evidence provided.
+
+        USER: {prompt}
+
+        ASSISTANT:
+
+        '''
         result = get_result(model_info, prompt, model_key)
-        question = prompt
+        question=prompt
         is_correct, answer = process_result(result)
         write_result_to_csv(console_results_writer, predicate_id, triple_text,
                             sentence_id, sentence, is_correct, question,
                             answer.strip() if answer else None)